From 38d707c54df4ca58cd9ceae2ddcbd6f606b99e9f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 8 Mar 2024 11:58:54 +0100 Subject: sched/balancing: Change 'enum cpu_idle_type' to have more natural definitions The cpu_idle_type enum has the confusingly inverted property that 'not idle' is 1, and 'idle' is '0'. This resulted in a number of unnecessary complications in the code. Reverse the order, remove the CPU_NOT_IDLE type, and convert all code to a natural boolean form. It's much more readable: - enum cpu_idle_type idle = this_rq->idle_balance ? - CPU_IDLE : CPU_NOT_IDLE; - + enum cpu_idle_type idle = this_rq->idle_balance; -------------------------------- - if (env->idle == CPU_NOT_IDLE || !busiest->sum_nr_running) + if (!env->idle || !busiest->sum_nr_running) -------------------------------- And gets rid of the double negation in these usages: - if (env->idle != CPU_NOT_IDLE && env->src_rq->nr_running <= 1) + if (env->idle && env->src_rq->nr_running <= 1) Furthermore, this makes code much more obvious where there's differentiation between CPU_IDLE and CPU_NEWLY_IDLE. Signed-off-by: Ingo Molnar Reviewed-by: Valentin Schneider Reviewed-by: Vincent Guittot Cc: "Gautham R. Shenoy" Link: https://lore.kernel.org/r/20240308105901.1096078-4-mingo@kernel.org --- include/linux/sched/idle.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h index 478084f9105e..e670ac282333 100644 --- a/include/linux/sched/idle.h +++ b/include/linux/sched/idle.h @@ -5,8 +5,8 @@ #include enum cpu_idle_type { + __CPU_NOT_IDLE = 0, CPU_IDLE, - CPU_NOT_IDLE, CPU_NEWLY_IDLE, CPU_MAX_IDLE_TYPES }; -- cgit From 86dd6c04ef9f213e14d60c9f64bce1cc019f816e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 8 Mar 2024 12:18:08 +0100 Subject: sched/balancing: Rename scheduler_tick() => sched_tick() - Standardize on prefixing scheduler-internal functions defined in with sched_*() prefix. scheduler_tick() was the only function using the scheduler_ prefix. Harmonize it. - The other reason to rename it is the NOHZ scheduler tick handling functions are already named sched_tick_*(). Make the 'git grep sched_tick' more meaningful. Signed-off-by: Ingo Molnar Acked-by: Valentin Schneider Reviewed-by: Shrikanth Hegde Link: https://lore.kernel.org/r/20240308111819.1101550-3-mingo@kernel.org --- Documentation/scheduler/sched-domains.rst | 4 ++-- Documentation/translations/zh_CN/scheduler/sched-domains.rst | 4 ++-- include/linux/sched.h | 2 +- kernel/sched/core.c | 4 ++-- kernel/sched/loadavg.c | 2 +- kernel/time/timer.c | 2 +- kernel/workqueue.c | 2 +- tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/Documentation/scheduler/sched-domains.rst b/Documentation/scheduler/sched-domains.rst index 6577b068f921..541d6c617971 100644 --- a/Documentation/scheduler/sched-domains.rst +++ b/Documentation/scheduler/sched-domains.rst @@ -32,13 +32,13 @@ load of each of its member CPUs, and only when the load of a group becomes out of balance are tasks moved between groups. In kernel/sched/core.c, trigger_load_balance() is run periodically on each CPU -through scheduler_tick(). It raises a softirq after the next regularly scheduled +through sched_tick(). It raises a softirq after the next regularly scheduled rebalancing event for the current runqueue has arrived. 
The actual load balancing workhorse, sched_balance_softirq()->rebalance_domains(), is then run in softirq context (SCHED_SOFTIRQ). The latter function takes two arguments: the runqueue of current CPU and whether -the CPU was idle at the time the scheduler_tick() happened and iterates over all +the CPU was idle at the time the sched_tick() happened and iterates over all sched domains our CPU is on, starting from its base domain and going up the ->parent chain. While doing that, it checks to see if the current domain has exhausted its rebalance interval. If so, it runs load_balance() on that domain. It then checks diff --git a/Documentation/translations/zh_CN/scheduler/sched-domains.rst b/Documentation/translations/zh_CN/scheduler/sched-domains.rst index fbc326668e37..fa0c0bcc6ba5 100644 --- a/Documentation/translations/zh_CN/scheduler/sched-domains.rst +++ b/Documentation/translations/zh_CN/scheduler/sched-domains.rst @@ -34,12 +34,12 @@ CPU共享。任意两个组的CPU掩码的交集不一定为空,如果是这 调度域中的负载均衡发生在调度组中。也就是说,每个组被视为一个实体。组的负载被定义为它 管辖的每个CPU的负载之和。仅当组的负载不均衡后,任务才在组之间发生迁移。 -在kernel/sched/core.c中,trigger_load_balance()在每个CPU上通过scheduler_tick() +在kernel/sched/core.c中,trigger_load_balance()在每个CPU上通过sched_tick() 周期执行。在当前运行队列下一个定期调度再平衡事件到达后,它引发一个软中断。负载均衡真正 的工作由sched_balance_softirq()->rebalance_domains()完成,在软中断上下文中执行 (SCHED_SOFTIRQ)。 -后一个函数有两个入参:当前CPU的运行队列、它在scheduler_tick()调用时是否空闲。函数会从 +后一个函数有两个入参:当前CPU的运行队列、它在sched_tick()调用时是否空闲。函数会从 当前CPU所在的基调度域开始迭代执行,并沿着parent指针链向上进入更高层级的调度域。在迭代 过程中,函数会检查当前调度域是否已经耗尽了再平衡的时间间隔,如果是,它在该调度域运行 load_balance()。接下来它检查父调度域(如果存在),再后来父调度域的父调度域,以此类推。 diff --git a/include/linux/sched.h b/include/linux/sched.h index 17cb0761ff65..7eb7f31af796 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -301,7 +301,7 @@ enum { TASK_COMM_LEN = 16, }; -extern void scheduler_tick(void); +extern void sched_tick(void); #define MAX_SCHEDULE_TIMEOUT LONG_MAX diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d44efa0d0611..71b7a08a6502 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5662,7 +5662,7 @@ static inline u64 cpu_resched_latency(struct rq *rq) { return 0; } * This function gets called by the timer code, with HZ frequency. * We call it with interrupts disabled. */ -void scheduler_tick(void) +void sched_tick(void) { int cpu = smp_processor_id(); struct rq *rq = cpu_rq(cpu); @@ -6585,7 +6585,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) * paths. For example, see arch/x86/entry_64.S. * * To drive preemption between tasks, the scheduler sets the flag in timer - * interrupt handler scheduler_tick(). + * interrupt handler sched_tick(). * * 3. Wakeups don't really cause entry into schedule(). They add a * task to the run-queue and that's it. diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index 52c8f8226b0d..ca9da66cc894 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -379,7 +379,7 @@ void calc_global_load(void) } /* - * Called from scheduler_tick() to periodically update this CPU's + * Called from sched_tick() to periodically update this CPU's * active count. 
*/ void calc_global_load_tick(struct rq *this_rq) diff --git a/kernel/time/timer.c b/kernel/time/timer.c index e69e75d3858c..ff49ddcc9800 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -2478,7 +2478,7 @@ void update_process_times(int user_tick) if (in_irq()) irq_work_tick(); #endif - scheduler_tick(); + sched_tick(); if (IS_ENABLED(CONFIG_POSIX_TIMERS)) run_posix_cpu_timers(); } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index bf2bdac46843..8fbb0ec39079 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1464,7 +1464,7 @@ void wq_worker_sleeping(struct task_struct *task) * wq_worker_tick - a scheduler tick occurred while a kworker is running * @task: task currently running * - * Called from scheduler_tick(). We're in the IRQ context and the current + * Called from sched_tick(). We're in the IRQ context and the current * worker's fields which follow the 'K' locking rule can be accessed safely. */ void wq_worker_tick(struct task_struct *task) diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc index 25432b8cd5bd..073a748b9380 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc @@ -19,7 +19,7 @@ fail() { # mesg FILTER=set_ftrace_filter FUNC1="schedule" -FUNC2="scheduler_tick" +FUNC2="sched_tick" ALL_FUNCS="#### all functions enabled ####" -- cgit From 4c3e509ea9f249458e8692f8298cceac73105948 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Fri, 8 Mar 2024 12:18:11 +0100 Subject: sched/balancing: Rename load_balance() => sched_balance_rq() Standardize scheduler load-balancing function names on the sched_balance_() prefix. Also load_balance() has become somewhat of a misnomer: historically it was the first and primary load-balancing function that was called, but with the introduction of sched domains, it's become a lower layer function that balances runqueues. Rename it to sched_balance_rq() accordingly. Signed-off-by: Ingo Molnar Reviewed-by: Shrikanth Hegde Link: https://lore.kernel.org/r/20240308111819.1101550-6-mingo@kernel.org --- Documentation/scheduler/sched-domains.rst | 4 +-- Documentation/scheduler/sched-stats.rst | 32 +++++++++++----------- .../translations/zh_CN/scheduler/sched-domains.rst | 4 +-- .../translations/zh_CN/scheduler/sched-stats.rst | 30 ++++++++++---------- include/linux/sched/topology.h | 2 +- kernel/sched/fair.c | 10 +++---- 6 files changed, 41 insertions(+), 41 deletions(-) (limited to 'include/linux') diff --git a/Documentation/scheduler/sched-domains.rst b/Documentation/scheduler/sched-domains.rst index 5d8e8b8b269e..5e996fe973b1 100644 --- a/Documentation/scheduler/sched-domains.rst +++ b/Documentation/scheduler/sched-domains.rst @@ -41,11 +41,11 @@ The latter function takes two arguments: the runqueue of current CPU and whether the CPU was idle at the time the sched_tick() happened and iterates over all sched domains our CPU is on, starting from its base domain and going up the ->parent chain. While doing that, it checks to see if the current domain has exhausted its -rebalance interval. If so, it runs load_balance() on that domain. It then checks +rebalance interval. If so, it runs sched_balance_rq() on that domain. It then checks the parent sched_domain (if it exists), and the parent of the parent and so forth. -Initially, load_balance() finds the busiest group in the current sched domain. 
+Initially, sched_balance_rq() finds the busiest group in the current sched domain. If it succeeds, it looks for the busiest runqueue of all the CPUs' runqueues in that group. If it manages to find such a runqueue, it locks both our initial CPU's runqueue and the newly found busiest one and starts moving tasks from it diff --git a/Documentation/scheduler/sched-stats.rst b/Documentation/scheduler/sched-stats.rst index 73c412666655..7c2b16c4729d 100644 --- a/Documentation/scheduler/sched-stats.rst +++ b/Documentation/scheduler/sched-stats.rst @@ -77,53 +77,53 @@ domain 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 The first field is a bit mask indicating what cpus this domain operates over. -The next 24 are a variety of load_balance() statistics in grouped into types +The next 24 are a variety of sched_balance_rq() statistics in grouped into types of idleness (idle, busy, and newly idle): - 1) # of times in this domain load_balance() was called when the + 1) # of times in this domain sched_balance_rq() was called when the cpu was idle - 2) # of times in this domain load_balance() checked but found + 2) # of times in this domain sched_balance_rq() checked but found the load did not require balancing when the cpu was idle - 3) # of times in this domain load_balance() tried to move one or + 3) # of times in this domain sched_balance_rq() tried to move one or more tasks and failed, when the cpu was idle 4) sum of imbalances discovered (if any) with each call to - load_balance() in this domain when the cpu was idle + sched_balance_rq() in this domain when the cpu was idle 5) # of times in this domain pull_task() was called when the cpu was idle 6) # of times in this domain pull_task() was called even though the target task was cache-hot when idle - 7) # of times in this domain load_balance() was called but did + 7) # of times in this domain sched_balance_rq() was called but did not find a busier queue while the cpu was idle 8) # of times in this domain a busier queue was found while the cpu was idle but no busier group was found - 9) # of times in this domain load_balance() was called when the + 9) # of times in this domain sched_balance_rq() was called when the cpu was busy - 10) # of times in this domain load_balance() checked but found the + 10) # of times in this domain sched_balance_rq() checked but found the load did not require balancing when busy - 11) # of times in this domain load_balance() tried to move one or + 11) # of times in this domain sched_balance_rq() tried to move one or more tasks and failed, when the cpu was busy 12) sum of imbalances discovered (if any) with each call to - load_balance() in this domain when the cpu was busy + sched_balance_rq() in this domain when the cpu was busy 13) # of times in this domain pull_task() was called when busy 14) # of times in this domain pull_task() was called even though the target task was cache-hot when busy - 15) # of times in this domain load_balance() was called but did not + 15) # of times in this domain sched_balance_rq() was called but did not find a busier queue while the cpu was busy 16) # of times in this domain a busier queue was found while the cpu was busy but no busier group was found - 17) # of times in this domain load_balance() was called when the + 17) # of times in this domain sched_balance_rq() was called when the cpu was just becoming idle - 18) # of times in this domain load_balance() checked but found the + 18) # of times in this domain sched_balance_rq() checked but found the load did not require 
balancing when the cpu was just becoming idle - 19) # of times in this domain load_balance() tried to move one or more + 19) # of times in this domain sched_balance_rq() tried to move one or more tasks and failed, when the cpu was just becoming idle 20) sum of imbalances discovered (if any) with each call to - load_balance() in this domain when the cpu was just becoming idle + sched_balance_rq() in this domain when the cpu was just becoming idle 21) # of times in this domain pull_task() was called when newly idle 22) # of times in this domain pull_task() was called even though the target task was cache-hot when just becoming idle - 23) # of times in this domain load_balance() was called but did not + 23) # of times in this domain sched_balance_rq() was called but did not find a busier queue while the cpu was just becoming idle 24) # of times in this domain a busier queue was found while the cpu was just becoming idle but no busier group was found diff --git a/Documentation/translations/zh_CN/scheduler/sched-domains.rst b/Documentation/translations/zh_CN/scheduler/sched-domains.rst index e6590fd80640..06363169c56b 100644 --- a/Documentation/translations/zh_CN/scheduler/sched-domains.rst +++ b/Documentation/translations/zh_CN/scheduler/sched-domains.rst @@ -42,9 +42,9 @@ CPU共享。任意两个组的CPU掩码的交集不一定为空,如果是这 后一个函数有两个入参:当前CPU的运行队列、它在sched_tick()调用时是否空闲。函数会从 当前CPU所在的基调度域开始迭代执行,并沿着parent指针链向上进入更高层级的调度域。在迭代 过程中,函数会检查当前调度域是否已经耗尽了再平衡的时间间隔,如果是,它在该调度域运行 -load_balance()。接下来它检查父调度域(如果存在),再后来父调度域的父调度域,以此类推。 +sched_balance_rq()。接下来它检查父调度域(如果存在),再后来父调度域的父调度域,以此类推。 -起初,load_balance()查找当前调度域中最繁忙的调度组。如果成功,在该调度组管辖的全部CPU +起初,sched_balance_rq()查找当前调度域中最繁忙的调度组。如果成功,在该调度组管辖的全部CPU 的运行队列中找出最繁忙的运行队列。如能找到,对当前的CPU运行队列和新找到的最繁忙运行 队列均加锁,并把任务从最繁忙队列中迁移到当前CPU上。被迁移的任务数量等于在先前迭代执行 中计算出的该调度域的调度组的不均衡值。 diff --git a/Documentation/translations/zh_CN/scheduler/sched-stats.rst b/Documentation/translations/zh_CN/scheduler/sched-stats.rst index c5e0be663837..09eee2517610 100644 --- a/Documentation/translations/zh_CN/scheduler/sched-stats.rst +++ b/Documentation/translations/zh_CN/scheduler/sched-stats.rst @@ -75,42 +75,42 @@ domain 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 繁忙,新空闲): - 1) 当CPU空闲时,load_balance()在这个调度域中被调用了#次 - 2) 当CPU空闲时,load_balance()在这个调度域中被调用,但是发现负载无需 + 1) 当CPU空闲时,sched_balance_rq()在这个调度域中被调用了#次 + 2) 当CPU空闲时,sched_balance_rq()在这个调度域中被调用,但是发现负载无需 均衡#次 - 3) 当CPU空闲时,load_balance()在这个调度域中被调用,试图迁移1个或更多 + 3) 当CPU空闲时,sched_balance_rq()在这个调度域中被调用,试图迁移1个或更多 任务且失败了#次 - 4) 当CPU空闲时,load_balance()在这个调度域中被调用,发现不均衡(如果有) + 4) 当CPU空闲时,sched_balance_rq()在这个调度域中被调用,发现不均衡(如果有) #次 5) 当CPU空闲时,pull_task()在这个调度域中被调用#次 6) 当CPU空闲时,尽管目标任务是热缓存状态,pull_task()依然被调用#次 - 7) 当CPU空闲时,load_balance()在这个调度域中被调用,未能找到更繁忙的 + 7) 当CPU空闲时,sched_balance_rq()在这个调度域中被调用,未能找到更繁忙的 队列#次 8) 当CPU空闲时,在调度域中找到了更繁忙的队列,但未找到更繁忙的调度组 #次 - 9) 当CPU繁忙时,load_balance()在这个调度域中被调用了#次 - 10) 当CPU繁忙时,load_balance()在这个调度域中被调用,但是发现负载无需 + 9) 当CPU繁忙时,sched_balance_rq()在这个调度域中被调用了#次 + 10) 当CPU繁忙时,sched_balance_rq()在这个调度域中被调用,但是发现负载无需 均衡#次 - 11) 当CPU繁忙时,load_balance()在这个调度域中被调用,试图迁移1个或更多 + 11) 当CPU繁忙时,sched_balance_rq()在这个调度域中被调用,试图迁移1个或更多 任务且失败了#次 - 12) 当CPU繁忙时,load_balance()在这个调度域中被调用,发现不均衡(如果有) + 12) 当CPU繁忙时,sched_balance_rq()在这个调度域中被调用,发现不均衡(如果有) #次 13) 当CPU繁忙时,pull_task()在这个调度域中被调用#次 14) 当CPU繁忙时,尽管目标任务是热缓存状态,pull_task()依然被调用#次 - 15) 当CPU繁忙时,load_balance()在这个调度域中被调用,未能找到更繁忙的 + 15) 当CPU繁忙时,sched_balance_rq()在这个调度域中被调用,未能找到更繁忙的 队列#次 16) 当CPU繁忙时,在调度域中找到了更繁忙的队列,但未找到更繁忙的调度组 #次 - 17) 当CPU新空闲时,load_balance()在这个调度域中被调用了#次 - 18) 当CPU新空闲时,load_balance()在这个调度域中被调用,但是发现负载无需 + 17) 
当CPU新空闲时,sched_balance_rq()在这个调度域中被调用了#次 + 18) 当CPU新空闲时,sched_balance_rq()在这个调度域中被调用,但是发现负载无需 均衡#次 - 19) 当CPU新空闲时,load_balance()在这个调度域中被调用,试图迁移1个或更多 + 19) 当CPU新空闲时,sched_balance_rq()在这个调度域中被调用,试图迁移1个或更多 任务且失败了#次 - 20) 当CPU新空闲时,load_balance()在这个调度域中被调用,发现不均衡(如果有) + 20) 当CPU新空闲时,sched_balance_rq()在这个调度域中被调用,发现不均衡(如果有) #次 21) 当CPU新空闲时,pull_task()在这个调度域中被调用#次 22) 当CPU新空闲时,尽管目标任务是热缓存状态,pull_task()依然被调用#次 - 23) 当CPU新空闲时,load_balance()在这个调度域中被调用,未能找到更繁忙的 + 23) 当CPU新空闲时,sched_balance_rq()在这个调度域中被调用,未能找到更繁忙的 队列#次 24) 当CPU新空闲时,在调度域中找到了更繁忙的队列,但未找到更繁忙的调度组 #次 diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index 18572c9ea724..c8fe9bab981b 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -110,7 +110,7 @@ struct sched_domain { unsigned long last_decay_max_lb_cost; #ifdef CONFIG_SCHEDSTATS - /* load_balance() stats */ + /* sched_balance_rq() stats */ unsigned int lb_count[CPU_MAX_IDLE_TYPES]; unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 330788b0c617..0d2753c50be9 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6866,7 +6866,7 @@ dequeue_throttle: #ifdef CONFIG_SMP -/* Working cpumask for: load_balance, load_balance_newidle. */ +/* Working cpumask for: sched_balance_rq, load_balance_newidle. */ static DEFINE_PER_CPU(cpumask_var_t, load_balance_mask); static DEFINE_PER_CPU(cpumask_var_t, select_rq_mask); static DEFINE_PER_CPU(cpumask_var_t, should_we_balance_tmpmask); @@ -11242,7 +11242,7 @@ static int should_we_balance(struct lb_env *env) * Check this_cpu to ensure it is balanced within domain. Attempt to move * tasks if there is an imbalance. */ -static int load_balance(int this_cpu, struct rq *this_rq, +static int sched_balance_rq(int this_cpu, struct rq *this_rq, struct sched_domain *sd, enum cpu_idle_type idle, int *continue_balancing) { @@ -11647,7 +11647,7 @@ out_unlock: static atomic_t sched_balance_running = ATOMIC_INIT(0); /* - * Scale the max load_balance interval with the number of CPUs in the system. + * Scale the max sched_balance_rq interval with the number of CPUs in the system. * This trades load-balance latency on larger machines for less cross talk. */ void update_max_interval(void) @@ -11727,7 +11727,7 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle) } if (time_after_eq(jiffies, sd->last_balance + interval)) { - if (load_balance(cpu, rq, sd, idle, &continue_balancing)) { + if (sched_balance_rq(cpu, rq, sd, idle, &continue_balancing)) { /* * The LBF_DST_PINNED logic could have changed * env->dst_cpu, so we can't know our idle @@ -12353,7 +12353,7 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf) if (sd->flags & SD_BALANCE_NEWIDLE) { - pulled_task = load_balance(this_cpu, this_rq, + pulled_task = sched_balance_rq(this_cpu, this_rq, sd, CPU_NEWLY_IDLE, &continue_balancing); -- cgit From e27b02e23a701e5761f1d6028643e1203a1c56de Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 9 Mar 2024 14:45:38 -0800 Subject: ssb: drop use of non-existing CONFIG_SSB_DEBUG symbol MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CONFIG_SSB_DEBUG is used in one header file and nowhere else, so remove it and the now-empty inline function that contained it. Also remove the call to the empty inline function. 
The empty "default:" case is kept to prevent 2 compiler warnings: drivers/ssb/main.c:1133:9: warning: enumeration value 'SSB_BUSTYPE_PCMCIA' not handled in switch [-Wswitch] drivers/ssb/main.c:1133:9: warning: enumeration value 'SSB_BUSTYPE_SDIO' not handled in switch [-Wswitch] Reported-by: Ying Sun Link: https://lore.kernel.org/lkml/4e8525fe.607e2.18a8ddfdce8.Coremail.sunying@isrc.iscas.ac.cn/ Signed-off-by: Randy Dunlap Cc: Michael Büsch Cc: linux-wireless@vger.kernel.org Cc: Kalle Valo Acked-by: Michael Büsch Signed-off-by: Kalle Valo Link: https://msgid.link/20240309224540.22682-1-rdunlap@infradead.org --- drivers/ssb/main.c | 1 - include/linux/ssb/ssb.h | 8 -------- 2 files changed, 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c index bdb6595ffd2d..8210bf28a340 100644 --- a/drivers/ssb/main.c +++ b/drivers/ssb/main.c @@ -1146,7 +1146,6 @@ u32 ssb_dma_translation(struct ssb_device *dev) return SSB_PCI_DMA; } default: - __ssb_dma_not_implemented(dev); } return 0; } diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 1f326da289d3..a2257380c3f1 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -621,14 +621,6 @@ extern u32 ssb_dma_translation(struct ssb_device *dev); #define SSB_DMA_TRANSLATION_MASK 0xC0000000 #define SSB_DMA_TRANSLATION_SHIFT 30 -static inline void __cold __ssb_dma_not_implemented(struct ssb_device *dev) -{ -#ifdef CONFIG_SSB_DEBUG - printk(KERN_ERR "SSB: BUG! Calling DMA API for " - "unsupported bustype %d\n", dev->bus->bustype); -#endif /* DEBUG */ -} - #ifdef CONFIG_SSB_PCIHOST /* PCI-host wrapper driver */ extern int ssb_pcihost_register(struct pci_driver *driver); -- cgit From 64be03575f9e9772ebdebc7f067d533348602083 Mon Sep 17 00:00:00 2001 From: Fiona Klute Date: Mon, 11 Mar 2024 11:37:13 +0100 Subject: wifi: rtw88: SDIO device driver for RTL8723CS This driver uses the new rtw8703b chip driver code. Acked-by: Ping-Ke Shih Acked-by: Ulf Hansson # For SDIO Tested-by: Pavel Machek Signed-off-by: Fiona Klute Signed-off-by: Kalle Valo Link: https://msgid.link/20240311103735.615541-10-fiona.klute@gmx.de --- drivers/net/wireless/realtek/rtw88/Kconfig | 18 ++++++++++++++ drivers/net/wireless/realtek/rtw88/Makefile | 6 +++++ drivers/net/wireless/realtek/rtw88/rtw8723cs.c | 34 ++++++++++++++++++++++++++ include/linux/mmc/sdio_ids.h | 1 + 4 files changed, 59 insertions(+) create mode 100644 drivers/net/wireless/realtek/rtw88/rtw8723cs.c (limited to 'include/linux') diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig index 07b5b2f6eef7..22838ede03cd 100644 --- a/drivers/net/wireless/realtek/rtw88/Kconfig +++ b/drivers/net/wireless/realtek/rtw88/Kconfig @@ -31,6 +31,10 @@ config RTW88_8822C config RTW88_8723X tristate +config RTW88_8703B + tristate + select RTW88_8723X + config RTW88_8723D tristate select RTW88_8723X @@ -126,6 +130,20 @@ config RTW88_8723DS 802.11n SDIO wireless network adapter +config RTW88_8723CS + tristate "Realtek 8723CS SDIO wireless network adapter" + depends on MMC + select RTW88_CORE + select RTW88_SDIO + select RTW88_8703B + help + Select this option to enable support for 8723CS chipset (EXPERIMENTAL) + + This module adds support for the 8723CS 802.11n SDIO + wireless network adapter. + + If you choose to build a module, it'll be called rtw88_8723cs. 
+ config RTW88_8723DU tristate "Realtek 8723DU USB wireless network adapter" depends on USB diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile index 22516c984608..8f47359b4380 100644 --- a/drivers/net/wireless/realtek/rtw88/Makefile +++ b/drivers/net/wireless/realtek/rtw88/Makefile @@ -47,6 +47,12 @@ rtw88_8822cu-objs := rtw8822cu.o obj-$(CONFIG_RTW88_8723X) += rtw88_8723x.o rtw88_8723x-objs := rtw8723x.o +obj-$(CONFIG_RTW88_8703B) += rtw88_8703b.o +rtw88_8703b-objs := rtw8703b.o rtw8703b_tables.o + +obj-$(CONFIG_RTW88_8723CS) += rtw88_8723cs.o +rtw88_8723cs-objs := rtw8723cs.o + obj-$(CONFIG_RTW88_8723D) += rtw88_8723d.o rtw88_8723d-objs := rtw8723d.o rtw8723d_table.o diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723cs.c b/drivers/net/wireless/realtek/rtw88/rtw8723cs.c new file mode 100644 index 000000000000..8d38d36be8c0 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw88/rtw8723cs.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright Fiona Klute */ + +#include +#include +#include +#include "main.h" +#include "rtw8703b.h" +#include "sdio.h" + +static const struct sdio_device_id rtw_8723cs_id_table[] = { + { + SDIO_DEVICE(SDIO_VENDOR_ID_REALTEK, + SDIO_DEVICE_ID_REALTEK_RTW8723CS), + .driver_data = (kernel_ulong_t)&rtw8703b_hw_spec, + }, + {} +}; +MODULE_DEVICE_TABLE(sdio, rtw_8723cs_id_table); + +static struct sdio_driver rtw_8723cs_driver = { + .name = "rtw8723cs", + .id_table = rtw_8723cs_id_table, + .probe = rtw_sdio_probe, + .remove = rtw_sdio_remove, + .drv = { + .pm = &rtw_sdio_pm_ops, + .shutdown = rtw_sdio_shutdown + }}; +module_sdio_driver(rtw_8723cs_driver); + +MODULE_AUTHOR("Fiona Klute "); +MODULE_DESCRIPTION("Realtek 802.11n wireless 8723cs driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 7fada7a714fe..7cddfdac2f57 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -124,6 +124,7 @@ #define SDIO_DEVICE_ID_REALTEK_RTW8723DS_2ANT 0xd723 #define SDIO_DEVICE_ID_REALTEK_RTW8723DS_1ANT 0xd724 #define SDIO_DEVICE_ID_REALTEK_RTW8821DS 0xd821 +#define SDIO_DEVICE_ID_REALTEK_RTW8723CS 0xb703 #define SDIO_VENDOR_ID_SIANO 0x039a #define SDIO_DEVICE_ID_SIANO_NOVA_B0 0x0201 -- cgit From 7d2cc63eca0c993c99d18893214abf8f85d566d8 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Fri, 8 Mar 2024 06:38:07 +0100 Subject: bpf: Take return from set_memory_ro() into account with bpf_prog_lock_ro() set_memory_ro() can fail, leaving memory unprotected. Check its return and take it into account as an error. 
Link: https://github.com/KSPP/linux/issues/7 Signed-off-by: Christophe Leroy Cc: linux-hardening@vger.kernel.org Reviewed-by: Kees Cook Message-ID: <286def78955e04382b227cb3e4b6ba272a7442e3.1709850515.git.christophe.leroy@csgroup.eu> Signed-off-by: Alexei Starovoitov --- include/linux/filter.h | 5 +++-- kernel/bpf/core.c | 4 +++- kernel/bpf/verifier.c | 8 ++++++-- 3 files changed, 12 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index c99bc3df2d28..9107ee1de66f 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -887,14 +887,15 @@ bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default) #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) -static inline void bpf_prog_lock_ro(struct bpf_prog *fp) +static inline int __must_check bpf_prog_lock_ro(struct bpf_prog *fp) { #ifndef CONFIG_BPF_JIT_ALWAYS_ON if (!fp->jited) { set_vm_flush_reset_perms(fp); - set_memory_ro((unsigned long)fp, fp->pages); + return set_memory_ro((unsigned long)fp, fp->pages); } #endif + return 0; } static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 696bc55de8e8..63f100def31b 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2403,7 +2403,9 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) } finalize: - bpf_prog_lock_ro(fp); + *err = bpf_prog_lock_ro(fp); + if (*err) + return fp; /* The tail call compatibility check can only be done at * this late stage as we need to determine, if we deal diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 7b208e5d38f6..de7813947981 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -19263,10 +19263,14 @@ static int jit_subprogs(struct bpf_verifier_env *env) * bpf_prog_load will add the kallsyms for the main program. */ for (i = 1; i < env->subprog_cnt; i++) { - bpf_prog_lock_ro(func[i]); - bpf_prog_kallsyms_add(func[i]); + err = bpf_prog_lock_ro(func[i]); + if (err) + goto out_free; } + for (i = 1; i < env->subprog_cnt; i++) + bpf_prog_kallsyms_add(func[i]); + /* Last step: make now unused interpreter insns from main * prog consistent for later dump requests, so they can * later look the same as if they were interpreted only. -- cgit From e60adf513275c3a38e5cb67f7fd12387e43a3ff5 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Fri, 8 Mar 2024 06:38:08 +0100 Subject: bpf: Take return from set_memory_rox() into account with bpf_jit_binary_lock_ro() set_memory_rox() can fail, leaving memory unprotected. Check return and bail out when bpf_jit_binary_lock_ro() returns an error. 
Link: https://github.com/KSPP/linux/issues/7 Signed-off-by: Christophe Leroy Cc: linux-hardening@vger.kernel.org Reviewed-by: Kees Cook Reviewed-by: Puranjay Mohan Reviewed-by: Ilya Leoshkevich # s390x Acked-by: Tiezhu Yang # LoongArch Reviewed-by: Johan Almbladh # MIPS Part Message-ID: <036b6393f23a2032ce75a1c92220b2afcb798d5d.1709850515.git.christophe.leroy@csgroup.eu> Signed-off-by: Alexei Starovoitov --- arch/arm/net/bpf_jit_32.c | 25 ++++++++++++------------- arch/loongarch/net/bpf_jit.c | 22 ++++++++++++++++------ arch/mips/net/bpf_jit_comp.c | 3 ++- arch/parisc/net/bpf_jit_core.c | 8 +++++++- arch/s390/net/bpf_jit_comp.c | 6 +++++- arch/sparc/net/bpf_jit_comp_64.c | 6 +++++- arch/x86/net/bpf_jit_comp32.c | 3 +-- include/linux/filter.h | 5 +++-- 8 files changed, 51 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index 1d672457d02f..01516f83a95a 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -2222,28 +2222,21 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) /* If building the body of the JITed code fails somehow, * we fall back to the interpretation. */ - if (build_body(&ctx) < 0) { - image_ptr = NULL; - bpf_jit_binary_free(header); - prog = orig_prog; - goto out_imms; - } + if (build_body(&ctx) < 0) + goto out_free; build_epilogue(&ctx); /* 3.) Extra pass to validate JITed Code */ - if (validate_code(&ctx)) { - image_ptr = NULL; - bpf_jit_binary_free(header); - prog = orig_prog; - goto out_imms; - } + if (validate_code(&ctx)) + goto out_free; flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx)); if (bpf_jit_enable > 1) /* there are 2 passes here */ bpf_jit_dump(prog->len, image_size, 2, ctx.target); - bpf_jit_binary_lock_ro(header); + if (bpf_jit_binary_lock_ro(header)) + goto out_free; prog->bpf_func = (void *)ctx.target; prog->jited = 1; prog->jited_len = image_size; @@ -2260,5 +2253,11 @@ out: bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog); return prog; + +out_free: + image_ptr = NULL; + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_imms; } diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c index e73323d759d0..7dbefd4ba210 100644 --- a/arch/loongarch/net/bpf_jit.c +++ b/arch/loongarch/net/bpf_jit.c @@ -1294,16 +1294,19 @@ skip_init_ctx: flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx)); if (!prog->is_func || extra_pass) { + int err; + if (extra_pass && ctx.idx != jit_data->ctx.idx) { pr_err_once("multi-func JIT bug %d != %d\n", ctx.idx, jit_data->ctx.idx); - bpf_jit_binary_free(header); - prog->bpf_func = NULL; - prog->jited = 0; - prog->jited_len = 0; - goto out_offset; + goto out_free; + } + err = bpf_jit_binary_lock_ro(header); + if (err) { + pr_err_once("bpf_jit_binary_lock_ro() returned %d\n", + err); + goto out_free; } - bpf_jit_binary_lock_ro(header); } else { jit_data->ctx = ctx; jit_data->image = image_ptr; @@ -1334,6 +1337,13 @@ out: out_offset = -1; return prog; + +out_free: + bpf_jit_binary_free(header); + prog->bpf_func = NULL; + prog->jited = 0; + prog->jited_len = 0; + goto out_offset; } /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. 
*/ diff --git a/arch/mips/net/bpf_jit_comp.c b/arch/mips/net/bpf_jit_comp.c index a40d926b6513..e355dfca4400 100644 --- a/arch/mips/net/bpf_jit_comp.c +++ b/arch/mips/net/bpf_jit_comp.c @@ -1012,7 +1012,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) bpf_prog_fill_jited_linfo(prog, &ctx.descriptors[1]); /* Set as read-only exec and flush instruction cache */ - bpf_jit_binary_lock_ro(header); + if (bpf_jit_binary_lock_ro(header)) + goto out_err; flush_icache_range((unsigned long)header, (unsigned long)&ctx.target[ctx.jit_index]); diff --git a/arch/parisc/net/bpf_jit_core.c b/arch/parisc/net/bpf_jit_core.c index d6ee2fd45550..979f45d4d1fb 100644 --- a/arch/parisc/net/bpf_jit_core.c +++ b/arch/parisc/net/bpf_jit_core.c @@ -167,7 +167,13 @@ skip_init_ctx: bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns); if (!prog->is_func || extra_pass) { - bpf_jit_binary_lock_ro(jit_data->header); + if (bpf_jit_binary_lock_ro(jit_data->header)) { + bpf_jit_binary_free(jit_data->header); + prog->bpf_func = NULL; + prog->jited = 0; + prog->jited_len = 0; + goto out_offset; + } prologue_len = ctx->epilogue_offset - ctx->body_len; for (i = 0; i < prog->len; i++) ctx->offset[i] += prologue_len; diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index b418333bb086..e613eebfd349 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -2111,7 +2111,11 @@ skip_init_ctx: print_fn_code(jit.prg_buf, jit.size_prg); } if (!fp->is_func || extra_pass) { - bpf_jit_binary_lock_ro(header); + if (bpf_jit_binary_lock_ro(header)) { + bpf_jit_binary_free(header); + fp = orig_fp; + goto free_addrs; + } } else { jit_data->header = header; jit_data->ctx = jit; diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index fa0759bfe498..73bf0aea8baf 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c @@ -1602,7 +1602,11 @@ skip_init_ctx: bpf_flush_icache(header, (u8 *)header + header->size); if (!prog->is_func || extra_pass) { - bpf_jit_binary_lock_ro(header); + if (bpf_jit_binary_lock_ro(header)) { + bpf_jit_binary_free(header); + prog = orig_prog; + goto out_off; + } } else { jit_data->ctx = ctx; jit_data->image = image_ptr; diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c index c10083a8e68e..de0f9e5f9f73 100644 --- a/arch/x86/net/bpf_jit_comp32.c +++ b/arch/x86/net/bpf_jit_comp32.c @@ -2600,8 +2600,7 @@ out_image: if (bpf_jit_enable > 1) bpf_jit_dump(prog->len, proglen, pass + 1, image); - if (image) { - bpf_jit_binary_lock_ro(header); + if (image && !bpf_jit_binary_lock_ro(header)) { prog->bpf_func = (void *)image; prog->jited = 1; prog->jited_len = proglen; diff --git a/include/linux/filter.h b/include/linux/filter.h index 9107ee1de66f..c0d51bff8f96 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -898,10 +898,11 @@ static inline int __must_check bpf_prog_lock_ro(struct bpf_prog *fp) return 0; } -static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) +static inline int __must_check +bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) { set_vm_flush_reset_perms(hdr); - set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT); + return set_memory_rox((unsigned long)hdr, hdr->size >> PAGE_SHIFT); } int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); -- cgit From 01c0cce88c5480cc2505b79330246ef12eda938f Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Wed, 28 Feb 2024 08:35:32 +0200 Subject: drm/omapdrm: Fix 
console with deferred ops Commit 95da53d63dcf ("drm/omapdrm: Use regular fbdev I/O helpers") stopped console from updating for command mode displays because there is no damage handling in fb_sys_write() unlike we had earlier in drm_fb_helper_sys_write(). Let's fix the issue by adding FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS and FB_DMAMEM_HELPERS_DEFERRED as suggested by Thomas. We cannot use the FB_DEFAULT_DEFERRED_OPS as fb_deferred_io_mmap() won't work properly for write-combine. Fixes: 95da53d63dcf ("drm/omapdrm: Use regular fbdev I/O helpers") Suggested-by: Thomas Zimmermann Reviewed-by: Thomas Zimmermann Signed-off-by: Tony Lindgren Signed-off-by: Tomi Valkeinen Link: https://patchwork.freedesktop.org/patch/msgid/20240228063540.4444-3-tony@atomide.com --- drivers/gpu/drm/omapdrm/Kconfig | 2 +- drivers/gpu/drm/omapdrm/omap_fbdev.c | 28 ++++++++++++++++++++++------ drivers/video/fbdev/core/Kconfig | 6 ++++++ include/linux/fb.h | 4 ++++ 4 files changed, 33 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig index b715301ec79f..6c49270cb290 100644 --- a/drivers/gpu/drm/omapdrm/Kconfig +++ b/drivers/gpu/drm/omapdrm/Kconfig @@ -4,7 +4,7 @@ config DRM_OMAP depends on DRM && OF depends on ARCH_OMAP2PLUS select DRM_KMS_HELPER - select FB_DMAMEM_HELPERS if DRM_FBDEV_EMULATION + select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION select VIDEOMODE_HELPERS select HDMI default n diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 7c5af3de1e72..523be34682ca 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -51,6 +51,10 @@ static void pan_worker(struct work_struct *work) omap_gem_roll(bo, fbi->var.yoffset * npages); } +FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(omap_fbdev, + drm_fb_helper_damage_range, + drm_fb_helper_damage_area) + static int omap_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *fbi) { @@ -78,11 +82,9 @@ fallback: static int omap_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { - struct drm_fb_helper *helper = info->par; - struct drm_framebuffer *fb = helper->fb; - struct drm_gem_object *bo = drm_gem_fb_get_obj(fb, 0); + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); - return drm_gem_mmap_obj(bo, omap_gem_mmap_size(bo), vma); + return fb_deferred_io_mmap(info, vma); } static void omap_fbdev_fb_destroy(struct fb_info *info) @@ -94,6 +96,7 @@ static void omap_fbdev_fb_destroy(struct fb_info *info) DBG(); + fb_deferred_io_cleanup(info); drm_fb_helper_fini(helper); omap_gem_unpin(bo); @@ -104,15 +107,19 @@ static void omap_fbdev_fb_destroy(struct fb_info *info) kfree(fbdev); } +/* + * For now, we cannot use FB_DEFAULT_DEFERRED_OPS and fb_deferred_io_mmap() + * because we use write-combine. 
+ */ static const struct fb_ops omap_fb_ops = { .owner = THIS_MODULE, - __FB_DEFAULT_DMAMEM_OPS_RDWR, + __FB_DEFAULT_DEFERRED_OPS_RDWR(omap_fbdev), .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_setcmap = drm_fb_helper_setcmap, .fb_blank = drm_fb_helper_blank, .fb_pan_display = omap_fbdev_pan_display, - __FB_DEFAULT_DMAMEM_OPS_DRAW, + __FB_DEFAULT_DEFERRED_OPS_DRAW(omap_fbdev), .fb_ioctl = drm_fb_helper_ioctl, .fb_mmap = omap_fbdev_fb_mmap, .fb_destroy = omap_fbdev_fb_destroy, @@ -213,6 +220,15 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, fbi->fix.smem_start = dma_addr; fbi->fix.smem_len = bo->size; + /* deferred I/O */ + helper->fbdefio.delay = HZ / 20; + helper->fbdefio.deferred_io = drm_fb_helper_deferred_io; + + fbi->fbdefio = &helper->fbdefio; + ret = fb_deferred_io_init(fbi); + if (ret) + goto fail; + /* if we have DMM, then we can use it for scrolling by just * shuffling pages around in DMM rather than doing sw blit. */ diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig index db09fe87fcd4..0ab8848ba2f1 100644 --- a/drivers/video/fbdev/core/Kconfig +++ b/drivers/video/fbdev/core/Kconfig @@ -144,6 +144,12 @@ config FB_DMAMEM_HELPERS select FB_SYS_IMAGEBLIT select FB_SYSMEM_FOPS +config FB_DMAMEM_HELPERS_DEFERRED + bool + depends on FB_CORE + select FB_DEFERRED_IO + select FB_DMAMEM_HELPERS + config FB_IOMEM_FOPS tristate depends on FB_CORE diff --git a/include/linux/fb.h b/include/linux/fb.h index 708e6a177b1b..5e210bf72fc9 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -688,6 +688,10 @@ extern int fb_deferred_io_fsync(struct file *file, loff_t start, __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \ __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys) +#define FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(__prefix, __damage_range, __damage_area) \ + __FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \ + __FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys) + /* * Initializes struct fb_ops for deferred I/O. 
*/ -- cgit From e3362acd796789dc0562eb1a3937007b0beb0c5b Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Sat, 16 Mar 2024 08:35:40 +0100 Subject: bpf: Remove arch_unprotect_bpf_trampoline() Last user of arch_unprotect_bpf_trampoline() was removed by commit 187e2af05abe ("bpf: struct_ops supports more than one page for trampolines.") Remove arch_unprotect_bpf_trampoline() Reported-by: Daniel Borkmann Fixes: 187e2af05abe ("bpf: struct_ops supports more than one page for trampolines.") Signed-off-by: Christophe Leroy Link: https://lore.kernel.org/r/42c635bb54d3af91db0f9b85d724c7c290069f67.1710574353.git.christophe.leroy@csgroup.eu Signed-off-by: Martin KaFai Lau --- arch/arm64/net/bpf_jit_comp.c | 4 ---- arch/x86/net/bpf_jit_comp.c | 4 ---- include/linux/bpf.h | 1 - kernel/bpf/trampoline.c | 7 ------- 4 files changed, 16 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index c5b461dda438..132c8ffba109 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -2180,10 +2180,6 @@ void arch_protect_bpf_trampoline(void *image, unsigned int size) { } -void arch_unprotect_bpf_trampoline(void *image, unsigned int size) -{ -} - int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image, void *ro_image_end, const struct btf_func_model *m, u32 flags, struct bpf_tramp_links *tlinks, diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index a7ba8e178645..7a56d2d84512 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -3008,10 +3008,6 @@ void arch_protect_bpf_trampoline(void *image, unsigned int size) { } -void arch_unprotect_bpf_trampoline(void *image, unsigned int size) -{ -} - int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, const struct btf_func_model *m, u32 flags, struct bpf_tramp_links *tlinks, diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 4f20f62f9d63..d89bdefb42e2 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1117,7 +1117,6 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i void *arch_alloc_bpf_trampoline(unsigned int size); void arch_free_bpf_trampoline(void *image, unsigned int size); void arch_protect_bpf_trampoline(void *image, unsigned int size); -void arch_unprotect_bpf_trampoline(void *image, unsigned int size); int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, struct bpf_tramp_links *tlinks, void *func_addr); diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index db7599c59c78..04fd1abd3661 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -1078,13 +1078,6 @@ void __weak arch_protect_bpf_trampoline(void *image, unsigned int size) set_memory_rox((long)image, 1); } -void __weak arch_unprotect_bpf_trampoline(void *image, unsigned int size) -{ - WARN_ON_ONCE(size > PAGE_SIZE); - set_memory_nx((long)image, 1); - set_memory_rw((long)image, 1); -} - int __weak arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, struct bpf_tramp_links *tlinks, void *func_addr) { -- cgit From c733239f8f530872a1f80d8c45dcafbaff368737 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Sat, 16 Mar 2024 08:35:41 +0100 Subject: bpf: Check return from set_memory_rox() arch_protect_bpf_trampoline() and alloc_new_pack() call set_memory_rox() which can fail, leading to unprotected memory. 
Take into account return from set_memory_rox() function and add __must_check flag to arch_protect_bpf_trampoline(). Signed-off-by: Christophe Leroy Reviewed-by: Kees Cook Link: https://lore.kernel.org/r/fe1c163c83767fde5cab31d209a4a6be3ddb3a73.1710574353.git.christophe.leroy@csgroup.eu Signed-off-by: Martin KaFai Lau --- arch/arm64/net/bpf_jit_comp.c | 3 ++- arch/x86/net/bpf_jit_comp.c | 3 ++- include/linux/bpf.h | 2 +- kernel/bpf/bpf_struct_ops.c | 8 ++++++-- kernel/bpf/core.c | 28 +++++++++++++++++++++------- kernel/bpf/trampoline.c | 8 +++++--- net/bpf/bpf_dummy_struct_ops.c | 4 +++- 7 files changed, 40 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 132c8ffba109..bc16eb694657 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -2176,8 +2176,9 @@ void arch_free_bpf_trampoline(void *image, unsigned int size) bpf_prog_pack_free(image, size); } -void arch_protect_bpf_trampoline(void *image, unsigned int size) +int arch_protect_bpf_trampoline(void *image, unsigned int size) { + return 0; } int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image, diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 7a56d2d84512..4900b1ee019f 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -3004,8 +3004,9 @@ void arch_free_bpf_trampoline(void *image, unsigned int size) bpf_prog_pack_free(image, size); } -void arch_protect_bpf_trampoline(void *image, unsigned int size) +int arch_protect_bpf_trampoline(void *image, unsigned int size) { + return 0; } int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, diff --git a/include/linux/bpf.h b/include/linux/bpf.h index d89bdefb42e2..17843e66a1d3 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1116,7 +1116,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i void *func_addr); void *arch_alloc_bpf_trampoline(unsigned int size); void arch_free_bpf_trampoline(void *image, unsigned int size); -void arch_protect_bpf_trampoline(void *image, unsigned int size); +int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size); int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, struct bpf_tramp_links *tlinks, void *func_addr); diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 3fcd35314ce5..86c7884abaf8 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -740,8 +740,12 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, if (err) goto reset_unlock; } - for (i = 0; i < st_map->image_pages_cnt; i++) - arch_protect_bpf_trampoline(st_map->image_pages[i], PAGE_SIZE); + for (i = 0; i < st_map->image_pages_cnt; i++) { + err = arch_protect_bpf_trampoline(st_map->image_pages[i], + PAGE_SIZE); + if (err) + goto reset_unlock; + } if (st_map->map.map_flags & BPF_F_LINK) { err = 0; diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 63f100def31b..5aacb1d3c4cc 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -908,23 +908,30 @@ static LIST_HEAD(pack_list); static struct bpf_prog_pack *alloc_new_pack(bpf_jit_fill_hole_t bpf_fill_ill_insns) { struct bpf_prog_pack *pack; + int err; pack = kzalloc(struct_size(pack, bitmap, BITS_TO_LONGS(BPF_PROG_CHUNK_COUNT)), GFP_KERNEL); if (!pack) return NULL; pack->ptr = bpf_jit_alloc_exec(BPF_PROG_PACK_SIZE); - if (!pack->ptr) { - kfree(pack); - return NULL; - 
} + if (!pack->ptr) + goto out; bpf_fill_ill_insns(pack->ptr, BPF_PROG_PACK_SIZE); bitmap_zero(pack->bitmap, BPF_PROG_PACK_SIZE / BPF_PROG_CHUNK_SIZE); - list_add_tail(&pack->list, &pack_list); set_vm_flush_reset_perms(pack->ptr); - set_memory_rox((unsigned long)pack->ptr, BPF_PROG_PACK_SIZE / PAGE_SIZE); + err = set_memory_rox((unsigned long)pack->ptr, + BPF_PROG_PACK_SIZE / PAGE_SIZE); + if (err) + goto out; + list_add_tail(&pack->list, &pack_list); return pack; + +out: + bpf_jit_free_exec(pack->ptr); + kfree(pack); + return NULL; } void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns) @@ -939,9 +946,16 @@ void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns) size = round_up(size, PAGE_SIZE); ptr = bpf_jit_alloc_exec(size); if (ptr) { + int err; + bpf_fill_ill_insns(ptr, size); set_vm_flush_reset_perms(ptr); - set_memory_rox((unsigned long)ptr, size / PAGE_SIZE); + err = set_memory_rox((unsigned long)ptr, + size / PAGE_SIZE); + if (err) { + bpf_jit_free_exec(ptr); + ptr = NULL; + } } goto out; } diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index 04fd1abd3661..cc50607f8d8c 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -456,7 +456,9 @@ again: if (err < 0) goto out_free; - arch_protect_bpf_trampoline(im->image, im->size); + err = arch_protect_bpf_trampoline(im->image, im->size); + if (err) + goto out_free; WARN_ON(tr->cur_image && total == 0); if (tr->cur_image) @@ -1072,10 +1074,10 @@ void __weak arch_free_bpf_trampoline(void *image, unsigned int size) bpf_jit_free_exec(image); } -void __weak arch_protect_bpf_trampoline(void *image, unsigned int size) +int __weak arch_protect_bpf_trampoline(void *image, unsigned int size) { WARN_ON_ONCE(size > PAGE_SIZE); - set_memory_rox((long)image, 1); + return set_memory_rox((long)image, 1); } int __weak arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c index de33dc1b0daa..25b75844891a 100644 --- a/net/bpf/bpf_dummy_struct_ops.c +++ b/net/bpf/bpf_dummy_struct_ops.c @@ -133,7 +133,9 @@ int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, if (err < 0) goto out; - arch_protect_bpf_trampoline(image, PAGE_SIZE); + err = arch_protect_bpf_trampoline(image, PAGE_SIZE); + if (err) + goto out; prog_ret = dummy_ops_call_op(image, args); err = dummy_ops_copy_args(args); -- cgit From 8522f6b760ca588928eede740d5d69dd1e936b49 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 28 Feb 2024 15:00:02 +0100 Subject: printk: Let no_printk() use _printk() When printk-indexing is enabled, each printk() invocation emits a pi_entry structure, containing the format string and other information related to its location in the kernel sources. This is even true for no_printk(): while the actual code to print the message is optimized out by the compiler due to the always-false check, the pi_entry structure is still emitted. As the main purpose of no_printk() is to provide a helper to maintain printf()-style format checking when debugging is disabled, this leads to the inclusion in the index of lots of printk formats that cannot be emitted by the current kernel. Fix this by switching no_printk() from printk() to _printk(). This reduces the size of an arm64 defconfig kernel with CONFIG_PRINTK_INDEX=y by 576 KiB. 
Fixes: 337015573718b161 ("printk: Userspace format indexing support") Signed-off-by: Geert Uytterhoeven Reviewed-by: Andy Shevchenko Reviewed-by: Xiubo Li Reviewed-by: Chris Down Reviewed-by: Petr Mladek Link: https://lore.kernel.org/r/56cf92edccffea970e1f40a075334dd6cf5bb2a4.1709127473.git.geert+renesas@glider.be Signed-off-by: Petr Mladek --- include/linux/printk.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/printk.h b/include/linux/printk.h index 955e31860095..2fde40cc9677 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -126,7 +126,7 @@ struct va_format { #define no_printk(fmt, ...) \ ({ \ if (0) \ - printk(fmt, ##__VA_ARGS__); \ + _printk(fmt, ##__VA_ARGS__); \ 0; \ }) -- cgit From c26ec799042a3888935d59b599f33e41efedf5f8 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 28 Feb 2024 15:00:03 +0100 Subject: dev_printk: Add and use dev_no_printk() When printk-indexing is enabled, each dev_printk() invocation emits a pi_entry structure. This is even true when the dev_printk() is protected by an always-false check, as is typically the case for debug messages: while the actual code to print the message is optimized out by the compiler, the pi_entry structure is still emitted. Avoid emitting pi_entry structures for unavailable dev_printk() kernel messages by: 1. Introducing a dev_no_printk() helper, mimicked after the existing no_printk() helper, which calls _dev_printk() instead of dev_printk(), 2. Replacing all "if (0) dev_printk(...)" constructs by calls to the new helper. This reduces the size of an arm64 defconfig kernel with CONFIG_PRINTK_INDEX=y by 957 KiB. Fixes: ad7d61f159db7397 ("printk: index: Add indexing support to dev_printk") Signed-off-by: Geert Uytterhoeven Reviewed-by: Andy Shevchenko Reviewed-by: Xiubo Li Reviewed-by: Chris Down Reviewed-by: Petr Mladek Link: https://lore.kernel.org/r/8583d54f1687c801c6cda8edddf2cf0344c6e883.1709127473.git.geert+renesas@glider.be Signed-off-by: Petr Mladek --- include/linux/dev_printk.h | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h index 6bfe70decc9f..ae80a303c216 100644 --- a/include/linux/dev_printk.h +++ b/include/linux/dev_printk.h @@ -129,6 +129,16 @@ void _dev_info(const struct device *dev, const char *fmt, ...) _dev_printk(level, dev, fmt, ##__VA_ARGS__); \ }) +/* + * Dummy dev_printk for disabled debugging statements to use whilst maintaining + * gcc's format checking. + */ +#define dev_no_printk(level, dev, fmt, ...) \ + ({ \ + if (0) \ + _dev_printk(level, dev, fmt, ##__VA_ARGS__); \ + }) + /* * #defines for all the dev_ macros to prefix with whatever * possible use of #define dev_fmt(fmt) ... @@ -158,10 +168,7 @@ void _dev_info(const struct device *dev, const char *fmt, ...) dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__) #else #define dev_dbg(dev, fmt, ...) \ -({ \ - if (0) \ - dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \ -}) + dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__) #endif #ifdef CONFIG_PRINTK @@ -247,20 +254,14 @@ do { \ } while (0) #else #define dev_dbg_ratelimited(dev, fmt, ...) \ -do { \ - if (0) \ - dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \ -} while (0) + dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__) #endif #ifdef VERBOSE_DEBUG #define dev_vdbg dev_dbg #else #define dev_vdbg(dev, fmt, ...) 
\ -({ \ - if (0) \ - dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \ -}) + dev_no_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__) #endif /* -- cgit From 0381e588949380704005dacdbef6e9a2920260df Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 28 Feb 2024 15:00:04 +0100 Subject: dyndbg: Use *no_printk() helpers When printk-indexing is enabled, each printk() or dev_printk() invocation emits a pi_entry structure. This is even true when the call is protected by an always-false check: while the actual code to print the message is optimized out by the compiler, the pi_entry structure is still emitted. Fix this by replacing "if (0) *printk(...)" constructs by calls to the corresponding *no_printk() helpers. Note that this has minimal impact, as most (all?) callers of dynamic_{pr,dev}_debug() are protected by checks for DYNAMIC_DEBUG anyway. Still, using the helpers serves as a good example to follow. Signed-off-by: Geert Uytterhoeven Reviewed-by: Andy Shevchenko Reviewed-by: Xiubo Li Reviewed-by: Chris Down Reviewed-by: Petr Mladek Link: https://lore.kernel.org/r/32e18ceb528b99d9147be9c1af578a1965e2bad9.1709127473.git.geert+renesas@glider.be Signed-off-by: Petr Mladek --- include/linux/dynamic_debug.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index 4fcbf4d4fd0a..ff44ec346162 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h @@ -305,9 +305,9 @@ void __dynamic_ibdev_dbg(struct _ddebug *descriptor, #define DYNAMIC_DEBUG_BRANCH(descriptor) false #define dynamic_pr_debug(fmt, ...) \ - do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) + no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__) #define dynamic_dev_dbg(dev, fmt, ...) \ - do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0) + dev_no_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__) #define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ groupsize, buf, len, ascii) \ do { if (0) \ -- cgit From b37cafacbf98ead69c2ecb220fb82b06aee916b9 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 28 Feb 2024 15:00:05 +0100 Subject: ceph: Use no_printk() helper When printk-indexing is enabled, each printk() invocation emits a pi_entry structure. This is even true when the call is protected by an always-false check: while the actual code to print the message is optimized out by the compiler, the pi_entry structure is still emitted. Fix this by replacing "if (0) printk(...)" constructs by calls to the no_printk() helper. This reduces the size of an arm64 kernel with CONFIG_PRINTK_INDEX=y and CONFIG_CEPH_FS=y by ca. 4 KiB. Signed-off-by: Geert Uytterhoeven Reviewed-by: Andy Shevchenko Reviewed-by: Xiubo Li Reviewed-by: Chris Down Reviewed-by: Petr Mladek Link: https://lore.kernel.org/r/a21767eccb823435a7f18cdf115d7d572b4e945d.1709127473.git.geert+renesas@glider.be Signed-off-by: Petr Mladek --- include/linux/ceph/ceph_debug.h | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h index 11a92a946016..5f904591fa5f 100644 --- a/include/linux/ceph/ceph_debug.h +++ b/include/linux/ceph/ceph_debug.h @@ -27,17 +27,13 @@ ##__VA_ARGS__) # else /* faux printk call just to see any compiler warnings. */ -# define dout(fmt, ...) 
do { \ - if (0) \ - printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ - } while (0) -# define doutc(client, fmt, ...) do { \ - if (0) \ - printk(KERN_DEBUG "[%pU %llu] " fmt, \ - &client->fsid, \ - client->monc.auth->global_id, \ - ##__VA_ARGS__); \ - } while (0) +# define dout(fmt, ...) \ + no_printk(KERN_DEBUG fmt, ##__VA_ARGS__) +# define doutc(client, fmt, ...) \ + no_printk(KERN_DEBUG "[%pU %llu] " fmt, \ + &client->fsid, \ + client->monc.auth->global_id, \ + ##__VA_ARGS__) # endif #else -- cgit From d4dfc5700e867b22ab94f960f9a9972696a637d5 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 19 Mar 2024 16:38:49 -0700 Subject: bpf: pass whole link instead of prog when triggering raw tracepoint Instead of passing prog as an argument to bpf_trace_runX() helpers, that are called from tracepoint triggering calls, store BPF link itself (struct bpf_raw_tp_link for raw tracepoints). This will allow to pass extra information like BPF cookie into raw tracepoint registration. Instead of replacing `struct bpf_prog *prog = __data;` with corresponding `struct bpf_raw_tp_link *link = __data;` assignment in `__bpf_trace_##call` I just passed `__data` through into underlying bpf_trace_runX() call. This works well because we implicitly cast `void *`, and it also avoids naming clashes with arguments coming from tracepoint's "proto" list. We could have run into the same problem with "prog", we just happened to not have a tracepoint that has "prog" input argument. We are less lucky with "link", as there are tracepoints using "link" argument name already. So instead of trying to avoid naming conflicts, let's just remove intermediate local variable. It doesn't hurt readibility, it's either way a bit of a maze of calls and macros, that requires careful reading. Acked-by: Stanislav Fomichev Signed-off-by: Andrii Nakryiko Message-ID: <20240319233852.1977493-3-andrii@kernel.org> Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 5 +++++ include/linux/trace_events.h | 36 ++++++++++++++++++++---------------- include/trace/bpf_probe.h | 3 +-- kernel/bpf/syscall.c | 9 ++------- kernel/trace/bpf_trace.c | 18 ++++++++++-------- 5 files changed, 38 insertions(+), 33 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 17843e66a1d3..2ea8ce59f582 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1607,6 +1607,11 @@ struct bpf_tracing_link { struct bpf_prog *tgt_prog; }; +struct bpf_raw_tp_link { + struct bpf_link link; + struct bpf_raw_event_map *btp; +}; + struct bpf_link_primer { struct bpf_link *link; struct file *file; diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index d68ff9b1247f..a7fc6fb6de3c 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -759,8 +759,11 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx); int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie); void perf_event_detach_bpf_prog(struct perf_event *event); int perf_event_query_prog_array(struct perf_event *event, void __user *info); -int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog); -int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog); + +struct bpf_raw_tp_link; +int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link); +int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link); + struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name); void 
bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp); int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, @@ -788,11 +791,12 @@ perf_event_query_prog_array(struct perf_event *event, void __user *info) { return -EOPNOTSUPP; } -static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p) +struct bpf_raw_tp_link; +static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link) { return -EOPNOTSUPP; } -static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p) +static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link) { return -EOPNOTSUPP; } @@ -903,31 +907,31 @@ void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp); int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie); void perf_event_free_bpf_prog(struct perf_event *event); -void bpf_trace_run1(struct bpf_prog *prog, u64 arg1); -void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2); -void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run1(struct bpf_raw_tp_link *link, u64 arg1); +void bpf_trace_run2(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2); +void bpf_trace_run3(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3); -void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run4(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4); -void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run5(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5); -void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run6(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6); -void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run7(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7); -void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run8(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8); -void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run9(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8, u64 arg9); -void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run10(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8, u64 arg9, u64 arg10); -void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run11(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8, u64 arg9, u64 arg10, u64 arg11); -void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2, +void bpf_trace_run12(struct bpf_raw_tp_link *link, u64 arg1, u64 arg2, u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12); void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx, diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h index e609cd7da47e..a2ea11cc912e 100644 --- a/include/trace/bpf_probe.h +++ b/include/trace/bpf_probe.h @@ -46,8 +46,7 @@ static notrace void \ __bpf_trace_##call(void *__data, proto) \ { \ - struct bpf_prog *prog = __data; \ - CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(prog, 
CAST_TO_U64(args)); \ + CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(__data, CAST_TO_U64(args)); \ } #undef DECLARE_EVENT_CLASS diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index ae2ff73bde7e..1cb4c3809af4 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3469,17 +3469,12 @@ out_put_prog: return err; } -struct bpf_raw_tp_link { - struct bpf_link link; - struct bpf_raw_event_map *btp; -}; - static void bpf_raw_tp_link_release(struct bpf_link *link) { struct bpf_raw_tp_link *raw_tp = container_of(link, struct bpf_raw_tp_link, link); - bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog); + bpf_probe_unregister(raw_tp->btp, raw_tp); bpf_put_raw_tracepoint(raw_tp->btp); } @@ -3833,7 +3828,7 @@ static int bpf_raw_tp_link_attach(struct bpf_prog *prog, goto out_put_btp; } - err = bpf_probe_register(link->btp, prog); + err = bpf_probe_register(link->btp, link); if (err) { bpf_link_cleanup(&link_primer); goto out_put_btp; diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 30ecf62f8a17..17de91ad4a1f 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -2366,8 +2366,10 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp) } static __always_inline -void __bpf_trace_run(struct bpf_prog *prog, u64 *args) +void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args) { + struct bpf_prog *prog = link->link.prog; + cant_sleep(); if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) { bpf_prog_inc_misses_counter(prog); @@ -2404,12 +2406,12 @@ out: #define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 #define BPF_TRACE_DEFN_x(x) \ - void bpf_trace_run##x(struct bpf_prog *prog, \ + void bpf_trace_run##x(struct bpf_raw_tp_link *link, \ REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \ { \ u64 args[x]; \ REPEAT(x, COPY, __DL_SEM, __SEQ_0_11); \ - __bpf_trace_run(prog, args); \ + __bpf_trace_run(link, args); \ } \ EXPORT_SYMBOL_GPL(bpf_trace_run##x) BPF_TRACE_DEFN_x(1); @@ -2425,9 +2427,10 @@ BPF_TRACE_DEFN_x(10); BPF_TRACE_DEFN_x(11); BPF_TRACE_DEFN_x(12); -int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) +int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link) { struct tracepoint *tp = btp->tp; + struct bpf_prog *prog = link->link.prog; /* * check that program doesn't access arguments beyond what's @@ -2439,13 +2442,12 @@ int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) if (prog->aux->max_tp_access > btp->writable_size) return -EINVAL; - return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, - prog); + return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func, link); } -int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) +int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_raw_tp_link *link) { - return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); + return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, link); } int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, -- cgit From 68ca5d4eebb8c4de246ee5f634eee26bc689562d Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Tue, 19 Mar 2024 16:38:50 -0700 Subject: bpf: support BPF cookie in raw tracepoint (raw_tp, tp_btf) programs Wire up BPF cookie for raw tracepoint programs (both BTF and non-BTF aware variants). This brings them up to part w.r.t. BPF cookie usage with classic tracepoint and fentry/fexit programs. 
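
A minimal user-space sketch of how the extended command could be used, assuming
the uapi layout added by this patch; the wrapper name and the missing error
handling are illustrative only:

  #include <linux/bpf.h>
  #include <string.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  /* Attach an already-loaded raw_tp program and tag the attachment with a cookie. */
  static int raw_tp_open_with_cookie(int prog_fd, const char *tp_name, __u64 cookie)
  {
          union bpf_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.raw_tracepoint.name = (__u64)(unsigned long)tp_name;
          attr.raw_tracepoint.prog_fd = prog_fd;
          attr.raw_tracepoint.cookie = cookie;    /* new field in this patch */

          return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
  }

On the program side the value is read back with bpf_get_attach_cookie(), which
this series wires up for raw_tp and tp_btf programs.
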
Acked-by: Stanislav Fomichev Acked-by: Eduard Zingerman Signed-off-by: Andrii Nakryiko Message-ID: <20240319233852.1977493-4-andrii@kernel.org> Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 1 + include/uapi/linux/bpf.h | 6 ++++-- kernel/bpf/syscall.c | 13 +++++++++---- kernel/trace/bpf_trace.c | 13 +++++++++++++ tools/include/uapi/linux/bpf.h | 1 + 5 files changed, 28 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 2ea8ce59f582..62762390c93d 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1610,6 +1610,7 @@ struct bpf_tracing_link { struct bpf_raw_tp_link { struct bpf_link link; struct bpf_raw_event_map *btp; + u64 cookie; }; struct bpf_link_primer { diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 3c42b9f1bada..9585f5345353 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1662,8 +1662,10 @@ union bpf_attr { } query; struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ - __u64 name; - __u32 prog_fd; + __u64 name; + __u32 prog_fd; + __u32 :32; + __aligned_u64 cookie; } raw_tracepoint; struct { /* anonymous struct for BPF_BTF_LOAD */ diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 1cb4c3809af4..e44c276e8617 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3774,7 +3774,7 @@ static int bpf_perf_link_attach(const union bpf_attr *attr, struct bpf_prog *pro #endif /* CONFIG_PERF_EVENTS */ static int bpf_raw_tp_link_attach(struct bpf_prog *prog, - const char __user *user_tp_name) + const char __user *user_tp_name, u64 cookie) { struct bpf_link_primer link_primer; struct bpf_raw_tp_link *link; @@ -3821,6 +3821,7 @@ static int bpf_raw_tp_link_attach(struct bpf_prog *prog, bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT, &bpf_raw_tp_link_lops, prog); link->btp = btp; + link->cookie = cookie; err = bpf_link_prime(&link->link, &link_primer); if (err) { @@ -3841,11 +3842,13 @@ out_put_btp: return err; } -#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd +#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.cookie static int bpf_raw_tracepoint_open(const union bpf_attr *attr) { struct bpf_prog *prog; + void __user *tp_name; + __u64 cookie; int fd; if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN)) @@ -3855,7 +3858,9 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr) if (IS_ERR(prog)) return PTR_ERR(prog); - fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name)); + tp_name = u64_to_user_ptr(attr->raw_tracepoint.name); + cookie = attr->raw_tracepoint.cookie; + fd = bpf_raw_tp_link_attach(prog, tp_name, cookie); if (fd < 0) bpf_prog_put(prog); return fd; @@ -5193,7 +5198,7 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) goto out; } if (prog->expected_attach_type == BPF_TRACE_RAW_TP) - ret = bpf_raw_tp_link_attach(prog, NULL); + ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie); else if (prog->expected_attach_type == BPF_TRACE_ITER) ret = bpf_iter_link_attach(attr, uattr, prog); else if (prog->expected_attach_type == BPF_LSM_CGROUP) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 17de91ad4a1f..434e3ece6688 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -2004,6 +2004,8 @@ raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_get_stackid_proto_raw_tp; case BPF_FUNC_get_stack: return &bpf_get_stack_proto_raw_tp; + case 
BPF_FUNC_get_attach_cookie: + return &bpf_get_attach_cookie_proto_tracing; default: return bpf_tracing_func_proto(func_id, prog); } @@ -2066,6 +2068,9 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) case BPF_FUNC_get_func_arg_cnt: return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL; case BPF_FUNC_get_attach_cookie: + if (prog->type == BPF_PROG_TYPE_TRACING && + prog->expected_attach_type == BPF_TRACE_RAW_TP) + return &bpf_get_attach_cookie_proto_tracing; return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto_tracing : NULL; default: fn = raw_tp_prog_func_proto(func_id, prog); @@ -2369,15 +2374,23 @@ static __always_inline void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args) { struct bpf_prog *prog = link->link.prog; + struct bpf_run_ctx *old_run_ctx; + struct bpf_trace_run_ctx run_ctx; cant_sleep(); if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) { bpf_prog_inc_misses_counter(prog); goto out; } + + run_ctx.bpf_cookie = link->cookie; + old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); + rcu_read_lock(); (void) bpf_prog_run(prog, args); rcu_read_unlock(); + + bpf_reset_run_ctx(old_run_ctx); out: this_cpu_dec(*(prog->active)); } diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 3c42b9f1bada..bf80b614c4db 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1664,6 +1664,7 @@ union bpf_attr { struct { /* anonymous struct used by BPF_RAW_TRACEPOINT_OPEN command */ __u64 name; __u32 prog_fd; + __aligned_u64 cookie; } raw_tracepoint; struct { /* anonymous struct for BPF_BTF_LOAD */ -- cgit From 91a1d97ef482c1e4c9d4c1c656a53b0f6b16d0ed Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 13 Mar 2024 19:01:03 +0100 Subject: jump_label,module: Don't alloc static_key_mod for __ro_after_init keys When a static_key is marked ro_after_init, its state will never change (after init), therefore jump_label_update() will never need to iterate the entries, and thus module load won't actually need to track this -- avoiding the static_key::next write. Therefore, mark these keys such that jump_label_add_module() might recognise them and avoid the modification. Use the special state: 'static_key_linked(key) && !static_key_mod(key)' to denote such keys. jump_label_add_module() does not exist under CONFIG_JUMP_LABEL=n, so the newly-introduced jump_label_init_ro() can be defined as a nop for that configuration. 
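
As an illustration (not part of the patch), a key whose state is decided once
during boot and never flipped afterwards can be declared with the
__ro_after_init variant of the macro, so that jump_label_init_ro() seals it
and later module loads skip the static_key_mod allocation for it. The probe
helper below is hypothetical:

  #include <linux/init.h>
  #include <linux/jump_label.h>

  DEFINE_STATIC_KEY_FALSE_RO(my_boot_feature_key);

  static int __init my_boot_feature_setup(void)
  {
          if (detect_my_boot_feature())           /* hypothetical probe */
                  static_branch_enable(&my_boot_feature_key);
          return 0;       /* state is final before mark_readonly() runs */
  }
  early_initcall(my_boot_feature_setup);

  bool my_boot_feature_active(void)
  {
          return static_branch_unlikely(&my_boot_feature_key);
  }
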
[ mingo: Renamed jump_label_ro() to jump_label_init_ro() ] Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Valentin Schneider Signed-off-by: Ingo Molnar Acked-by: Josh Poimboeuf Link: https://lore.kernel.org/r/20240313180106.2917308-2-vschneid@redhat.com --- include/asm-generic/sections.h | 5 ++++ include/linux/jump_label.h | 3 +++ init/main.c | 1 + kernel/jump_label.c | 53 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 62 insertions(+) (limited to 'include/linux') diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index db13bb620f52..c768de6f19a9 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -180,6 +180,11 @@ static inline bool is_kernel_rodata(unsigned long addr) addr < (unsigned long)__end_rodata; } +static inline bool is_kernel_ro_after_init(unsigned long addr) +{ + return addr >= (unsigned long)__start_ro_after_init && + addr < (unsigned long)__end_ro_after_init; +} /** * is_kernel_inittext - checks if the pointer address is located in the * .init.text section diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index f0a949b7c973..f5a2727ca4a9 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h @@ -216,6 +216,7 @@ extern struct jump_entry __start___jump_table[]; extern struct jump_entry __stop___jump_table[]; extern void jump_label_init(void); +extern void jump_label_init_ro(void); extern void jump_label_lock(void); extern void jump_label_unlock(void); extern void arch_jump_label_transform(struct jump_entry *entry, @@ -265,6 +266,8 @@ static __always_inline void jump_label_init(void) static_key_initialized = true; } +static __always_inline void jump_label_init_ro(void) { } + static __always_inline bool static_key_false(struct static_key *key) { if (unlikely_notrace(static_key_count(key) > 0)) diff --git a/init/main.c b/init/main.c index 2ca52474d0c3..6c3f251d6ef8 100644 --- a/init/main.c +++ b/init/main.c @@ -1408,6 +1408,7 @@ static void mark_readonly(void) * insecure pages which are W+X. 
*/ flush_module_init_free_work(); + jump_label_init_ro(); mark_rodata_ro(); debug_checkwx(); rodata_test(); diff --git a/kernel/jump_label.c b/kernel/jump_label.c index d9c822bbffb8..3218fa5688b9 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -530,6 +530,45 @@ void __init jump_label_init(void) cpus_read_unlock(); } +static inline bool static_key_sealed(struct static_key *key) +{ + return (key->type & JUMP_TYPE_LINKED) && !(key->type & ~JUMP_TYPE_MASK); +} + +static inline void static_key_seal(struct static_key *key) +{ + unsigned long type = key->type & JUMP_TYPE_TRUE; + key->type = JUMP_TYPE_LINKED | type; +} + +void jump_label_init_ro(void) +{ + struct jump_entry *iter_start = __start___jump_table; + struct jump_entry *iter_stop = __stop___jump_table; + struct jump_entry *iter; + + if (WARN_ON_ONCE(!static_key_initialized)) + return; + + cpus_read_lock(); + jump_label_lock(); + + for (iter = iter_start; iter < iter_stop; iter++) { + struct static_key *iterk = jump_entry_key(iter); + + if (!is_kernel_ro_after_init((unsigned long)iterk)) + continue; + + if (static_key_sealed(iterk)) + continue; + + static_key_seal(iterk); + } + + jump_label_unlock(); + cpus_read_unlock(); +} + #ifdef CONFIG_MODULES enum jump_label_type jump_label_init_type(struct jump_entry *entry) @@ -650,6 +689,15 @@ static int jump_label_add_module(struct module *mod) static_key_set_entries(key, iter); continue; } + + /* + * If the key was sealed at init, then there's no need to keep a + * reference to its module entries - just patch them now and be + * done with it. + */ + if (static_key_sealed(key)) + goto do_poke; + jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL); if (!jlm) return -ENOMEM; @@ -675,6 +723,7 @@ static int jump_label_add_module(struct module *mod) static_key_set_linked(key); /* Only update if we've changed from our initial state */ +do_poke: if (jump_label_type(iter) != jump_label_init_type(iter)) __jump_label_update(key, iter, iter_stop, true); } @@ -699,6 +748,10 @@ static void jump_label_del_module(struct module *mod) if (within_module((unsigned long)key, mod)) continue; + /* No @jlm allocated because key was sealed at init. */ + if (static_key_sealed(key)) + continue; + /* No memory during module load */ if (WARN_ON(!static_key_linked(key))) continue; -- cgit From 5edeb7d312628961046eec9b26a7e72f44baf846 Mon Sep 17 00:00:00 2001 From: Joy Zou Date: Mon, 18 Mar 2024 17:56:32 +0800 Subject: regulator: pca9450: add pca9451a support Adding support for new pmic pca9451a. 
Signed-off-by: Joy Zou Link: https://msgid.link/r/20240318095633.4079027-3-joy.zou@nxp.com Signed-off-by: Mark Brown --- drivers/regulator/pca9450-regulator.c | 194 +++++++++++++++++++++++++++++++++- include/linux/regulator/pca9450.h | 1 + 2 files changed, 193 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c index 2ab365d2749f..242ff25f5b83 100644 --- a/drivers/regulator/pca9450-regulator.c +++ b/drivers/regulator/pca9450-regulator.c @@ -106,6 +106,14 @@ static const struct linear_range pca9450_dvs_buck_volts[] = { REGULATOR_LINEAR_RANGE(600000, 0x00, 0x7F, 12500), }; +/* + * BUCK1/3 + * 0.65 to 2.2375V (12.5mV step) + */ +static const struct linear_range pca9451a_dvs_buck_volts[] = { + REGULATOR_LINEAR_RANGE(650000, 0x00, 0x7F, 12500), +}; + /* * BUCK4/5/6 * 0.6V to 3.4V (25mV step) @@ -662,6 +670,178 @@ static const struct pca9450_regulator_desc pca9450bc_regulators[] = { }, }; +static const struct pca9450_regulator_desc pca9451a_regulators[] = { + { + .desc = { + .name = "buck1", + .of_match = of_match_ptr("BUCK1"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_BUCK1, + .ops = &pca9450_dvs_buck_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_BUCK1_VOLTAGE_NUM, + .linear_ranges = pca9451a_dvs_buck_volts, + .n_linear_ranges = ARRAY_SIZE(pca9451a_dvs_buck_volts), + .vsel_reg = PCA9450_REG_BUCK1OUT_DVS0, + .vsel_mask = BUCK1OUT_DVS0_MASK, + .enable_reg = PCA9450_REG_BUCK1CTRL, + .enable_mask = BUCK1_ENMODE_MASK, + .enable_val = BUCK_ENMODE_ONREQ, + .ramp_mask = BUCK1_RAMP_MASK, + .ramp_delay_table = pca9450_dvs_buck_ramp_table, + .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table), + .owner = THIS_MODULE, + .of_parse_cb = pca9450_set_dvs_levels, + }, + .dvs = { + .run_reg = PCA9450_REG_BUCK1OUT_DVS0, + .run_mask = BUCK1OUT_DVS0_MASK, + .standby_reg = PCA9450_REG_BUCK1OUT_DVS1, + .standby_mask = BUCK1OUT_DVS1_MASK, + }, + }, + { + .desc = { + .name = "buck2", + .of_match = of_match_ptr("BUCK2"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_BUCK2, + .ops = &pca9450_dvs_buck_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_BUCK2_VOLTAGE_NUM, + .linear_ranges = pca9450_dvs_buck_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_dvs_buck_volts), + .vsel_reg = PCA9450_REG_BUCK2OUT_DVS0, + .vsel_mask = BUCK2OUT_DVS0_MASK, + .enable_reg = PCA9450_REG_BUCK2CTRL, + .enable_mask = BUCK2_ENMODE_MASK, + .enable_val = BUCK_ENMODE_ONREQ_STBYREQ, + .ramp_mask = BUCK2_RAMP_MASK, + .ramp_delay_table = pca9450_dvs_buck_ramp_table, + .n_ramp_values = ARRAY_SIZE(pca9450_dvs_buck_ramp_table), + .owner = THIS_MODULE, + .of_parse_cb = pca9450_set_dvs_levels, + }, + .dvs = { + .run_reg = PCA9450_REG_BUCK2OUT_DVS0, + .run_mask = BUCK2OUT_DVS0_MASK, + .standby_reg = PCA9450_REG_BUCK2OUT_DVS1, + .standby_mask = BUCK2OUT_DVS1_MASK, + }, + }, + { + .desc = { + .name = "buck4", + .of_match = of_match_ptr("BUCK4"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_BUCK4, + .ops = &pca9450_buck_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_BUCK4_VOLTAGE_NUM, + .linear_ranges = pca9450_buck_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts), + .vsel_reg = PCA9450_REG_BUCK4OUT, + .vsel_mask = BUCK4OUT_MASK, + .enable_reg = PCA9450_REG_BUCK4CTRL, + .enable_mask = BUCK4_ENMODE_MASK, + .enable_val = BUCK_ENMODE_ONREQ, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "buck5", + .of_match = 
of_match_ptr("BUCK5"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_BUCK5, + .ops = &pca9450_buck_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_BUCK5_VOLTAGE_NUM, + .linear_ranges = pca9450_buck_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts), + .vsel_reg = PCA9450_REG_BUCK5OUT, + .vsel_mask = BUCK5OUT_MASK, + .enable_reg = PCA9450_REG_BUCK5CTRL, + .enable_mask = BUCK5_ENMODE_MASK, + .enable_val = BUCK_ENMODE_ONREQ, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "buck6", + .of_match = of_match_ptr("BUCK6"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_BUCK6, + .ops = &pca9450_buck_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_BUCK6_VOLTAGE_NUM, + .linear_ranges = pca9450_buck_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_buck_volts), + .vsel_reg = PCA9450_REG_BUCK6OUT, + .vsel_mask = BUCK6OUT_MASK, + .enable_reg = PCA9450_REG_BUCK6CTRL, + .enable_mask = BUCK6_ENMODE_MASK, + .enable_val = BUCK_ENMODE_ONREQ, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "ldo1", + .of_match = of_match_ptr("LDO1"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_LDO1, + .ops = &pca9450_ldo_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_LDO1_VOLTAGE_NUM, + .linear_ranges = pca9450_ldo1_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_ldo1_volts), + .vsel_reg = PCA9450_REG_LDO1CTRL, + .vsel_mask = LDO1OUT_MASK, + .enable_reg = PCA9450_REG_LDO1CTRL, + .enable_mask = LDO1_EN_MASK, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "ldo4", + .of_match = of_match_ptr("LDO4"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_LDO4, + .ops = &pca9450_ldo_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_LDO4_VOLTAGE_NUM, + .linear_ranges = pca9450_ldo34_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_ldo34_volts), + .vsel_reg = PCA9450_REG_LDO4CTRL, + .vsel_mask = LDO4OUT_MASK, + .enable_reg = PCA9450_REG_LDO4CTRL, + .enable_mask = LDO4_EN_MASK, + .owner = THIS_MODULE, + }, + }, + { + .desc = { + .name = "ldo5", + .of_match = of_match_ptr("LDO5"), + .regulators_node = of_match_ptr("regulators"), + .id = PCA9450_LDO5, + .ops = &pca9450_ldo_regulator_ops, + .type = REGULATOR_VOLTAGE, + .n_voltages = PCA9450_LDO5_VOLTAGE_NUM, + .linear_ranges = pca9450_ldo5_volts, + .n_linear_ranges = ARRAY_SIZE(pca9450_ldo5_volts), + .vsel_reg = PCA9450_REG_LDO5CTRL_H, + .vsel_mask = LDO5HOUT_MASK, + .enable_reg = PCA9450_REG_LDO5CTRL_H, + .enable_mask = LDO5H_EN_MASK, + .owner = THIS_MODULE, + }, + }, +}; + static irqreturn_t pca9450_irq_handler(int irq, void *data) { struct pca9450 *pca9450 = data; @@ -729,6 +909,10 @@ static int pca9450_i2c_probe(struct i2c_client *i2c) regulator_desc = pca9450bc_regulators; pca9450->rcnt = ARRAY_SIZE(pca9450bc_regulators); break; + case PCA9450_TYPE_PCA9451A: + regulator_desc = pca9451a_regulators; + pca9450->rcnt = ARRAY_SIZE(pca9451a_regulators); + break; default: dev_err(&i2c->dev, "Unknown device type"); return -EINVAL; @@ -755,7 +939,8 @@ static int pca9450_i2c_probe(struct i2c_client *i2c) /* Check your board and dts for match the right pmic */ if (((device_id >> 4) != 0x1 && type == PCA9450_TYPE_PCA9450A) || - ((device_id >> 4) != 0x3 && type == PCA9450_TYPE_PCA9450BC)) { + ((device_id >> 4) != 0x3 && type == PCA9450_TYPE_PCA9450BC) || + ((device_id >> 4) != 0x9 && type == PCA9450_TYPE_PCA9451A)) { dev_err(&i2c->dev, "Device id(%x) mismatched\n", device_id >> 4); return -EINVAL; @@ -846,7 +1031,8 
@@ static int pca9450_i2c_probe(struct i2c_client *i2c) } dev_info(&i2c->dev, "%s probed.\n", - type == PCA9450_TYPE_PCA9450A ? "pca9450a" : "pca9450bc"); + type == PCA9450_TYPE_PCA9450A ? "pca9450a" : + (type == PCA9450_TYPE_PCA9451A ? "pca9451a" : "pca9450bc")); return 0; } @@ -864,6 +1050,10 @@ static const struct of_device_id pca9450_of_match[] = { .compatible = "nxp,pca9450c", .data = (void *)PCA9450_TYPE_PCA9450BC, }, + { + .compatible = "nxp,pca9451a", + .data = (void *)PCA9450_TYPE_PCA9451A, + }, { } }; MODULE_DEVICE_TABLE(of, pca9450_of_match); diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h index 505c908dbb81..243633c8dceb 100644 --- a/include/linux/regulator/pca9450.h +++ b/include/linux/regulator/pca9450.h @@ -9,6 +9,7 @@ enum pca9450_chip_type { PCA9450_TYPE_PCA9450A = 0, PCA9450_TYPE_PCA9450BC, + PCA9450_TYPE_PCA9451A, PCA9450_TYPE_AMOUNT, }; -- cgit From d650d1c46d8471bf8ebf556629ac13077f13e647 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 8 Mar 2024 18:27:46 +0200 Subject: spi: xilinx: Fix kernel documentation in the xilinx_spi.h While updating the data structure layout the kernel documentation became outdated. Synchronize kernel documentation with the actual data structure layout. Fixes: 1dd46599f83a ("spi: xilinx: add force_irq for QSPI mode") Fixes: 082339bc63cc ("spi: spi-xilinx: Add run run-time endian detection") Reviewed-by: Michal Simek Signed-off-by: Andy Shevchenko Link: https://msgid.link/r/20240308162920.46816-2-andriy.shevchenko@linux.intel.com Signed-off-by: Mark Brown --- include/linux/spi/xilinx_spi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h index 3934ce789d87..fd6add419e94 100644 --- a/include/linux/spi/xilinx_spi.h +++ b/include/linux/spi/xilinx_spi.h @@ -5,10 +5,10 @@ /** * struct xspi_platform_data - Platform data of the Xilinx SPI driver * @num_chipselect: Number of chip select by the IP. - * @little_endian: If registers should be accessed little endian or not. * @bits_per_word: Number of bits per word. * @devices: Devices to add when the driver is probed. * @num_devices: Number of devices in the devices array. + * @force_irq: If set, forces QSPI transaction requirements. */ struct xspi_platform_data { u16 num_chipselect; -- cgit From 8f40647d87610ecff6637d673024fe7bd045c913 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 8 Mar 2024 18:27:47 +0200 Subject: spi: xilinx: Add necessary inclusion and forward declaration xilinx_spi.h is mnissing inclusion and forward declaration, add them. Signed-off-by: Andy Shevchenko Acked-by: Michal Simek Link: https://msgid.link/r/20240308162920.46816-3-andriy.shevchenko@linux.intel.com Signed-off-by: Mark Brown --- include/linux/spi/xilinx_spi.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h index fd6add419e94..4ba8f53ce570 100644 --- a/include/linux/spi/xilinx_spi.h +++ b/include/linux/spi/xilinx_spi.h @@ -2,6 +2,10 @@ #ifndef __LINUX_SPI_XILINX_SPI_H #define __LINUX_SPI_XILINX_SPI_H +#include + +struct spi_board_info; + /** * struct xspi_platform_data - Platform data of the Xilinx SPI driver * @num_chipselect: Number of chip select by the IP. 
-- cgit From a39111b1cf0864b1782f30f9a1fa65260d057327 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 8 Mar 2024 18:27:48 +0200 Subject: spi: xilinx: Make num_chipselect 8-bit in the struct xspi_platform_data There is no use for whole 16-bit for the number of chip select pins. Drop it to 8 bits and reshuffle the data structure layout to avoid unnecessary paddings. Signed-off-by: Andy Shevchenko Reviewed-by: Michal Simek Link: https://msgid.link/r/20240308162920.46816-4-andriy.shevchenko@linux.intel.com Signed-off-by: Mark Brown --- include/linux/spi/xilinx_spi.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/spi/xilinx_spi.h b/include/linux/spi/xilinx_spi.h index 4ba8f53ce570..1b8d984668b6 100644 --- a/include/linux/spi/xilinx_spi.h +++ b/include/linux/spi/xilinx_spi.h @@ -8,17 +8,17 @@ struct spi_board_info; /** * struct xspi_platform_data - Platform data of the Xilinx SPI driver - * @num_chipselect: Number of chip select by the IP. - * @bits_per_word: Number of bits per word. * @devices: Devices to add when the driver is probed. * @num_devices: Number of devices in the devices array. + * @num_chipselect: Number of chip select by the IP. + * @bits_per_word: Number of bits per word. * @force_irq: If set, forces QSPI transaction requirements. */ struct xspi_platform_data { - u16 num_chipselect; - u8 bits_per_word; struct spi_board_info *devices; u8 num_devices; + u8 num_chipselect; + u8 bits_per_word; bool force_irq; }; -- cgit From 7fd54c205f104317b853fc417ac7e9d0b9531ddb Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 7 Mar 2024 21:47:45 +0200 Subject: spi: pxa2xx: Kill pxa2xx_set_spi_info() There is the only one user of the pxa2xx_set_spi_info(). Unexport it and inline to the actual user. Signed-off-by: Andy Shevchenko Acked-by: Arnd Bergmann Link: https://msgid.link/r/20240307195056.4059864-2-andriy.shevchenko@linux.intel.com Signed-off-by: Mark Brown --- arch/arm/mach-pxa/devices.c | 18 ------------------ arch/arm/mach-pxa/spitz.c | 14 +++++++++++++- include/linux/spi/pxa2xx_spi.h | 8 -------- 3 files changed, 13 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c index 661b3fc43275..1e4cd502340e 100644 --- a/arch/arm/mach-pxa/devices.c +++ b/arch/arm/mach-pxa/devices.c @@ -7,7 +7,6 @@ #include #include #include -#include #include #include @@ -665,23 +664,6 @@ struct platform_device pxa27x_device_gpio = { .resource = pxa_resource_gpio, }; -/* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1. 
- * See comment in arch/arm/mach-pxa/ssp.c::ssp_probe() */ -void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info) -{ - struct platform_device *pd; - - pd = platform_device_alloc("pxa2xx-spi", id); - if (pd == NULL) { - printk(KERN_ERR "pxa2xx-spi: failed to allocate device id %d\n", - id); - return; - } - - pd->dev.platform_data = info; - platform_device_add(pd); -} - static struct resource pxa_dma_resource[] = { [0] = { .start = 0x40000000, diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c index cc691b199429..318402ad685e 100644 --- a/arch/arm/mach-pxa/spitz.c +++ b/arch/arm/mach-pxa/spitz.c @@ -585,6 +585,9 @@ static struct gpiod_lookup_table spitz_spi_gpio_table = { static void __init spitz_spi_init(void) { + struct platform_device *pd; + int id = 2; + if (machine_is_akita()) gpiod_add_lookup_table(&akita_lcdcon_gpio_table); else @@ -592,7 +595,16 @@ static void __init spitz_spi_init(void) gpiod_add_lookup_table(&spitz_ads7846_gpio_table); gpiod_add_lookup_table(&spitz_spi_gpio_table); - pxa2xx_set_spi_info(2, &spitz_spi_info); + + /* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1 */ + pd = platform_device_alloc("pxa2xx-spi", id); + if (pd == NULL) { + pr_err("pxa2xx-spi: failed to allocate device id %d\n", id); + } else { + pd->dev.platform_data = &spitz_spi_info; + platform_device_add(pd); + } + spi_register_board_info(ARRAY_AND_SIZE(spitz_spi_devices)); } #else diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index ca2cd4e30ead..56aba2f737b1 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -45,12 +45,4 @@ struct pxa2xx_spi_chip { u32 timeout; }; -#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP) - -#include - -extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info); - -#endif - #endif /* __LINUX_SPI_PXA2XX_SPI_H */ -- cgit From e3f209e269d32ebc0ba7f497f5d2af21ed4f0dd0 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 7 Mar 2024 21:47:46 +0200 Subject: spi: pxa2xx: Make num_chipselect 8-bit in the struct pxa2xx_spi_controller There is no use for whole 16-bit for the number of chip select pins. Drop it to 8 bits. Acked-by: Arnd Bergmann Signed-off-by: Andy Shevchenko Link: https://msgid.link/r/20240307195056.4059864-3-andriy.shevchenko@linux.intel.com Signed-off-by: Mark Brown --- Documentation/spi/pxa2xx.rst | 2 +- include/linux/spi/pxa2xx_spi.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/Documentation/spi/pxa2xx.rst b/Documentation/spi/pxa2xx.rst index 19479b801826..0997d8eaf190 100644 --- a/Documentation/spi/pxa2xx.rst +++ b/Documentation/spi/pxa2xx.rst @@ -24,7 +24,7 @@ arch/.../mach-*/board-*.c as a "platform device". The host controller configurat is passed to the driver via a table found in include/linux/spi/pxa2xx_spi.h:: struct pxa2xx_spi_controller { - u16 num_chipselect; + u8 num_chipselect; u8 enable_dma; ... }; diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index 56aba2f737b1..e5a4a045fb67 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h @@ -17,7 +17,7 @@ struct dma_chan; * (resides in device.platform_data). 
*/ struct pxa2xx_spi_controller { - u16 num_chipselect; + u8 num_chipselect; u8 enable_dma; u8 dma_burst_size; bool is_target; -- cgit From 22d5607400c62c72da9b60e3324744be83e147a4 Mon Sep 17 00:00:00 2001 From: Qais Yousef Date: Sun, 24 Mar 2024 00:45:50 +0000 Subject: sched/fair: Check if a task has a fitting CPU when updating misfit If a misfit task is affined to a subset of the possible CPUs, we need to verify that one of these CPUs can fit it. Otherwise the load balancer code will continuously trigger needlessly leading the balance_interval to increase in return and eventually end up with a situation where real imbalances take a long time to address because of this impossible imbalance situation. This can happen in Android world where it's common for background tasks to be restricted to little cores. Similarly if we can't fit the biggest core, triggering misfit is pointless as it is the best we can ever get on this system. To be able to detect that; we use asym_cap_list to iterate through capacities in the system to see if the task is able to run at a higher capacity level based on its p->cpus_ptr. We do that when the affinity change, a fair task is forked, or when a task switched to fair policy. We store the max_allowed_capacity in task_struct to allow for cheap comparison in the fast path. Improve check_misfit_status() function by removing redundant checks. misfit_task_load will be 0 if the task can't move to a bigger CPU. And nohz_balancer_kick() already checks for cpu_check_capacity() before calling check_misfit_status(). Test: ===== Add trace_printk("balance_interval = %lu\n", interval) in get_sd_balance_interval(). run if [ "$MASK" != "0" ]; then adb shell "taskset -a $MASK cat /dev/zero > /dev/null" fi sleep 10 // parse ftrace buffer counting the occurrence of each valaue Where MASK is either: * 0: no busy task running * 1: busy task is pinned to 1 cpu; handled today to not cause misfit * f: busy task pinned to little cores, simulates busy background task, demonstrates the problem to be fixed Results: ======== Note how occurrence of balance_interval = 128 overshoots for MASK = f. BEFORE ------ MASK=0 1 balance_interval = 175 120 balance_interval = 128 846 balance_interval = 64 55 balance_interval = 63 215 balance_interval = 32 2 balance_interval = 31 2 balance_interval = 16 4 balance_interval = 8 1870 balance_interval = 4 65 balance_interval = 2 MASK=1 27 balance_interval = 175 37 balance_interval = 127 840 balance_interval = 64 167 balance_interval = 63 449 balance_interval = 32 84 balance_interval = 31 304 balance_interval = 16 1156 balance_interval = 8 2781 balance_interval = 4 428 balance_interval = 2 MASK=f 1 balance_interval = 175 1328 balance_interval = 128 44 balance_interval = 64 101 balance_interval = 63 25 balance_interval = 32 5 balance_interval = 31 23 balance_interval = 16 23 balance_interval = 8 4306 balance_interval = 4 177 balance_interval = 2 AFTER ----- Note how the high values almost disappear for all MASK values. The system has background tasks that could trigger the problem without simulate it even with MASK=0. 
MASK=0 103 balance_interval = 63 19 balance_interval = 31 194 balance_interval = 8 4827 balance_interval = 4 179 balance_interval = 2 MASK=1 131 balance_interval = 63 1 balance_interval = 31 87 balance_interval = 8 3600 balance_interval = 4 7 balance_interval = 2 MASK=f 8 balance_interval = 127 182 balance_interval = 63 3 balance_interval = 31 9 balance_interval = 16 415 balance_interval = 8 3415 balance_interval = 4 21 balance_interval = 2 Signed-off-by: Qais Yousef Signed-off-by: Ingo Molnar Reviewed-by: Vincent Guittot Link: https://lore.kernel.org/r/20240324004552.999936-3-qyousef@layalina.io --- include/linux/sched.h | 1 + init/init_task.c | 1 + kernel/sched/fair.c | 66 ++++++++++++++++++++++++++++++++++++++------------- 3 files changed, 52 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sched.h b/include/linux/sched.h index 3ed40e9f6155..c75fd46506df 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -835,6 +835,7 @@ struct task_struct { #endif unsigned int policy; + unsigned long max_allowed_capacity; int nr_cpus_allowed; const cpumask_t *cpus_ptr; cpumask_t *user_cpus_ptr; diff --git a/init/init_task.c b/init/init_task.c index 4daee6d761c8..2558b719e053 100644 --- a/init/init_task.c +++ b/init/init_task.c @@ -77,6 +77,7 @@ struct task_struct init_task __aligned(L1_CACHE_BYTES) = { .cpus_ptr = &init_task.cpus_mask, .user_cpus_ptr = NULL, .cpus_mask = CPU_MASK_ALL, + .max_allowed_capacity = SCHED_CAPACITY_SCALE, .nr_cpus_allowed= NR_CPUS, .mm = NULL, .active_mm = &init_mm, diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index e8270e2e15cb..c47c4f2e28f7 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5098,15 +5098,19 @@ static inline int task_fits_cpu(struct task_struct *p, int cpu) static inline void update_misfit_status(struct task_struct *p, struct rq *rq) { + int cpu = cpu_of(rq); + if (!sched_asym_cpucap_active()) return; - if (!p || p->nr_cpus_allowed == 1) { - rq->misfit_task_load = 0; - return; - } + /* + * Affinity allows us to go somewhere higher? Or are we on biggest + * available CPU already? Or do we fit into this CPU ? + */ + if (!p || (p->nr_cpus_allowed == 1) || + (arch_scale_cpu_capacity(cpu) == p->max_allowed_capacity) || + task_fits_cpu(p, cpu)) { - if (task_fits_cpu(p, cpu_of(rq))) { rq->misfit_task_load = 0; return; } @@ -8253,6 +8257,36 @@ static void task_dead_fair(struct task_struct *p) remove_entity_load_avg(&p->se); } +/* + * Set the max capacity the task is allowed to run at for misfit detection. 
+ */ +static void set_task_max_allowed_capacity(struct task_struct *p) +{ + struct asym_cap_data *entry; + + if (!sched_asym_cpucap_active()) + return; + + rcu_read_lock(); + list_for_each_entry_rcu(entry, &asym_cap_list, link) { + cpumask_t *cpumask; + + cpumask = cpu_capacity_span(entry); + if (!cpumask_intersects(p->cpus_ptr, cpumask)) + continue; + + p->max_allowed_capacity = entry->capacity; + break; + } + rcu_read_unlock(); +} + +static void set_cpus_allowed_fair(struct task_struct *p, struct affinity_context *ctx) +{ + set_cpus_allowed_common(p, ctx); + set_task_max_allowed_capacity(p); +} + static int balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { @@ -8261,6 +8295,8 @@ balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) return sched_balance_newidle(rq, rf) != 0; } +#else +static inline void set_task_max_allowed_capacity(struct task_struct *p) {} #endif /* CONFIG_SMP */ static void set_next_buddy(struct sched_entity *se) @@ -9610,16 +9646,10 @@ check_cpu_capacity(struct rq *rq, struct sched_domain *sd) (arch_scale_cpu_capacity(cpu_of(rq)) * 100)); } -/* - * Check whether a rq has a misfit task and if it looks like we can actually - * help that task: we can migrate the task to a CPU of higher capacity, or - * the task's current CPU is heavily pressured. - */ -static inline int check_misfit_status(struct rq *rq, struct sched_domain *sd) +/* Check if the rq has a misfit task */ +static inline bool check_misfit_status(struct rq *rq) { - return rq->misfit_task_load && - (arch_scale_cpu_capacity(rq->cpu) < rq->rd->max_cpu_capacity || - check_cpu_capacity(rq, sd)); + return rq->misfit_task_load; } /* @@ -11923,7 +11953,7 @@ static void nohz_balancer_kick(struct rq *rq) * When ASYM_CPUCAPACITY; see if there's a higher capacity CPU * to run the misfit task on. */ - if (check_misfit_status(rq, sd)) { + if (check_misfit_status(rq)) { flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK; goto unlock; } @@ -12648,6 +12678,8 @@ static void task_fork_fair(struct task_struct *p) rq_lock(rq, &rf); update_rq_clock(rq); + set_task_max_allowed_capacity(p); + cfs_rq = task_cfs_rq(current); curr = cfs_rq->curr; if (curr) @@ -12771,6 +12803,8 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p) { attach_task_cfs_rq(p); + set_task_max_allowed_capacity(p); + if (task_on_rq_queued(p)) { /* * We were most likely switched from sched_rt, so @@ -13142,7 +13176,7 @@ DEFINE_SCHED_CLASS(fair) = { .rq_offline = rq_offline_fair, .task_dead = task_dead_fair, - .set_cpus_allowed = set_cpus_allowed_common, + .set_cpus_allowed = set_cpus_allowed_fair, #endif .task_tick = task_tick_fair, -- cgit From b7793a1a2f370c28b17d9554b58e9dc51afcfcbd Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 18 Mar 2024 18:53:17 +0200 Subject: wifi: ieee80211: check for NULL in ieee80211_mle_size_ok() For simplicity, we may want to pass a NULL element, and while we should then pass also a zero length, just be a bit more careful here. 
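
A hedged caller-side sketch of what the relaxed check permits; the element
lookup and the wrapper below are illustrative, not taken from mac80211:

  #include <linux/ieee80211.h>
  #include <net/cfg80211.h>

  static bool ml_elem_size_ok(const u8 *ies, size_t ies_len)
  {
          const struct element *elem =
                  cfg80211_find_ext_elem(WLAN_EID_EXT_EHT_MULTI_LINK, ies, ies_len);
          const u8 *data = NULL;
          size_t len = 0;

          if (elem) {
                  data = elem->data + 1; /* skip the Element ID Extension octet */
                  len = elem->datalen - 1;
          }

          /* With this patch, passing data == NULL / len == 0 is safe. */
          return ieee80211_mle_size_ok(data, len);
  }
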
Signed-off-by: Johannes Berg Signed-off-by: Miri Korenblit Link: https://msgid.link/20240318184907.4d983653cb8d.Ic3ea99b60c61ac2f7d38cb9fd202a03c97a05601@changeid Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 3385a2cc5b09..a99f048404b8 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -5166,7 +5166,7 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len) bool check_common_len = false; u16 control; - if (len < fixed) + if (!data || len < fixed) return false; control = le16_to_cpu(mle->control); -- cgit From c121514df0daa800cc500dc2738e0b8a1c54af98 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 18 Mar 2024 18:53:18 +0200 Subject: wifi: ieee80211: fix ieee80211_mle_basic_sta_prof_size_ok() If there was a possibility of an MLE basic STA profile without subelements, we might reject it because we account for the one octet for sta_info_len twice (it's part of itself, and in the fixed portion). Like in ieee80211_mle_reconf_sta_prof_size_ok, subtract 1 to adjust that. When reading the elements we did take this into account, and since there are always elements, this never really mattered. Fixes: 7b6f08771bf6 ("wifi: ieee80211: Support validating ML station profile length") Signed-off-by: Johannes Berg Reviewed-by: Ilan Peer Signed-off-by: Miri Korenblit Link: https://msgid.link/20240318184907.00bb0b20ed60.I8c41dd6fc14c4b187ab901dea15ade73c79fb98c@changeid Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index a99f048404b8..4fd9735bb75e 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -5302,7 +5302,7 @@ static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data, info_len += 1; return prof->sta_info_len >= info_len && - fixed + prof->sta_info_len <= len; + fixed + prof->sta_info_len - 1 <= len; } /** -- cgit From 56cc479188f70bbb6eacbe5ed1e43d96c91ccc6a Mon Sep 17 00:00:00 2001 From: Anjaneyulu Date: Mon, 18 Mar 2024 18:53:23 +0200 Subject: wifi: mac80211: handle indoor AFC/LPI AP on assoc success Update power_type in bss_conf based on Indoor AFC and LPI power types received in HE 6 GHz operation element on assoc success. 
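
For reference, the mapping applied in mlme.c can be viewed in isolation
roughly as follows (a sketch, not the patch itself; the helper name is made
up):

  #include <linux/bitfield.h>
  #include <linux/ieee80211.h>
  #include <net/cfg80211.h>

  static enum ieee80211_ap_reg_power
  he_6ghz_oper_to_reg_power(const struct ieee80211_he_6ghz_oper *oper)
  {
          switch (u8_get_bits(oper->control,
                              IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) {
          case IEEE80211_6GHZ_CTRL_REG_LPI_AP:
          case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP:     /* new */
                  return IEEE80211_REG_LPI_AP;
          case IEEE80211_6GHZ_CTRL_REG_SP_AP:
          case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP:      /* new */
                  return IEEE80211_REG_SP_AP;
          case IEEE80211_6GHZ_CTRL_REG_VLP_AP:
                  return IEEE80211_REG_VLP_AP;
          default:
                  return IEEE80211_REG_UNSET_AP;
          }
  }
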
Signed-off-by: Anjaneyulu Signed-off-by: Miri Korenblit Link: https://msgid.link/20240318184907.89c25dae34ff.Ifd8b2983f400623ac03dc032fc9a20025c9ca365@changeid Signed-off-by: Johannes Berg --- include/linux/ieee80211.h | 8 +++++--- net/mac80211/mlme.c | 2 ++ 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 4fd9735bb75e..7f9b829dcb1e 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -2742,9 +2742,11 @@ static inline bool ieee80211_he_capa_size_ok(const u8 *data, u8 len) #define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000 #define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000 -#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0 -#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1 -#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2 +#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0 +#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1 +#define IEEE80211_6GHZ_CTRL_REG_VLP_AP 2 +#define IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP 3 +#define IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP 4 /** * struct ieee80211_he_6ghz_oper - HE 6 GHz operation Information field diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 47a2cba8313f..8cd3253c6188 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4429,9 +4429,11 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, switch (u8_get_bits(he_6ghz_oper->control, IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) { case IEEE80211_6GHZ_CTRL_REG_LPI_AP: + case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP: bss_conf->power_type = IEEE80211_REG_LPI_AP; break; case IEEE80211_6GHZ_CTRL_REG_SP_AP: + case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP: bss_conf->power_type = IEEE80211_REG_SP_AP; break; case IEEE80211_6GHZ_CTRL_REG_VLP_AP: -- cgit From 770546ae9f4c1ae1ebcaf0874f0dd9631d77ec97 Mon Sep 17 00:00:00 2001 From: Puranjay Mohan Date: Sun, 24 Mar 2024 18:32:26 +0000 Subject: bpf: implement insn_is_cast_user() helper for JITs Implement a helper function to check if an instruction is addr_space_cast from as(0) to as(1). Use this helper in the x86 JIT. Other JITs can use this helper when they add support for this instruction. Signed-off-by: Puranjay Mohan Link: https://lore.kernel.org/r/20240324183226.29674-1-puranjay12@gmail.com Signed-off-by: Alexei Starovoitov --- arch/x86/net/bpf_jit_comp.c | 3 +-- include/linux/filter.h | 10 ++++++++++ 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 4900b1ee019f..3b639d6f2f54 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1351,8 +1351,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image break; case BPF_ALU64 | BPF_MOV | BPF_X: - if (insn->off == BPF_ADDR_SPACE_CAST && - insn->imm == 1U << 16) { + if (insn_is_cast_user(insn)) { if (dst_reg != src_reg) /* 32-bit mov */ emit_mov_reg(&prog, false, dst_reg, src_reg); diff --git a/include/linux/filter.h b/include/linux/filter.h index c0d51bff8f96..44934b968b57 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -228,6 +228,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1; } +/* addr_space_cast from as(0) to as(1) is for converting bpf arena pointers + * to pointers in user vma. 
+ */ +static inline bool insn_is_cast_user(const struct bpf_insn *insn) +{ + return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && + insn->off == BPF_ADDR_SPACE_CAST && + insn->imm == 1U << 16; +} + /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ #define BPF_LD_IMM64(DST, IMM) \ BPF_LD_IMM64_RAW(DST, 0, IMM) -- cgit From 21a8f8a0eb35ceb21e2c9ddd87468bc3b5ac87c0 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Thu, 7 Mar 2024 19:33:00 +0530 Subject: irqchip: Add RISC-V incoming MSI controller early driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The RISC-V advanced interrupt architecture (AIA) specification defines a new MSI controller called incoming message signalled interrupt controller (IMSIC) which manages MSI on per-HART (or per-CPU) basis. It also supports IPIs as software injected MSIs. (For more details refer https://github.com/riscv/riscv-aia) Add an early irqchip driver for RISC-V IMSIC which sets up the IMSIC state and provide IPIs. Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Tested-by: Björn Töpel Reviewed-by: Björn Töpel Link: https://lore.kernel.org/r/20240307140307.646078-3-apatel@ventanamicro.com --- drivers/irqchip/Kconfig | 7 + drivers/irqchip/Makefile | 1 + drivers/irqchip/irq-riscv-imsic-early.c | 201 ++++++++ drivers/irqchip/irq-riscv-imsic-state.c | 865 ++++++++++++++++++++++++++++++++ drivers/irqchip/irq-riscv-imsic-state.h | 107 ++++ include/linux/cpuhotplug.h | 1 + include/linux/irqchip/riscv-imsic.h | 87 ++++ 7 files changed, 1269 insertions(+) create mode 100644 drivers/irqchip/irq-riscv-imsic-early.c create mode 100644 drivers/irqchip/irq-riscv-imsic-state.c create mode 100644 drivers/irqchip/irq-riscv-imsic-state.h create mode 100644 include/linux/irqchip/riscv-imsic.h (limited to 'include/linux') diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 72c07a12f5e1..8610810b80da 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -540,6 +540,13 @@ config RISCV_INTC depends on RISCV select IRQ_DOMAIN_HIERARCHY +config RISCV_IMSIC + bool + depends on RISCV + select IRQ_DOMAIN_HIERARCHY + select GENERIC_IRQ_MATRIX_ALLOCATOR + select GENERIC_MSI_IRQ + config SIFIVE_PLIC bool depends on RISCV diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index ec4a18380998..972aa9fecf96 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -95,6 +95,7 @@ obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o +obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o obj-$(CONFIG_IMX_IRQSTEER) += irq-imx-irqsteer.o diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c new file mode 100644 index 000000000000..886418ec06cb --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-early.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. 
+ */ + +#define pr_fmt(fmt) "riscv-imsic: " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "irq-riscv-imsic-state.h" + +static int imsic_parent_irq; + +#ifdef CONFIG_SMP +static void imsic_ipi_send(unsigned int cpu) +{ + struct imsic_local_config *local = per_cpu_ptr(imsic->global.local, cpu); + + writel_relaxed(IMSIC_IPI_ID, local->msi_va); +} + +static void imsic_ipi_starting_cpu(void) +{ + /* Enable IPIs for current CPU. */ + __imsic_id_set_enable(IMSIC_IPI_ID); +} + +static void imsic_ipi_dying_cpu(void) +{ + /* Disable IPIs for current CPU. */ + __imsic_id_clear_enable(IMSIC_IPI_ID); +} + +static int __init imsic_ipi_domain_init(void) +{ + int virq; + + /* Create IMSIC IPI multiplexing */ + virq = ipi_mux_create(IMSIC_NR_IPI, imsic_ipi_send); + if (virq <= 0) + return virq < 0 ? virq : -ENOMEM; + + /* Set vIRQ range */ + riscv_ipi_set_virq_range(virq, IMSIC_NR_IPI, true); + + /* Announce that IMSIC is providing IPIs */ + pr_info("%pfwP: providing IPIs using interrupt %d\n", imsic->fwnode, IMSIC_IPI_ID); + + return 0; +} +#else +static void imsic_ipi_starting_cpu(void) { } +static void imsic_ipi_dying_cpu(void) { } +static int __init imsic_ipi_domain_init(void) { return 0; } +#endif + +/* + * To handle an interrupt, we read the TOPEI CSR and write zero in one + * instruction. If TOPEI CSR is non-zero then we translate TOPEI.ID to + * Linux interrupt number and let Linux IRQ subsystem handle it. + */ +static void imsic_handle_irq(struct irq_desc *desc) +{ + struct irq_chip *chip = irq_desc_get_chip(desc); + int err, cpu = smp_processor_id(); + struct imsic_vector *vec; + unsigned long local_id; + + chained_irq_enter(chip, desc); + + while ((local_id = csr_swap(CSR_TOPEI, 0))) { + local_id >>= TOPEI_ID_SHIFT; + + if (local_id == IMSIC_IPI_ID) { + if (IS_ENABLED(CONFIG_SMP)) + ipi_mux_process(); + continue; + } + + if (unlikely(!imsic->base_domain)) + continue; + + vec = imsic_vector_from_local_id(cpu, local_id); + if (!vec) { + pr_warn_ratelimited("vector not found for local ID 0x%lx\n", local_id); + continue; + } + + err = generic_handle_domain_irq(imsic->base_domain, vec->hwirq); + if (unlikely(err)) + pr_warn_ratelimited("hwirq 0x%x mapping not found\n", vec->hwirq); + } + + chained_irq_exit(chip, desc); +} + +static int imsic_starting_cpu(unsigned int cpu) +{ + /* Mark per-CPU IMSIC state as online */ + imsic_state_online(); + + /* Enable per-CPU parent interrupt */ + enable_percpu_irq(imsic_parent_irq, irq_get_trigger_type(imsic_parent_irq)); + + /* Setup IPIs */ + imsic_ipi_starting_cpu(); + + /* + * Interrupts identities might have been enabled/disabled while + * this CPU was not running so sync-up local enable/disable state. 
+ */ + imsic_local_sync_all(); + + /* Enable local interrupt delivery */ + imsic_local_delivery(true); + + return 0; +} + +static int imsic_dying_cpu(unsigned int cpu) +{ + /* Cleanup IPIs */ + imsic_ipi_dying_cpu(); + + /* Mark per-CPU IMSIC state as offline */ + imsic_state_offline(); + + return 0; +} + +static int __init imsic_early_probe(struct fwnode_handle *fwnode) +{ + struct irq_domain *domain; + int rc; + + /* Find parent domain and register chained handler */ + domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); + if (!domain) { + pr_err("%pfwP: Failed to find INTC domain\n", fwnode); + return -ENOENT; + } + imsic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); + if (!imsic_parent_irq) { + pr_err("%pfwP: Failed to create INTC mapping\n", fwnode); + return -ENOENT; + } + + /* Initialize IPI domain */ + rc = imsic_ipi_domain_init(); + if (rc) { + pr_err("%pfwP: Failed to initialize IPI domain\n", fwnode); + return rc; + } + + /* Setup chained handler to the parent domain interrupt */ + irq_set_chained_handler(imsic_parent_irq, imsic_handle_irq); + + /* + * Setup cpuhp state (must be done after setting imsic_parent_irq) + * + * Don't disable per-CPU IMSIC file when CPU goes offline + * because this affects IPI and the masking/unmasking of + * virtual IPIs is done via generic IPI-Mux + */ + cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, "irqchip/riscv/imsic:starting", + imsic_starting_cpu, imsic_dying_cpu); + + return 0; +} + +static int __init imsic_early_dt_init(struct device_node *node, struct device_node *parent) +{ + struct fwnode_handle *fwnode = &node->fwnode; + int rc; + + /* Setup IMSIC state */ + rc = imsic_setup_state(fwnode); + if (rc) { + pr_err("%pfwP: failed to setup state (error %d)\n", fwnode, rc); + return rc; + } + + /* Do early setup of IPIs */ + rc = imsic_early_probe(fwnode); + if (rc) + return rc; + + /* Ensure that OF platform device gets probed */ + of_node_clear_flag(node, OF_POPULATED); + return 0; +} + +IRQCHIP_DECLARE(riscv_imsic, "riscv,imsics", imsic_early_dt_init); diff --git a/drivers/irqchip/irq-riscv-imsic-state.c b/drivers/irqchip/irq-riscv-imsic-state.c new file mode 100644 index 000000000000..5479f872e62b --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-state.c @@ -0,0 +1,865 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. 
+ */ + +#define pr_fmt(fmt) "riscv-imsic: " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "irq-riscv-imsic-state.h" + +#define IMSIC_DISABLE_EIDELIVERY 0 +#define IMSIC_ENABLE_EIDELIVERY 1 +#define IMSIC_DISABLE_EITHRESHOLD 1 +#define IMSIC_ENABLE_EITHRESHOLD 0 + +static inline void imsic_csr_write(unsigned long reg, unsigned long val) +{ + csr_write(CSR_ISELECT, reg); + csr_write(CSR_IREG, val); +} + +static inline unsigned long imsic_csr_read(unsigned long reg) +{ + csr_write(CSR_ISELECT, reg); + return csr_read(CSR_IREG); +} + +static inline unsigned long imsic_csr_read_clear(unsigned long reg, unsigned long val) +{ + csr_write(CSR_ISELECT, reg); + return csr_read_clear(CSR_IREG, val); +} + +static inline void imsic_csr_set(unsigned long reg, unsigned long val) +{ + csr_write(CSR_ISELECT, reg); + csr_set(CSR_IREG, val); +} + +static inline void imsic_csr_clear(unsigned long reg, unsigned long val) +{ + csr_write(CSR_ISELECT, reg); + csr_clear(CSR_IREG, val); +} + +struct imsic_priv *imsic; + +const struct imsic_global_config *imsic_get_global_config(void) +{ + return imsic ? &imsic->global : NULL; +} +EXPORT_SYMBOL_GPL(imsic_get_global_config); + +static bool __imsic_eix_read_clear(unsigned long id, bool pend) +{ + unsigned long isel, imask; + + isel = id / BITS_PER_LONG; + isel *= BITS_PER_LONG / IMSIC_EIPx_BITS; + isel += pend ? IMSIC_EIP0 : IMSIC_EIE0; + imask = BIT(id & (__riscv_xlen - 1)); + + return !!(imsic_csr_read_clear(isel, imask) & imask); +} + +static inline bool __imsic_id_read_clear_enabled(unsigned long id) +{ + return __imsic_eix_read_clear(id, false); +} + +static inline bool __imsic_id_read_clear_pending(unsigned long id) +{ + return __imsic_eix_read_clear(id, true); +} + +void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, bool val) +{ + unsigned long id = base_id, last_id = base_id + num_id; + unsigned long i, isel, ireg; + + while (id < last_id) { + isel = id / BITS_PER_LONG; + isel *= BITS_PER_LONG / IMSIC_EIPx_BITS; + isel += pend ? IMSIC_EIP0 : IMSIC_EIE0; + + /* + * Prepare the ID mask to be programmed in the + * IMSIC EIEx and EIPx registers. These registers + * are XLEN-wide and we must not touch IDs which + * are < base_id and >= (base_id + num_id). + */ + ireg = 0; + for (i = id & (__riscv_xlen - 1); id < last_id && i < __riscv_xlen; i++) { + ireg |= BIT(i); + id++; + } + + /* + * The IMSIC EIEx and EIPx registers are indirectly + * accessed via using ISELECT and IREG CSRs so we + * need to access these CSRs without getting preempted. + * + * All existing users of this function call this + * function with local IRQs disabled so we don't + * need to do anything special here. + */ + if (val) + imsic_csr_set(isel, ireg); + else + imsic_csr_clear(isel, ireg); + } +} + +static void __imsic_local_sync(struct imsic_local_priv *lpriv) +{ + struct imsic_local_config *mlocal; + struct imsic_vector *vec, *mvec; + int i; + + lockdep_assert_held(&lpriv->lock); + + for_each_set_bit(i, lpriv->dirty_bitmap, imsic->global.nr_ids + 1) { + if (!i || i == IMSIC_IPI_ID) + goto skip; + vec = &lpriv->vectors[i]; + + if (READ_ONCE(vec->enable)) + __imsic_id_set_enable(i); + else + __imsic_id_clear_enable(i); + + /* + * If the ID was being moved to a new ID on some other CPU + * then we can get a MSI during the movement so check the + * ID pending bit and re-trigger the new ID on other CPU + * using MMIO write. 
+ */ + mvec = READ_ONCE(vec->move); + WRITE_ONCE(vec->move, NULL); + if (mvec && mvec != vec) { + if (__imsic_id_read_clear_pending(i)) { + mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu); + writel_relaxed(mvec->local_id, mlocal->msi_va); + } + + imsic_vector_free(&lpriv->vectors[i]); + } + +skip: + bitmap_clear(lpriv->dirty_bitmap, i, 1); + } +} + +void imsic_local_sync_all(void) +{ + struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv); + unsigned long flags; + + raw_spin_lock_irqsave(&lpriv->lock, flags); + bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1); + __imsic_local_sync(lpriv); + raw_spin_unlock_irqrestore(&lpriv->lock, flags); +} + +void imsic_local_delivery(bool enable) +{ + if (enable) { + imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_ENABLE_EITHRESHOLD); + imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_ENABLE_EIDELIVERY); + return; + } + + imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_DISABLE_EIDELIVERY); + imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_DISABLE_EITHRESHOLD); +} + +#ifdef CONFIG_SMP +static void imsic_local_timer_callback(struct timer_list *timer) +{ + struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv); + unsigned long flags; + + raw_spin_lock_irqsave(&lpriv->lock, flags); + __imsic_local_sync(lpriv); + raw_spin_unlock_irqrestore(&lpriv->lock, flags); +} + +static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu) +{ + lockdep_assert_held(&lpriv->lock); + + /* + * The spinlock acquire/release semantics ensure that changes + * to vector enable, vector move and dirty bitmap are visible + * to the target CPU. + */ + + /* + * We schedule a timer on the target CPU if the target CPU is not + * same as the current CPU. An offline CPU will unconditionally + * synchronize IDs through imsic_starting_cpu() when the + * CPU is brought up. + */ + if (cpu_online(cpu)) { + if (cpu == smp_processor_id()) { + __imsic_local_sync(lpriv); + return; + } + + if (!timer_pending(&lpriv->timer)) { + lpriv->timer.expires = jiffies + 1; + add_timer_on(&lpriv->timer, cpu); + } + } +} +#else +static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu) +{ + lockdep_assert_held(&lpriv->lock); + __imsic_local_sync(lpriv); +} +#endif + +void imsic_vector_mask(struct imsic_vector *vec) +{ + struct imsic_local_priv *lpriv; + + lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu); + if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec)) + return; + + /* + * This function is called through Linux irq subsystem with + * irqs disabled so no need to save/restore irq flags. + */ + + raw_spin_lock(&lpriv->lock); + + WRITE_ONCE(vec->enable, false); + bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1); + __imsic_remote_sync(lpriv, vec->cpu); + + raw_spin_unlock(&lpriv->lock); +} + +void imsic_vector_unmask(struct imsic_vector *vec) +{ + struct imsic_local_priv *lpriv; + + lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu); + if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec)) + return; + + /* + * This function is called through Linux irq subsystem with + * irqs disabled so no need to save/restore irq flags. 
+ */ + + raw_spin_lock(&lpriv->lock); + + WRITE_ONCE(vec->enable, true); + bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1); + __imsic_remote_sync(lpriv, vec->cpu); + + raw_spin_unlock(&lpriv->lock); +} + +static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec, + bool new_enable, struct imsic_vector *new_move) +{ + unsigned long flags; + bool enabled; + + raw_spin_lock_irqsave(&lpriv->lock, flags); + + /* Update enable and move details */ + enabled = READ_ONCE(vec->enable); + WRITE_ONCE(vec->enable, new_enable); + WRITE_ONCE(vec->move, new_move); + + /* Mark the vector as dirty and synchronize */ + bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1); + __imsic_remote_sync(lpriv, vec->cpu); + + raw_spin_unlock_irqrestore(&lpriv->lock, flags); + + return enabled; +} + +void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec) +{ + struct imsic_local_priv *old_lpriv, *new_lpriv; + bool enabled; + + if (WARN_ON_ONCE(old_vec->cpu == new_vec->cpu)) + return; + + old_lpriv = per_cpu_ptr(imsic->lpriv, old_vec->cpu); + if (WARN_ON_ONCE(&old_lpriv->vectors[old_vec->local_id] != old_vec)) + return; + + new_lpriv = per_cpu_ptr(imsic->lpriv, new_vec->cpu); + if (WARN_ON_ONCE(&new_lpriv->vectors[new_vec->local_id] != new_vec)) + return; + + /* + * Move and re-trigger the new vector based on the pending + * state of the old vector because we might get a device + * interrupt on the old vector while device was being moved + * to the new vector. + */ + enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec); + imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec); +} + +#ifdef CONFIG_GENERIC_IRQ_DEBUGFS +void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind) +{ + struct imsic_local_priv *lpriv; + struct imsic_vector *mvec; + bool is_enabled; + + lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu); + if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec)) + return; + + is_enabled = imsic_vector_isenabled(vec); + mvec = imsic_vector_get_move(vec); + + seq_printf(m, "%*starget_cpu : %5u\n", ind, "", vec->cpu); + seq_printf(m, "%*starget_local_id : %5u\n", ind, "", vec->local_id); + seq_printf(m, "%*sis_reserved : %5u\n", ind, "", + (vec->local_id <= IMSIC_IPI_ID) ? 1 : 0); + seq_printf(m, "%*sis_enabled : %5u\n", ind, "", is_enabled ? 1 : 0); + seq_printf(m, "%*sis_move_pending : %5u\n", ind, "", mvec ? 
1 : 0); + if (mvec) { + seq_printf(m, "%*smove_cpu : %5u\n", ind, "", mvec->cpu); + seq_printf(m, "%*smove_local_id : %5u\n", ind, "", mvec->local_id); + } +} + +void imsic_vector_debug_show_summary(struct seq_file *m, int ind) +{ + irq_matrix_debug_show(m, imsic->matrix, ind); +} +#endif + +struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id) +{ + struct imsic_local_priv *lpriv = per_cpu_ptr(imsic->lpriv, cpu); + + if (!lpriv || imsic->global.nr_ids < local_id) + return NULL; + + return &lpriv->vectors[local_id]; +} + +struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask) +{ + struct imsic_vector *vec = NULL; + struct imsic_local_priv *lpriv; + unsigned long flags; + unsigned int cpu; + int local_id; + + raw_spin_lock_irqsave(&imsic->matrix_lock, flags); + local_id = irq_matrix_alloc(imsic->matrix, mask, false, &cpu); + raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags); + if (local_id < 0) + return NULL; + + lpriv = per_cpu_ptr(imsic->lpriv, cpu); + vec = &lpriv->vectors[local_id]; + vec->hwirq = hwirq; + vec->enable = false; + vec->move = NULL; + + return vec; +} + +void imsic_vector_free(struct imsic_vector *vec) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&imsic->matrix_lock, flags); + vec->hwirq = UINT_MAX; + irq_matrix_free(imsic->matrix, vec->cpu, vec->local_id, false); + raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags); +} + +static void __init imsic_local_cleanup(void) +{ + struct imsic_local_priv *lpriv; + int cpu; + + for_each_possible_cpu(cpu) { + lpriv = per_cpu_ptr(imsic->lpriv, cpu); + + bitmap_free(lpriv->dirty_bitmap); + kfree(lpriv->vectors); + } + + free_percpu(imsic->lpriv); +} + +static int __init imsic_local_init(void) +{ + struct imsic_global_config *global = &imsic->global; + struct imsic_local_priv *lpriv; + struct imsic_vector *vec; + int cpu, i; + + /* Allocate per-CPU private state */ + imsic->lpriv = alloc_percpu(typeof(*imsic->lpriv)); + if (!imsic->lpriv) + return -ENOMEM; + + /* Setup per-CPU private state */ + for_each_possible_cpu(cpu) { + lpriv = per_cpu_ptr(imsic->lpriv, cpu); + + raw_spin_lock_init(&lpriv->lock); + + /* Allocate dirty bitmap */ + lpriv->dirty_bitmap = bitmap_zalloc(global->nr_ids + 1, GFP_KERNEL); + if (!lpriv->dirty_bitmap) + goto fail_local_cleanup; + +#ifdef CONFIG_SMP + /* Setup lazy timer for synchronization */ + timer_setup(&lpriv->timer, imsic_local_timer_callback, TIMER_PINNED); +#endif + + /* Allocate vector array */ + lpriv->vectors = kcalloc(global->nr_ids + 1, sizeof(*lpriv->vectors), + GFP_KERNEL); + if (!lpriv->vectors) + goto fail_local_cleanup; + + /* Setup vector array */ + for (i = 0; i <= global->nr_ids; i++) { + vec = &lpriv->vectors[i]; + vec->cpu = cpu; + vec->local_id = i; + vec->hwirq = UINT_MAX; + } + } + + return 0; + +fail_local_cleanup: + imsic_local_cleanup(); + return -ENOMEM; +} + +void imsic_state_online(void) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&imsic->matrix_lock, flags); + irq_matrix_online(imsic->matrix); + raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags); +} + +void imsic_state_offline(void) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&imsic->matrix_lock, flags); + irq_matrix_offline(imsic->matrix); + raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags); + +#ifdef CONFIG_SMP + struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv); + + raw_spin_lock_irqsave(&lpriv->lock, flags); + WARN_ON_ONCE(try_to_del_timer_sync(&lpriv->timer) < 0); + 
raw_spin_unlock_irqrestore(&lpriv->lock, flags); +#endif +} + +static int __init imsic_matrix_init(void) +{ + struct imsic_global_config *global = &imsic->global; + + raw_spin_lock_init(&imsic->matrix_lock); + imsic->matrix = irq_alloc_matrix(global->nr_ids + 1, + 0, global->nr_ids + 1); + if (!imsic->matrix) + return -ENOMEM; + + /* Reserve ID#0 because it is special and never implemented */ + irq_matrix_assign_system(imsic->matrix, 0, false); + + /* Reserve IPI ID because it is special and used internally */ + irq_matrix_assign_system(imsic->matrix, IMSIC_IPI_ID, false); + + return 0; +} + +static int __init imsic_get_parent_hartid(struct fwnode_handle *fwnode, + u32 index, unsigned long *hartid) +{ + struct of_phandle_args parent; + int rc; + + /* + * Currently, only OF fwnode is supported so extend this + * function for ACPI support. + */ + if (!is_of_node(fwnode)) + return -EINVAL; + + rc = of_irq_parse_one(to_of_node(fwnode), index, &parent); + if (rc) + return rc; + + /* + * Skip interrupts other than external interrupts for + * current privilege level. + */ + if (parent.args[0] != RV_IRQ_EXT) + return -EINVAL; + + return riscv_of_parent_hartid(parent.np, hartid); +} + +static int __init imsic_get_mmio_resource(struct fwnode_handle *fwnode, + u32 index, struct resource *res) +{ + /* + * Currently, only OF fwnode is supported so extend this + * function for ACPI support. + */ + if (!is_of_node(fwnode)) + return -EINVAL; + + return of_address_to_resource(to_of_node(fwnode), index, res); +} + +static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode, + struct imsic_global_config *global, + u32 *nr_parent_irqs, + u32 *nr_mmios) +{ + unsigned long hartid; + struct resource res; + int rc; + u32 i; + + /* + * Currently, only OF fwnode is supported so extend this + * function for ACPI support. + */ + if (!is_of_node(fwnode)) + return -EINVAL; + + *nr_parent_irqs = 0; + *nr_mmios = 0; + + /* Find number of parent interrupts */ + while (!imsic_get_parent_hartid(fwnode, *nr_parent_irqs, &hartid)) + (*nr_parent_irqs)++; + if (!*nr_parent_irqs) { + pr_err("%pfwP: no parent irqs available\n", fwnode); + return -EINVAL; + } + + /* Find number of guest index bits in MSI address */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits", + &global->guest_index_bits); + if (rc) + global->guest_index_bits = 0; + + /* Find number of HART index bits */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits", + &global->hart_index_bits); + if (rc) { + /* Assume default value */ + global->hart_index_bits = __fls(*nr_parent_irqs); + if (BIT(global->hart_index_bits) < *nr_parent_irqs) + global->hart_index_bits++; + } + + /* Find number of group index bits */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits", + &global->group_index_bits); + if (rc) + global->group_index_bits = 0; + + /* + * Find first bit position of group index. + * If not specified assumed the default APLIC-IMSIC configuration. 
+ */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift", + &global->group_index_shift); + if (rc) + global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2; + + /* Find number of interrupt identities */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids", + &global->nr_ids); + if (rc) { + pr_err("%pfwP: number of interrupt identities not found\n", fwnode); + return rc; + } + + /* Find number of guest interrupt identities */ + rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids", + &global->nr_guest_ids); + if (rc) + global->nr_guest_ids = global->nr_ids; + + /* Sanity check guest index bits */ + i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT; + if (i < global->guest_index_bits) { + pr_err("%pfwP: guest index bits too big\n", fwnode); + return -EINVAL; + } + + /* Sanity check HART index bits */ + i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT - global->guest_index_bits; + if (i < global->hart_index_bits) { + pr_err("%pfwP: HART index bits too big\n", fwnode); + return -EINVAL; + } + + /* Sanity check group index bits */ + i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT - + global->guest_index_bits - global->hart_index_bits; + if (i < global->group_index_bits) { + pr_err("%pfwP: group index bits too big\n", fwnode); + return -EINVAL; + } + + /* Sanity check group index shift */ + i = global->group_index_bits + global->group_index_shift - 1; + if (i >= BITS_PER_LONG) { + pr_err("%pfwP: group index shift too big\n", fwnode); + return -EINVAL; + } + + /* Sanity check number of interrupt identities */ + if (global->nr_ids < IMSIC_MIN_ID || + global->nr_ids >= IMSIC_MAX_ID || + (global->nr_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) { + pr_err("%pfwP: invalid number of interrupt identities\n", fwnode); + return -EINVAL; + } + + /* Sanity check number of guest interrupt identities */ + if (global->nr_guest_ids < IMSIC_MIN_ID || + global->nr_guest_ids >= IMSIC_MAX_ID || + (global->nr_guest_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) { + pr_err("%pfwP: invalid number of guest interrupt identities\n", fwnode); + return -EINVAL; + } + + /* Compute base address */ + rc = imsic_get_mmio_resource(fwnode, 0, &res); + if (rc) { + pr_err("%pfwP: first MMIO resource not found\n", fwnode); + return -EINVAL; + } + global->base_addr = res.start; + global->base_addr &= ~(BIT(global->guest_index_bits + + global->hart_index_bits + + IMSIC_MMIO_PAGE_SHIFT) - 1); + global->base_addr &= ~((BIT(global->group_index_bits) - 1) << + global->group_index_shift); + + /* Find number of MMIO register sets */ + while (!imsic_get_mmio_resource(fwnode, *nr_mmios, &res)) + (*nr_mmios)++; + + return 0; +} + +int __init imsic_setup_state(struct fwnode_handle *fwnode) +{ + u32 i, j, index, nr_parent_irqs, nr_mmios, nr_handlers = 0; + struct imsic_global_config *global; + struct imsic_local_config *local; + void __iomem **mmios_va = NULL; + struct resource *mmios = NULL; + unsigned long reloff, hartid; + phys_addr_t base_addr; + int rc, cpu; + + /* + * Only one IMSIC instance allowed in a platform for clean + * implementation of SMP IRQ affinity and per-CPU IPIs. + * + * This means on a multi-socket (or multi-die) platform we + * will have multiple MMIO regions for one IMSIC instance. 
+ */
+ if (imsic) {
+ pr_err("%pfwP: already initialized hence ignoring\n", fwnode);
+ return -EALREADY;
+ }
+
+ if (!riscv_isa_extension_available(NULL, SxAIA)) {
+ pr_err("%pfwP: AIA support not available\n", fwnode);
+ return -ENODEV;
+ }
+
+ imsic = kzalloc(sizeof(*imsic), GFP_KERNEL);
+ if (!imsic)
+ return -ENOMEM;
+ imsic->fwnode = fwnode;
+ global = &imsic->global;
+
+ global->local = alloc_percpu(typeof(*global->local));
+ if (!global->local) {
+ rc = -ENOMEM;
+ goto out_free_priv;
+ }
+
+ /* Parse IMSIC fwnode */
+ rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios);
+ if (rc)
+ goto out_free_local;
+
+ /* Allocate MMIO resource array */
+ mmios = kcalloc(nr_mmios, sizeof(*mmios), GFP_KERNEL);
+ if (!mmios) {
+ rc = -ENOMEM;
+ goto out_free_local;
+ }
+
+ /* Allocate MMIO virtual address array */
+ mmios_va = kcalloc(nr_mmios, sizeof(*mmios_va), GFP_KERNEL);
+ if (!mmios_va) {
+ rc = -ENOMEM;
+ goto out_iounmap;
+ }
+
+ /* Parse and map MMIO register sets */
+ for (i = 0; i < nr_mmios; i++) {
+ rc = imsic_get_mmio_resource(fwnode, i, &mmios[i]);
+ if (rc) {
+ pr_err("%pfwP: unable to parse MMIO regset %d\n", fwnode, i);
+ goto out_iounmap;
+ }
+
+ base_addr = mmios[i].start;
+ base_addr &= ~(BIT(global->guest_index_bits +
+ global->hart_index_bits +
+ IMSIC_MMIO_PAGE_SHIFT) - 1);
+ base_addr &= ~((BIT(global->group_index_bits) - 1) <<
+ global->group_index_shift);
+ if (base_addr != global->base_addr) {
+ rc = -EINVAL;
+ pr_err("%pfwP: address mismatch for regset %d\n", fwnode, i);
+ goto out_iounmap;
+ }
+
+ mmios_va[i] = ioremap(mmios[i].start, resource_size(&mmios[i]));
+ if (!mmios_va[i]) {
+ rc = -EIO;
+ pr_err("%pfwP: unable to map MMIO regset %d\n", fwnode, i);
+ goto out_iounmap;
+ }
+ }
+
+ /* Initialize local (or per-CPU) state */
+ rc = imsic_local_init();
+ if (rc) {
+ pr_err("%pfwP: failed to initialize local state\n",
+ fwnode);
+ goto out_iounmap;
+ }
+
+ /* Configure handlers for target CPUs */
+ for (i = 0; i < nr_parent_irqs; i++) {
+ rc = imsic_get_parent_hartid(fwnode, i, &hartid);
+ if (rc) {
+ pr_warn("%pfwP: hart ID for parent irq%d not found\n", fwnode, i);
+ continue;
+ }
+
+ cpu = riscv_hartid_to_cpuid(hartid);
+ if (cpu < 0) {
+ pr_warn("%pfwP: invalid cpuid for parent irq%d\n", fwnode, i);
+ continue;
+ }
+
+ /* Find MMIO location of MSI page */
+ index = nr_mmios;
+ reloff = i * BIT(global->guest_index_bits) *
+ IMSIC_MMIO_PAGE_SZ;
+ for (j = 0; j < nr_mmios; j++) {
+ if (reloff < resource_size(&mmios[j])) {
+ index = j;
+ break;
+ }
+
+ /*
+ * MMIO region size may not be aligned to
+ * BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ
+ * if holes are present.
+ */ + reloff -= ALIGN(resource_size(&mmios[j]), + BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ); + } + if (index >= nr_mmios) { + pr_warn("%pfwP: MMIO not found for parent irq%d\n", fwnode, i); + continue; + } + + local = per_cpu_ptr(global->local, cpu); + local->msi_pa = mmios[index].start + reloff; + local->msi_va = mmios_va[index] + reloff; + + nr_handlers++; + } + + /* If no CPU handlers found then can't take interrupts */ + if (!nr_handlers) { + pr_err("%pfwP: No CPU handlers found\n", fwnode); + rc = -ENODEV; + goto out_local_cleanup; + } + + /* Initialize matrix allocator */ + rc = imsic_matrix_init(); + if (rc) { + pr_err("%pfwP: failed to create matrix allocator\n", fwnode); + goto out_local_cleanup; + } + + /* We don't need MMIO arrays anymore so let's free-up */ + kfree(mmios_va); + kfree(mmios); + + return 0; + +out_local_cleanup: + imsic_local_cleanup(); +out_iounmap: + for (i = 0; i < nr_mmios; i++) { + if (mmios_va[i]) + iounmap(mmios_va[i]); + } + kfree(mmios_va); + kfree(mmios); +out_free_local: + free_percpu(imsic->global.local); +out_free_priv: + kfree(imsic); + imsic = NULL; + return rc; +} diff --git a/drivers/irqchip/irq-riscv-imsic-state.h b/drivers/irqchip/irq-riscv-imsic-state.h new file mode 100644 index 000000000000..8ec9649d0d01 --- /dev/null +++ b/drivers/irqchip/irq-riscv-imsic-state.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + +#ifndef _IRQ_RISCV_IMSIC_STATE_H +#define _IRQ_RISCV_IMSIC_STATE_H + +#include +#include +#include +#include + +#define IMSIC_IPI_ID 1 +#define IMSIC_NR_IPI 8 + +struct imsic_vector { + /* Fixed details of the vector */ + unsigned int cpu; + unsigned int local_id; + /* Details saved by driver in the vector */ + unsigned int hwirq; + /* Details accessed using local lock held */ + bool enable; + struct imsic_vector *move; +}; + +struct imsic_local_priv { + /* Local lock to protect vector enable/move variables and dirty bitmap */ + raw_spinlock_t lock; + + /* Local dirty bitmap for synchronization */ + unsigned long *dirty_bitmap; + +#ifdef CONFIG_SMP + /* Local timer for synchronization */ + struct timer_list timer; +#endif + + /* Local vector table */ + struct imsic_vector *vectors; +}; + +struct imsic_priv { + /* Device details */ + struct fwnode_handle *fwnode; + + /* Global configuration common for all HARTs */ + struct imsic_global_config global; + + /* Per-CPU state */ + struct imsic_local_priv __percpu *lpriv; + + /* State of IRQ matrix allocator */ + raw_spinlock_t matrix_lock; + struct irq_matrix *matrix; + + /* IRQ domains (created by platform driver) */ + struct irq_domain *base_domain; +}; + +extern struct imsic_priv *imsic; + +void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, bool val); + +static inline void __imsic_id_set_enable(unsigned long id) +{ + __imsic_eix_update(id, 1, false, true); +} + +static inline void __imsic_id_clear_enable(unsigned long id) +{ + __imsic_eix_update(id, 1, false, false); +} + +void imsic_local_sync_all(void); +void imsic_local_delivery(bool enable); + +void imsic_vector_mask(struct imsic_vector *vec); +void imsic_vector_unmask(struct imsic_vector *vec); + +static inline bool imsic_vector_isenabled(struct imsic_vector *vec) +{ + return READ_ONCE(vec->enable); +} + +static inline struct imsic_vector *imsic_vector_get_move(struct imsic_vector *vec) +{ + return READ_ONCE(vec->move); +} + +void 
imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec); + +struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id); + +struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask); +void imsic_vector_free(struct imsic_vector *vector); + +void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind); +void imsic_vector_debug_show_summary(struct seq_file *m, int ind); + +void imsic_state_online(void); +void imsic_state_offline(void); +int imsic_setup_state(struct fwnode_handle *fwnode); + +#endif diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 35e78ddb2b37..7a5785f405b6 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -146,6 +146,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_MIPS_GIC_STARTING, CPUHP_AP_IRQ_LOONGARCH_STARTING, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, + CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, CPUHP_AP_PERF_X86_STARTING, diff --git a/include/linux/irqchip/riscv-imsic.h b/include/linux/irqchip/riscv-imsic.h new file mode 100644 index 000000000000..faf0b800b1b0 --- /dev/null +++ b/include/linux/irqchip/riscv-imsic.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ +#ifndef __LINUX_IRQCHIP_RISCV_IMSIC_H +#define __LINUX_IRQCHIP_RISCV_IMSIC_H + +#include +#include +#include + +#define IMSIC_MMIO_PAGE_SHIFT 12 +#define IMSIC_MMIO_PAGE_SZ BIT(IMSIC_MMIO_PAGE_SHIFT) +#define IMSIC_MMIO_PAGE_LE 0x00 +#define IMSIC_MMIO_PAGE_BE 0x04 + +#define IMSIC_MIN_ID 63 +#define IMSIC_MAX_ID 2048 + +#define IMSIC_EIDELIVERY 0x70 + +#define IMSIC_EITHRESHOLD 0x72 + +#define IMSIC_EIP0 0x80 +#define IMSIC_EIP63 0xbf +#define IMSIC_EIPx_BITS 32 + +#define IMSIC_EIE0 0xc0 +#define IMSIC_EIE63 0xff +#define IMSIC_EIEx_BITS 32 + +#define IMSIC_FIRST IMSIC_EIDELIVERY +#define IMSIC_LAST IMSIC_EIE63 + +#define IMSIC_MMIO_SETIPNUM_LE 0x00 +#define IMSIC_MMIO_SETIPNUM_BE 0x04 + +struct imsic_local_config { + phys_addr_t msi_pa; + void __iomem *msi_va; +}; + +struct imsic_global_config { + /* + * MSI Target Address Scheme + * + * XLEN-1 12 0 + * | | | + * ------------------------------------------------------------- + * |xxxxxx|Group Index|xxxxxxxxxxx|HART Index|Guest Index| 0 | + * ------------------------------------------------------------- + */ + + /* Bits representing Guest index, HART index, and Group index */ + u32 guest_index_bits; + u32 hart_index_bits; + u32 group_index_bits; + u32 group_index_shift; + + /* Global base address matching all target MSI addresses */ + phys_addr_t base_addr; + + /* Number of interrupt identities */ + u32 nr_ids; + + /* Number of guest interrupt identities */ + u32 nr_guest_ids; + + /* Per-CPU IMSIC addresses */ + struct imsic_local_config __percpu *local; +}; + +#ifdef CONFIG_RISCV_IMSIC + +const struct imsic_global_config *imsic_get_global_config(void); + +#else + +static inline const struct imsic_global_config *imsic_get_global_config(void) +{ + return NULL; +} + +#endif + +#endif -- cgit From 2333df5ae51ead2188d07c99e841e159a664741e Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Thu, 7 Mar 2024 19:33:04 +0530 Subject: irqchip: Add RISC-V advanced PLIC driver for direct-mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The RISC-V advanced interrupt architecture (AIA) 
specification defines an advanced platform-level interrupt controller (APLIC), which has two modes of operation: 1) Direct mode and 2) MSI mode. (For more details, refer https://github.com/riscv/riscv-aia)

In APLIC direct-mode, wired interrupts are forwarded to CPUs (or HARTs) as local external interrupts.

Add a platform irqchip driver for the RISC-V APLIC direct-mode to support RISC-V platforms having only wired interrupts.

Signed-off-by: Anup Patel Signed-off-by: Thomas Gleixner Tested-by: Björn Töpel Reviewed-by: Björn Töpel Link: https://lore.kernel.org/r/20240307140307.646078-7-apatel@ventanamicro.com
---
 drivers/irqchip/Kconfig | 5 +
 drivers/irqchip/Makefile | 1 +
 drivers/irqchip/irq-riscv-aplic-direct.c | 326 +++++++++++++++++++++++++++++++
 drivers/irqchip/irq-riscv-aplic-main.c | 211 ++++++++++++++++++++
 drivers/irqchip/irq-riscv-aplic-main.h | 44 +++++
 include/linux/irqchip/riscv-aplic.h | 145 ++++++++++++++
 6 files changed, 732 insertions(+)
 create mode 100644 drivers/irqchip/irq-riscv-aplic-direct.c
 create mode 100644 drivers/irqchip/irq-riscv-aplic-main.c
 create mode 100644 drivers/irqchip/irq-riscv-aplic-main.h
 create mode 100644 include/linux/irqchip/riscv-aplic.h
(limited to 'include/linux')

diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 8f6ce419ba4c..f6728471f592 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -540,6 +540,11 @@ config RISCV_INTC
 depends on RISCV
 select IRQ_DOMAIN_HIERARCHY
 
+config RISCV_APLIC
+ bool
+ depends on RISCV
+ select IRQ_DOMAIN_HIERARCHY
+
 config RISCV_IMSIC
 bool
 depends on RISCV
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index c1390980b4eb..5adbe7a6f604 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -95,6 +95,7 @@ obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o
 obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
 obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
 obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
+obj-$(CONFIG_RISCV_APLIC) += irq-riscv-aplic-main.o irq-riscv-aplic-direct.o
 obj-$(CONFIG_RISCV_IMSIC) += irq-riscv-imsic-state.o irq-riscv-imsic-early.o irq-riscv-imsic-platform.o
 obj-$(CONFIG_SIFIVE_PLIC) += irq-sifive-plic.o
 obj-$(CONFIG_STARFIVE_JH8100_INTC) += irq-starfive-jh8100-intc.o
diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c
new file mode 100644
index 000000000000..06bace9b7497
--- /dev/null
+++ b/drivers/irqchip/irq-riscv-aplic-direct.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "irq-riscv-aplic-main.h"
+
+#define APLIC_DISABLE_IDELIVERY 0
+#define APLIC_ENABLE_IDELIVERY 1
+#define APLIC_DISABLE_ITHRESHOLD 1
+#define APLIC_ENABLE_ITHRESHOLD 0
+
+struct aplic_direct {
+ struct aplic_priv priv;
+ struct irq_domain *irqdomain;
+ struct cpumask lmask;
+};
+
+struct aplic_idc {
+ unsigned int hart_index;
+ void __iomem *regs;
+ struct aplic_direct *direct;
+};
+
+static unsigned int aplic_direct_parent_irq;
+static DEFINE_PER_CPU(struct aplic_idc, aplic_idcs);
+
+static void aplic_direct_irq_eoi(struct irq_data *d)
+{
+ /*
+ * The fasteoi_handler requires an irq_eoi() callback, hence
+ * provide a dummy handler.
+ */ +} + +#ifdef CONFIG_SMP +static int aplic_direct_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + struct aplic_priv *priv = irq_data_get_irq_chip_data(d); + struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv); + struct aplic_idc *idc; + unsigned int cpu, val; + struct cpumask amask; + void __iomem *target; + + cpumask_and(&amask, &direct->lmask, mask_val); + + if (force) + cpu = cpumask_first(&amask); + else + cpu = cpumask_any_and(&amask, cpu_online_mask); + + if (cpu >= nr_cpu_ids) + return -EINVAL; + + idc = per_cpu_ptr(&aplic_idcs, cpu); + target = priv->regs + APLIC_TARGET_BASE + (d->hwirq - 1) * sizeof(u32); + val = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index); + val |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY); + writel(val, target); + + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + return IRQ_SET_MASK_OK_DONE; +} +#endif + +static struct irq_chip aplic_direct_chip = { + .name = "APLIC-DIRECT", + .irq_mask = aplic_irq_mask, + .irq_unmask = aplic_irq_unmask, + .irq_set_type = aplic_irq_set_type, + .irq_eoi = aplic_direct_irq_eoi, +#ifdef CONFIG_SMP + .irq_set_affinity = aplic_direct_set_affinity, +#endif + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static int aplic_direct_irqdomain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, + unsigned long *hwirq, unsigned int *type) +{ + struct aplic_priv *priv = d->host_data; + + return aplic_irqdomain_translate(fwspec, priv->gsi_base, hwirq, type); +} + +static int aplic_direct_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + struct aplic_priv *priv = domain->host_data; + struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv); + struct irq_fwspec *fwspec = arg; + irq_hw_number_t hwirq; + unsigned int type; + int i, ret; + + ret = aplic_irqdomain_translate(fwspec, priv->gsi_base, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + irq_domain_set_info(domain, virq + i, hwirq + i, &aplic_direct_chip, + priv, handle_fasteoi_irq, NULL, NULL); + irq_set_affinity(virq + i, &direct->lmask); + } + + return 0; +} + +static const struct irq_domain_ops aplic_direct_irqdomain_ops = { + .translate = aplic_direct_irqdomain_translate, + .alloc = aplic_direct_irqdomain_alloc, + .free = irq_domain_free_irqs_top, +}; + +/* + * To handle an APLIC direct interrupts, we just read the CLAIMI register + * which will return highest priority pending interrupt and clear the + * pending bit of the interrupt. This process is repeated until CLAIMI + * register return zero value. + */ +static void aplic_direct_handle_irq(struct irq_desc *desc) +{ + struct aplic_idc *idc = this_cpu_ptr(&aplic_idcs); + struct irq_domain *irqdomain = idc->direct->irqdomain; + struct irq_chip *chip = irq_desc_get_chip(desc); + irq_hw_number_t hw_irq; + int irq; + + chained_irq_enter(chip, desc); + + while ((hw_irq = readl(idc->regs + APLIC_IDC_CLAIMI))) { + hw_irq = hw_irq >> APLIC_IDC_TOPI_ID_SHIFT; + irq = irq_find_mapping(irqdomain, hw_irq); + + if (unlikely(irq <= 0)) { + dev_warn_ratelimited(idc->direct->priv.dev, + "hw_irq %lu mapping not found\n", hw_irq); + } else { + generic_handle_irq(irq); + } + } + + chained_irq_exit(chip, desc); +} + +static void aplic_idc_set_delivery(struct aplic_idc *idc, bool en) +{ + u32 de = (en) ? APLIC_ENABLE_IDELIVERY : APLIC_DISABLE_IDELIVERY; + u32 th = (en) ? 
APLIC_ENABLE_ITHRESHOLD : APLIC_DISABLE_ITHRESHOLD; + + /* Priority must be less than threshold for interrupt triggering */ + writel(th, idc->regs + APLIC_IDC_ITHRESHOLD); + + /* Delivery must be set to 1 for interrupt triggering */ + writel(de, idc->regs + APLIC_IDC_IDELIVERY); +} + +static int aplic_direct_dying_cpu(unsigned int cpu) +{ + if (aplic_direct_parent_irq) + disable_percpu_irq(aplic_direct_parent_irq); + + return 0; +} + +static int aplic_direct_starting_cpu(unsigned int cpu) +{ + if (aplic_direct_parent_irq) { + enable_percpu_irq(aplic_direct_parent_irq, + irq_get_trigger_type(aplic_direct_parent_irq)); + } + + return 0; +} + +static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index, + u32 *parent_hwirq, unsigned long *parent_hartid) +{ + struct of_phandle_args parent; + int rc; + + /* + * Currently, only OF fwnode is supported so extend this + * function for ACPI support. + */ + if (!is_of_node(dev->fwnode)) + return -EINVAL; + + rc = of_irq_parse_one(to_of_node(dev->fwnode), index, &parent); + if (rc) + return rc; + + rc = riscv_of_parent_hartid(parent.np, parent_hartid); + if (rc) + return rc; + + *parent_hwirq = parent.args[0]; + return 0; +} + +int aplic_direct_setup(struct device *dev, void __iomem *regs) +{ + int i, j, rc, cpu, current_cpu, setup_count = 0; + struct aplic_direct *direct; + struct irq_domain *domain; + struct aplic_priv *priv; + struct aplic_idc *idc; + unsigned long hartid; + u32 v, hwirq; + + direct = devm_kzalloc(dev, sizeof(*direct), GFP_KERNEL); + if (!direct) + return -ENOMEM; + priv = &direct->priv; + + rc = aplic_setup_priv(priv, dev, regs); + if (rc) { + dev_err(dev, "failed to create APLIC context\n"); + return rc; + } + + /* Setup per-CPU IDC and target CPU mask */ + current_cpu = get_cpu(); + for (i = 0; i < priv->nr_idcs; i++) { + rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid); + if (rc) { + dev_warn(dev, "parent irq for IDC%d not found\n", i); + continue; + } + + /* + * Skip interrupts other than external interrupts for + * current privilege level. + */ + if (hwirq != RV_IRQ_EXT) + continue; + + cpu = riscv_hartid_to_cpuid(hartid); + if (cpu < 0) { + dev_warn(dev, "invalid cpuid for IDC%d\n", i); + continue; + } + + cpumask_set_cpu(cpu, &direct->lmask); + + idc = per_cpu_ptr(&aplic_idcs, cpu); + idc->hart_index = i; + idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE; + idc->direct = direct; + + aplic_idc_set_delivery(idc, true); + + /* + * Boot cpu might not have APLIC hart_index = 0 so check + * and update target registers of all interrupts. 
+ */ + if (cpu == current_cpu && idc->hart_index) { + v = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index); + v |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY); + for (j = 1; j <= priv->nr_irqs; j++) + writel(v, priv->regs + APLIC_TARGET_BASE + (j - 1) * sizeof(u32)); + } + + setup_count++; + } + put_cpu(); + + /* Find parent domain and register chained handler */ + domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), + DOMAIN_BUS_ANY); + if (!aplic_direct_parent_irq && domain) { + aplic_direct_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); + if (aplic_direct_parent_irq) { + irq_set_chained_handler(aplic_direct_parent_irq, + aplic_direct_handle_irq); + + /* + * Setup CPUHP notifier to enable parent + * interrupt on all CPUs + */ + cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "irqchip/riscv/aplic:starting", + aplic_direct_starting_cpu, + aplic_direct_dying_cpu); + } + } + + /* Fail if we were not able to setup IDC for any CPU */ + if (!setup_count) + return -ENODEV; + + /* Setup global config and interrupt delivery */ + aplic_init_hw_global(priv, false); + + /* Create irq domain instance for the APLIC */ + direct->irqdomain = irq_domain_create_linear(dev->fwnode, priv->nr_irqs + 1, + &aplic_direct_irqdomain_ops, priv); + if (!direct->irqdomain) { + dev_err(dev, "failed to create direct irq domain\n"); + return -ENOMEM; + } + + /* Advertise the interrupt controller */ + dev_info(dev, "%d interrupts directly connected to %d CPUs\n", + priv->nr_irqs, priv->nr_idcs); + + return 0; +} diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c new file mode 100644 index 000000000000..160ff99d6979 --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-main.c @@ -0,0 +1,211 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "irq-riscv-aplic-main.h" + +void aplic_irq_unmask(struct irq_data *d) +{ + struct aplic_priv *priv = irq_data_get_irq_chip_data(d); + + writel(d->hwirq, priv->regs + APLIC_SETIENUM); +} + +void aplic_irq_mask(struct irq_data *d) +{ + struct aplic_priv *priv = irq_data_get_irq_chip_data(d); + + writel(d->hwirq, priv->regs + APLIC_CLRIENUM); +} + +int aplic_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct aplic_priv *priv = irq_data_get_irq_chip_data(d); + void __iomem *sourcecfg; + u32 val = 0; + + switch (type) { + case IRQ_TYPE_NONE: + val = APLIC_SOURCECFG_SM_INACTIVE; + break; + case IRQ_TYPE_LEVEL_LOW: + val = APLIC_SOURCECFG_SM_LEVEL_LOW; + break; + case IRQ_TYPE_LEVEL_HIGH: + val = APLIC_SOURCECFG_SM_LEVEL_HIGH; + break; + case IRQ_TYPE_EDGE_FALLING: + val = APLIC_SOURCECFG_SM_EDGE_FALL; + break; + case IRQ_TYPE_EDGE_RISING: + val = APLIC_SOURCECFG_SM_EDGE_RISE; + break; + default: + return -EINVAL; + } + + sourcecfg = priv->regs + APLIC_SOURCECFG_BASE; + sourcecfg += (d->hwirq - 1) * sizeof(u32); + writel(val, sourcecfg); + + return 0; +} + +int aplic_irqdomain_translate(struct irq_fwspec *fwspec, u32 gsi_base, + unsigned long *hwirq, unsigned int *type) +{ + if (WARN_ON(fwspec->param_count < 2)) + return -EINVAL; + if (WARN_ON(!fwspec->param[0])) + return -EINVAL; + + /* For DT, gsi_base is always zero. 
*/ + *hwirq = fwspec->param[0] - gsi_base; + *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; + + WARN_ON(*type == IRQ_TYPE_NONE); + + return 0; +} + +void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode) +{ + u32 val; +#ifdef CONFIG_RISCV_M_MODE + u32 valh; + + if (msi_mode) { + val = lower_32_bits(priv->msicfg.base_ppn); + valh = FIELD_PREP(APLIC_xMSICFGADDRH_BAPPN, upper_32_bits(priv->msicfg.base_ppn)); + valh |= FIELD_PREP(APLIC_xMSICFGADDRH_LHXW, priv->msicfg.lhxw); + valh |= FIELD_PREP(APLIC_xMSICFGADDRH_HHXW, priv->msicfg.hhxw); + valh |= FIELD_PREP(APLIC_xMSICFGADDRH_LHXS, priv->msicfg.lhxs); + valh |= FIELD_PREP(APLIC_xMSICFGADDRH_HHXS, priv->msicfg.hhxs); + writel(val, priv->regs + APLIC_xMSICFGADDR); + writel(valh, priv->regs + APLIC_xMSICFGADDRH); + } +#endif + + /* Setup APLIC domaincfg register */ + val = readl(priv->regs + APLIC_DOMAINCFG); + val |= APLIC_DOMAINCFG_IE; + if (msi_mode) + val |= APLIC_DOMAINCFG_DM; + writel(val, priv->regs + APLIC_DOMAINCFG); + if (readl(priv->regs + APLIC_DOMAINCFG) != val) + dev_warn(priv->dev, "unable to write 0x%x in domaincfg\n", val); +} + +static void aplic_init_hw_irqs(struct aplic_priv *priv) +{ + int i; + + /* Disable all interrupts */ + for (i = 0; i <= priv->nr_irqs; i += 32) + writel(-1U, priv->regs + APLIC_CLRIE_BASE + (i / 32) * sizeof(u32)); + + /* Set interrupt type and default priority for all interrupts */ + for (i = 1; i <= priv->nr_irqs; i++) { + writel(0, priv->regs + APLIC_SOURCECFG_BASE + (i - 1) * sizeof(u32)); + writel(APLIC_DEFAULT_PRIORITY, + priv->regs + APLIC_TARGET_BASE + (i - 1) * sizeof(u32)); + } + + /* Clear APLIC domaincfg */ + writel(0, priv->regs + APLIC_DOMAINCFG); +} + +int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs) +{ + struct of_phandle_args parent; + int rc; + + /* + * Currently, only OF fwnode is supported so extend this + * function for ACPI support. + */ + if (!is_of_node(dev->fwnode)) + return -EINVAL; + + /* Save device pointer and register base */ + priv->dev = dev; + priv->regs = regs; + + /* Find out number of interrupt sources */ + rc = of_property_read_u32(to_of_node(dev->fwnode), "riscv,num-sources", + &priv->nr_irqs); + if (rc) { + dev_err(dev, "failed to get number of interrupt sources\n"); + return rc; + } + + /* + * Find out number of IDCs based on parent interrupts + * + * If "msi-parent" property is present then we ignore the + * APLIC IDCs which forces the APLIC driver to use MSI mode. + */ + if (!of_property_present(to_of_node(dev->fwnode), "msi-parent")) { + while (!of_irq_parse_one(to_of_node(dev->fwnode), priv->nr_idcs, &parent)) + priv->nr_idcs++; + } + + /* Setup initial state APLIC interrupts */ + aplic_init_hw_irqs(priv); + + return 0; +} + +static int aplic_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + bool msi_mode = false; + void __iomem *regs; + int rc; + + /* Map the MMIO registers */ + regs = devm_platform_ioremap_resource(pdev, 0); + if (!regs) { + dev_err(dev, "failed map MMIO registers\n"); + return -ENOMEM; + } + + /* + * If msi-parent property is present then setup APLIC MSI + * mode otherwise setup APLIC direct mode. + */ + if (is_of_node(dev->fwnode)) + msi_mode = of_property_present(to_of_node(dev->fwnode), "msi-parent"); + if (msi_mode) + rc = -ENODEV; + else + rc = aplic_direct_setup(dev, regs); + if (rc) + dev_err(dev, "failed to setup APLIC in %s mode\n", msi_mode ? 
"MSI" : "direct"); + + return rc; +} + +static const struct of_device_id aplic_match[] = { + { .compatible = "riscv,aplic" }, + {} +}; + +static struct platform_driver aplic_driver = { + .driver = { + .name = "riscv-aplic", + .of_match_table = aplic_match, + }, + .probe = aplic_probe, +}; +builtin_platform_driver(aplic_driver); diff --git a/drivers/irqchip/irq-riscv-aplic-main.h b/drivers/irqchip/irq-riscv-aplic-main.h new file mode 100644 index 000000000000..4cfbadf37ddc --- /dev/null +++ b/drivers/irqchip/irq-riscv-aplic-main.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. + */ + +#ifndef _IRQ_RISCV_APLIC_MAIN_H +#define _IRQ_RISCV_APLIC_MAIN_H + +#include +#include +#include +#include +#include + +#define APLIC_DEFAULT_PRIORITY 1 + +struct aplic_msicfg { + phys_addr_t base_ppn; + u32 hhxs; + u32 hhxw; + u32 lhxs; + u32 lhxw; +}; + +struct aplic_priv { + struct device *dev; + u32 gsi_base; + u32 nr_irqs; + u32 nr_idcs; + void __iomem *regs; + struct aplic_msicfg msicfg; +}; + +void aplic_irq_unmask(struct irq_data *d); +void aplic_irq_mask(struct irq_data *d); +int aplic_irq_set_type(struct irq_data *d, unsigned int type); +int aplic_irqdomain_translate(struct irq_fwspec *fwspec, u32 gsi_base, + unsigned long *hwirq, unsigned int *type); +void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode); +int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs); +int aplic_direct_setup(struct device *dev, void __iomem *regs); + +#endif diff --git a/include/linux/irqchip/riscv-aplic.h b/include/linux/irqchip/riscv-aplic.h new file mode 100644 index 000000000000..ec8f7df50583 --- /dev/null +++ b/include/linux/irqchip/riscv-aplic.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + * Copyright (C) 2022 Ventana Micro Systems Inc. 
+ */ +#ifndef __LINUX_IRQCHIP_RISCV_APLIC_H +#define __LINUX_IRQCHIP_RISCV_APLIC_H + +#include + +#define APLIC_MAX_IDC BIT(14) +#define APLIC_MAX_SOURCE 1024 + +#define APLIC_DOMAINCFG 0x0000 +#define APLIC_DOMAINCFG_RDONLY 0x80000000 +#define APLIC_DOMAINCFG_IE BIT(8) +#define APLIC_DOMAINCFG_DM BIT(2) +#define APLIC_DOMAINCFG_BE BIT(0) + +#define APLIC_SOURCECFG_BASE 0x0004 +#define APLIC_SOURCECFG_D BIT(10) +#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff +#define APLIC_SOURCECFG_SM_MASK 0x00000007 +#define APLIC_SOURCECFG_SM_INACTIVE 0x0 +#define APLIC_SOURCECFG_SM_DETACH 0x1 +#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4 +#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5 +#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6 +#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7 + +#define APLIC_MMSICFGADDR 0x1bc0 +#define APLIC_MMSICFGADDRH 0x1bc4 +#define APLIC_SMSICFGADDR 0x1bc8 +#define APLIC_SMSICFGADDRH 0x1bcc + +#ifdef CONFIG_RISCV_M_MODE +#define APLIC_xMSICFGADDR APLIC_MMSICFGADDR +#define APLIC_xMSICFGADDRH APLIC_MMSICFGADDRH +#else +#define APLIC_xMSICFGADDR APLIC_SMSICFGADDR +#define APLIC_xMSICFGADDRH APLIC_SMSICFGADDRH +#endif + +#define APLIC_xMSICFGADDRH_L BIT(31) +#define APLIC_xMSICFGADDRH_HHXS_MASK 0x1f +#define APLIC_xMSICFGADDRH_HHXS_SHIFT 24 +#define APLIC_xMSICFGADDRH_HHXS (APLIC_xMSICFGADDRH_HHXS_MASK << \ + APLIC_xMSICFGADDRH_HHXS_SHIFT) +#define APLIC_xMSICFGADDRH_LHXS_MASK 0x7 +#define APLIC_xMSICFGADDRH_LHXS_SHIFT 20 +#define APLIC_xMSICFGADDRH_LHXS (APLIC_xMSICFGADDRH_LHXS_MASK << \ + APLIC_xMSICFGADDRH_LHXS_SHIFT) +#define APLIC_xMSICFGADDRH_HHXW_MASK 0x7 +#define APLIC_xMSICFGADDRH_HHXW_SHIFT 16 +#define APLIC_xMSICFGADDRH_HHXW (APLIC_xMSICFGADDRH_HHXW_MASK << \ + APLIC_xMSICFGADDRH_HHXW_SHIFT) +#define APLIC_xMSICFGADDRH_LHXW_MASK 0xf +#define APLIC_xMSICFGADDRH_LHXW_SHIFT 12 +#define APLIC_xMSICFGADDRH_LHXW (APLIC_xMSICFGADDRH_LHXW_MASK << \ + APLIC_xMSICFGADDRH_LHXW_SHIFT) +#define APLIC_xMSICFGADDRH_BAPPN_MASK 0xfff +#define APLIC_xMSICFGADDRH_BAPPN_SHIFT 0 +#define APLIC_xMSICFGADDRH_BAPPN (APLIC_xMSICFGADDRH_BAPPN_MASK << \ + APLIC_xMSICFGADDRH_BAPPN_SHIFT) + +#define APLIC_xMSICFGADDR_PPN_SHIFT 12 + +#define APLIC_xMSICFGADDR_PPN_HART(__lhxs) \ + (BIT(__lhxs) - 1) + +#define APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) \ + (BIT(__lhxw) - 1) +#define APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs) \ + ((__lhxs)) +#define APLIC_xMSICFGADDR_PPN_LHX(__lhxw, __lhxs) \ + (APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) << \ + APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs)) + +#define APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) \ + (BIT(__hhxw) - 1) +#define APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs) \ + ((__hhxs) + APLIC_xMSICFGADDR_PPN_SHIFT) +#define APLIC_xMSICFGADDR_PPN_HHX(__hhxw, __hhxs) \ + (APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) << \ + APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs)) + +#define APLIC_IRQBITS_PER_REG 32 + +#define APLIC_SETIP_BASE 0x1c00 +#define APLIC_SETIPNUM 0x1cdc + +#define APLIC_CLRIP_BASE 0x1d00 +#define APLIC_CLRIPNUM 0x1ddc + +#define APLIC_SETIE_BASE 0x1e00 +#define APLIC_SETIENUM 0x1edc + +#define APLIC_CLRIE_BASE 0x1f00 +#define APLIC_CLRIENUM 0x1fdc + +#define APLIC_SETIPNUM_LE 0x2000 +#define APLIC_SETIPNUM_BE 0x2004 + +#define APLIC_GENMSI 0x3000 + +#define APLIC_TARGET_BASE 0x3004 +#define APLIC_TARGET_HART_IDX_SHIFT 18 +#define APLIC_TARGET_HART_IDX_MASK 0x3fff +#define APLIC_TARGET_HART_IDX (APLIC_TARGET_HART_IDX_MASK << \ + APLIC_TARGET_HART_IDX_SHIFT) +#define APLIC_TARGET_GUEST_IDX_SHIFT 12 +#define APLIC_TARGET_GUEST_IDX_MASK 0x3f +#define APLIC_TARGET_GUEST_IDX 
(APLIC_TARGET_GUEST_IDX_MASK << \ + APLIC_TARGET_GUEST_IDX_SHIFT) +#define APLIC_TARGET_IPRIO_SHIFT 0 +#define APLIC_TARGET_IPRIO_MASK 0xff +#define APLIC_TARGET_IPRIO (APLIC_TARGET_IPRIO_MASK << \ + APLIC_TARGET_IPRIO_SHIFT) +#define APLIC_TARGET_EIID_SHIFT 0 +#define APLIC_TARGET_EIID_MASK 0x7ff +#define APLIC_TARGET_EIID (APLIC_TARGET_EIID_MASK << \ + APLIC_TARGET_EIID_SHIFT) + +#define APLIC_IDC_BASE 0x4000 +#define APLIC_IDC_SIZE 32 + +#define APLIC_IDC_IDELIVERY 0x00 + +#define APLIC_IDC_IFORCE 0x04 + +#define APLIC_IDC_ITHRESHOLD 0x08 + +#define APLIC_IDC_TOPI 0x18 +#define APLIC_IDC_TOPI_ID_SHIFT 16 +#define APLIC_IDC_TOPI_ID_MASK 0x3ff +#define APLIC_IDC_TOPI_ID (APLIC_IDC_TOPI_ID_MASK << \ + APLIC_IDC_TOPI_ID_SHIFT) +#define APLIC_IDC_TOPI_PRIO_SHIFT 0 +#define APLIC_IDC_TOPI_PRIO_MASK 0xff +#define APLIC_IDC_TOPI_PRIO (APLIC_IDC_TOPI_PRIO_MASK << \ + APLIC_IDC_TOPI_PRIO_SHIFT) + +#define APLIC_IDC_CLAIMI 0x1c + +#endif -- cgit From bdeef5dcea6b164f4bd614655821b1ef12ebec9a Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 25 Mar 2024 16:20:04 +0200 Subject: spi: rspi: Get rid of unused struct rspi_plat_data No in-kernel users of struct rspi_plat_data. If required, the software nodes should be used for such users. For now just get rid of it. Signed-off-by: Andy Shevchenko Link: https://msgid.link/r/20240325142118.3210915-1-andriy.shevchenko@linux.intel.com Signed-off-by: Mark Brown --- drivers/spi/spi-rspi.c | 12 +----------- include/linux/spi/rspi.h | 18 ------------------ 2 files changed, 1 insertion(+), 29 deletions(-) delete mode 100644 include/linux/spi/rspi.h (limited to 'include/linux') diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 8e81f1a8623f..7f95d22fb1ac 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -24,7 +24,6 @@ #include #include #include -#include #include #define RSPI_SPCR 0x00 /* Control Register */ @@ -1131,16 +1130,12 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev, static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr, const struct resource *res) { - const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev); unsigned int dma_tx_id, dma_rx_id; if (dev->of_node) { /* In the OF case we will get the slave IDs from the DT */ dma_tx_id = 0; dma_rx_id = 0; - } else if (rspi_pd && rspi_pd->dma_tx_id && rspi_pd->dma_rx_id) { - dma_tx_id = rspi_pd->dma_tx_id; - dma_rx_id = rspi_pd->dma_rx_id; } else { /* The driver assumes no error. */ return 0; @@ -1290,7 +1285,6 @@ static int rspi_probe(struct platform_device *pdev) struct spi_controller *ctlr; struct rspi_data *rspi; int ret; - const struct rspi_plat_data *rspi_pd; const struct spi_ops *ops; unsigned long clksrc; @@ -1305,11 +1299,7 @@ static int rspi_probe(struct platform_device *pdev) goto error1; } else { ops = (struct spi_ops *)pdev->id_entry->driver_data; - rspi_pd = dev_get_platdata(&pdev->dev); - if (rspi_pd && rspi_pd->num_chipselect) - ctlr->num_chipselect = rspi_pd->num_chipselect; - else - ctlr->num_chipselect = 2; /* default */ + ctlr->num_chipselect = 2; /* default */ } rspi = spi_controller_get_devdata(ctlr); diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h deleted file mode 100644 index dbdfcc7a3db2..000000000000 --- a/include/linux/spi/rspi.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Renesas SPI driver - * - * Copyright (C) 2012 Renesas Solutions Corp. 
- */ - -#ifndef __LINUX_SPI_RENESAS_SPI_H__ -#define __LINUX_SPI_RENESAS_SPI_H__ - -struct rspi_plat_data { - unsigned int dma_tx_id; - unsigned int dma_rx_id; - - u16 num_chipselect; -}; - -#endif -- cgit From 1211f3b21c2aa0d22d8d7f050e3a5930a91cd0e4 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 25 Mar 2024 07:21:02 -1000 Subject: workqueue: Preserve OFFQ bits in cancel[_sync] paths The cancel[_sync] paths acquire and release WORK_STRUCT_PENDING, and manipulate WORK_OFFQ_CANCELING. However, they assume that all the OFFQ bit values except for the pool ID are statically known and don't preserve them, which is not wrong in the current code as the pool ID and CANCELING are the only information carried. However, the planned disable/enable support will add more fields and need them to be preserved. This patch updates work data handling so that only the bits which need updating are updated. - struct work_offq_data is added along with work_offqd_unpack() and work_offqd_pack_flags() to help manipulating multiple fields contained in work->data. Note that the helpers look a bit silly right now as there isn't that much to pack. The next patch will add more. - mark_work_canceling() which is used only by __cancel_work_sync() is replaced by open-coded usage of work_offq_data and set_work_pool_and_keep_pending() in __cancel_work_sync(). - __cancel_work[_sync]() uses offq_data helpers to preserve other OFFQ bits when clearing WORK_STRUCT_PENDING and WORK_OFFQ_CANCELING at the end. - This removes all users of get_work_pool_id() which is dropped. Note that get_work_pool_id() could handle both WORK_STRUCT_PWQ and !WORK_STRUCT_PWQ cases; however, it was only being called after try_to_grab_pending() succeeded, in which case WORK_STRUCT_PWQ is never set and thus it's safe to use work_offqd_unpack() instead. No behavior changes intended. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- include/linux/workqueue.h | 1 + kernel/workqueue.c | 51 ++++++++++++++++++++++++++++------------------- 2 files changed, 32 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 158784dd189a..ae7ae4a51499 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -97,6 +97,7 @@ enum wq_misc_consts { /* Convenience constants - of type 'unsigned long', not 'enum'! */ #define WORK_OFFQ_CANCELING (1ul << WORK_OFFQ_CANCELING_BIT) +#define WORK_OFFQ_FLAG_MASK (((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT) #define WORK_OFFQ_POOL_NONE ((1ul << WORK_OFFQ_POOL_BITS) - 1) #define WORK_STRUCT_NO_POOL (WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT) #define WORK_STRUCT_PWQ_MASK (~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1)) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 0066c8f6c154..d8f37cfa9935 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -392,6 +392,11 @@ struct wq_pod_type { int *cpu_pod; /* cpu -> pod */ }; +struct work_offq_data { + u32 pool_id; + u32 flags; +}; + static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = { [WQ_AFFN_DFL] = "default", [WQ_AFFN_CPU] = "cpu", @@ -892,29 +897,23 @@ static struct worker_pool *get_work_pool(struct work_struct *work) return idr_find(&worker_pool_idr, pool_id); } -/** - * get_work_pool_id - return the worker pool ID a given work is associated with - * @work: the work item of interest - * - * Return: The worker_pool ID @work was last associated with. - * %WORK_OFFQ_POOL_NONE if none. 
- */ -static int get_work_pool_id(struct work_struct *work) +static unsigned long shift_and_mask(unsigned long v, u32 shift, u32 bits) { - unsigned long data = atomic_long_read(&work->data); + return (v >> shift) & ((1 << bits) - 1); +} - if (data & WORK_STRUCT_PWQ) - return work_struct_pwq(data)->pool->id; +static void work_offqd_unpack(struct work_offq_data *offqd, unsigned long data) +{ + WARN_ON_ONCE(data & WORK_STRUCT_PWQ); - return data >> WORK_OFFQ_POOL_SHIFT; + offqd->pool_id = shift_and_mask(data, WORK_OFFQ_POOL_SHIFT, + WORK_OFFQ_POOL_BITS); + offqd->flags = data & WORK_OFFQ_FLAG_MASK; } -static void mark_work_canceling(struct work_struct *work) +static unsigned long work_offqd_pack_flags(struct work_offq_data *offqd) { - unsigned long pool_id = get_work_pool_id(work); - - pool_id <<= WORK_OFFQ_POOL_SHIFT; - set_work_data(work, pool_id | WORK_STRUCT_PENDING | WORK_OFFQ_CANCELING); + return (unsigned long)offqd->flags; } static bool work_is_canceling(struct work_struct *work) @@ -4271,6 +4270,7 @@ EXPORT_SYMBOL(flush_rcu_work); static bool __cancel_work(struct work_struct *work, u32 cflags) { + struct work_offq_data offqd; unsigned long irq_flags; int ret; @@ -4281,19 +4281,26 @@ static bool __cancel_work(struct work_struct *work, u32 cflags) if (unlikely(ret < 0)) return false; - set_work_pool_and_clear_pending(work, get_work_pool_id(work), 0); + work_offqd_unpack(&offqd, *work_data_bits(work)); + set_work_pool_and_clear_pending(work, offqd.pool_id, + work_offqd_pack_flags(&offqd)); local_irq_restore(irq_flags); return ret; } static bool __cancel_work_sync(struct work_struct *work, u32 cflags) { + struct work_offq_data offqd; unsigned long irq_flags; bool ret; /* claim @work and tell other tasks trying to grab @work to back off */ ret = work_grab_pending(work, cflags, &irq_flags); - mark_work_canceling(work); + + work_offqd_unpack(&offqd, *work_data_bits(work)); + offqd.flags |= WORK_OFFQ_CANCELING; + set_work_pool_and_keep_pending(work, offqd.pool_id, + work_offqd_pack_flags(&offqd)); local_irq_restore(irq_flags); /* @@ -4303,12 +4310,16 @@ static bool __cancel_work_sync(struct work_struct *work, u32 cflags) if (wq_online) __flush_work(work, true); + work_offqd_unpack(&offqd, *work_data_bits(work)); + /* * smp_mb() at the end of set_work_pool_and_clear_pending() is paired * with prepare_to_wait() above so that either waitqueue_active() is * visible here or !work_is_canceling() is visible there. */ - set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE, 0); + offqd.flags &= ~WORK_OFFQ_CANCELING; + set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE, + work_offqd_pack_flags(&offqd)); if (waitqueue_active(&wq_cancel_waitq)) __wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work); -- cgit From 86898fa6b8cd942505860556f3a0bf52eae57fe8 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 25 Mar 2024 07:21:03 -1000 Subject: workqueue: Implement disable/enable for (delayed) work items While (delayed) work items could be flushed and canceled, there was no way to prevent them from being queued in the future. While this didn't lead to functional deficiencies, it sometimes required a bit more effort from the workqueue users to e.g. sequence shutdown steps with more care. Workqueue is currently in the process of replacing tasklet which does support disabling and enabling. The feature is used relatively widely to, for example, temporarily suppress main path while a control plane operation (reset or config change) is in progress. 
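As an illustration of that pattern, a minimal sketch of a driver quiescing its completion handling around a reset, assuming the disable_work_sync()/enable_work() interface added below (the mydrv_* names and the reset helper are hypothetical):

    /* Hypothetical driver quiescing a work item around a reset. */
    struct mydrv {
            struct work_struct done_work;   /* queued from the completion path */
    };

    static void mydrv_reset(struct mydrv *md)
    {
            /* Block further queueing and flush any running instance. */
            disable_work_sync(&md->done_work);

            mydrv_hw_reset(md);             /* hypothetical reset step */

            /* Queueing succeeds again once the disable depth drops to 0. */
            enable_work(&md->done_work);
    }

This mirrors the tasklet_disable()/tasklet_enable() usage that such conversions need to replace.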
To enable easy conversion of tasklet users and as it seems like an inherent useful feature, this patch implements disabling and enabling of work items. - A work item carries 16bit disable count in work->data while not queued. The access to the count is synchronized by the PENDING bit like all other parts of work->data. - If the count is non-zero, the work item cannot be queued. Any attempt to queue the work item fails and returns %false. - disable_work[_sync](), enable_work(), disable_delayed_work[_sync]() and enable_delayed_work() are added. v3: enable_work() was using local_irq_enable() instead of local_irq_restore() to undo IRQ-disable by work_grab_pending(). This is awkward now and will become incorrect as enable_work() will later be used from IRQ context too. (Lai) v2: Lai noticed that queue_work_node() wasn't checking the disable count. Fixed. queue_rcu_work() is updated to trigger warning if the inner work item is disabled. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- include/linux/workqueue.h | 18 ++++- kernel/workqueue.c | 177 +++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 182 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index ae7ae4a51499..bd80e66298a0 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -51,20 +51,23 @@ enum work_bits { * data contains off-queue information when !WORK_STRUCT_PWQ. * * MSB - * [ pool ID ] [ OFFQ flags ] [ STRUCT flags ] - * 1 bit 4 or 5 bits + * [ pool ID ] [ disable depth ] [ OFFQ flags ] [ STRUCT flags ] + * 16 bits 1 bit 4 or 5 bits */ WORK_OFFQ_FLAG_SHIFT = WORK_STRUCT_FLAG_BITS, WORK_OFFQ_CANCELING_BIT = WORK_OFFQ_FLAG_SHIFT, WORK_OFFQ_FLAG_END, WORK_OFFQ_FLAG_BITS = WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT, + WORK_OFFQ_DISABLE_SHIFT = WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS, + WORK_OFFQ_DISABLE_BITS = 16, + /* * When a work item is off queue, the high bits encode off-queue flags * and the last pool it was on. Cap pool ID to 31 bits and use the * highest number to indicate that no pool is associated. */ - WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_FLAG_SHIFT + WORK_OFFQ_FLAG_BITS, + WORK_OFFQ_POOL_SHIFT = WORK_OFFQ_DISABLE_SHIFT + WORK_OFFQ_DISABLE_BITS, WORK_OFFQ_LEFT = BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT, WORK_OFFQ_POOL_BITS = WORK_OFFQ_LEFT <= 31 ? WORK_OFFQ_LEFT : 31, }; @@ -98,6 +101,7 @@ enum wq_misc_consts { /* Convenience constants - of type 'unsigned long', not 'enum'! 
*/ #define WORK_OFFQ_CANCELING (1ul << WORK_OFFQ_CANCELING_BIT) #define WORK_OFFQ_FLAG_MASK (((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT) +#define WORK_OFFQ_DISABLE_MASK (((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT) #define WORK_OFFQ_POOL_NONE ((1ul << WORK_OFFQ_POOL_BITS) - 1) #define WORK_STRUCT_NO_POOL (WORK_OFFQ_POOL_NONE << WORK_OFFQ_POOL_SHIFT) #define WORK_STRUCT_PWQ_MASK (~((1ul << WORK_STRUCT_PWQ_SHIFT) - 1)) @@ -560,6 +564,14 @@ extern bool flush_delayed_work(struct delayed_work *dwork); extern bool cancel_delayed_work(struct delayed_work *dwork); extern bool cancel_delayed_work_sync(struct delayed_work *dwork); +extern bool disable_work(struct work_struct *work); +extern bool disable_work_sync(struct work_struct *work); +extern bool enable_work(struct work_struct *work); + +extern bool disable_delayed_work(struct delayed_work *dwork); +extern bool disable_delayed_work_sync(struct delayed_work *dwork); +extern bool enable_delayed_work(struct delayed_work *dwork); + extern bool flush_rcu_work(struct rcu_work *rwork); extern void workqueue_set_max_active(struct workqueue_struct *wq, diff --git a/kernel/workqueue.c b/kernel/workqueue.c index d8f37cfa9935..5c53dde877fd 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -99,6 +99,7 @@ enum worker_flags { enum work_cancel_flags { WORK_CANCEL_DELAYED = 1 << 0, /* canceling a delayed_work */ + WORK_CANCEL_DISABLE = 1 << 1, /* canceling to disable */ }; enum wq_internal_consts { @@ -394,6 +395,7 @@ struct wq_pod_type { struct work_offq_data { u32 pool_id; + u32 disable; u32 flags; }; @@ -908,12 +910,15 @@ static void work_offqd_unpack(struct work_offq_data *offqd, unsigned long data) offqd->pool_id = shift_and_mask(data, WORK_OFFQ_POOL_SHIFT, WORK_OFFQ_POOL_BITS); + offqd->disable = shift_and_mask(data, WORK_OFFQ_DISABLE_SHIFT, + WORK_OFFQ_DISABLE_BITS); offqd->flags = data & WORK_OFFQ_FLAG_MASK; } static unsigned long work_offqd_pack_flags(struct work_offq_data *offqd) { - return (unsigned long)offqd->flags; + return ((unsigned long)offqd->disable << WORK_OFFQ_DISABLE_SHIFT) | + ((unsigned long)offqd->flags); } static bool work_is_canceling(struct work_struct *work) @@ -2408,6 +2413,21 @@ out: rcu_read_unlock(); } +static bool clear_pending_if_disabled(struct work_struct *work) +{ + unsigned long data = *work_data_bits(work); + struct work_offq_data offqd; + + if (likely((data & WORK_STRUCT_PWQ) || + !(data & WORK_OFFQ_DISABLE_MASK))) + return false; + + work_offqd_unpack(&offqd, data); + set_work_pool_and_clear_pending(work, offqd.pool_id, + work_offqd_pack_flags(&offqd)); + return true; +} + /** * queue_work_on - queue work on specific cpu * @cpu: CPU number to execute work on @@ -2430,7 +2450,8 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq, local_irq_save(irq_flags); - if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) && + !clear_pending_if_disabled(work)) { __queue_work(cpu, wq, work); ret = true; } @@ -2508,7 +2529,8 @@ bool queue_work_node(int node, struct workqueue_struct *wq, local_irq_save(irq_flags); - if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) && + !clear_pending_if_disabled(work)) { int cpu = select_numa_node_cpu(node); __queue_work(cpu, wq, work); @@ -2590,7 +2612,8 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, /* read the comment in __queue_work() */ 
local_irq_save(irq_flags); - if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) && + !clear_pending_if_disabled(work)) { __queue_delayed_work(cpu, wq, dwork, delay); ret = true; } @@ -2663,7 +2686,12 @@ bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork) { struct work_struct *work = &rwork->work; - if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + /* + * rcu_work can't be canceled or disabled. Warn if the user reached + * inside @rwork and disabled the inner work. + */ + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) && + !WARN_ON_ONCE(clear_pending_if_disabled(work))) { rwork->wq = wq; call_rcu_hurry(&rwork->rcu, rcu_work_rcufn); return true; @@ -4268,20 +4296,46 @@ bool flush_rcu_work(struct rcu_work *rwork) } EXPORT_SYMBOL(flush_rcu_work); +static void work_offqd_disable(struct work_offq_data *offqd) +{ + const unsigned long max = (1lu << WORK_OFFQ_DISABLE_BITS) - 1; + + if (likely(offqd->disable < max)) + offqd->disable++; + else + WARN_ONCE(true, "workqueue: work disable count overflowed\n"); +} + +static void work_offqd_enable(struct work_offq_data *offqd) +{ + if (likely(offqd->disable > 0)) + offqd->disable--; + else + WARN_ONCE(true, "workqueue: work disable count underflowed\n"); +} + static bool __cancel_work(struct work_struct *work, u32 cflags) { struct work_offq_data offqd; unsigned long irq_flags; int ret; - do { - ret = try_to_grab_pending(work, cflags, &irq_flags); - } while (unlikely(ret == -EAGAIN)); + if (cflags & WORK_CANCEL_DISABLE) { + ret = work_grab_pending(work, cflags, &irq_flags); + } else { + do { + ret = try_to_grab_pending(work, cflags, &irq_flags); + } while (unlikely(ret == -EAGAIN)); - if (unlikely(ret < 0)) - return false; + if (unlikely(ret < 0)) + return false; + } work_offqd_unpack(&offqd, *work_data_bits(work)); + + if (cflags & WORK_CANCEL_DISABLE) + work_offqd_disable(&offqd); + set_work_pool_and_clear_pending(work, offqd.pool_id, work_offqd_pack_flags(&offqd)); local_irq_restore(irq_flags); @@ -4298,6 +4352,10 @@ static bool __cancel_work_sync(struct work_struct *work, u32 cflags) ret = work_grab_pending(work, cflags, &irq_flags); work_offqd_unpack(&offqd, *work_data_bits(work)); + + if (cflags & WORK_CANCEL_DISABLE) + work_offqd_disable(&offqd); + offqd.flags |= WORK_OFFQ_CANCELING; set_work_pool_and_keep_pending(work, offqd.pool_id, work_offqd_pack_flags(&offqd)); @@ -4397,6 +4455,105 @@ bool cancel_delayed_work_sync(struct delayed_work *dwork) } EXPORT_SYMBOL(cancel_delayed_work_sync); +/** + * disable_work - Disable and cancel a work item + * @work: work item to disable + * + * Disable @work by incrementing its disable count and cancel it if currently + * pending. As long as the disable count is non-zero, any attempt to queue @work + * will fail and return %false. The maximum supported disable depth is 2 to the + * power of %WORK_OFFQ_DISABLE_BITS, currently 65536. + * + * Must be called from a sleepable context. Returns %true if @work was pending, + * %false otherwise. + */ +bool disable_work(struct work_struct *work) +{ + return __cancel_work(work, WORK_CANCEL_DISABLE); +} +EXPORT_SYMBOL_GPL(disable_work); + +/** + * disable_work_sync - Disable, cancel and drain a work item + * @work: work item to disable + * + * Similar to disable_work() but also wait for @work to finish if currently + * executing. + * + * Must be called from a sleepable context. 
Returns %true if @work was pending, + * %false otherwise. + */ +bool disable_work_sync(struct work_struct *work) +{ + return __cancel_work_sync(work, WORK_CANCEL_DISABLE); +} +EXPORT_SYMBOL_GPL(disable_work_sync); + +/** + * enable_work - Enable a work item + * @work: work item to enable + * + * Undo disable_work[_sync]() by decrementing @work's disable count. @work can + * only be queued if its disable count is 0. + * + * Must be called from a sleepable context. Returns %true if the disable count + * reached 0. Otherwise, %false. + */ +bool enable_work(struct work_struct *work) +{ + struct work_offq_data offqd; + unsigned long irq_flags; + + work_grab_pending(work, 0, &irq_flags); + + work_offqd_unpack(&offqd, *work_data_bits(work)); + work_offqd_enable(&offqd); + set_work_pool_and_clear_pending(work, offqd.pool_id, + work_offqd_pack_flags(&offqd)); + local_irq_restore(irq_flags); + + return !offqd.disable; +} +EXPORT_SYMBOL_GPL(enable_work); + +/** + * disable_delayed_work - Disable and cancel a delayed work item + * @dwork: delayed work item to disable + * + * disable_work() for delayed work items. + */ +bool disable_delayed_work(struct delayed_work *dwork) +{ + return __cancel_work(&dwork->work, + WORK_CANCEL_DELAYED | WORK_CANCEL_DISABLE); +} +EXPORT_SYMBOL_GPL(disable_delayed_work); + +/** + * disable_delayed_work_sync - Disable, cancel and drain a delayed work item + * @dwork: delayed work item to disable + * + * disable_work_sync() for delayed work items. + */ +bool disable_delayed_work_sync(struct delayed_work *dwork) +{ + return __cancel_work_sync(&dwork->work, + WORK_CANCEL_DELAYED | WORK_CANCEL_DISABLE); +} +EXPORT_SYMBOL_GPL(disable_delayed_work_sync); + +/** + * enable_delayed_work - Enable a delayed work item + * @dwork: delayed work item to enable + * + * enable_work() for delayed work items. + */ +bool enable_delayed_work(struct delayed_work *dwork) +{ + return enable_work(&dwork->work); +} +EXPORT_SYMBOL_GPL(enable_delayed_work); + /** * schedule_on_each_cpu - execute a function synchronously on each online CPU * @func: the function to call -- cgit From f09b10b6f442656524d2ee26e45966401a14f54b Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 25 Mar 2024 07:21:03 -1000 Subject: workqueue: Remove WORK_OFFQ_CANCELING cancel[_delayed]_work_sync() guarantees that it can shut down self-requeueing work items. To achieve that, it grabs and then holds WORK_STRUCT_PENDING bit set while flushing the currently executing instance. As the PENDING bit is set, all queueing attempts including the self-requeueing ones fail and once the currently executing instance is flushed, the work item should be idle as long as someone else isn't actively queueing it. This means that the cancel_work_sync path may hold the PENDING bit set while flushing the target work item. This isn't a problem for the queueing path - it can just fail which is the desired effect. It doesn't affect flush. It doesn't matter to cancel_work either as it can just report that the work item has successfully canceled. However, if there's another cancel_work_sync attempt on the work item, it can't simply fail or report success and that would breach the guarantee that it should provide. cancel_work_sync has to wait for and grab that PENDING bit and go through the motions. WORK_OFFQ_CANCELING and wq_cancel_waitq are what implement this cancel_work_sync to cancel_work_sync wait mechanism. 
When a work item is being canceled, WORK_OFFQ_CANCELING is also set on it and other cancel_work_sync attempts wait on the bit to be cleared using the wait queue. While this works, it's an isolated wart which doesn't jive with the rest of flush and cancel mechanisms and forces enable_work() and disable_work() to require a sleepable context, which hampers their usability. Now that a work item can be disabled, we can use that to block queueing while cancel_work_sync is in progress. Instead of holding PENDING the bit, it can temporarily disable the work item, flush and then re-enable it as that'd achieve the same end result of blocking queueings while canceling and thus enable canceling of self-requeueing work items. - WORK_OFFQ_CANCELING and the surrounding mechanims are removed. - work_grab_pending() is now simpler, no longer has to wait for a blocking operation and thus can be called from any context. - With work_grab_pending() simplified, no need to use try_to_grab_pending() directly. All users are converted to use work_grab_pending(). - __cancel_work_sync() is updated to __cancel_work() with WORK_CANCEL_DISABLE to cancel and plug racing queueing attempts. It then flushes and re-enables the work item if necessary. - These changes allow disable_work() and enable_work() to be called from any context. v2: Lai pointed out that mod_delayed_work_on() needs to check the disable count before queueing the delayed work item. Added clear_pending_if_disabled() call. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- include/linux/workqueue.h | 4 +- kernel/workqueue.c | 140 +++++++--------------------------------------- 2 files changed, 20 insertions(+), 124 deletions(-) (limited to 'include/linux') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index bd80e66298a0..a5075969931b 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -52,10 +52,9 @@ enum work_bits { * * MSB * [ pool ID ] [ disable depth ] [ OFFQ flags ] [ STRUCT flags ] - * 16 bits 1 bit 4 or 5 bits + * 16 bits 0 bits 4 or 5 bits */ WORK_OFFQ_FLAG_SHIFT = WORK_STRUCT_FLAG_BITS, - WORK_OFFQ_CANCELING_BIT = WORK_OFFQ_FLAG_SHIFT, WORK_OFFQ_FLAG_END, WORK_OFFQ_FLAG_BITS = WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT, @@ -99,7 +98,6 @@ enum wq_misc_consts { }; /* Convenience constants - of type 'unsigned long', not 'enum'! */ -#define WORK_OFFQ_CANCELING (1ul << WORK_OFFQ_CANCELING_BIT) #define WORK_OFFQ_FLAG_MASK (((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT) #define WORK_OFFQ_DISABLE_MASK (((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT) #define WORK_OFFQ_POOL_NONE ((1ul << WORK_OFFQ_POOL_BITS) - 1) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 5c53dde877fd..e80a815ec172 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -496,12 +496,6 @@ static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; /* I: attributes used when instantiating ordered pools on demand */ static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS]; -/* - * Used to synchronize multiple cancel_sync attempts on the same work item. See - * work_grab_pending() and __cancel_work_sync(). - */ -static DECLARE_WAIT_QUEUE_HEAD(wq_cancel_waitq); - /* * I: kthread_worker to release pwq's. pwq release needs to be bounced to a * process context while holding a pool lock. Bounce to a dedicated kthread @@ -783,11 +777,6 @@ static int work_next_color(int color) * corresponding to a work. 
Pool is available once the work has been * queued anywhere after initialization until it is sync canceled. pwq is * available only while the work item is queued. - * - * %WORK_OFFQ_CANCELING is used to mark a work item which is being - * canceled. While being canceled, a work item may have its PENDING set - * but stay off timer and worklist for arbitrarily long and nobody should - * try to steal the PENDING bit. */ static inline void set_work_data(struct work_struct *work, unsigned long data) { @@ -921,13 +910,6 @@ static unsigned long work_offqd_pack_flags(struct work_offq_data *offqd) ((unsigned long)offqd->flags); } -static bool work_is_canceling(struct work_struct *work) -{ - unsigned long data = atomic_long_read(&work->data); - - return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING); -} - /* * Policy functions. These define the policies on how the global worker * pools are managed. Unless noted otherwise, these functions assume that @@ -2058,8 +2040,6 @@ out_put: * 1 if @work was pending and we successfully stole PENDING * 0 if @work was idle and we claimed PENDING * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry - * -ENOENT if someone else is canceling @work, this state may persist - * for arbitrarily long * ======== ================================================================ * * Note: @@ -2155,26 +2135,9 @@ static int try_to_grab_pending(struct work_struct *work, u32 cflags, fail: rcu_read_unlock(); local_irq_restore(*irq_flags); - if (work_is_canceling(work)) - return -ENOENT; - cpu_relax(); return -EAGAIN; } -struct cwt_wait { - wait_queue_entry_t wait; - struct work_struct *work; -}; - -static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) -{ - struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait); - - if (cwait->work != key) - return 0; - return autoremove_wake_function(wait, mode, sync, key); -} - /** * work_grab_pending - steal work item from worklist and disable irq * @work: work item to steal @@ -2184,7 +2147,7 @@ static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *k * Grab PENDING bit of @work. @work can be in any stable state - idle, on timer * or on worklist. * - * Must be called in process context. IRQ is disabled on return with IRQ state + * Can be called from any context. IRQ is disabled on return with IRQ state * stored in *@irq_flags. The caller is responsible for re-enabling it using * local_irq_restore(). * @@ -2193,41 +2156,14 @@ static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *k static bool work_grab_pending(struct work_struct *work, u32 cflags, unsigned long *irq_flags) { - struct cwt_wait cwait; int ret; - might_sleep(); -repeat: - ret = try_to_grab_pending(work, cflags, irq_flags); - if (likely(ret >= 0)) - return ret; - if (ret != -ENOENT) - goto repeat; - - /* - * Someone is already canceling. Wait for it to finish. flush_work() - * doesn't work for PREEMPT_NONE because we may get woken up between - * @work's completion and the other canceling task resuming and clearing - * CANCELING - flush_work() will return false immediately as @work is no - * longer busy, try_to_grab_pending() will return -ENOENT as @work is - * still being canceled and the other canceling task won't be able to - * clear CANCELING as we're hogging the CPU. - * - * Let's wait for completion using a waitqueue. As this may lead to the - * thundering herd problem, use a custom wake function which matches - * @work along with exclusive wait and wakeup. 
- */ - init_wait(&cwait.wait); - cwait.wait.func = cwt_wakefn; - cwait.work = work; - - prepare_to_wait_exclusive(&wq_cancel_waitq, &cwait.wait, - TASK_UNINTERRUPTIBLE); - if (work_is_canceling(work)) - schedule(); - finish_wait(&wq_cancel_waitq, &cwait.wait); - - goto repeat; + while (true) { + ret = try_to_grab_pending(work, cflags, irq_flags); + if (ret >= 0) + return ret; + cpu_relax(); + } } /** @@ -2645,19 +2581,14 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, struct delayed_work *dwork, unsigned long delay) { unsigned long irq_flags; - int ret; + bool ret; - do { - ret = try_to_grab_pending(&dwork->work, WORK_CANCEL_DELAYED, - &irq_flags); - } while (unlikely(ret == -EAGAIN)); + ret = work_grab_pending(&dwork->work, WORK_CANCEL_DELAYED, &irq_flags); - if (likely(ret >= 0)) { + if (!clear_pending_if_disabled(&dwork->work)) __queue_delayed_work(cpu, wq, dwork, delay); - local_irq_restore(irq_flags); - } - /* -ENOENT from try_to_grab_pending() becomes %true */ + local_irq_restore(irq_flags); return ret; } EXPORT_SYMBOL_GPL(mod_delayed_work_on); @@ -4320,16 +4251,7 @@ static bool __cancel_work(struct work_struct *work, u32 cflags) unsigned long irq_flags; int ret; - if (cflags & WORK_CANCEL_DISABLE) { - ret = work_grab_pending(work, cflags, &irq_flags); - } else { - do { - ret = try_to_grab_pending(work, cflags, &irq_flags); - } while (unlikely(ret == -EAGAIN)); - - if (unlikely(ret < 0)) - return false; - } + ret = work_grab_pending(work, cflags, &irq_flags); work_offqd_unpack(&offqd, *work_data_bits(work)); @@ -4344,22 +4266,9 @@ static bool __cancel_work(struct work_struct *work, u32 cflags) static bool __cancel_work_sync(struct work_struct *work, u32 cflags) { - struct work_offq_data offqd; - unsigned long irq_flags; bool ret; - /* claim @work and tell other tasks trying to grab @work to back off */ - ret = work_grab_pending(work, cflags, &irq_flags); - - work_offqd_unpack(&offqd, *work_data_bits(work)); - - if (cflags & WORK_CANCEL_DISABLE) - work_offqd_disable(&offqd); - - offqd.flags |= WORK_OFFQ_CANCELING; - set_work_pool_and_keep_pending(work, offqd.pool_id, - work_offqd_pack_flags(&offqd)); - local_irq_restore(irq_flags); + ret = __cancel_work(work, cflags | WORK_CANCEL_DISABLE); /* * Skip __flush_work() during early boot when we know that @work isn't @@ -4368,19 +4277,8 @@ static bool __cancel_work_sync(struct work_struct *work, u32 cflags) if (wq_online) __flush_work(work, true); - work_offqd_unpack(&offqd, *work_data_bits(work)); - - /* - * smp_mb() at the end of set_work_pool_and_clear_pending() is paired - * with prepare_to_wait() above so that either waitqueue_active() is - * visible here or !work_is_canceling() is visible there. - */ - offqd.flags &= ~WORK_OFFQ_CANCELING; - set_work_pool_and_clear_pending(work, WORK_OFFQ_POOL_NONE, - work_offqd_pack_flags(&offqd)); - - if (waitqueue_active(&wq_cancel_waitq)) - __wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work); + if (!(cflags & WORK_CANCEL_DISABLE)) + enable_work(work); return ret; } @@ -4464,8 +4362,8 @@ EXPORT_SYMBOL(cancel_delayed_work_sync); * will fail and return %false. The maximum supported disable depth is 2 to the * power of %WORK_OFFQ_DISABLE_BITS, currently 65536. * - * Must be called from a sleepable context. Returns %true if @work was pending, - * %false otherwise. + * Can be called from any context. Returns %true if @work was pending, %false + * otherwise. 
*/ bool disable_work(struct work_struct *work) { @@ -4496,8 +4394,8 @@ EXPORT_SYMBOL_GPL(disable_work_sync); * Undo disable_work[_sync]() by decrementing @work's disable count. @work can * only be queued if its disable count is 0. * - * Must be called from a sleepable context. Returns %true if the disable count - * reached 0. Otherwise, %false. + * Can be called from any context. Returns %true if the disable count reached 0. + * Otherwise, %false. */ bool enable_work(struct work_struct *work) { -- cgit From 456a78eef2670d0e9521e87f35a056de8fec7fb2 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 25 Mar 2024 07:21:03 -1000 Subject: workqueue: Remember whether a work item was on a BH workqueue Add an off-queue flag, WORK_OFFQ_BH, that indicates whether the last workqueue the work item was on was a BH one. This will be used to test whether a work item is BH in cancel_sync path to implement atomic cancel_sync'ing for BH work items. Signed-off-by: Tejun Heo Reviewed-by: Lai Jiangshan --- include/linux/workqueue.h | 4 +++- kernel/workqueue.c | 10 ++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index a5075969931b..777b0186317e 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -52,9 +52,10 @@ enum work_bits { * * MSB * [ pool ID ] [ disable depth ] [ OFFQ flags ] [ STRUCT flags ] - * 16 bits 0 bits 4 or 5 bits + * 16 bits 1 bit 4 or 5 bits */ WORK_OFFQ_FLAG_SHIFT = WORK_STRUCT_FLAG_BITS, + WORK_OFFQ_BH_BIT = WORK_OFFQ_FLAG_SHIFT, WORK_OFFQ_FLAG_END, WORK_OFFQ_FLAG_BITS = WORK_OFFQ_FLAG_END - WORK_OFFQ_FLAG_SHIFT, @@ -98,6 +99,7 @@ enum wq_misc_consts { }; /* Convenience constants - of type 'unsigned long', not 'enum'! */ +#define WORK_OFFQ_BH (1ul << WORK_OFFQ_BH_BIT) #define WORK_OFFQ_FLAG_MASK (((1ul << WORK_OFFQ_FLAG_BITS) - 1) << WORK_OFFQ_FLAG_SHIFT) #define WORK_OFFQ_DISABLE_MASK (((1ul << WORK_OFFQ_DISABLE_BITS) - 1) << WORK_OFFQ_DISABLE_SHIFT) #define WORK_OFFQ_POOL_NONE ((1ul << WORK_OFFQ_POOL_BITS) - 1) diff --git a/kernel/workqueue.c b/kernel/workqueue.c index e80a815ec172..baf7495338bc 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -764,6 +764,11 @@ static int work_next_color(int color) return (color + 1) % WORK_NR_COLORS; } +static unsigned long pool_offq_flags(struct worker_pool *pool) +{ + return (pool->flags & POOL_BH) ? WORK_OFFQ_BH : 0; +} + /* * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data * contain the pointer to the queued pwq. Once execution starts, the flag @@ -2122,7 +2127,8 @@ static int try_to_grab_pending(struct work_struct *work, u32 cflags, * this destroys work->data needed by the next step, stash it. */ work_data = *work_data_bits(work); - set_work_pool_and_keep_pending(work, pool->id, 0); + set_work_pool_and_keep_pending(work, pool->id, + pool_offq_flags(pool)); /* must be the last step, see the function comment */ pwq_dec_nr_in_flight(pwq, work_data); @@ -3175,7 +3181,7 @@ __acquires(&pool->lock) * PENDING and queued state changes happen together while IRQ is * disabled. 
*/ - set_work_pool_and_clear_pending(work, pool->id, 0); + set_work_pool_and_clear_pending(work, pool->id, pool_offq_flags(pool)); pwq->stats[PWQ_STAT_STARTED]++; raw_spin_unlock_irq(&pool->lock); -- cgit From ae1296a7bfe4f8e446677ccb761d9419926557bc Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Fri, 8 Mar 2024 17:42:52 +0800 Subject: workqueue: Move attrs->cpumask out of worker_pool's properties when attrs->affn_strict Allow more pools can be shared when attrs->affn_strict. Signed-off-by: Lai Jiangshan Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 3 +++ kernel/workqueue.c | 13 ++++++++----- 2 files changed, 11 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 777b0186317e..bfcf8d38f4b1 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -185,6 +185,9 @@ struct workqueue_attrs { * Below fields aren't properties of a worker_pool. They only modify how * :c:func:`apply_workqueue_attrs` select pools and thus don't * participate in pool hash calculations or equality comparisons. + * + * If @affn_strict is set, @cpumask isn't a property of a worker_pool + * either. */ /** diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 45d2aae73c96..f03960f094fa 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4625,6 +4625,8 @@ static void wqattrs_clear_for_pool(struct workqueue_attrs *attrs) { attrs->affn_scope = WQ_AFFN_NR_TYPES; attrs->ordered = false; + if (attrs->affn_strict) + cpumask_copy(attrs->cpumask, cpu_possible_mask); } /* hash value of the content of @attr */ @@ -4633,11 +4635,12 @@ static u32 wqattrs_hash(const struct workqueue_attrs *attrs) u32 hash = 0; hash = jhash_1word(attrs->nice, hash); - hash = jhash(cpumask_bits(attrs->cpumask), - BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); + hash = jhash_1word(attrs->affn_strict, hash); hash = jhash(cpumask_bits(attrs->__pod_cpumask), BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); - hash = jhash_1word(attrs->affn_strict, hash); + if (!attrs->affn_strict) + hash = jhash(cpumask_bits(attrs->cpumask), + BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash); return hash; } @@ -4647,11 +4650,11 @@ static bool wqattrs_equal(const struct workqueue_attrs *a, { if (a->nice != b->nice) return false; - if (!cpumask_equal(a->cpumask, b->cpumask)) + if (a->affn_strict != b->affn_strict) return false; if (!cpumask_equal(a->__pod_cpumask, b->__pod_cpumask)) return false; - if (a->affn_strict != b->affn_strict) + if (!a->affn_strict && !cpumask_equal(a->cpumask, b->cpumask)) return false; return true; } -- cgit From 474a549ff4c989427a14fdab851e562c8a63fe24 Mon Sep 17 00:00:00 2001 From: Allen Pais Date: Mon, 25 Mar 2024 18:02:01 +0000 Subject: workqueue: Introduce enable_and_queue_work() convenience function The enable_and_queue_work() function is introduced to streamline the process of enabling and queuing a work item on a specific workqueue. This function combines the functionalities of enable_work() and queue_work() in a single call, providing a concise and convenient API for enabling and queuing work items. The function accepts a target workqueue and a work item as parameters. It first attempts to enable the work item using enable_work(). A successful enable operation means that the work item was previously disabled and is now marked as eligible for execution. If the enable operation is successful, the work item is then queued on the specified workqueue using queue_work(). 
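For illustration, a rough sketch of the intended call site (struct mydrv and its members are assumptions, not taken from an existing driver):

    struct mydrv {
            struct workqueue_struct *wq;
            struct work_struct work;
    };

    /* Re-arm processing after a configuration change. */
    static void mydrv_config_done(struct mydrv *md)
    {
            /* Queues md->work only if this call dropped the disable depth to 0. */
            enable_and_queue_work(md->wq, &md->work);
    }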
The function returns true if the work item was successfully enabled and queued, and false otherwise. Note: This function may lead to unnecessary spurious wake-ups in cases where the work item is expected to be dormant but enable/disable are called frequently. Spurious wake-ups refer to the condition where worker threads are woken up without actual work to be done. Callers should be aware of this behavior and may need to employ additional synchronization mechanisms to avoid these overheads if such wake-ups are not desired. This addition aims to enhance code readability and maintainability by providing a unified interface for the common use case of enabling and queuing work items on a workqueue. tj: Made the function comment more compact. Signed-off-by: Allen Pais Signed-off-by: Tejun Heo --- include/linux/workqueue.h | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'include/linux') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index bfcf8d38f4b1..2df1188c0f96 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -682,6 +682,32 @@ static inline bool schedule_work(struct work_struct *work) return queue_work(system_wq, work); } +/** + * enable_and_queue_work - Enable and queue a work item on a specific workqueue + * @wq: The target workqueue + * @work: The work item to be enabled and queued + * + * This function combines the operations of enable_work() and queue_work(), + * providing a convenient way to enable and queue a work item in a single call. + * It invokes enable_work() on @work and then queues it if the disable depth + * reached 0. Returns %true if the disable depth reached 0 and @work is queued, + * and %false otherwise. + * + * Note that @work is always queued when disable depth reaches zero. If the + * desired behavior is queueing only if certain events took place while @work is + * disabled, the user should implement the necessary state tracking and perform + * explicit conditional queueing after enable_work(). + */ +static inline bool enable_and_queue_work(struct workqueue_struct *wq, + struct work_struct *work) +{ + if (enable_work(work)) { + queue_work(wq, work); + return true; + } + return false; +} + /* * Detect attempt to flush system-wide workqueues at compile time when possible. * Warn attempt to flush system-wide workqueues at runtime. -- cgit From 77fc5151f9c0e6068f1567b73d33e75a0c35333d Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Sat, 17 Feb 2024 16:42:35 +0000 Subject: device property: Move fwnode_handle_put() into property.h By having this function as static inline in the header, the compiler is able to see if can optmize the call out if (IS_ERR_OR_NULL(fwnode)) This will allow a simpler DEFINE_FREE() call in the following patch. 
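For context, a rough sketch of the early-exit pattern that the fwnode_handle_put() kernel-doc (moved into property.h below) describes; dev and "some-property" are placeholders:

    struct fwnode_handle *child;

    device_for_each_child_node(dev, child) {
            if (fwnode_property_present(child, "some-property")) {
                    /* Early exit: drop the reference taken by the iterator. */
                    fwnode_handle_put(child);
                    break;
            }
    }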
Suggested-by: Sakari Ailus Reviewed-by: Sakari Ailus Acked-by: Greg Kroah-Hartman Link: https://lore.kernel.org/r/20240217164249.921878-2-jic23@kernel.org Signed-off-by: Jonathan Cameron --- drivers/base/property.c | 14 -------------- include/linux/property.h | 14 +++++++++++++- 2 files changed, 13 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/property.c b/drivers/base/property.c index 7324a704a9a1..6a3a434d0d6f 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -868,20 +868,6 @@ struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode) } EXPORT_SYMBOL_GPL(fwnode_handle_get); -/** - * fwnode_handle_put - Drop reference to a device node - * @fwnode: Pointer to the device node to drop the reference to. - * - * This has to be used when terminating device_for_each_child_node() iteration - * with break or return to prevent stale device node references from being left - * behind. - */ -void fwnode_handle_put(struct fwnode_handle *fwnode) -{ - fwnode_call_void_op(fwnode, put); -} -EXPORT_SYMBOL_GPL(fwnode_handle_put); - /** * fwnode_device_is_available - check if a device is available for use * @fwnode: Pointer to the fwnode of the device. diff --git a/include/linux/property.h b/include/linux/property.h index 3a1045eb786c..93d992a92f59 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -180,7 +180,19 @@ struct fwnode_handle *device_get_named_child_node(const struct device *dev, const char *childname); struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); -void fwnode_handle_put(struct fwnode_handle *fwnode); + +/** + * fwnode_handle_put - Drop reference to a device node + * @fwnode: Pointer to the device node to drop the reference to. + * + * This has to be used when terminating device_for_each_child_node() iteration + * with break or return to prevent stale device node references from being left + * behind. + */ +static inline void fwnode_handle_put(struct fwnode_handle *fwnode) +{ + fwnode_call_void_op(fwnode, put); +} int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index); int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name); -- cgit From 59ed5e2d505bf5f9b4af64d0021cd0c96aec1f7c Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Sat, 17 Feb 2024 16:42:36 +0000 Subject: device property: Add cleanup.h based fwnode_handle_put() scope based cleanup. Useful where the fwnode_handle was obtained from a call such as fwnode_find_reference() as it will safely do nothing if IS_ERR() is true and will automatically release the reference on the variable leaving scope. 
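A minimal sketch of such a user, assuming the node comes from fwnode_find_reference() (fwnode and the reference name are placeholders):

    struct fwnode_handle *ref __free(fwnode_handle) =
            fwnode_find_reference(fwnode, "some-ref", 0);

    if (IS_ERR(ref))
            return PTR_ERR(ref);

    /* ... use ref; the reference is dropped automatically on scope exit ... */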
Reviewed-by: Andy Shevchenko Acked-by: Greg Kroah-Hartman Reviewed-by: Sakari Ailus Link: https://lore.kernel.org/r/20240217164249.921878-3-jic23@kernel.org Signed-off-by: Jonathan Cameron --- include/linux/property.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/property.h b/include/linux/property.h index 93d992a92f59..1322f0cce77a 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -194,6 +195,8 @@ static inline void fwnode_handle_put(struct fwnode_handle *fwnode) fwnode_call_void_op(fwnode, put); } +DEFINE_FREE(fwnode_handle, struct fwnode_handle *, fwnode_handle_put(_T)) + int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index); int fwnode_irq_get_byname(const struct fwnode_handle *fwnode, const char *name); -- cgit From 365130fd47af6d4317aa16a407874b699ab8d8cb Mon Sep 17 00:00:00 2001 From: Jonathan Cameron Date: Sat, 17 Feb 2024 16:42:38 +0000 Subject: device property: Introduce device_for_each_child_node_scoped() Similar to recently propose for_each_child_of_node_scoped() this new version of the loop macro instantiates a new local struct fwnode_handle * that uses the __free(fwnode_handle) auto cleanup handling so that if a reference to a node is held on early exit from the loop the reference will be released. If the loop runs to completion, the child pointer will be NULL and no action will be taken. The reason this is useful is that it removes the need for fwnode_handle_put() on early loop exits. If there is a need to retain the reference, then return_ptr(child) or no_free_ptr(child) may be used to safely disable the auto cleanup. Acked-by: Greg Kroah-Hartman Reviewed-by: Sakari Ailus Link: https://lore.kernel.org/r/20240217164249.921878-5-jic23@kernel.org Signed-off-by: Jonathan Cameron --- include/linux/property.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'include/linux') diff --git a/include/linux/property.h b/include/linux/property.h index 1322f0cce77a..61fc20e5f81f 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -175,6 +175,11 @@ struct fwnode_handle *device_get_next_child_node(const struct device *dev, for (child = device_get_next_child_node(dev, NULL); child; \ child = device_get_next_child_node(dev, child)) +#define device_for_each_child_node_scoped(dev, child) \ + for (struct fwnode_handle *child __free(fwnode_handle) = \ + device_get_next_child_node(dev, NULL); \ + child; child = device_get_next_child_node(dev, child)) + struct fwnode_handle *fwnode_get_named_child_node(const struct fwnode_handle *fwnode, const char *childname); struct fwnode_handle *device_get_named_child_node(const struct device *dev, -- cgit From abeded46bc1922e2249f6665654b5d81e7c5e1f1 Mon Sep 17 00:00:00 2001 From: Igor Pylypiv Date: Thu, 7 Mar 2024 13:44:12 -0800 Subject: scsi: ata: libata-sata: Factor out NCQ Priority configuration helpers Export libata NCQ Priority configuration helpers to be reused for libsas managed SATA devices. Switched locking from spin_lock_irq() to spin_lock_irqsave(). In the future someone might call these helper functions when interrupts are disabled. spin_unlock_irq() could lead to a premature re-enabling of interrupts, whereas spin_unlock_irqrestore() restores the interrupt state to its condition prior to the spin_lock_irqsave() call. 
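A rough sketch of how a libsas LLDD sysfs handler might use the exported helpers (the mydrv_* wrapper is hypothetical; only the ata_ncq_prio_*() signatures are taken from this patch):

    static int mydrv_set_ncq_prio(struct ata_port *ap, struct scsi_device *sdev,
                                  bool enable)
    {
            bool supported;
            int rc;

            rc = ata_ncq_prio_supported(ap, sdev, &supported);
            if (rc)
                    return rc;
            if (!supported)
                    return -EOPNOTSUPP;

            return ata_ncq_prio_enable(ap, sdev, enable);
    }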
Acked-by: Damien Le Moal Reviewed-by: Jason Yan Reviewed-by: Hannes Reinecke Signed-off-by: Igor Pylypiv Link: https://lore.kernel.org/r/20240307214418.3812290-2-ipylypiv@google.com Reviewed-by: Niklas Cassel Signed-off-by: Martin K. Petersen --- drivers/ata/libata-sata.c | 160 +++++++++++++++++++++++++++++++++++----------- include/linux/libata.h | 6 ++ 2 files changed, 128 insertions(+), 38 deletions(-) (limited to 'include/linux') diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c index 0fb1934875f2..a8d773003d74 100644 --- a/drivers/ata/libata-sata.c +++ b/drivers/ata/libata-sata.c @@ -848,80 +848,143 @@ DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR, ata_scsi_lpm_show, ata_scsi_lpm_store); EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy); -static ssize_t ata_ncq_prio_supported_show(struct device *device, - struct device_attribute *attr, - char *buf) +/** + * ata_ncq_prio_supported - Check if device supports NCQ Priority + * @ap: ATA port of the target device + * @sdev: SCSI device + * @supported: Address of a boolean to store the result + * + * Helper to check if device supports NCQ Priority feature. + * + * Context: Any context. Takes and releases @ap->lock. + * + * Return: + * * %0 - OK. Status is stored into @supported + * * %-ENODEV - Failed to find the ATA device + */ +int ata_ncq_prio_supported(struct ata_port *ap, struct scsi_device *sdev, + bool *supported) { - struct scsi_device *sdev = to_scsi_device(device); - struct ata_port *ap = ata_shost_to_port(sdev->host); struct ata_device *dev; - bool ncq_prio_supported; + unsigned long flags; int rc = 0; - spin_lock_irq(ap->lock); + spin_lock_irqsave(ap->lock, flags); dev = ata_scsi_find_dev(ap, sdev); if (!dev) rc = -ENODEV; else - ncq_prio_supported = dev->flags & ATA_DFLAG_NCQ_PRIO; - spin_unlock_irq(ap->lock); + *supported = dev->flags & ATA_DFLAG_NCQ_PRIO; + spin_unlock_irqrestore(ap->lock, flags); + + return rc; +} +EXPORT_SYMBOL_GPL(ata_ncq_prio_supported); + +static ssize_t ata_ncq_prio_supported_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(device); + struct ata_port *ap = ata_shost_to_port(sdev->host); + bool supported; + int rc; + + rc = ata_ncq_prio_supported(ap, sdev, &supported); + if (rc) + return rc; - return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_supported); + return sysfs_emit(buf, "%d\n", supported); } DEVICE_ATTR(ncq_prio_supported, S_IRUGO, ata_ncq_prio_supported_show, NULL); EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_supported); -static ssize_t ata_ncq_prio_enable_show(struct device *device, - struct device_attribute *attr, - char *buf) +/** + * ata_ncq_prio_enabled - Check if NCQ Priority is enabled + * @ap: ATA port of the target device + * @sdev: SCSI device + * @enabled: Address of a boolean to store the result + * + * Helper to check if NCQ Priority feature is enabled. + * + * Context: Any context. Takes and releases @ap->lock. + * + * Return: + * * %0 - OK. 
Status is stored into @enabled + * * %-ENODEV - Failed to find the ATA device + */ +int ata_ncq_prio_enabled(struct ata_port *ap, struct scsi_device *sdev, + bool *enabled) { - struct scsi_device *sdev = to_scsi_device(device); - struct ata_port *ap = ata_shost_to_port(sdev->host); struct ata_device *dev; - bool ncq_prio_enable; + unsigned long flags; int rc = 0; - spin_lock_irq(ap->lock); + spin_lock_irqsave(ap->lock, flags); dev = ata_scsi_find_dev(ap, sdev); if (!dev) rc = -ENODEV; else - ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED; - spin_unlock_irq(ap->lock); + *enabled = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED; + spin_unlock_irqrestore(ap->lock, flags); - return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_enable); + return rc; } +EXPORT_SYMBOL_GPL(ata_ncq_prio_enabled); -static ssize_t ata_ncq_prio_enable_store(struct device *device, - struct device_attribute *attr, - const char *buf, size_t len) +static ssize_t ata_ncq_prio_enable_show(struct device *device, + struct device_attribute *attr, + char *buf) { struct scsi_device *sdev = to_scsi_device(device); - struct ata_port *ap; - struct ata_device *dev; - long int input; - int rc = 0; + struct ata_port *ap = ata_shost_to_port(sdev->host); + bool enabled; + int rc; - rc = kstrtol(buf, 10, &input); + rc = ata_ncq_prio_enabled(ap, sdev, &enabled); if (rc) return rc; - if ((input < 0) || (input > 1)) - return -EINVAL; - ap = ata_shost_to_port(sdev->host); - dev = ata_scsi_find_dev(ap, sdev); - if (unlikely(!dev)) - return -ENODEV; + return sysfs_emit(buf, "%d\n", enabled); +} + +/** + * ata_ncq_prio_enable - Enable/disable NCQ Priority + * @ap: ATA port of the target device + * @sdev: SCSI device + * @enable: true - enable NCQ Priority, false - disable NCQ Priority + * + * Helper to enable/disable NCQ Priority feature. + * + * Context: Any context. Takes and releases @ap->lock. + * + * Return: + * * %0 - OK. Status is stored into @enabled + * * %-ENODEV - Failed to find the ATA device + * * %-EINVAL - NCQ Priority is not supported or CDL is enabled + */ +int ata_ncq_prio_enable(struct ata_port *ap, struct scsi_device *sdev, + bool enable) +{ + struct ata_device *dev; + unsigned long flags; + int rc = 0; + + spin_lock_irqsave(ap->lock, flags); - spin_lock_irq(ap->lock); + dev = ata_scsi_find_dev(ap, sdev); + if (!dev) { + rc = -ENODEV; + goto unlock; + } if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) { rc = -EINVAL; goto unlock; } - if (input) { + if (enable) { if (dev->flags & ATA_DFLAG_CDL_ENABLED) { ata_dev_err(dev, "CDL must be disabled to enable NCQ priority\n"); @@ -934,9 +997,30 @@ static ssize_t ata_ncq_prio_enable_store(struct device *device, } unlock: - spin_unlock_irq(ap->lock); + spin_unlock_irqrestore(ap->lock, flags); + + return rc; +} +EXPORT_SYMBOL_GPL(ata_ncq_prio_enable); + +static ssize_t ata_ncq_prio_enable_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct scsi_device *sdev = to_scsi_device(device); + struct ata_port *ap = ata_shost_to_port(sdev->host); + bool enable; + int rc; + + rc = kstrtobool(buf, &enable); + if (rc) + return rc; + + rc = ata_ncq_prio_enable(ap, sdev, enable); + if (rc) + return rc; - return rc ? 
rc : len; + return len; } DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR, diff --git a/include/linux/libata.h b/include/linux/libata.h index 26d68115afb8..6dd9a4f9ca7c 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1157,6 +1157,12 @@ extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth); extern int ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev, int queue_depth); +extern int ata_ncq_prio_supported(struct ata_port *ap, struct scsi_device *sdev, + bool *supported); +extern int ata_ncq_prio_enabled(struct ata_port *ap, struct scsi_device *sdev, + bool *enabled); +extern int ata_ncq_prio_enable(struct ata_port *ap, struct scsi_device *sdev, + bool enable); extern struct ata_device *ata_dev_pair(struct ata_device *adev); extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev); extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap); -- cgit From 7b0c9f8fa3d25622c21eeaa2c095c8a36b8b08b7 Mon Sep 17 00:00:00 2001 From: Dumitru Ceclan Date: Wed, 28 Feb 2024 13:06:19 +0200 Subject: iio: adc: ad_sigma_delta: Add optional irq selection Add optional irq_num attribute to ad_sigma_delta_info structure for selecting the used interrupt line for ADC's conversion completion. Signed-off-by: Dumitru Ceclan Reviewed-by: Nuno Sa Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20240228110622.25114-2-mitrutzceclan@gmail.com Signed-off-by: Jonathan Cameron --- drivers/iio/adc/ad_sigma_delta.c | 23 ++++++++++++++--------- include/linux/iio/adc/ad_sigma_delta.h | 3 +++ 2 files changed, 17 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c index a602429cdde4..97a05f325df7 100644 --- a/drivers/iio/adc/ad_sigma_delta.c +++ b/drivers/iio/adc/ad_sigma_delta.c @@ -222,11 +222,11 @@ int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta, goto out; sigma_delta->irq_dis = false; - enable_irq(sigma_delta->spi->irq); + enable_irq(sigma_delta->irq_line); timeout = wait_for_completion_timeout(&sigma_delta->completion, 2 * HZ); if (timeout == 0) { sigma_delta->irq_dis = true; - disable_irq_nosync(sigma_delta->spi->irq); + disable_irq_nosync(sigma_delta->irq_line); ret = -EIO; } else { ret = 0; @@ -295,7 +295,7 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_SINGLE); sigma_delta->irq_dis = false; - enable_irq(sigma_delta->spi->irq); + enable_irq(sigma_delta->irq_line); ret = wait_for_completion_interruptible_timeout( &sigma_delta->completion, HZ); @@ -315,7 +315,7 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev, out: if (!sigma_delta->irq_dis) { - disable_irq_nosync(sigma_delta->spi->irq); + disable_irq_nosync(sigma_delta->irq_line); sigma_delta->irq_dis = true; } @@ -396,7 +396,7 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev) goto err_unlock; sigma_delta->irq_dis = false; - enable_irq(sigma_delta->spi->irq); + enable_irq(sigma_delta->irq_line); return 0; @@ -414,7 +414,7 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev) wait_for_completion_timeout(&sigma_delta->completion, HZ); if (!sigma_delta->irq_dis) { - disable_irq_nosync(sigma_delta->spi->irq); + disable_irq_nosync(sigma_delta->irq_line); sigma_delta->irq_dis = true; } @@ -516,7 +516,7 @@ static irqreturn_t ad_sd_trigger_handler(int irq, void *p) irq_handled: iio_trigger_notify_done(indio_dev->trig); sigma_delta->irq_dis = false; - 
enable_irq(sigma_delta->spi->irq); + enable_irq(sigma_delta->irq_line); return IRQ_HANDLED; } @@ -587,13 +587,13 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de sigma_delta->irq_dis = true; /* the IRQ core clears IRQ_DISABLE_UNLAZY flag when freeing an IRQ */ - irq_set_status_flags(sigma_delta->spi->irq, IRQ_DISABLE_UNLAZY); + irq_set_status_flags(sigma_delta->irq_line, IRQ_DISABLE_UNLAZY); /* Allow overwriting the flags from firmware */ if (!irq_flags) irq_flags = sigma_delta->info->irq_flags; - ret = devm_request_irq(dev, sigma_delta->spi->irq, + ret = devm_request_irq(dev, sigma_delta->irq_line, ad_sd_data_rdy_trig_poll, irq_flags | IRQF_NO_AUTOEN, indio_dev->name, @@ -673,6 +673,11 @@ int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev, } } + if (info->irq_line) + sigma_delta->irq_line = info->irq_line; + else + sigma_delta->irq_line = spi->irq; + iio_device_set_drvdata(indio_dev, sigma_delta); return 0; diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h index 719cf9cc6e1a..383614ebd760 100644 --- a/include/linux/iio/adc/ad_sigma_delta.h +++ b/include/linux/iio/adc/ad_sigma_delta.h @@ -48,6 +48,7 @@ struct iio_dev; * be used. * @irq_flags: flags for the interrupt used by the triggered buffer * @num_slots: Number of sequencer slots + * @irq_line: IRQ for reading conversions. If 0, spi->irq will be used */ struct ad_sigma_delta_info { int (*set_channel)(struct ad_sigma_delta *, unsigned int channel); @@ -62,6 +63,7 @@ struct ad_sigma_delta_info { unsigned int data_reg; unsigned long irq_flags; unsigned int num_slots; + int irq_line; }; /** @@ -89,6 +91,7 @@ struct ad_sigma_delta { unsigned int active_slots; unsigned int current_slot; unsigned int num_slots; + int irq_line; bool status_appended; /* map slots to channels in order to know what to expect from devices */ unsigned int *slots; -- cgit From 9b163e0d330debbf7dcc14b2c3e2dc19a3b50a1d Mon Sep 17 00:00:00 2001 From: David Lechner Date: Mon, 25 Mar 2024 14:22:53 -0500 Subject: spi: remove struct spi_message::is_dma_mapped There are no more users of the deprecated is_dma_mapped in struct spi_message so it can be removed. References in documentation and comments are also removed. A few similar checks if xfer->tx_dma or xfer->rx_dma are not NULL are also removed since these are now guaranteed to be NULL because they were previously set only if is_dma_mapped was true. 
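With the field gone, clients simply pass DMA-safe buffers and let the SPI core handle any mapping; a minimal sketch (spi, tx, rx and len are placeholders):

    struct spi_transfer xfer = {
            .tx_buf = tx,   /* kmalloc'ed, DMA-safe buffer */
            .rx_buf = rx,
            .len    = len,
    };
    int ret;

    /* No DMA addresses are supplied; the controller driver maps as needed. */
    ret = spi_sync_transfer(spi, &xfer, 1);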
Signed-off-by: David Lechner Link: https://msgid.link/r/20240325-spi-remove-is_dma_mapped-v2-1-d08d62b61f1c@baylibre.com Signed-off-by: Mark Brown --- Documentation/spi/pxa2xx.rst | 3 --- Documentation/spi/spi-summary.rst | 4 ---- drivers/spi/spi-atmel.c | 8 ++------ drivers/spi/spi-pxa2xx.c | 11 ----------- drivers/spi/spi.c | 7 ------- include/linux/spi/spi.h | 11 +++-------- 6 files changed, 5 insertions(+), 39 deletions(-) (limited to 'include/linux') diff --git a/Documentation/spi/pxa2xx.rst b/Documentation/spi/pxa2xx.rst index 19479b801826..43e0b758803a 100644 --- a/Documentation/spi/pxa2xx.rst +++ b/Documentation/spi/pxa2xx.rst @@ -194,9 +194,6 @@ The following logic is used to determine the type of I/O to be used on a per "spi_transfer" basis:: if spi_message.len > 65536 then - if spi_message.is_dma_mapped or rx_dma_buf != 0 or tx_dma_buf != 0 then - reject premapped transfers - print "rate limited" warning use PIO transfers diff --git a/Documentation/spi/spi-summary.rst b/Documentation/spi/spi-summary.rst index 546de37d6caf..f7f8b1573f25 100644 --- a/Documentation/spi/spi-summary.rst +++ b/Documentation/spi/spi-summary.rst @@ -419,10 +419,6 @@ any more such messages. to make extra copies unless the hardware requires it (e.g. working around hardware errata that force the use of bounce buffering). - If standard dma_map_single() handling of these buffers is inappropriate, - you can use spi_message.is_dma_mapped to tell the controller driver - that you've already provided the relevant DMA addresses. - - The basic I/O primitive is spi_async(). Async requests may be issued in any context (irq handler, task, etc) and completion is reported using a callback provided with the message. diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index bad34998454a..b62f57390d8f 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -987,8 +987,6 @@ static void atmel_spi_pdc_next_xfer(struct spi_controller *host, * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma: * - The buffer is either valid for CPU access, else NULL * - If the buffer is valid, so is its DMA address - * - * This driver manages the dma address unless message->is_dma_mapped. */ static int atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer) @@ -1374,8 +1372,7 @@ static int atmel_spi_one_transfer(struct spi_controller *host, * DMA map early, for performance (empties dcache ASAP) and * better fault reporting. 
*/ - if ((!host->cur_msg->is_dma_mapped) - && as->use_pdc) { + if (as->use_pdc) { if (atmel_spi_dma_map_xfer(as, xfer) < 0) return -ENOMEM; } @@ -1454,8 +1451,7 @@ static int atmel_spi_one_transfer(struct spi_controller *host, } } - if (!host->cur_msg->is_dma_mapped - && as->use_pdc) + if (as->use_pdc) atmel_spi_dma_unmap_xfer(host, xfer); if (as->use_pdc) diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index f2a856f6a99e..6c2a14418972 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -944,7 +944,6 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller, struct spi_transfer *transfer) { struct driver_data *drv_data = spi_controller_get_devdata(controller); - struct spi_message *message = controller->cur_msg; struct chip_data *chip = spi_get_ctldata(spi); u32 dma_thresh = chip->dma_threshold; u32 dma_burst = chip->dma_burst_size; @@ -959,16 +958,6 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller, /* Check if we can DMA this transfer */ if (transfer->len > MAX_DMA_LEN && chip->enable_dma) { - - /* Reject already-mapped transfers; PIO won't always work */ - if (message->is_dma_mapped - || transfer->rx_dma || transfer->tx_dma) { - dev_err(&spi->dev, - "Mapped transfer length of %u is greater than %d\n", - transfer->len, MAX_DMA_LEN); - return -EINVAL; - } - /* Warn ... we force this to PIO mode */ dev_warn_ratelimited(&spi->dev, "DMA disabled for transfer length %u greater than %d\n", diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index ff75838c1b5d..a2f01116ba09 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -3709,9 +3709,6 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, * to the same values as *xferp, so tx_buf, rx_buf and len * are all identical (as well as most others) * so we just have to fix up len and the pointers. - * - * This also includes support for the depreciated - * spi_message.is_dma_mapped interface. */ /* @@ -3725,12 +3722,8 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr, /* Update rx_buf, tx_buf and DMA */ if (xfers[i].rx_buf) xfers[i].rx_buf += offset; - if (xfers[i].rx_dma) - xfers[i].rx_dma += offset; if (xfers[i].tx_buf) xfers[i].tx_buf += offset; - if (xfers[i].tx_dma) - xfers[i].tx_dma += offset; /* Update length */ xfers[i].len = min(maxsize, xfers[i].len - offset); diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index c459809efee4..b589e2547439 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -955,8 +955,8 @@ struct spi_res { * struct spi_transfer - a read/write buffer pair * @tx_buf: data to be written (DMA-safe memory), or NULL * @rx_buf: data to be read (DMA-safe memory), or NULL - * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped - * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped + * @tx_dma: DMA address of tx_buf, currently not for client use + * @rx_dma: DMA address of rx_buf, currently not for client use * @tx_nbits: number of bits used for writing. If 0 the default * (SPI_NBITS_SINGLE) is used. * @rx_nbits: number of bits used for reading. If 0 the default @@ -1066,8 +1066,7 @@ struct spi_transfer { /* * It's okay if tx_buf == rx_buf (right?). * For MicroWire, one buffer must be NULL. - * Buffers must work with dma_*map_single() calls, unless - * spi_message.is_dma_mapped reports a pre-existing mapping. + * Buffers must work with dma_*map_single() calls. 
*/ const void *tx_buf; void *rx_buf; @@ -1111,8 +1110,6 @@ struct spi_transfer { * struct spi_message - one multi-segment SPI transaction * @transfers: list of transfer segments in this transaction * @spi: SPI device to which the transaction is queued - * @is_dma_mapped: if true, the caller provided both DMA and CPU virtual - * addresses for each transfer buffer * @pre_optimized: peripheral driver pre-optimized the message * @optimized: the message is in the optimized state * @prepared: spi_prepare_message was called for the this message @@ -1146,8 +1143,6 @@ struct spi_message { struct spi_device *spi; - unsigned is_dma_mapped:1; - /* spi_optimize_message() was called for this message */ bool pre_optimized; /* __spi_optimize_message() was called for this message */ -- cgit From 2a82bb02941fb53d1f8df2a360e7798ae3d9d962 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 7 Mar 2024 21:29:12 -0500 Subject: statx: stx_subvol Add a new statx field for (sub)volume identifiers, as implemented by btrfs and bcachefs. This includes bcachefs support; we'll definitely want btrfs support as well. Link: https://lore.kernel.org/linux-fsdevel/2uvhm6gweyl7iyyp2xpfryvcu2g3padagaeqcbiavjyiis6prl@yjm725bizncq/ Signed-off-by: Kent Overstreet Cc: Josef Bacik Cc: Miklos Szeredi Cc: Christian Brauner Cc: David Howells Signed-off-by: Kent Overstreet Link: https://lore.kernel.org/r/20240308022914.196982-1-kent.overstreet@linux.dev Signed-off-by: Johannes Thumshirn Signed-off-by: Christian Brauner --- fs/bcachefs/fs.c | 3 +++ fs/btrfs/inode.c | 3 +++ fs/stat.c | 1 + include/linux/stat.h | 1 + include/uapi/linux/stat.h | 4 +++- 5 files changed, 11 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c index 0ccee05f6887..99a0abeadbe2 100644 --- a/fs/bcachefs/fs.c +++ b/fs/bcachefs/fs.c @@ -841,6 +841,9 @@ static int bch2_getattr(struct mnt_idmap *idmap, stat->blksize = block_bytes(c); stat->blocks = inode->v.i_blocks; + stat->subvol = inode->ei_subvol; + stat->result_mask |= STATX_SUBVOL; + if (request_mask & STATX_BTIME) { stat->result_mask |= STATX_BTIME; stat->btime = bch2_time_to_timespec(c, inode->ei_inode.bi_otime); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 37701531eeb1..8cf692c708d7 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -8779,6 +8779,9 @@ static int btrfs_getattr(struct mnt_idmap *idmap, generic_fillattr(idmap, request_mask, inode, stat); stat->dev = BTRFS_I(inode)->root->anon_dev; + stat->subvol = BTRFS_I(inode)->root->root_key.objectid; + stat->result_mask |= STATX_SUBVOL; + spin_lock(&BTRFS_I(inode)->lock); delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes; inode_bytes = inode_get_bytes(inode); diff --git a/fs/stat.c b/fs/stat.c index 77cdc69eb422..70bd3e888cfa 100644 --- a/fs/stat.c +++ b/fs/stat.c @@ -658,6 +658,7 @@ cp_statx(const struct kstat *stat, struct statx __user *buffer) tmp.stx_mnt_id = stat->mnt_id; tmp.stx_dio_mem_align = stat->dio_mem_align; tmp.stx_dio_offset_align = stat->dio_offset_align; + tmp.stx_subvol = stat->subvol; return copy_to_user(buffer, &tmp, sizeof(tmp)) ? -EFAULT : 0; } diff --git a/include/linux/stat.h b/include/linux/stat.h index 52150570d37a..bf92441dbad2 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -53,6 +53,7 @@ struct kstat { u32 dio_mem_align; u32 dio_offset_align; u64 change_cookie; + u64 subvol; }; /* These definitions are internal to the kernel for now. Mainly used by nfsd. 
*/ diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h index 2f2ee82d5517..67626d535316 100644 --- a/include/uapi/linux/stat.h +++ b/include/uapi/linux/stat.h @@ -126,8 +126,9 @@ struct statx { __u64 stx_mnt_id; __u32 stx_dio_mem_align; /* Memory buffer alignment for direct I/O */ __u32 stx_dio_offset_align; /* File offset alignment for direct I/O */ + __u64 stx_subvol; /* Subvolume identifier */ /* 0xa0 */ - __u64 __spare3[12]; /* Spare space for future expansion */ + __u64 __spare3[11]; /* Spare space for future expansion */ /* 0x100 */ }; @@ -155,6 +156,7 @@ struct statx { #define STATX_MNT_ID 0x00001000U /* Got stx_mnt_id */ #define STATX_DIOALIGN 0x00002000U /* Want/got direct I/O alignment info */ #define STATX_MNT_ID_UNIQUE 0x00004000U /* Want/got extended stx_mount_id */ +#define STATX_SUBVOL 0x00008000U /* Want/got stx_subvol */ #define STATX__RESERVED 0x80000000U /* Reserved for future struct statx expansion */ -- cgit From 7b30851a70645400ba37a184b9a2f10e1d6987a6 Mon Sep 17 00:00:00 2001 From: "Luis Henriques (SUSE)" Date: Tue, 12 Mar 2024 10:47:57 +0000 Subject: fs_parser: move fsparam_string_empty() helper into header Since both ext4 and overlayfs define the same macro to specify string parameters that may allow empty values, define it in an header file so that this helper can be shared. Signed-off-by: Luis Henriques (SUSE) Link: https://lore.kernel.org/r/20240312104757.27333-1-luis.henriques@linux.dev Signed-off-by: Christian Brauner --- fs/ext4/super.c | 4 ---- fs/overlayfs/params.c | 4 ---- include/linux/fs_parser.h | 4 ++++ 3 files changed, 4 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/fs/ext4/super.c b/fs/ext4/super.c index cfb8449c731f..2e906dbbe015 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1723,10 +1723,6 @@ static const struct constant_table ext4_param_dax[] = { {} }; -/* String parameter that allows empty argument */ -#define fsparam_string_empty(NAME, OPT) \ - __fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL) - /* * Mount option specification * We don't use fsparam_flag_no because of the way we set the diff --git a/fs/overlayfs/params.c b/fs/overlayfs/params.c index 36dcc530ac28..4860fcc4611b 100644 --- a/fs/overlayfs/params.c +++ b/fs/overlayfs/params.c @@ -139,10 +139,6 @@ static int ovl_verity_mode_def(void) return OVL_VERITY_OFF; } -#define fsparam_string_empty(NAME, OPT) \ - __fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL) - - const struct fs_parameter_spec ovl_parameter_spec[] = { fsparam_string_empty("lowerdir", Opt_lowerdir), fsparam_string("lowerdir+", Opt_lowerdir_add), diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h index 01542c4b87a2..d3350979115f 100644 --- a/include/linux/fs_parser.h +++ b/include/linux/fs_parser.h @@ -132,4 +132,8 @@ static inline bool fs_validate_description(const char *name, #define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0, NULL) #define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0, NULL) +/* String parameter that allows empty argument */ +#define fsparam_string_empty(NAME, OPT) \ + __fsparam(fs_param_is_string, NAME, OPT, fs_param_can_be_empty, NULL) + #endif /* _LINUX_FS_PARSER_H */ -- cgit From 80d2eefcb4c84aa9018b2a997ab3a4c567bc821a Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 25 Mar 2024 08:40:30 +0100 Subject: net: Use backlog-NAPI to clean up the defer_list. 
The defer_list is a per-CPU list which is used to free skbs outside of the socket lock and on the CPU on which they have been allocated. The list is processed during NAPI callbacks, so ideally the list is cleaned up there. Should the number of skbs on the list exceed a certain watermark, the softirq is triggered remotely on the target CPU by invoking a remote function call. Raising softirqs via a remote function call leads to waking ksoftirqd on PREEMPT_RT, which is undesired. The backlog-NAPI threads already provide the infrastructure that can be used to perform the cleanup of the defer_list. The NAPI state is updated with the input_pkt_queue.lock acquired. In order not to break that state, the backlog-NAPI thread also needs to be woken with the lock held. This requires taking the lock in rps_lock_irq*() if the backlog-NAPI threads are used, even with RPS disabled. Move the logic of remotely starting softirqs to clean up the defer_list into kick_defer_list_purge(). Make sure the lock is held in rps_lock_irq*() if backlog-NAPI threads are used. Schedule backlog-NAPI for defer_list cleanup if backlog-NAPI is available. Acked-by: Jakub Kicinski Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Paolo Abeni --- include/linux/netdevice.h | 1 + net/core/dev.c | 25 +++++++++++++++++++++---- net/core/skbuff.c | 4 ++-- 3 files changed, 24 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index cb37817d6382..e41d30ebaca6 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3287,6 +3287,7 @@ static inline void dev_xmit_recursion_dec(void) __this_cpu_dec(softnet_data.xmit.recursion); } +void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu); void __netif_schedule(struct Qdisc *q); void netif_schedule_queue(struct netdev_queue *txq); diff --git a/net/core/dev.c b/net/core/dev.c index ea574df49640..85e11dabab48 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -226,7 +226,7 @@ static bool use_backlog_threads(void) static inline void rps_lock_irqsave(struct softnet_data *sd, unsigned long *flags) { - if (IS_ENABLED(CONFIG_RPS)) + if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads()) spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags); else if (!IS_ENABLED(CONFIG_PREEMPT_RT)) local_irq_save(*flags); @@ -234,7 +234,7 @@ static inline void rps_lock_irq_disable(struct softnet_data *sd) { - if (IS_ENABLED(CONFIG_RPS)) + if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads()) spin_lock_irq(&sd->input_pkt_queue.lock); else if (!IS_ENABLED(CONFIG_PREEMPT_RT)) local_irq_disable(); @@ -243,7 +243,7 @@ static inline void rps_unlock_irq_restore(struct softnet_data *sd, unsigned long *flags) { - if (IS_ENABLED(CONFIG_RPS)) + if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads()) spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags); else if (!IS_ENABLED(CONFIG_PREEMPT_RT)) local_irq_restore(*flags); @@ -251,7 +251,7 @@ static inline void rps_unlock_irq_enable(struct softnet_data *sd) { - if (IS_ENABLED(CONFIG_RPS)) + if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads()) spin_unlock_irq(&sd->input_pkt_queue.lock); else if (!IS_ENABLED(CONFIG_PREEMPT_RT)) local_irq_enable(); @@ -4722,6 +4722,23 @@ static void napi_schedule_rps(struct softnet_data *sd) 
__napi_schedule_irqoff(&mysd->backlog); } +void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu) +{ + unsigned long flags; + + if (use_backlog_threads()) { + rps_lock_irqsave(sd, &flags); + + if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) + __napi_schedule_irqoff(&sd->backlog); + + rps_unlock_irq_restore(sd, &flags); + + } else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) { + smp_call_function_single_async(cpu, &sd->defer_csd); + } +} + #ifdef CONFIG_NET_FLOW_LIMIT int netdev_flow_limit_table_len __read_mostly = (1 << 12); #endif diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b99127712e67..17617c29be2d 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -7039,8 +7039,8 @@ nodefer: __kfree_skb(skb); /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU * if we are unlucky enough (this seems very unlikely). */ - if (unlikely(kick) && !cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) - smp_call_function_single_async(cpu, &sd->defer_csd); + if (unlikely(kick)) + kick_defer_list_purge(sd, cpu); } static void skb_splice_csum_page(struct sk_buff *skb, struct page *page, -- cgit From 264a2c52062802ea6052011015cb3d919dc14d9f Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Mon, 25 Mar 2024 20:46:19 +0000 Subject: firmware: arm_scmi: Simplify scmi_devm_notifier_unregister Unregistering SCMI notifications using the managed devres interface can be done providing as a reference simply the previously successfully registered notification block since it could have been registered only on one kernel notification_chain: drop any reference to SCMI protocol, events and sources. Devres internal helpers can search for the provided notification block reference and, once found, the associated devres object will already provide the above SCMI references for the event. Signed-off-by: Cristian Marussi Link: https://lore.kernel.org/r/20240325204620.1437237-5-cristian.marussi@arm.com Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/notify.c | 30 ++++-------------------------- include/linux/scmi_protocol.h | 2 -- 2 files changed, 4 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c index 27c52531194d..e160ecb22948 100644 --- a/drivers/firmware/arm_scmi/notify.c +++ b/drivers/firmware/arm_scmi/notify.c @@ -1513,17 +1513,12 @@ static int scmi_devm_notifier_register(struct scmi_device *sdev, static int scmi_devm_notifier_match(struct device *dev, void *res, void *data) { struct scmi_notifier_devres *dres = res; - struct scmi_notifier_devres *xres = data; + struct notifier_block *nb = data; - if (WARN_ON(!dres || !xres)) + if (WARN_ON(!dres || !nb)) return 0; - return dres->proto_id == xres->proto_id && - dres->evt_id == xres->evt_id && - dres->nb == xres->nb && - ((!dres->src_id && !xres->src_id) || - (dres->src_id && xres->src_id && - dres->__src_id == xres->__src_id)); + return dres->nb == nb; } /** @@ -1531,10 +1526,6 @@ static int scmi_devm_notifier_match(struct device *dev, void *res, void *data) * notifier_block for an event * @sdev: A reference to an scmi_device whose embedded struct device is to * be used for devres accounting. 
- * @proto_id: Protocol ID - * @evt_id: Event ID - * @src_id: Source ID, when NULL register for events coming form ALL possible - * sources * @nb: A standard notifier block to register for the specified event * * Generic devres managed helper to explicitly un-register a notifier_block @@ -1544,25 +1535,12 @@ static int scmi_devm_notifier_match(struct device *dev, void *res, void *data) * Return: 0 on Success */ static int scmi_devm_notifier_unregister(struct scmi_device *sdev, - u8 proto_id, u8 evt_id, - const u32 *src_id, struct notifier_block *nb) { int ret; - struct scmi_notifier_devres dres; - - dres.handle = sdev->handle; - dres.proto_id = proto_id; - dres.evt_id = evt_id; - if (src_id) { - dres.__src_id = *src_id; - dres.src_id = &dres.__src_id; - } else { - dres.src_id = NULL; - } ret = devres_release(&sdev->dev, scmi_devm_release_notifier, - scmi_devm_notifier_match, &dres); + scmi_devm_notifier_match, nb); WARN_ON(ret); diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index b807141acc14..a3addb07e00a 100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -783,8 +783,6 @@ struct scmi_notify_ops { const u32 *src_id, struct notifier_block *nb); int (*devm_event_notifier_unregister)(struct scmi_device *sdev, - u8 proto_id, u8 evt_id, - const u32 *src_id, struct notifier_block *nb); int (*event_notifier_register)(const struct scmi_handle *handle, u8 proto_id, u8 evt_id, -- cgit From 6e06312035032924fc97f2050bfe85e63ca26514 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 25 Mar 2024 13:41:55 +0000 Subject: net: remove skb_free_datagram_locked() Last user of skb_free_datagram_locked() went away in 2016 with commit 850cbaddb52d ("udp: use it's own memory accounting schema"). Signed-off-by: Eric Dumazet Reviewed-by: Jason Xing Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20240325134155.620531-1-edumazet@google.com Signed-off-by: Paolo Abeni --- include/linux/skbuff.h | 6 ------ net/core/datagram.c | 19 ------------------- 2 files changed, 25 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 0c7c67b3a87b..b945af8a6208 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -4063,12 +4063,6 @@ int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset, struct iov_iter *from, int len); int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm); void skb_free_datagram(struct sock *sk, struct sk_buff *skb); -void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len); -static inline void skb_free_datagram_locked(struct sock *sk, - struct sk_buff *skb) -{ - __skb_free_datagram_locked(sk, skb, 0); -} int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags); int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len); int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len); diff --git a/net/core/datagram.c b/net/core/datagram.c index a8b625abe242..e614cfd8e14a 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -324,25 +324,6 @@ void skb_free_datagram(struct sock *sk, struct sk_buff *skb) } EXPORT_SYMBOL(skb_free_datagram); -void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len) -{ - bool slow; - - if (!skb_unref(skb)) { - sk_peek_offset_bwd(sk, len); - return; - } - - slow = lock_sock_fast(sk); - sk_peek_offset_bwd(sk, len); - skb_orphan(skb); - unlock_sock_fast(sk, slow); - - /* skb is now orphaned, can be freed outside of 
 locked section */ - __kfree_skb(skb); -} -EXPORT_SYMBOL(__skb_free_datagram_locked); - int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue, struct sk_buff *skb, unsigned int flags, void (*destructor)(struct sock *sk, -- cgit From dad983d8812975b53db83f02ae6b0ad15f018a9e Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 26 Mar 2024 20:07:52 +0200 Subject: spi: pxa2xx: Keep PXA*_SSP types together Keep the PXA*_SSP types together in enum pxa_ssp_type for better maintenance. Signed-off-by: Andy Shevchenko Link: https://msgid.link/r/20240326181027.1418989-3-andriy.shevchenko@linux.intel.com Signed-off-by: Mark Brown --- include/linux/pxa2xx_ssp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h index cd1973e6ac4b..844a2743ca94 100644 --- a/include/linux/pxa2xx_ssp.h +++ b/include/linux/pxa2xx_ssp.h @@ -217,9 +217,9 @@ enum pxa_ssp_type { PXA27x_SSP, PXA3xx_SSP, PXA168_SSP, - MMP2_SSP, PXA910_SSP, CE4100_SSP, + MMP2_SSP, MRFLD_SSP, QUARK_X1000_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ -- cgit From 31e7f6c015d9eb35e77ae9868801c53ab0ff19ac Mon Sep 17 00:00:00 2001 From: Javier Carrasco Date: Mon, 25 Mar 2024 10:15:12 +0100 Subject: usb: misc: onboard_hub: rename to onboard_dev This patch prepares onboard_hub to support non-hub devices by renaming the driver files and their content, the headers and their references. The comments and descriptions have been slightly modified to keep coherence and account for the specific cases that only affect onboard hubs (e.g. peer-hub). The "hub" variables in functions where "dev" (and similar names) variables already exist have been renamed to onboard_dev for clarity, which adds a few lines in cases where lines would otherwise exceed 80 characters. No new functionality has been added. 
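As a rough illustration of what the rename enables (hypothetical values, not part of this patch): a future non-hub onboard device would only need its own onboard_dev_pdata entry and a line in the onboard_dev_match table introduced below, while the power sequencing and sysfs handling stay generic.

/* Hypothetical non-hub entry; compatible string and timings are made up. */
static const struct onboard_dev_pdata example_nonhub_data = {
        .reset_us = 500,
        .num_supplies = 1,
        .supply_names = { "vdd" },
};

/* ...plus a corresponding line in onboard_dev_match: */
/* { .compatible = "usbabcd,1234", .data = &example_nonhub_data, }, */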
Acked-by: Matthias Kaehlcke Signed-off-by: Javier Carrasco Link: https://lore.kernel.org/r/20240325-onboard_xvf3500-v8-2-29e3f9222922@wolfvision.net Signed-off-by: Greg Kroah-Hartman --- .../ABI/testing/sysfs-bus-platform-onboard-usb-dev | 9 + .../ABI/testing/sysfs-bus-platform-onboard-usb-hub | 8 - MAINTAINERS | 4 +- drivers/usb/core/Makefile | 4 +- drivers/usb/core/hub.c | 8 +- drivers/usb/core/hub.h | 2 +- drivers/usb/misc/Kconfig | 16 +- drivers/usb/misc/Makefile | 2 +- drivers/usb/misc/onboard_usb_dev.c | 521 +++++++++++++++++++++ drivers/usb/misc/onboard_usb_dev.h | 103 ++++ drivers/usb/misc/onboard_usb_dev_pdevs.c | 144 ++++++ drivers/usb/misc/onboard_usb_hub.c | 506 -------------------- drivers/usb/misc/onboard_usb_hub.h | 103 ---- drivers/usb/misc/onboard_usb_hub_pdevs.c | 143 ------ include/linux/usb/onboard_dev.h | 18 + include/linux/usb/onboard_hub.h | 18 - 16 files changed, 813 insertions(+), 796 deletions(-) create mode 100644 Documentation/ABI/testing/sysfs-bus-platform-onboard-usb-dev delete mode 100644 Documentation/ABI/testing/sysfs-bus-platform-onboard-usb-hub create mode 100644 drivers/usb/misc/onboard_usb_dev.c create mode 100644 drivers/usb/misc/onboard_usb_dev.h create mode 100644 drivers/usb/misc/onboard_usb_dev_pdevs.c delete mode 100644 drivers/usb/misc/onboard_usb_hub.c delete mode 100644 drivers/usb/misc/onboard_usb_hub.h delete mode 100644 drivers/usb/misc/onboard_usb_hub_pdevs.c create mode 100644 include/linux/usb/onboard_dev.h delete mode 100644 include/linux/usb/onboard_hub.h (limited to 'include/linux') diff --git a/Documentation/ABI/testing/sysfs-bus-platform-onboard-usb-dev b/Documentation/ABI/testing/sysfs-bus-platform-onboard-usb-dev new file mode 100644 index 000000000000..b06a48c3c85a --- /dev/null +++ b/Documentation/ABI/testing/sysfs-bus-platform-onboard-usb-dev @@ -0,0 +1,9 @@ +What: /sys/bus/platform/devices//always_powered_in_suspend +Date: June 2022 +KernelVersion: 5.20 +Contact: Matthias Kaehlcke + linux-usb@vger.kernel.org +Description: + (RW) Controls whether the USB hub remains always powered + during system suspend or not. This attribute is not + available for non-hub devices. diff --git a/Documentation/ABI/testing/sysfs-bus-platform-onboard-usb-hub b/Documentation/ABI/testing/sysfs-bus-platform-onboard-usb-hub deleted file mode 100644 index 42deb0552065..000000000000 --- a/Documentation/ABI/testing/sysfs-bus-platform-onboard-usb-hub +++ /dev/null @@ -1,8 +0,0 @@ -What: /sys/bus/platform/devices//always_powered_in_suspend -Date: June 2022 -KernelVersion: 5.20 -Contact: Matthias Kaehlcke - linux-usb@vger.kernel.org -Description: - (RW) Controls whether the USB hub remains always powered - during system suspend or not. 
\ No newline at end of file diff --git a/MAINTAINERS b/MAINTAINERS index aa3b947fb080..48cbcab391c9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -16439,8 +16439,8 @@ ONBOARD USB HUB DRIVER M: Matthias Kaehlcke L: linux-usb@vger.kernel.org S: Maintained -F: Documentation/ABI/testing/sysfs-bus-platform-onboard-usb-hub -F: drivers/usb/misc/onboard_usb_hub.c +F: Documentation/ABI/testing/sysfs-bus-platform-onboard-usb-dev +F: drivers/usb/misc/onboard_usb_dev.c ONENAND FLASH DRIVER M: Kyungmin Park diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile index 7d338e9c0657..ac006abd13b3 100644 --- a/drivers/usb/core/Makefile +++ b/drivers/usb/core/Makefile @@ -12,8 +12,8 @@ usbcore-$(CONFIG_OF) += of.o usbcore-$(CONFIG_USB_PCI) += hcd-pci.o usbcore-$(CONFIG_ACPI) += usb-acpi.o -ifdef CONFIG_USB_ONBOARD_HUB -usbcore-y += ../misc/onboard_usb_hub_pdevs.o +ifdef CONFIG_USB_ONBOARD_DEV +usbcore-y += ../misc/onboard_usb_dev_pdevs.o endif obj-$(CONFIG_USB) += usbcore.o diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 3ee8455585b6..1dcae5db85b2 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include @@ -1805,7 +1805,7 @@ static void hub_disconnect(struct usb_interface *intf) if (hub->quirk_disable_autosuspend) usb_autopm_put_interface(intf); - onboard_hub_destroy_pdevs(&hub->onboard_hub_devs); + onboard_dev_destroy_pdevs(&hub->onboard_devs); kref_put(&hub->kref, hub_release); } @@ -1924,7 +1924,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id) INIT_DELAYED_WORK(&hub->leds, led_work); INIT_DELAYED_WORK(&hub->init_work, NULL); INIT_WORK(&hub->events, hub_event); - INIT_LIST_HEAD(&hub->onboard_hub_devs); + INIT_LIST_HEAD(&hub->onboard_devs); spin_lock_init(&hub->irq_urb_lock); timer_setup(&hub->irq_urb_retry, hub_retry_irq_urb, 0); usb_get_intf(intf); @@ -1954,7 +1954,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id) } if (hub_configure(hub, &desc->endpoint[0].desc) >= 0) { - onboard_hub_create_pdevs(hdev, &hub->onboard_hub_devs); + onboard_dev_create_pdevs(hdev, &hub->onboard_devs); return 0; } diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h index 43ce21c96a51..3820703b11d8 100644 --- a/drivers/usb/core/hub.h +++ b/drivers/usb/core/hub.h @@ -74,7 +74,7 @@ struct usb_hub { spinlock_t irq_urb_lock; struct timer_list irq_urb_retry; struct usb_port **ports; - struct list_head onboard_hub_devs; + struct list_head onboard_devs; }; /** diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig index c510af7baa0d..50b86d531701 100644 --- a/drivers/usb/misc/Kconfig +++ b/drivers/usb/misc/Kconfig @@ -316,18 +316,18 @@ config BRCM_USB_PINMAP signals, which are typically on dedicated pins on the chip, to any gpio. -config USB_ONBOARD_HUB - tristate "Onboard USB hub support" +config USB_ONBOARD_DEV + tristate "Onboard USB device support" depends on OF help - Say Y here if you want to support discrete onboard USB hubs that - don't require an additional control bus for initialization, but - need some non-trivial form of initialization, such as enabling a - power regulator. An example for such a hub is the Realtek - RTS5411. + Say Y here if you want to support discrete onboard USB devices + that don't require an additional control bus for initialization, + but need some non-trivial form of initialization, such as + enabling a power regulator. An example for such device is the + Realtek RTS5411 hub. 
This driver can be used as a module but its state (module vs builtin) must match the state of the USB subsystem. Enabling this config will enable the driver and it will automatically match the state of the USB subsystem. If this driver is a - module it will be called onboard_usb_hub. + module it will be called onboard_usb_dev. diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile index 0bc732bcb162..0cd5bc8f52fe 100644 --- a/drivers/usb/misc/Makefile +++ b/drivers/usb/misc/Makefile @@ -33,4 +33,4 @@ obj-$(CONFIG_USB_CHAOSKEY) += chaoskey.o obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga/ obj-$(CONFIG_USB_LINK_LAYER_TEST) += lvstest.o obj-$(CONFIG_BRCM_USB_PINMAP) += brcmstb-usb-pinmap.o -obj-$(CONFIG_USB_ONBOARD_HUB) += onboard_usb_hub.o +obj-$(CONFIG_USB_ONBOARD_DEV) += onboard_usb_dev.o diff --git a/drivers/usb/misc/onboard_usb_dev.c b/drivers/usb/misc/onboard_usb_dev.c new file mode 100644 index 000000000000..6aee43896216 --- /dev/null +++ b/drivers/usb/misc/onboard_usb_dev.c @@ -0,0 +1,521 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver for onboard USB devices + * + * Copyright (c) 2022, Google LLC + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "onboard_usb_dev.h" + +static void onboard_dev_attach_usb_driver(struct work_struct *work); + +static struct usb_device_driver onboard_dev_usbdev_driver; +static DECLARE_WORK(attach_usb_driver_work, onboard_dev_attach_usb_driver); + +/************************** Platform driver **************************/ + +struct usbdev_node { + struct usb_device *udev; + struct list_head list; +}; + +struct onboard_dev { + struct regulator_bulk_data supplies[MAX_SUPPLIES]; + struct device *dev; + const struct onboard_dev_pdata *pdata; + struct gpio_desc *reset_gpio; + bool always_powered_in_suspend; + bool is_powered_on; + bool going_away; + struct list_head udev_list; + struct mutex lock; + struct clk *clk; +}; + +static int onboard_dev_get_regulators(struct onboard_dev *onboard_dev) +{ + const char * const *supply_names = onboard_dev->pdata->supply_names; + unsigned int num_supplies = onboard_dev->pdata->num_supplies; + struct device *dev = onboard_dev->dev; + unsigned int i; + int err; + + if (num_supplies > MAX_SUPPLIES) + return dev_err_probe(dev, -EINVAL, "max %d supplies supported!\n", + MAX_SUPPLIES); + + for (i = 0; i < num_supplies; i++) + onboard_dev->supplies[i].supply = supply_names[i]; + + err = devm_regulator_bulk_get(dev, num_supplies, onboard_dev->supplies); + if (err) + dev_err(dev, "Failed to get regulator supplies: %pe\n", + ERR_PTR(err)); + + return err; +} + +static int onboard_dev_power_on(struct onboard_dev *onboard_dev) +{ + int err; + + err = clk_prepare_enable(onboard_dev->clk); + if (err) { + dev_err(onboard_dev->dev, "failed to enable clock: %pe\n", + ERR_PTR(err)); + return err; + } + + err = regulator_bulk_enable(onboard_dev->pdata->num_supplies, + onboard_dev->supplies); + if (err) { + dev_err(onboard_dev->dev, "failed to enable supplies: %pe\n", + ERR_PTR(err)); + return err; + } + + fsleep(onboard_dev->pdata->reset_us); + gpiod_set_value_cansleep(onboard_dev->reset_gpio, 0); + + onboard_dev->is_powered_on = true; + + return 0; +} + +static int onboard_dev_power_off(struct onboard_dev *onboard_dev) +{ + int err; + + gpiod_set_value_cansleep(onboard_dev->reset_gpio, 1); + + err = 
regulator_bulk_disable(onboard_dev->pdata->num_supplies, + onboard_dev->supplies); + if (err) { + dev_err(onboard_dev->dev, "failed to disable supplies: %pe\n", + ERR_PTR(err)); + return err; + } + + clk_disable_unprepare(onboard_dev->clk); + + onboard_dev->is_powered_on = false; + + return 0; +} + +static int __maybe_unused onboard_dev_suspend(struct device *dev) +{ + struct onboard_dev *onboard_dev = dev_get_drvdata(dev); + struct usbdev_node *node; + bool power_off = true; + + if (onboard_dev->always_powered_in_suspend) + return 0; + + mutex_lock(&onboard_dev->lock); + + list_for_each_entry(node, &onboard_dev->udev_list, list) { + if (!device_may_wakeup(node->udev->bus->controller)) + continue; + + if (usb_wakeup_enabled_descendants(node->udev)) { + power_off = false; + break; + } + } + + mutex_unlock(&onboard_dev->lock); + + if (!power_off) + return 0; + + return onboard_dev_power_off(onboard_dev); +} + +static int __maybe_unused onboard_dev_resume(struct device *dev) +{ + struct onboard_dev *onboard_dev = dev_get_drvdata(dev); + + if (onboard_dev->is_powered_on) + return 0; + + return onboard_dev_power_on(onboard_dev); +} + +static inline void get_udev_link_name(const struct usb_device *udev, char *buf, + size_t size) +{ + snprintf(buf, size, "usb_dev.%s", dev_name(&udev->dev)); +} + +static int onboard_dev_add_usbdev(struct onboard_dev *onboard_dev, + struct usb_device *udev) +{ + struct usbdev_node *node; + char link_name[64]; + int err; + + mutex_lock(&onboard_dev->lock); + + if (onboard_dev->going_away) { + err = -EINVAL; + goto error; + } + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) { + err = -ENOMEM; + goto error; + } + + node->udev = udev; + + list_add(&node->list, &onboard_dev->udev_list); + + mutex_unlock(&onboard_dev->lock); + + get_udev_link_name(udev, link_name, sizeof(link_name)); + WARN_ON(sysfs_create_link(&onboard_dev->dev->kobj, &udev->dev.kobj, + link_name)); + + return 0; + +error: + mutex_unlock(&onboard_dev->lock); + + return err; +} + +static void onboard_dev_remove_usbdev(struct onboard_dev *onboard_dev, + const struct usb_device *udev) +{ + struct usbdev_node *node; + char link_name[64]; + + get_udev_link_name(udev, link_name, sizeof(link_name)); + sysfs_remove_link(&onboard_dev->dev->kobj, link_name); + + mutex_lock(&onboard_dev->lock); + + list_for_each_entry(node, &onboard_dev->udev_list, list) { + if (node->udev == udev) { + list_del(&node->list); + kfree(node); + break; + } + } + + mutex_unlock(&onboard_dev->lock); +} + +static ssize_t always_powered_in_suspend_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + const struct onboard_dev *onboard_dev = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%d\n", onboard_dev->always_powered_in_suspend); +} + +static ssize_t always_powered_in_suspend_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct onboard_dev *onboard_dev = dev_get_drvdata(dev); + bool val; + int ret; + + ret = kstrtobool(buf, &val); + if (ret < 0) + return ret; + + onboard_dev->always_powered_in_suspend = val; + + return count; +} +static DEVICE_ATTR_RW(always_powered_in_suspend); + +static struct attribute *onboard_dev_attrs[] = { + &dev_attr_always_powered_in_suspend.attr, + NULL, +}; +ATTRIBUTE_GROUPS(onboard_dev); + +static void onboard_dev_attach_usb_driver(struct work_struct *work) +{ + int err; + + err = driver_attach(&onboard_dev_usbdev_driver.driver); + if (err) + pr_err("Failed to attach USB driver: %pe\n", ERR_PTR(err)); +} + +static int 
onboard_dev_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct onboard_dev *onboard_dev; + int err; + + onboard_dev = devm_kzalloc(dev, sizeof(*onboard_dev), GFP_KERNEL); + if (!onboard_dev) + return -ENOMEM; + + onboard_dev->pdata = device_get_match_data(dev); + if (!onboard_dev->pdata) + return -EINVAL; + + onboard_dev->dev = dev; + + err = onboard_dev_get_regulators(onboard_dev); + if (err) + return err; + + onboard_dev->clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(onboard_dev->clk)) + return dev_err_probe(dev, PTR_ERR(onboard_dev->clk), + "failed to get clock\n"); + + onboard_dev->reset_gpio = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(onboard_dev->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(onboard_dev->reset_gpio), + "failed to get reset GPIO\n"); + + mutex_init(&onboard_dev->lock); + INIT_LIST_HEAD(&onboard_dev->udev_list); + + dev_set_drvdata(dev, onboard_dev); + + err = onboard_dev_power_on(onboard_dev); + if (err) + return err; + + /* + * The USB driver might have been detached from the USB devices by + * onboard_dev_remove() (e.g. through an 'unbind' by userspace), + * make sure to re-attach it if needed. + * + * This needs to be done deferred to avoid self-deadlocks on systems + * with nested onboard hubs. + */ + schedule_work(&attach_usb_driver_work); + + return 0; +} + +static void onboard_dev_remove(struct platform_device *pdev) +{ + struct onboard_dev *onboard_dev = dev_get_drvdata(&pdev->dev); + struct usbdev_node *node; + struct usb_device *udev; + + onboard_dev->going_away = true; + + mutex_lock(&onboard_dev->lock); + + /* unbind the USB devices to avoid dangling references to this device */ + while (!list_empty(&onboard_dev->udev_list)) { + node = list_first_entry(&onboard_dev->udev_list, + struct usbdev_node, list); + udev = node->udev; + + /* + * Unbinding the driver will call onboard_dev_remove_usbdev(), + * which acquires onboard_dev->lock. We must release the lock + * first. + */ + get_device(&udev->dev); + mutex_unlock(&onboard_dev->lock); + device_release_driver(&udev->dev); + put_device(&udev->dev); + mutex_lock(&onboard_dev->lock); + } + + mutex_unlock(&onboard_dev->lock); + + onboard_dev_power_off(onboard_dev); +} + +MODULE_DEVICE_TABLE(of, onboard_dev_match); + +static const struct dev_pm_ops __maybe_unused onboard_dev_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(onboard_dev_suspend, onboard_dev_resume) +}; + +static struct platform_driver onboard_dev_driver = { + .probe = onboard_dev_probe, + .remove_new = onboard_dev_remove, + + .driver = { + .name = "onboard-usb-dev", + .of_match_table = onboard_dev_match, + .pm = pm_ptr(&onboard_dev_pm_ops), + .dev_groups = onboard_dev_groups, + }, +}; + +/************************** USB driver **************************/ + +#define VENDOR_ID_CYPRESS 0x04b4 +#define VENDOR_ID_GENESYS 0x05e3 +#define VENDOR_ID_MICROCHIP 0x0424 +#define VENDOR_ID_REALTEK 0x0bda +#define VENDOR_ID_TI 0x0451 +#define VENDOR_ID_VIA 0x2109 + +/* + * Returns the onboard_dev platform device that is associated with the USB + * device passed as parameter. 
+ */ +static struct onboard_dev *_find_onboard_dev(struct device *dev) +{ + struct platform_device *pdev; + struct device_node *np; + struct onboard_dev *onboard_dev; + + pdev = of_find_device_by_node(dev->of_node); + if (!pdev) { + np = of_parse_phandle(dev->of_node, "peer-hub", 0); + if (!np) { + dev_err(dev, "failed to find device node for peer hub\n"); + return ERR_PTR(-EINVAL); + } + + pdev = of_find_device_by_node(np); + of_node_put(np); + + if (!pdev) + return ERR_PTR(-ENODEV); + } + + onboard_dev = dev_get_drvdata(&pdev->dev); + put_device(&pdev->dev); + + /* + * The presence of drvdata indicates that the platform driver finished + * probing. This handles the case where (conceivably) we could be + * running at the exact same time as the platform driver's probe. If + * we detect the race we request probe deferral and we'll come back and + * try again. + */ + if (!onboard_dev) + return ERR_PTR(-EPROBE_DEFER); + + return onboard_dev; +} + +static int onboard_dev_usbdev_probe(struct usb_device *udev) +{ + struct device *dev = &udev->dev; + struct onboard_dev *onboard_dev; + int err; + + /* ignore supported devices without device tree node */ + if (!dev->of_node) + return -ENODEV; + + onboard_dev = _find_onboard_dev(dev); + if (IS_ERR(onboard_dev)) + return PTR_ERR(onboard_dev); + + dev_set_drvdata(dev, onboard_dev); + + err = onboard_dev_add_usbdev(onboard_dev, udev); + if (err) + return err; + + return 0; +} + +static void onboard_dev_usbdev_disconnect(struct usb_device *udev) +{ + struct onboard_dev *onboard_dev = dev_get_drvdata(&udev->dev); + + onboard_dev_remove_usbdev(onboard_dev, udev); +} + +static const struct usb_device_id onboard_dev_id_table[] = { + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB33{0,1,2}x/CYUSB230x 3.0 HUB */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB33{0,1,2}x/CYUSB230x 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6570) }, /* CY7C6563x 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_GENESYS, 0x0620) }, /* Genesys Logic GL3523 USB 3.1 HUB */ + { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2412) }, /* USB2412 USB 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2744) }, /* USB5744 USB 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x5744) }, /* USB5744 USB 3.0 HUB */ + { USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 HUB */ + { USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 HUB */ + { USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 HUB */ + { USB_DEVICE(VENDOR_ID_REALTEK, 0x5414) }, /* RTS5414 USB 2.1 HUB */ + { USB_DEVICE(VENDOR_ID_TI, 0x8025) }, /* TI USB8020B 3.0 HUB */ + { USB_DEVICE(VENDOR_ID_TI, 0x8027) }, /* TI USB8020B 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_TI, 0x8140) }, /* TI USB8041 3.0 HUB */ + { USB_DEVICE(VENDOR_ID_TI, 0x8142) }, /* TI USB8041 2.0 HUB */ + { USB_DEVICE(VENDOR_ID_VIA, 0x0817) }, /* VIA VL817 3.1 HUB */ + { USB_DEVICE(VENDOR_ID_VIA, 0x2817) }, /* VIA VL817 2.0 HUB */ + {} +}; +MODULE_DEVICE_TABLE(usb, onboard_dev_id_table); + +static struct usb_device_driver onboard_dev_usbdev_driver = { + .name = "onboard-usb-dev", + .probe = onboard_dev_usbdev_probe, + .disconnect = onboard_dev_usbdev_disconnect, + .generic_subclass = 1, + .supports_autosuspend = 1, + .id_table = onboard_dev_id_table, +}; 
+ +static int __init onboard_dev_init(void) +{ + int ret; + + ret = usb_register_device_driver(&onboard_dev_usbdev_driver, THIS_MODULE); + if (ret) + return ret; + + ret = platform_driver_register(&onboard_dev_driver); + if (ret) + usb_deregister_device_driver(&onboard_dev_usbdev_driver); + + return ret; +} +module_init(onboard_dev_init); + +static void __exit onboard_dev_exit(void) +{ + usb_deregister_device_driver(&onboard_dev_usbdev_driver); + platform_driver_unregister(&onboard_dev_driver); + + cancel_work_sync(&attach_usb_driver_work); +} +module_exit(onboard_dev_exit); + +MODULE_AUTHOR("Matthias Kaehlcke "); +MODULE_DESCRIPTION("Driver for discrete onboard USB devices"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/misc/onboard_usb_dev.h b/drivers/usb/misc/onboard_usb_dev.h new file mode 100644 index 000000000000..debab2895a53 --- /dev/null +++ b/drivers/usb/misc/onboard_usb_dev.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2022, Google LLC + */ + +#ifndef _USB_MISC_ONBOARD_USB_DEV_H +#define _USB_MISC_ONBOARD_USB_DEV_H + +#define MAX_SUPPLIES 2 + +struct onboard_dev_pdata { + unsigned long reset_us; /* reset pulse width in us */ + unsigned int num_supplies; /* number of supplies */ + const char * const supply_names[MAX_SUPPLIES]; +}; + +static const struct onboard_dev_pdata microchip_usb424_data = { + .reset_us = 1, + .num_supplies = 1, + .supply_names = { "vdd" }, +}; + +static const struct onboard_dev_pdata microchip_usb5744_data = { + .reset_us = 0, + .num_supplies = 2, + .supply_names = { "vdd", "vdd2" }, +}; + +static const struct onboard_dev_pdata realtek_rts5411_data = { + .reset_us = 0, + .num_supplies = 1, + .supply_names = { "vdd" }, +}; + +static const struct onboard_dev_pdata ti_tusb8020b_data = { + .reset_us = 3000, + .num_supplies = 1, + .supply_names = { "vdd" }, +}; + +static const struct onboard_dev_pdata ti_tusb8041_data = { + .reset_us = 3000, + .num_supplies = 1, + .supply_names = { "vdd" }, +}; + +static const struct onboard_dev_pdata cypress_hx3_data = { + .reset_us = 10000, + .num_supplies = 2, + .supply_names = { "vdd", "vdd2" }, +}; + +static const struct onboard_dev_pdata cypress_hx2vl_data = { + .reset_us = 1, + .num_supplies = 1, + .supply_names = { "vdd" }, +}; + +static const struct onboard_dev_pdata genesys_gl850g_data = { + .reset_us = 3, + .num_supplies = 1, + .supply_names = { "vdd" }, +}; + +static const struct onboard_dev_pdata genesys_gl852g_data = { + .reset_us = 50, + .num_supplies = 1, + .supply_names = { "vdd" }, +}; + +static const struct onboard_dev_pdata vialab_vl817_data = { + .reset_us = 10, + .num_supplies = 1, + .supply_names = { "vdd" }, +}; + +static const struct of_device_id onboard_dev_match[] = { + { .compatible = "usb424,2412", .data = µchip_usb424_data, }, + { .compatible = "usb424,2514", .data = µchip_usb424_data, }, + { .compatible = "usb424,2517", .data = µchip_usb424_data, }, + { .compatible = "usb424,2744", .data = µchip_usb5744_data, }, + { .compatible = "usb424,5744", .data = µchip_usb5744_data, }, + { .compatible = "usb451,8025", .data = &ti_tusb8020b_data, }, + { .compatible = "usb451,8027", .data = &ti_tusb8020b_data, }, + { .compatible = "usb451,8140", .data = &ti_tusb8041_data, }, + { .compatible = "usb451,8142", .data = &ti_tusb8041_data, }, + { .compatible = "usb4b4,6504", .data = &cypress_hx3_data, }, + { .compatible = "usb4b4,6506", .data = &cypress_hx3_data, }, + { .compatible = "usb4b4,6570", .data = &cypress_hx2vl_data, }, + { .compatible = "usb5e3,608", .data = 
&genesys_gl850g_data, }, + { .compatible = "usb5e3,610", .data = &genesys_gl852g_data, }, + { .compatible = "usb5e3,620", .data = &genesys_gl852g_data, }, + { .compatible = "usb5e3,626", .data = &genesys_gl852g_data, }, + { .compatible = "usbbda,411", .data = &realtek_rts5411_data, }, + { .compatible = "usbbda,5411", .data = &realtek_rts5411_data, }, + { .compatible = "usbbda,414", .data = &realtek_rts5411_data, }, + { .compatible = "usbbda,5414", .data = &realtek_rts5411_data, }, + { .compatible = "usb2109,817", .data = &vialab_vl817_data, }, + { .compatible = "usb2109,2817", .data = &vialab_vl817_data, }, + {} +}; + +#endif /* _USB_MISC_ONBOARD_USB_DEV_H */ diff --git a/drivers/usb/misc/onboard_usb_dev_pdevs.c b/drivers/usb/misc/onboard_usb_dev_pdevs.c new file mode 100644 index 000000000000..722504752ce3 --- /dev/null +++ b/drivers/usb/misc/onboard_usb_dev_pdevs.c @@ -0,0 +1,144 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * API for creating and destroying USB onboard platform devices + * + * Copyright (c) 2022, Google LLC + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "onboard_usb_dev.h" + +struct pdev_list_entry { + struct platform_device *pdev; + struct list_head node; +}; + +static bool of_is_onboard_usb_dev(struct device_node *np) +{ + return !!of_match_node(onboard_dev_match, np); +} + +/** + * onboard_dev_create_pdevs -- create platform devices for onboard USB devices + * @parent_hub : parent hub to scan for connected onboard devices + * @pdev_list : list of onboard platform devices owned by the parent hub + * + * Creates a platform device for each supported onboard device that is connected + * to the given parent hub. The platform device is in charge of initializing the + * device (enable regulators, take the device out of reset, ...). For onboard + * hubs, it can optionally control whether the device remains powered during + * system suspend or not. + * + * To keep track of the platform devices they are added to a list that is owned + * by the parent hub. + * + * Some background about the logic in this function, which can be a bit hard + * to follow: + * + * Root hubs don't have dedicated device tree nodes, but use the node of their + * HCD. The primary and secondary HCD are usually represented by a single DT + * node. That means the root hubs of the primary and secondary HCD share the + * same device tree node (the HCD node). As a result this function can be called + * twice with the same DT node for root hubs. We only want to create a single + * platform device for each physical onboard device, hence for root hubs the + * loop is only executed for the root hub of the primary HCD. Since the function + * scans through all child nodes it still creates pdevs for onboard devices + * connected to the root hub of the secondary HCD if needed. + * + * Further there must be only one platform device for onboard hubs with a peer + * hub (the hub is a single physical device). To achieve this two measures are + * taken: pdevs for onboard hubs with a peer are only created when the function + * is called on behalf of the parent hub that is connected to the primary HCD + * (directly or through other hubs). For onboard hubs connected to root hubs + * the function processes the nodes of both peers. A platform device is only + * created if the peer hub doesn't have one already. 
+ */ +void onboard_dev_create_pdevs(struct usb_device *parent_hub, struct list_head *pdev_list) +{ + int i; + struct usb_hcd *hcd = bus_to_hcd(parent_hub->bus); + struct device_node *np, *npc; + struct platform_device *pdev; + struct pdev_list_entry *pdle; + + if (!parent_hub->dev.of_node) + return; + + if (!parent_hub->parent && !usb_hcd_is_primary_hcd(hcd)) + return; + + for (i = 1; i <= parent_hub->maxchild; i++) { + np = usb_of_get_device_node(parent_hub, i); + if (!np) + continue; + + if (!of_is_onboard_usb_dev(np)) + goto node_put; + + npc = of_parse_phandle(np, "peer-hub", 0); + if (npc) { + if (!usb_hcd_is_primary_hcd(hcd)) { + of_node_put(npc); + goto node_put; + } + + pdev = of_find_device_by_node(npc); + of_node_put(npc); + + if (pdev) { + put_device(&pdev->dev); + goto node_put; + } + } + + pdev = of_platform_device_create(np, NULL, &parent_hub->dev); + if (!pdev) { + dev_err(&parent_hub->dev, + "failed to create platform device for onboard dev '%pOF'\n", np); + goto node_put; + } + + pdle = kzalloc(sizeof(*pdle), GFP_KERNEL); + if (!pdle) { + of_platform_device_destroy(&pdev->dev, NULL); + goto node_put; + } + + pdle->pdev = pdev; + list_add(&pdle->node, pdev_list); + +node_put: + of_node_put(np); + } +} +EXPORT_SYMBOL_GPL(onboard_dev_create_pdevs); + +/** + * onboard_dev_destroy_pdevs -- free resources of onboard platform devices + * @pdev_list : list of onboard platform devices + * + * Destroys the platform devices in the given list and frees the memory associated + * with the list entry. + */ +void onboard_dev_destroy_pdevs(struct list_head *pdev_list) +{ + struct pdev_list_entry *pdle, *tmp; + + list_for_each_entry_safe(pdle, tmp, pdev_list, node) { + list_del(&pdle->node); + of_platform_device_destroy(&pdle->pdev->dev, NULL); + kfree(pdle); + } +} +EXPORT_SYMBOL_GPL(onboard_dev_destroy_pdevs); diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c deleted file mode 100644 index 5308c54aaabe..000000000000 --- a/drivers/usb/misc/onboard_usb_hub.c +++ /dev/null @@ -1,506 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Driver for onboard USB hubs - * - * Copyright (c) 2022, Google LLC - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "onboard_usb_hub.h" - -static void onboard_hub_attach_usb_driver(struct work_struct *work); - -static struct usb_device_driver onboard_hub_usbdev_driver; -static DECLARE_WORK(attach_usb_driver_work, onboard_hub_attach_usb_driver); - -/************************** Platform driver **************************/ - -struct usbdev_node { - struct usb_device *udev; - struct list_head list; -}; - -struct onboard_hub { - struct regulator_bulk_data supplies[MAX_SUPPLIES]; - struct device *dev; - const struct onboard_hub_pdata *pdata; - struct gpio_desc *reset_gpio; - bool always_powered_in_suspend; - bool is_powered_on; - bool going_away; - struct list_head udev_list; - struct mutex lock; - struct clk *clk; -}; - -static int onboard_hub_get_regulators(struct onboard_hub *hub) -{ - const char * const *supply_names = hub->pdata->supply_names; - unsigned int num_supplies = hub->pdata->num_supplies; - struct device *dev = hub->dev; - unsigned int i; - int err; - - if (num_supplies > MAX_SUPPLIES) - return dev_err_probe(dev, -EINVAL, "max %d supplies supported!\n", - MAX_SUPPLIES); - - for (i = 0; i < num_supplies; i++) - hub->supplies[i].supply = 
supply_names[i]; - - err = devm_regulator_bulk_get(dev, num_supplies, hub->supplies); - if (err) - dev_err(dev, "Failed to get regulator supplies: %pe\n", - ERR_PTR(err)); - - return err; -} - -static int onboard_hub_power_on(struct onboard_hub *hub) -{ - int err; - - err = clk_prepare_enable(hub->clk); - if (err) { - dev_err(hub->dev, "failed to enable clock: %pe\n", ERR_PTR(err)); - return err; - } - - err = regulator_bulk_enable(hub->pdata->num_supplies, hub->supplies); - if (err) { - dev_err(hub->dev, "failed to enable supplies: %pe\n", ERR_PTR(err)); - return err; - } - - fsleep(hub->pdata->reset_us); - gpiod_set_value_cansleep(hub->reset_gpio, 0); - - hub->is_powered_on = true; - - return 0; -} - -static int onboard_hub_power_off(struct onboard_hub *hub) -{ - int err; - - gpiod_set_value_cansleep(hub->reset_gpio, 1); - - err = regulator_bulk_disable(hub->pdata->num_supplies, hub->supplies); - if (err) { - dev_err(hub->dev, "failed to disable supplies: %pe\n", ERR_PTR(err)); - return err; - } - - clk_disable_unprepare(hub->clk); - - hub->is_powered_on = false; - - return 0; -} - -static int __maybe_unused onboard_hub_suspend(struct device *dev) -{ - struct onboard_hub *hub = dev_get_drvdata(dev); - struct usbdev_node *node; - bool power_off = true; - - if (hub->always_powered_in_suspend) - return 0; - - mutex_lock(&hub->lock); - - list_for_each_entry(node, &hub->udev_list, list) { - if (!device_may_wakeup(node->udev->bus->controller)) - continue; - - if (usb_wakeup_enabled_descendants(node->udev)) { - power_off = false; - break; - } - } - - mutex_unlock(&hub->lock); - - if (!power_off) - return 0; - - return onboard_hub_power_off(hub); -} - -static int __maybe_unused onboard_hub_resume(struct device *dev) -{ - struct onboard_hub *hub = dev_get_drvdata(dev); - - if (hub->is_powered_on) - return 0; - - return onboard_hub_power_on(hub); -} - -static inline void get_udev_link_name(const struct usb_device *udev, char *buf, size_t size) -{ - snprintf(buf, size, "usb_dev.%s", dev_name(&udev->dev)); -} - -static int onboard_hub_add_usbdev(struct onboard_hub *hub, struct usb_device *udev) -{ - struct usbdev_node *node; - char link_name[64]; - int err; - - mutex_lock(&hub->lock); - - if (hub->going_away) { - err = -EINVAL; - goto error; - } - - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (!node) { - err = -ENOMEM; - goto error; - } - - node->udev = udev; - - list_add(&node->list, &hub->udev_list); - - mutex_unlock(&hub->lock); - - get_udev_link_name(udev, link_name, sizeof(link_name)); - WARN_ON(sysfs_create_link(&hub->dev->kobj, &udev->dev.kobj, link_name)); - - return 0; - -error: - mutex_unlock(&hub->lock); - - return err; -} - -static void onboard_hub_remove_usbdev(struct onboard_hub *hub, const struct usb_device *udev) -{ - struct usbdev_node *node; - char link_name[64]; - - get_udev_link_name(udev, link_name, sizeof(link_name)); - sysfs_remove_link(&hub->dev->kobj, link_name); - - mutex_lock(&hub->lock); - - list_for_each_entry(node, &hub->udev_list, list) { - if (node->udev == udev) { - list_del(&node->list); - kfree(node); - break; - } - } - - mutex_unlock(&hub->lock); -} - -static ssize_t always_powered_in_suspend_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - const struct onboard_hub *hub = dev_get_drvdata(dev); - - return sysfs_emit(buf, "%d\n", hub->always_powered_in_suspend); -} - -static ssize_t always_powered_in_suspend_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct onboard_hub *hub = 
dev_get_drvdata(dev); - bool val; - int ret; - - ret = kstrtobool(buf, &val); - if (ret < 0) - return ret; - - hub->always_powered_in_suspend = val; - - return count; -} -static DEVICE_ATTR_RW(always_powered_in_suspend); - -static struct attribute *onboard_hub_attrs[] = { - &dev_attr_always_powered_in_suspend.attr, - NULL, -}; -ATTRIBUTE_GROUPS(onboard_hub); - -static void onboard_hub_attach_usb_driver(struct work_struct *work) -{ - int err; - - err = driver_attach(&onboard_hub_usbdev_driver.driver); - if (err) - pr_err("Failed to attach USB driver: %pe\n", ERR_PTR(err)); -} - -static int onboard_hub_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct onboard_hub *hub; - int err; - - hub = devm_kzalloc(dev, sizeof(*hub), GFP_KERNEL); - if (!hub) - return -ENOMEM; - - hub->pdata = device_get_match_data(dev); - if (!hub->pdata) - return -EINVAL; - - hub->dev = dev; - - err = onboard_hub_get_regulators(hub); - if (err) - return err; - - hub->clk = devm_clk_get_optional(dev, NULL); - if (IS_ERR(hub->clk)) - return dev_err_probe(dev, PTR_ERR(hub->clk), "failed to get clock\n"); - - hub->reset_gpio = devm_gpiod_get_optional(dev, "reset", - GPIOD_OUT_HIGH); - if (IS_ERR(hub->reset_gpio)) - return dev_err_probe(dev, PTR_ERR(hub->reset_gpio), "failed to get reset GPIO\n"); - - mutex_init(&hub->lock); - INIT_LIST_HEAD(&hub->udev_list); - - dev_set_drvdata(dev, hub); - - err = onboard_hub_power_on(hub); - if (err) - return err; - - /* - * The USB driver might have been detached from the USB devices by - * onboard_hub_remove() (e.g. through an 'unbind' by userspace), - * make sure to re-attach it if needed. - * - * This needs to be done deferred to avoid self-deadlocks on systems - * with nested onboard hubs. - */ - schedule_work(&attach_usb_driver_work); - - return 0; -} - -static void onboard_hub_remove(struct platform_device *pdev) -{ - struct onboard_hub *hub = dev_get_drvdata(&pdev->dev); - struct usbdev_node *node; - struct usb_device *udev; - - hub->going_away = true; - - mutex_lock(&hub->lock); - - /* unbind the USB devices to avoid dangling references to this device */ - while (!list_empty(&hub->udev_list)) { - node = list_first_entry(&hub->udev_list, struct usbdev_node, list); - udev = node->udev; - - /* - * Unbinding the driver will call onboard_hub_remove_usbdev(), - * which acquires hub->lock. We must release the lock first. - */ - get_device(&udev->dev); - mutex_unlock(&hub->lock); - device_release_driver(&udev->dev); - put_device(&udev->dev); - mutex_lock(&hub->lock); - } - - mutex_unlock(&hub->lock); - - onboard_hub_power_off(hub); -} - -MODULE_DEVICE_TABLE(of, onboard_hub_match); - -static const struct dev_pm_ops __maybe_unused onboard_hub_pm_ops = { - SET_LATE_SYSTEM_SLEEP_PM_OPS(onboard_hub_suspend, onboard_hub_resume) -}; - -static struct platform_driver onboard_hub_driver = { - .probe = onboard_hub_probe, - .remove_new = onboard_hub_remove, - - .driver = { - .name = "onboard-usb-hub", - .of_match_table = onboard_hub_match, - .pm = pm_ptr(&onboard_hub_pm_ops), - .dev_groups = onboard_hub_groups, - }, -}; - -/************************** USB driver **************************/ - -#define VENDOR_ID_CYPRESS 0x04b4 -#define VENDOR_ID_GENESYS 0x05e3 -#define VENDOR_ID_MICROCHIP 0x0424 -#define VENDOR_ID_REALTEK 0x0bda -#define VENDOR_ID_TI 0x0451 -#define VENDOR_ID_VIA 0x2109 - -/* - * Returns the onboard_hub platform device that is associated with the USB - * device passed as parameter. 
- */ -static struct onboard_hub *_find_onboard_hub(struct device *dev) -{ - struct platform_device *pdev; - struct device_node *np; - struct onboard_hub *hub; - - pdev = of_find_device_by_node(dev->of_node); - if (!pdev) { - np = of_parse_phandle(dev->of_node, "peer-hub", 0); - if (!np) { - dev_err(dev, "failed to find device node for peer hub\n"); - return ERR_PTR(-EINVAL); - } - - pdev = of_find_device_by_node(np); - of_node_put(np); - - if (!pdev) - return ERR_PTR(-ENODEV); - } - - hub = dev_get_drvdata(&pdev->dev); - put_device(&pdev->dev); - - /* - * The presence of drvdata ('hub') indicates that the platform driver - * finished probing. This handles the case where (conceivably) we could - * be running at the exact same time as the platform driver's probe. If - * we detect the race we request probe deferral and we'll come back and - * try again. - */ - if (!hub) - return ERR_PTR(-EPROBE_DEFER); - - return hub; -} - -static int onboard_hub_usbdev_probe(struct usb_device *udev) -{ - struct device *dev = &udev->dev; - struct onboard_hub *hub; - int err; - - /* ignore supported hubs without device tree node */ - if (!dev->of_node) - return -ENODEV; - - hub = _find_onboard_hub(dev); - if (IS_ERR(hub)) - return PTR_ERR(hub); - - dev_set_drvdata(dev, hub); - - err = onboard_hub_add_usbdev(hub, udev); - if (err) - return err; - - return 0; -} - -static void onboard_hub_usbdev_disconnect(struct usb_device *udev) -{ - struct onboard_hub *hub = dev_get_drvdata(&udev->dev); - - onboard_hub_remove_usbdev(hub, udev); -} - -static const struct usb_device_id onboard_hub_id_table[] = { - { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6504) }, /* CYUSB33{0,1,2}x/CYUSB230x 3.0 */ - { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6506) }, /* CYUSB33{0,1,2}x/CYUSB230x 2.0 */ - { USB_DEVICE(VENDOR_ID_CYPRESS, 0x6570) }, /* CY7C6563x 2.0 */ - { USB_DEVICE(VENDOR_ID_GENESYS, 0x0608) }, /* Genesys Logic GL850G USB 2.0 */ - { USB_DEVICE(VENDOR_ID_GENESYS, 0x0610) }, /* Genesys Logic GL852G USB 2.0 */ - { USB_DEVICE(VENDOR_ID_GENESYS, 0x0620) }, /* Genesys Logic GL3523 USB 3.1 */ - { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2412) }, /* USB2412 USB 2.0 */ - { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2514) }, /* USB2514B USB 2.0 */ - { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2517) }, /* USB2517 USB 2.0 */ - { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x2744) }, /* USB5744 USB 2.0 */ - { USB_DEVICE(VENDOR_ID_MICROCHIP, 0x5744) }, /* USB5744 USB 3.0 */ - { USB_DEVICE(VENDOR_ID_REALTEK, 0x0411) }, /* RTS5411 USB 3.1 */ - { USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */ - { USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */ - { USB_DEVICE(VENDOR_ID_REALTEK, 0x5414) }, /* RTS5414 USB 2.1 */ - { USB_DEVICE(VENDOR_ID_TI, 0x8025) }, /* TI USB8020B 3.0 */ - { USB_DEVICE(VENDOR_ID_TI, 0x8027) }, /* TI USB8020B 2.0 */ - { USB_DEVICE(VENDOR_ID_TI, 0x8140) }, /* TI USB8041 3.0 */ - { USB_DEVICE(VENDOR_ID_TI, 0x8142) }, /* TI USB8041 2.0 */ - { USB_DEVICE(VENDOR_ID_VIA, 0x0817) }, /* VIA VL817 3.1 */ - { USB_DEVICE(VENDOR_ID_VIA, 0x2817) }, /* VIA VL817 2.0 */ - {} -}; -MODULE_DEVICE_TABLE(usb, onboard_hub_id_table); - -static struct usb_device_driver onboard_hub_usbdev_driver = { - .name = "onboard-usb-hub", - .probe = onboard_hub_usbdev_probe, - .disconnect = onboard_hub_usbdev_disconnect, - .generic_subclass = 1, - .supports_autosuspend = 1, - .id_table = onboard_hub_id_table, -}; - -static int __init onboard_hub_init(void) -{ - int ret; - - ret = usb_register_device_driver(&onboard_hub_usbdev_driver, THIS_MODULE); - if (ret) - return ret; - - ret = 
platform_driver_register(&onboard_hub_driver); - if (ret) - usb_deregister_device_driver(&onboard_hub_usbdev_driver); - - return ret; -} -module_init(onboard_hub_init); - -static void __exit onboard_hub_exit(void) -{ - usb_deregister_device_driver(&onboard_hub_usbdev_driver); - platform_driver_unregister(&onboard_hub_driver); - - cancel_work_sync(&attach_usb_driver_work); -} -module_exit(onboard_hub_exit); - -MODULE_AUTHOR("Matthias Kaehlcke "); -MODULE_DESCRIPTION("Driver for discrete onboard USB hubs"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h deleted file mode 100644 index edbca8aa6ea0..000000000000 --- a/drivers/usb/misc/onboard_usb_hub.h +++ /dev/null @@ -1,103 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ -/* - * Copyright (c) 2022, Google LLC - */ - -#ifndef _USB_MISC_ONBOARD_USB_HUB_H -#define _USB_MISC_ONBOARD_USB_HUB_H - -#define MAX_SUPPLIES 2 - -struct onboard_hub_pdata { - unsigned long reset_us; /* reset pulse width in us */ - unsigned int num_supplies; /* number of supplies */ - const char * const supply_names[MAX_SUPPLIES]; -}; - -static const struct onboard_hub_pdata microchip_usb424_data = { - .reset_us = 1, - .num_supplies = 1, - .supply_names = { "vdd" }, -}; - -static const struct onboard_hub_pdata microchip_usb5744_data = { - .reset_us = 0, - .num_supplies = 2, - .supply_names = { "vdd", "vdd2" }, -}; - -static const struct onboard_hub_pdata realtek_rts5411_data = { - .reset_us = 0, - .num_supplies = 1, - .supply_names = { "vdd" }, -}; - -static const struct onboard_hub_pdata ti_tusb8020b_data = { - .reset_us = 3000, - .num_supplies = 1, - .supply_names = { "vdd" }, -}; - -static const struct onboard_hub_pdata ti_tusb8041_data = { - .reset_us = 3000, - .num_supplies = 1, - .supply_names = { "vdd" }, -}; - -static const struct onboard_hub_pdata cypress_hx3_data = { - .reset_us = 10000, - .num_supplies = 2, - .supply_names = { "vdd", "vdd2" }, -}; - -static const struct onboard_hub_pdata cypress_hx2vl_data = { - .reset_us = 1, - .num_supplies = 1, - .supply_names = { "vdd" }, -}; - -static const struct onboard_hub_pdata genesys_gl850g_data = { - .reset_us = 3, - .num_supplies = 1, - .supply_names = { "vdd" }, -}; - -static const struct onboard_hub_pdata genesys_gl852g_data = { - .reset_us = 50, - .num_supplies = 1, - .supply_names = { "vdd" }, -}; - -static const struct onboard_hub_pdata vialab_vl817_data = { - .reset_us = 10, - .num_supplies = 1, - .supply_names = { "vdd" }, -}; - -static const struct of_device_id onboard_hub_match[] = { - { .compatible = "usb424,2412", .data = &microchip_usb424_data, }, - { .compatible = "usb424,2514", .data = &microchip_usb424_data, }, - { .compatible = "usb424,2517", .data = &microchip_usb424_data, }, - { .compatible = "usb424,2744", .data = &microchip_usb5744_data, }, - { .compatible = "usb424,5744", .data = &microchip_usb5744_data, }, - { .compatible = "usb451,8025", .data = &ti_tusb8020b_data, }, - { .compatible = "usb451,8027", .data = &ti_tusb8020b_data, }, - { .compatible = "usb451,8140", .data = &ti_tusb8041_data, }, - { .compatible = "usb451,8142", .data = &ti_tusb8041_data, }, - { .compatible = "usb4b4,6504", .data = &cypress_hx3_data, }, - { .compatible = "usb4b4,6506", .data = &cypress_hx3_data, }, - { .compatible = "usb4b4,6570", .data = &cypress_hx2vl_data, }, - { .compatible = "usb5e3,608", .data = &genesys_gl850g_data, }, - { .compatible = "usb5e3,610", .data = &genesys_gl852g_data, }, - { .compatible = "usb5e3,620", .data = &genesys_gl852g_data, }, - { .compatible = 
"usb5e3,626", .data = &genesys_gl852g_data, }, - { .compatible = "usbbda,411", .data = &realtek_rts5411_data, }, - { .compatible = "usbbda,5411", .data = &realtek_rts5411_data, }, - { .compatible = "usbbda,414", .data = &realtek_rts5411_data, }, - { .compatible = "usbbda,5414", .data = &realtek_rts5411_data, }, - { .compatible = "usb2109,817", .data = &vialab_vl817_data, }, - { .compatible = "usb2109,2817", .data = &vialab_vl817_data, }, - {} -}; - -#endif /* _USB_MISC_ONBOARD_USB_HUB_H */ diff --git a/drivers/usb/misc/onboard_usb_hub_pdevs.c b/drivers/usb/misc/onboard_usb_hub_pdevs.c deleted file mode 100644 index ed22a18f4ab7..000000000000 --- a/drivers/usb/misc/onboard_usb_hub_pdevs.c +++ /dev/null @@ -1,143 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * API for creating and destroying USB onboard hub platform devices - * - * Copyright (c) 2022, Google LLC - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "onboard_usb_hub.h" - -struct pdev_list_entry { - struct platform_device *pdev; - struct list_head node; -}; - -static bool of_is_onboard_usb_hub(const struct device_node *np) -{ - return !!of_match_node(onboard_hub_match, np); -} - -/** - * onboard_hub_create_pdevs -- create platform devices for onboard USB hubs - * @parent_hub : parent hub to scan for connected onboard hubs - * @pdev_list : list of onboard hub platform devices owned by the parent hub - * - * Creates a platform device for each supported onboard hub that is connected to - * the given parent hub. The platform device is in charge of initializing the - * hub (enable regulators, take the hub out of reset, ...) and can optionally - * control whether the hub remains powered during system suspend or not. - * - * To keep track of the platform devices they are added to a list that is owned - * by the parent hub. - * - * Some background about the logic in this function, which can be a bit hard - * to follow: - * - * Root hubs don't have dedicated device tree nodes, but use the node of their - * HCD. The primary and secondary HCD are usually represented by a single DT - * node. That means the root hubs of the primary and secondary HCD share the - * same device tree node (the HCD node). As a result this function can be called - * twice with the same DT node for root hubs. We only want to create a single - * platform device for each physical onboard hub, hence for root hubs the loop - * is only executed for the root hub of the primary HCD. Since the function - * scans through all child nodes it still creates pdevs for onboard hubs - * connected to the root hub of the secondary HCD if needed. - * - * Further there must be only one platform device for onboard hubs with a peer - * hub (the hub is a single physical device). To achieve this two measures are - * taken: pdevs for onboard hubs with a peer are only created when the function - * is called on behalf of the parent hub that is connected to the primary HCD - * (directly or through other hubs). For onboard hubs connected to root hubs - * the function processes the nodes of both peers. A platform device is only - * created if the peer hub doesn't have one already. 
- */ -void onboard_hub_create_pdevs(struct usb_device *parent_hub, struct list_head *pdev_list) -{ - int i; - struct usb_hcd *hcd = bus_to_hcd(parent_hub->bus); - struct device_node *np, *npc; - struct platform_device *pdev; - struct pdev_list_entry *pdle; - - if (!parent_hub->dev.of_node) - return; - - if (!parent_hub->parent && !usb_hcd_is_primary_hcd(hcd)) - return; - - for (i = 1; i <= parent_hub->maxchild; i++) { - np = usb_of_get_device_node(parent_hub, i); - if (!np) - continue; - - if (!of_is_onboard_usb_hub(np)) - goto node_put; - - npc = of_parse_phandle(np, "peer-hub", 0); - if (npc) { - if (!usb_hcd_is_primary_hcd(hcd)) { - of_node_put(npc); - goto node_put; - } - - pdev = of_find_device_by_node(npc); - of_node_put(npc); - - if (pdev) { - put_device(&pdev->dev); - goto node_put; - } - } - - pdev = of_platform_device_create(np, NULL, &parent_hub->dev); - if (!pdev) { - dev_err(&parent_hub->dev, - "failed to create platform device for onboard hub '%pOF'\n", np); - goto node_put; - } - - pdle = kzalloc(sizeof(*pdle), GFP_KERNEL); - if (!pdle) { - of_platform_device_destroy(&pdev->dev, NULL); - goto node_put; - } - - pdle->pdev = pdev; - list_add(&pdle->node, pdev_list); - -node_put: - of_node_put(np); - } -} -EXPORT_SYMBOL_GPL(onboard_hub_create_pdevs); - -/** - * onboard_hub_destroy_pdevs -- free resources of onboard hub platform devices - * @pdev_list : list of onboard hub platform devices - * - * Destroys the platform devices in the given list and frees the memory associated - * with the list entry. - */ -void onboard_hub_destroy_pdevs(struct list_head *pdev_list) -{ - struct pdev_list_entry *pdle, *tmp; - - list_for_each_entry_safe(pdle, tmp, pdev_list, node) { - list_del(&pdle->node); - of_platform_device_destroy(&pdle->pdev->dev, NULL); - kfree(pdle); - } -} -EXPORT_SYMBOL_GPL(onboard_hub_destroy_pdevs); diff --git a/include/linux/usb/onboard_dev.h b/include/linux/usb/onboard_dev.h new file mode 100644 index 000000000000..b79db6d193c8 --- /dev/null +++ b/include/linux/usb/onboard_dev.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __LINUX_USB_ONBOARD_DEV_H +#define __LINUX_USB_ONBOARD_DEV_H + +struct usb_device; +struct list_head; + +#if IS_ENABLED(CONFIG_USB_ONBOARD_DEV) +void onboard_dev_create_pdevs(struct usb_device *parent_dev, struct list_head *pdev_list); +void onboard_dev_destroy_pdevs(struct list_head *pdev_list); +#else +static inline void onboard_dev_create_pdevs(struct usb_device *parent_dev, + struct list_head *pdev_list) {} +static inline void onboard_dev_destroy_pdevs(struct list_head *pdev_list) {} +#endif + +#endif /* __LINUX_USB_ONBOARD_DEV_H */ diff --git a/include/linux/usb/onboard_hub.h b/include/linux/usb/onboard_hub.h deleted file mode 100644 index d9373230556e..000000000000 --- a/include/linux/usb/onboard_hub.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#ifndef __LINUX_USB_ONBOARD_HUB_H -#define __LINUX_USB_ONBOARD_HUB_H - -struct usb_device; -struct list_head; - -#if IS_ENABLED(CONFIG_USB_ONBOARD_HUB) -void onboard_hub_create_pdevs(struct usb_device *parent_hub, struct list_head *pdev_list); -void onboard_hub_destroy_pdevs(struct list_head *pdev_list); -#else -static inline void onboard_hub_create_pdevs(struct usb_device *parent_hub, - struct list_head *pdev_list) {} -static inline void onboard_hub_destroy_pdevs(struct list_head *pdev_list) {} -#endif - -#endif /* __LINUX_USB_ONBOARD_HUB_H */ -- cgit From 0bb322be5d389c00740d884351d4ba08fca938aa Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: 
Mon, 25 Mar 2024 17:14:09 -0500 Subject: driver core: Remove unused platform_notify, platform_notify_remove The "platform_notify" and "platform_notify_remove" hooks have been unused since 00ba9357d189 ("ARM: ixp4xx: Drop custom DMA coherency and bouncing"). Remove "platform_notify" and "platform_notify_remove". No functional change intended. Signed-off-by: Bjorn Helgaas Cc: Heikki Krogerus Cc: Linus Walleij Reviewed-by: Andy Shevchenko Acked-by: Rafael J. Wysocki Link: https://lore.kernel.org/r/20240325221409.1457036-1-helgaas@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/base/core.c | 8 -------- include/linux/device.h | 11 ----------- 2 files changed, 19 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/core.c b/drivers/base/core.c index b93f3c5716ae..78dfa74ee18b 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -2331,8 +2331,6 @@ static void fw_devlink_link_device(struct device *dev) /* Device links support end. */ -int (*platform_notify)(struct device *dev) = NULL; -int (*platform_notify_remove)(struct device *dev) = NULL; static struct kobject *dev_kobj; /* /sys/dev/char */ @@ -2380,16 +2378,10 @@ static void device_platform_notify(struct device *dev) acpi_device_notify(dev); software_node_notify(dev); - - if (platform_notify) - platform_notify(dev); } static void device_platform_notify_remove(struct device *dev) { - if (platform_notify_remove) - platform_notify_remove(dev); - software_node_notify_remove(dev); acpi_device_notify_remove(dev); diff --git a/include/linux/device.h b/include/linux/device.h index 97c4b046c09d..c515ba5756e4 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -1206,17 +1206,6 @@ int __must_check devm_device_add_groups(struct device *dev, int __must_check devm_device_add_group(struct device *dev, const struct attribute_group *grp); -/* - * Platform "fixup" functions - allow the platform to have their say - * about devices and actions that the general device layer doesn't - * know about. - */ -/* Notify platform of device discovery */ -extern int (*platform_notify)(struct device *dev); - -extern int (*platform_notify_remove)(struct device *dev); - - /* * get_device - atomically increment the reference count for the device. * -- cgit From 95d43290f1e476b3be782dd17642e452d0436266 Mon Sep 17 00:00:00 2001 From: Armin Wolf Date: Sat, 9 Mar 2024 21:13:06 +0100 Subject: ACPI: bus: Indicate support for _TFP thru _OSC The ACPI thermal driver already uses the _TFP ACPI method to retrieve precise sampling time values, but this is not reported thru _OSC. Fix this by setting bit 9 ("Fast Thermal Sampling support") when evaluating _OSC. Fixes: a2ee7581afd5 ("ACPI: thermal: Add Thermal fast Sampling Period (_TFP) support") Signed-off-by: Armin Wolf Signed-off-by: Rafael J. 
Wysocki --- drivers/acpi/bus.c | 2 ++ include/linux/acpi.h | 1 + 2 files changed, 3 insertions(+) (limited to 'include/linux') diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index d9fa730416f1..9c13a4e43fa8 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -316,6 +316,8 @@ static void acpi_bus_osc_negotiate_platform_control(void) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT; if (IS_ENABLED(CONFIG_ACPI_PROCESSOR)) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT; + if (IS_ENABLED(CONFIG_ACPI_THERMAL)) + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT; capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT; capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT; diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 34829f2c517a..51bdd9e08f6d 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -573,6 +573,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); #define OSC_SB_CPCV2_SUPPORT 0x00000040 #define OSC_SB_PCLPI_SUPPORT 0x00000080 #define OSC_SB_OSLPI_SUPPORT 0x00000100 +#define OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT 0x00000200 #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 #define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000 #define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000 -- cgit From 6e8345f23ca37d6d41bb76be5d6a705ddf542817 Mon Sep 17 00:00:00 2001 From: Armin Wolf Date: Sat, 9 Mar 2024 21:13:07 +0100 Subject: ACPI: bus: Indicate support for more than 16 p-states thru _OSC The code responsible for parsing the available p-states should have no problems handling more than 16 p-states. Indicate this by setting bit 10 ("Greater Than 16 p-state support") when evaluating _OSC. Signed-off-by: Armin Wolf Signed-off-by: Rafael J. Wysocki --- drivers/acpi/bus.c | 1 + include/linux/acpi.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include/linux') diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 9c13a4e43fa8..d5b0e80dc48e 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -321,6 +321,7 @@ static void acpi_bus_osc_negotiate_platform_control(void) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT; capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT; + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_OVER_16_PSTATES_SUPPORT; if (IS_ENABLED(CONFIG_ACPI_PRMT)) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PRM_SUPPORT; if (IS_ENABLED(CONFIG_ACPI_FFH)) diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 51bdd9e08f6d..0e0b027e27e2 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -574,6 +574,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); #define OSC_SB_PCLPI_SUPPORT 0x00000080 #define OSC_SB_OSLPI_SUPPORT 0x00000100 #define OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT 0x00000200 +#define OSC_SB_OVER_16_PSTATES_SUPPORT 0x00000400 #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 #define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000 #define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000 -- cgit From a8a967a243d71dd635ede076020f665a4df51c63 Mon Sep 17 00:00:00 2001 From: Armin Wolf Date: Sat, 9 Mar 2024 21:13:08 +0100 Subject: ACPI: bus: Indicate support for the Generic Event Device thru _OSC A device driver for the Generic Event Device (ACPI0013) already exists for quite some time, but support for it was never reported thru _OSC. Fix this by setting bit 11 ("Generic Event Device support") when evaluating _OSC. Fixes: 3db80c230da1 ("ACPI: implement Generic Event Device") Signed-off-by: Armin Wolf Signed-off-by: Rafael J. 
Wysocki --- drivers/acpi/bus.c | 1 + include/linux/acpi.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include/linux') diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index d5b0e80dc48e..0c48b603098a 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -322,6 +322,7 @@ static void acpi_bus_osc_negotiate_platform_control(void) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT; capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT; capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_OVER_16_PSTATES_SUPPORT; + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GED_SUPPORT; if (IS_ENABLED(CONFIG_ACPI_PRMT)) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PRM_SUPPORT; if (IS_ENABLED(CONFIG_ACPI_FFH)) diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 0e0b027e27e2..b0d909d1f5fc 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -575,6 +575,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); #define OSC_SB_OSLPI_SUPPORT 0x00000100 #define OSC_SB_FAST_THERMAL_SAMPLING_SUPPORT 0x00000200 #define OSC_SB_OVER_16_PSTATES_SUPPORT 0x00000400 +#define OSC_SB_GED_SUPPORT 0x00000800 #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 #define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000 #define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000 -- cgit From d0d4f1474e36b195eaad477373127ae621334c01 Mon Sep 17 00:00:00 2001 From: Armin Wolf Date: Sat, 9 Mar 2024 21:13:09 +0100 Subject: ACPI: Fix Generic Initiator Affinity _OSC bit The ACPI spec says bit 17 should be used to indicate support for Generic Initiator Affinity Structure in SRAT, but we currently set bit 13 ("Interrupt ResourceSource support"). Fix this by actually setting bit 17 when evaluating _OSC. Fixes: 01aabca2fd54 ("ACPI: Let ACPI know we support Generic Initiator Affinity Structures") Signed-off-by: Armin Wolf Signed-off-by: Rafael J. Wysocki --- include/linux/acpi.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/acpi.h b/include/linux/acpi.h index b0d909d1f5fc..e77783e101c3 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -577,8 +577,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); #define OSC_SB_OVER_16_PSTATES_SUPPORT 0x00000400 #define OSC_SB_GED_SUPPORT 0x00000800 #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 -#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00002000 #define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000 +#define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00020000 #define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000 #define OSC_SB_PRM_SUPPORT 0x00200000 #define OSC_SB_FFH_OPR_SUPPORT 0x00400000 -- cgit From 403ad17c06509794fdf6e4d4b3070bd5b56e2a8e Mon Sep 17 00:00:00 2001 From: Armin Wolf Date: Sat, 9 Mar 2024 21:13:10 +0100 Subject: ACPI: bus: Indicate support for IRQ ResourceSource thru _OSC The ACPI IRQ mapping code supports parsing of ResourceSource, but this is not reported thru _OSC. Fix this by setting bit 13 ("Interrupt ResourceSource support") when evaluating _OSC. Fixes: d44fa3d46079 ("ACPI: Add support for ResourceSource/IRQ domain mapping") Signed-off-by: Armin Wolf Signed-off-by: Rafael J. 
Wysocki --- drivers/acpi/bus.c | 1 + include/linux/acpi.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include/linux') diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 0c48b603098a..a87b10eef77d 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -323,6 +323,7 @@ static void acpi_bus_osc_negotiate_platform_control(void) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PCLPI_SUPPORT; capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_OVER_16_PSTATES_SUPPORT; capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_GED_SUPPORT; + capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_IRQ_RESOURCE_SOURCE_SUPPORT; if (IS_ENABLED(CONFIG_ACPI_PRMT)) capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PRM_SUPPORT; if (IS_ENABLED(CONFIG_ACPI_FFH)) diff --git a/include/linux/acpi.h b/include/linux/acpi.h index e77783e101c3..168201e4c782 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -577,6 +577,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); #define OSC_SB_OVER_16_PSTATES_SUPPORT 0x00000400 #define OSC_SB_GED_SUPPORT 0x00000800 #define OSC_SB_CPC_DIVERSE_HIGH_SUPPORT 0x00001000 +#define OSC_SB_IRQ_RESOURCE_SOURCE_SUPPORT 0x00002000 #define OSC_SB_CPC_FLEXIBLE_ADR_SPACE 0x00004000 #define OSC_SB_GENERIC_INITIATOR_SUPPORT 0x00020000 #define OSC_SB_NATIVE_USB4_SUPPORT 0x00040000 -- cgit From 4628b804555773daa982d9578d89fe04fcdde374 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Sun, 10 Mar 2024 01:02:08 +0000 Subject: regulator: axp20x: fix typo-ed identifier The registers to set the X-Powers AXP313 regulators are of course "CONTROL" registers, not "CONRTOL" ones. Fix the typo in the header file and in its users. No functional change. Signed-off-by: Andre Przywara Acked-by: Jernej Skrabec Tested-by: Ryan Walklin Link: https://lore.kernel.org/r/20240310010211.28653-2-andre.przywara@arm.com Signed-off-by: Lee Jones --- drivers/regulator/axp20x-regulator.c | 10 +++++----- include/linux/mfd/axp20x.h | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index c657820b0bbb..7bcc2d508b65 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -733,21 +733,21 @@ static const struct linear_range axp313a_dcdc3_ranges[] = { static const struct regulator_desc axp313a_regulators[] = { AXP_DESC_RANGES(AXP313A, DCDC1, "dcdc1", "vin1", axp313a_dcdc1_ranges, AXP313A_DCDC1_NUM_VOLTAGES, - AXP313A_DCDC1_CONRTOL, AXP313A_DCDC_V_OUT_MASK, + AXP313A_DCDC1_CONTROL, AXP313A_DCDC_V_OUT_MASK, AXP313A_OUTPUT_CONTROL, BIT(0)), AXP_DESC_RANGES(AXP313A, DCDC2, "dcdc2", "vin2", axp313a_dcdc2_ranges, AXP313A_DCDC23_NUM_VOLTAGES, - AXP313A_DCDC2_CONRTOL, AXP313A_DCDC_V_OUT_MASK, + AXP313A_DCDC2_CONTROL, AXP313A_DCDC_V_OUT_MASK, AXP313A_OUTPUT_CONTROL, BIT(1)), AXP_DESC_RANGES(AXP313A, DCDC3, "dcdc3", "vin3", axp313a_dcdc3_ranges, AXP313A_DCDC23_NUM_VOLTAGES, - AXP313A_DCDC3_CONRTOL, AXP313A_DCDC_V_OUT_MASK, + AXP313A_DCDC3_CONTROL, AXP313A_DCDC_V_OUT_MASK, AXP313A_OUTPUT_CONTROL, BIT(2)), AXP_DESC(AXP313A, ALDO1, "aldo1", "vin1", 500, 3500, 100, - AXP313A_ALDO1_CONRTOL, AXP313A_LDO_V_OUT_MASK, + AXP313A_ALDO1_CONTROL, AXP313A_LDO_V_OUT_MASK, AXP313A_OUTPUT_CONTROL, BIT(3)), AXP_DESC(AXP313A, DLDO1, "dldo1", "vin1", 500, 3500, 100, - AXP313A_DLDO1_CONRTOL, AXP313A_LDO_V_OUT_MASK, + AXP313A_DLDO1_CONTROL, AXP313A_LDO_V_OUT_MASK, AXP313A_OUTPUT_CONTROL, BIT(4)), AXP_DESC_FIXED(AXP313A, RTC_LDO, "rtc-ldo", "vin1", 1800), }; diff --git a/include/linux/mfd/axp20x.h 
b/include/linux/mfd/axp20x.h index f1755163dd9f..9b2b7ac2107f 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -104,11 +104,11 @@ enum axp20x_variants { #define AXP313A_ON_INDICATE 0x00 #define AXP313A_OUTPUT_CONTROL 0x10 -#define AXP313A_DCDC1_CONRTOL 0x13 -#define AXP313A_DCDC2_CONRTOL 0x14 -#define AXP313A_DCDC3_CONRTOL 0x15 -#define AXP313A_ALDO1_CONRTOL 0x16 -#define AXP313A_DLDO1_CONRTOL 0x17 +#define AXP313A_DCDC1_CONTROL 0x13 +#define AXP313A_DCDC2_CONTROL 0x14 +#define AXP313A_DCDC3_CONTROL 0x15 +#define AXP313A_ALDO1_CONTROL 0x16 +#define AXP313A_DLDO1_CONTROL 0x17 #define AXP313A_SHUTDOWN_CTRL 0x1a #define AXP313A_IRQ_EN 0x20 #define AXP313A_IRQ_STATE 0x21 -- cgit From b5bfc8ab2484e8e62ae3ffccaecfe5d82c19f51f Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Sun, 10 Mar 2024 01:02:10 +0000 Subject: mfd: axp20x: Add support for AXP717 PMIC The AXP717a is a PMIC chip produced by X-Powers, it can be connected to an I2C or RSB bus. It's a rather complete PMIC, with many regulators, interrupts, an ADC and battery charging functionality. It also offers USB type-C CC pin handling. Describe the regmap and the MFD bits, along with the registers exposed via I2C or RSB. This covers the regulator, interrupts and power key devices for now. Advertise the device using the new compatible string. Signed-off-by: Andre Przywara Acked-by: Jernej Skrabec Tested-by: Ryan Walklin Link: https://lore.kernel.org/r/20240310010211.28653-4-andre.przywara@arm.com Signed-off-by: Lee Jones --- drivers/mfd/axp20x-i2c.c | 2 ++ drivers/mfd/axp20x-rsb.c | 1 + drivers/mfd/axp20x.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/mfd/axp20x.h | 67 ++++++++++++++++++++++++++++++++++ 4 files changed, 160 insertions(+) (limited to 'include/linux') diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c index 68d3560cfe4a..b8e7ac89f697 100644 --- a/drivers/mfd/axp20x-i2c.c +++ b/drivers/mfd/axp20x-i2c.c @@ -65,6 +65,7 @@ static const struct of_device_id axp20x_i2c_of_match[] = { { .compatible = "x-powers,axp221", .data = (void *)AXP221_ID }, { .compatible = "x-powers,axp223", .data = (void *)AXP223_ID }, { .compatible = "x-powers,axp313a", .data = (void *)AXP313A_ID }, + { .compatible = "x-powers,axp717", .data = (void *)AXP717_ID }, { .compatible = "x-powers,axp803", .data = (void *)AXP803_ID }, { .compatible = "x-powers,axp806", .data = (void *)AXP806_ID }, { .compatible = "x-powers,axp15060", .data = (void *)AXP15060_ID }, @@ -81,6 +82,7 @@ static const struct i2c_device_id axp20x_i2c_id[] = { { "axp221", 0 }, { "axp223", 0 }, { "axp313a", 0 }, + { "axp717", 0 }, { "axp803", 0 }, { "axp806", 0 }, { "axp15060", 0 }, diff --git a/drivers/mfd/axp20x-rsb.c b/drivers/mfd/axp20x-rsb.c index 214bc0d84d44..059656f2a1bd 100644 --- a/drivers/mfd/axp20x-rsb.c +++ b/drivers/mfd/axp20x-rsb.c @@ -58,6 +58,7 @@ static void axp20x_rsb_remove(struct sunxi_rsb_device *rdev) static const struct of_device_id axp20x_rsb_of_match[] = { { .compatible = "x-powers,axp223", .data = (void *)AXP223_ID }, + { .compatible = "x-powers,axp717", .data = (void *)AXP717_ID }, { .compatible = "x-powers,axp803", .data = (void *)AXP803_ID }, { .compatible = "x-powers,axp806", .data = (void *)AXP806_ID }, { .compatible = "x-powers,axp809", .data = (void *)AXP809_ID }, diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c index d8daa593ebd5..48ce6ea693ce 100644 --- a/drivers/mfd/axp20x.c +++ b/drivers/mfd/axp20x.c @@ -42,6 +42,7 @@ static const char * const axp20x_model_names[] = { "AXP223", 
"AXP288", "AXP313a", + "AXP717", "AXP803", "AXP806", "AXP809", @@ -207,6 +208,25 @@ static const struct regmap_access_table axp313a_volatile_table = { .n_yes_ranges = ARRAY_SIZE(axp313a_volatile_ranges), }; +static const struct regmap_range axp717_writeable_ranges[] = { + regmap_reg_range(AXP717_IRQ0_EN, AXP717_IRQ4_EN), + regmap_reg_range(AXP717_DCDC_OUTPUT_CONTROL, AXP717_CPUSLDO_CONTROL), +}; + +static const struct regmap_range axp717_volatile_ranges[] = { + regmap_reg_range(AXP717_IRQ0_STATE, AXP717_IRQ4_STATE), +}; + +static const struct regmap_access_table axp717_writeable_table = { + .yes_ranges = axp717_writeable_ranges, + .n_yes_ranges = ARRAY_SIZE(axp717_writeable_ranges), +}; + +static const struct regmap_access_table axp717_volatile_table = { + .yes_ranges = axp717_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(axp717_volatile_ranges), +}; + static const struct regmap_range axp806_volatile_ranges[] = { regmap_reg_range(AXP20X_IRQ1_STATE, AXP20X_IRQ2_STATE), }; @@ -317,6 +337,11 @@ static const struct resource axp313a_pek_resources[] = { DEFINE_RES_IRQ_NAMED(AXP313A_IRQ_PEK_FAL_EDGE, "PEK_DBF"), }; +static const struct resource axp717_pek_resources[] = { + DEFINE_RES_IRQ_NAMED(AXP717_IRQ_PEK_RIS_EDGE, "PEK_DBR"), + DEFINE_RES_IRQ_NAMED(AXP717_IRQ_PEK_FAL_EDGE, "PEK_DBF"), +}; + static const struct resource axp803_pek_resources[] = { DEFINE_RES_IRQ_NAMED(AXP803_IRQ_PEK_RIS_EDGE, "PEK_DBR"), DEFINE_RES_IRQ_NAMED(AXP803_IRQ_PEK_FAL_EDGE, "PEK_DBF"), @@ -391,6 +416,15 @@ static const struct regmap_config axp313a_regmap_config = { .cache_type = REGCACHE_MAPLE, }; +static const struct regmap_config axp717_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .wr_table = &axp717_writeable_table, + .volatile_table = &axp717_volatile_table, + .max_register = AXP717_CPUSLDO_CONTROL, + .cache_type = REGCACHE_RBTREE, +}; + static const struct regmap_config axp806_regmap_config = { .reg_bits = 8, .val_bits = 8, @@ -589,6 +623,40 @@ static const struct regmap_irq axp313a_regmap_irqs[] = { INIT_REGMAP_IRQ(AXP313A, DIE_TEMP_HIGH, 0, 0), }; +static const struct regmap_irq axp717_regmap_irqs[] = { + INIT_REGMAP_IRQ(AXP717, SOC_DROP_LVL2, 0, 7), + INIT_REGMAP_IRQ(AXP717, SOC_DROP_LVL1, 0, 6), + INIT_REGMAP_IRQ(AXP717, GAUGE_NEW_SOC, 0, 4), + INIT_REGMAP_IRQ(AXP717, BOOST_OVER_V, 0, 2), + INIT_REGMAP_IRQ(AXP717, VBUS_OVER_V, 0, 1), + INIT_REGMAP_IRQ(AXP717, VBUS_FAULT, 0, 0), + INIT_REGMAP_IRQ(AXP717, VBUS_PLUGIN, 1, 7), + INIT_REGMAP_IRQ(AXP717, VBUS_REMOVAL, 1, 6), + INIT_REGMAP_IRQ(AXP717, BATT_PLUGIN, 1, 5), + INIT_REGMAP_IRQ(AXP717, BATT_REMOVAL, 1, 4), + INIT_REGMAP_IRQ(AXP717, PEK_SHORT, 1, 3), + INIT_REGMAP_IRQ(AXP717, PEK_LONG, 1, 2), + INIT_REGMAP_IRQ(AXP717, PEK_FAL_EDGE, 1, 1), + INIT_REGMAP_IRQ(AXP717, PEK_RIS_EDGE, 1, 0), + INIT_REGMAP_IRQ(AXP717, WDOG_EXPIRE, 2, 7), + INIT_REGMAP_IRQ(AXP717, LDO_OVER_CURR, 2, 6), + INIT_REGMAP_IRQ(AXP717, BATT_OVER_CURR, 2, 5), + INIT_REGMAP_IRQ(AXP717, CHARG_DONE, 2, 4), + INIT_REGMAP_IRQ(AXP717, CHARG, 2, 3), + INIT_REGMAP_IRQ(AXP717, DIE_TEMP_HIGH, 2, 2), + INIT_REGMAP_IRQ(AXP717, CHARG_TIMER, 2, 1), + INIT_REGMAP_IRQ(AXP717, BATT_OVER_V, 2, 0), + INIT_REGMAP_IRQ(AXP717, BC_USB_DONE, 3, 7), + INIT_REGMAP_IRQ(AXP717, BC_USB_CHNG, 3, 6), + INIT_REGMAP_IRQ(AXP717, BATT_QUIT_TEMP_HIGH, 3, 4), + INIT_REGMAP_IRQ(AXP717, BATT_CHG_TEMP_HIGH, 3, 3), + INIT_REGMAP_IRQ(AXP717, BATT_CHG_TEMP_LOW, 3, 2), + INIT_REGMAP_IRQ(AXP717, BATT_ACT_TEMP_HIGH, 3, 1), + INIT_REGMAP_IRQ(AXP717, BATT_ACT_TEMP_LOW, 3, 0), + INIT_REGMAP_IRQ(AXP717, TYPEC_REMOVE, 4, 6), + 
INIT_REGMAP_IRQ(AXP717, TYPEC_PLUGIN, 4, 5), +}; + static const struct regmap_irq axp803_regmap_irqs[] = { INIT_REGMAP_IRQ(AXP803, ACIN_OVER_V, 0, 7), INIT_REGMAP_IRQ(AXP803, ACIN_PLUGIN, 0, 6), @@ -776,6 +844,17 @@ static const struct regmap_irq_chip axp313a_regmap_irq_chip = { .num_regs = 1, }; +static const struct regmap_irq_chip axp717_regmap_irq_chip = { + .name = "axp717_irq_chip", + .status_base = AXP717_IRQ0_STATE, + .ack_base = AXP717_IRQ0_STATE, + .unmask_base = AXP717_IRQ0_EN, + .init_ack_masked = true, + .irqs = axp717_regmap_irqs, + .num_irqs = ARRAY_SIZE(axp717_regmap_irqs), + .num_regs = 5, +}; + static const struct regmap_irq_chip axp803_regmap_irq_chip = { .name = "axp803", .status_base = AXP20X_IRQ1_STATE, @@ -941,6 +1020,11 @@ static struct mfd_cell axp313a_cells[] = { MFD_CELL_RES("axp313a-pek", axp313a_pek_resources), }; +static struct mfd_cell axp717_cells[] = { + MFD_CELL_NAME("axp20x-regulator"), + MFD_CELL_RES("axp20x-pek", axp717_pek_resources), +}; + static const struct resource axp288_adc_resources[] = { DEFINE_RES_IRQ_NAMED(AXP288_IRQ_GPADC, "GPADC"), }; @@ -1181,6 +1265,12 @@ int axp20x_match_device(struct axp20x_dev *axp20x) axp20x->regmap_cfg = &axp313a_regmap_config; axp20x->regmap_irq_chip = &axp313a_regmap_irq_chip; break; + case AXP717_ID: + axp20x->nr_cells = ARRAY_SIZE(axp717_cells); + axp20x->cells = axp717_cells; + axp20x->regmap_cfg = &axp717_regmap_config; + axp20x->regmap_irq_chip = &axp717_regmap_irq_chip; + break; case AXP803_ID: axp20x->nr_cells = ARRAY_SIZE(axp803_cells); axp20x->cells = axp803_cells; diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index 9b2b7ac2107f..fd7a41a69526 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -19,6 +19,7 @@ enum axp20x_variants { AXP223_ID, AXP288_ID, AXP313A_ID, + AXP717_ID, AXP803_ID, AXP806_ID, AXP809_ID, @@ -113,6 +114,38 @@ enum axp20x_variants { #define AXP313A_IRQ_EN 0x20 #define AXP313A_IRQ_STATE 0x21 +#define AXP717_ON_INDICATE 0x00 +#define AXP717_IRQ0_EN 0x40 +#define AXP717_IRQ1_EN 0x41 +#define AXP717_IRQ2_EN 0x42 +#define AXP717_IRQ3_EN 0x43 +#define AXP717_IRQ4_EN 0x44 +#define AXP717_IRQ0_STATE 0x48 +#define AXP717_IRQ1_STATE 0x49 +#define AXP717_IRQ2_STATE 0x4a +#define AXP717_IRQ3_STATE 0x4b +#define AXP717_IRQ4_STATE 0x4c +#define AXP717_DCDC_OUTPUT_CONTROL 0x80 +#define AXP717_DCDC1_CONTROL 0x83 +#define AXP717_DCDC2_CONTROL 0x84 +#define AXP717_DCDC3_CONTROL 0x85 +#define AXP717_DCDC4_CONTROL 0x86 +#define AXP717_LDO0_OUTPUT_CONTROL 0x90 +#define AXP717_LDO1_OUTPUT_CONTROL 0x91 +#define AXP717_ALDO1_CONTROL 0x93 +#define AXP717_ALDO2_CONTROL 0x94 +#define AXP717_ALDO3_CONTROL 0x95 +#define AXP717_ALDO4_CONTROL 0x96 +#define AXP717_BLDO1_CONTROL 0x97 +#define AXP717_BLDO2_CONTROL 0x98 +#define AXP717_BLDO3_CONTROL 0x99 +#define AXP717_BLDO4_CONTROL 0x9a +#define AXP717_CLDO1_CONTROL 0x9b +#define AXP717_CLDO2_CONTROL 0x9c +#define AXP717_CLDO3_CONTROL 0x9d +#define AXP717_CLDO4_CONTROL 0x9e +#define AXP717_CPUSLDO_CONTROL 0x9f + #define AXP806_STARTUP_SRC 0x00 #define AXP806_CHIP_ID 0x03 #define AXP806_PWR_OUT_CTRL1 0x10 @@ -732,6 +765,40 @@ enum axp313a_irqs { AXP313A_IRQ_PEK_RIS_EDGE, }; +enum axp717_irqs { + AXP717_IRQ_VBUS_FAULT, + AXP717_IRQ_VBUS_OVER_V, + AXP717_IRQ_BOOST_OVER_V, + AXP717_IRQ_GAUGE_NEW_SOC = 4, + AXP717_IRQ_SOC_DROP_LVL1 = 6, + AXP717_IRQ_SOC_DROP_LVL2, + AXP717_IRQ_PEK_RIS_EDGE, + AXP717_IRQ_PEK_FAL_EDGE, + AXP717_IRQ_PEK_LONG, + AXP717_IRQ_PEK_SHORT, + AXP717_IRQ_BATT_REMOVAL, + AXP717_IRQ_BATT_PLUGIN, + 
AXP717_IRQ_VBUS_REMOVAL, + AXP717_IRQ_VBUS_PLUGIN, + AXP717_IRQ_BATT_OVER_V, + AXP717_IRQ_CHARG_TIMER, + AXP717_IRQ_DIE_TEMP_HIGH, + AXP717_IRQ_CHARG, + AXP717_IRQ_CHARG_DONE, + AXP717_IRQ_BATT_OVER_CURR, + AXP717_IRQ_LDO_OVER_CURR, + AXP717_IRQ_WDOG_EXPIRE, + AXP717_IRQ_BATT_ACT_TEMP_LOW, + AXP717_IRQ_BATT_ACT_TEMP_HIGH, + AXP717_IRQ_BATT_CHG_TEMP_LOW, + AXP717_IRQ_BATT_CHG_TEMP_HIGH, + AXP717_IRQ_BATT_QUIT_TEMP_HIGH, + AXP717_IRQ_BC_USB_CHNG = 30, + AXP717_IRQ_BC_USB_DONE, + AXP717_IRQ_TYPEC_PLUGIN = 37, + AXP717_IRQ_TYPEC_REMOVE, +}; + enum axp803_irqs { AXP803_IRQ_ACIN_OVER_V = 1, AXP803_IRQ_ACIN_PLUGIN, -- cgit From d2ac3df75c3a995064cfac0171e082a30d8c4c66 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Sun, 10 Mar 2024 01:02:11 +0000 Subject: regulator: axp20x: add support for the AXP717 The X-Powers AXP717 is a typical PMIC from X-Powers, featuring four DC/DC converters and 15 LDOs, on the regulator side. Describe the chip's voltage settings and switch registers, how the voltages are encoded, and connect this to the MFD device via its regulator ID. Signed-off-by: Andre Przywara Acked-by: Mark Brown Tested-by: Ryan Walklin Link: https://lore.kernel.org/r/20240310010211.28653-5-andre.przywara@arm.com Signed-off-by: Lee Jones --- drivers/regulator/axp20x-regulator.c | 84 ++++++++++++++++++++++++++++++++++++ include/linux/mfd/axp20x.h | 21 +++++++++ 2 files changed, 105 insertions(+) (limited to 'include/linux') diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 7bcc2d508b65..34fcdd82b2ea 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -138,6 +138,12 @@ #define AXP313A_DCDC_V_OUT_MASK GENMASK(6, 0) #define AXP313A_LDO_V_OUT_MASK GENMASK(4, 0) +#define AXP717_DCDC1_NUM_VOLTAGES 88 +#define AXP717_DCDC2_NUM_VOLTAGES 107 +#define AXP717_DCDC3_NUM_VOLTAGES 104 +#define AXP717_DCDC_V_OUT_MASK GENMASK(6, 0) +#define AXP717_LDO_V_OUT_MASK GENMASK(4, 0) + #define AXP803_PWR_OUT_DCDC1_MASK BIT_MASK(0) #define AXP803_PWR_OUT_DCDC2_MASK BIT_MASK(1) #define AXP803_PWR_OUT_DCDC3_MASK BIT_MASK(2) @@ -752,6 +758,79 @@ static const struct regulator_desc axp313a_regulators[] = { AXP_DESC_FIXED(AXP313A, RTC_LDO, "rtc-ldo", "vin1", 1800), }; +static const struct linear_range axp717_dcdc1_ranges[] = { + REGULATOR_LINEAR_RANGE(500000, 0, 70, 10000), + REGULATOR_LINEAR_RANGE(1220000, 71, 87, 20000), +}; + +static const struct linear_range axp717_dcdc2_ranges[] = { + REGULATOR_LINEAR_RANGE(500000, 0, 70, 10000), + REGULATOR_LINEAR_RANGE(1220000, 71, 87, 20000), + REGULATOR_LINEAR_RANGE(1600000, 88, 107, 100000), +}; + +static const struct linear_range axp717_dcdc3_ranges[] = { + REGULATOR_LINEAR_RANGE(500000, 0, 70, 10000), + REGULATOR_LINEAR_RANGE(1220000, 71, 102, 20000), +}; + +static const struct regulator_desc axp717_regulators[] = { + AXP_DESC_RANGES(AXP717, DCDC1, "dcdc1", "vin1", + axp717_dcdc1_ranges, AXP717_DCDC1_NUM_VOLTAGES, + AXP717_DCDC1_CONTROL, AXP717_DCDC_V_OUT_MASK, + AXP717_DCDC_OUTPUT_CONTROL, BIT(0)), + AXP_DESC_RANGES(AXP717, DCDC2, "dcdc2", "vin2", + axp717_dcdc2_ranges, AXP717_DCDC2_NUM_VOLTAGES, + AXP717_DCDC2_CONTROL, AXP717_DCDC_V_OUT_MASK, + AXP717_DCDC_OUTPUT_CONTROL, BIT(1)), + AXP_DESC_RANGES(AXP717, DCDC3, "dcdc3", "vin3", + axp717_dcdc3_ranges, AXP717_DCDC3_NUM_VOLTAGES, + AXP717_DCDC3_CONTROL, AXP717_DCDC_V_OUT_MASK, + AXP717_DCDC_OUTPUT_CONTROL, BIT(2)), + AXP_DESC(AXP717, DCDC4, "dcdc4", "vin4", 1000, 3700, 100, + AXP717_DCDC4_CONTROL, AXP717_DCDC_V_OUT_MASK, + 
AXP717_DCDC_OUTPUT_CONTROL, BIT(3)), + AXP_DESC(AXP717, ALDO1, "aldo1", "vin1", 500, 3500, 100, + AXP717_ALDO1_CONTROL, AXP717_LDO_V_OUT_MASK, + AXP717_LDO0_OUTPUT_CONTROL, BIT(0)), + AXP_DESC(AXP717, ALDO2, "aldo2", "vin1", 500, 3500, 100, + AXP717_ALDO2_CONTROL, AXP717_LDO_V_OUT_MASK, + AXP717_LDO0_OUTPUT_CONTROL, BIT(1)), + AXP_DESC(AXP717, ALDO3, "aldo3", "vin1", 500, 3500, 100, + AXP717_ALDO3_CONTROL, AXP717_LDO_V_OUT_MASK, + AXP717_LDO0_OUTPUT_CONTROL, BIT(2)), + AXP_DESC(AXP717, ALDO4, "aldo4", "vin1", 500, 3500, 100, + AXP717_ALDO4_CONTROL, AXP717_LDO_V_OUT_MASK, + AXP717_LDO0_OUTPUT_CONTROL, BIT(3)), + AXP_DESC(AXP717, BLDO1, "bldo1", "vin1", 500, 3500, 100, + AXP717_BLDO1_CONTROL, AXP717_LDO_V_OUT_MASK, + AXP717_LDO0_OUTPUT_CONTROL, BIT(4)), + AXP_DESC(AXP717, BLDO2, "bldo2", "vin1", 500, 3500, 100, + AXP717_BLDO2_CONTROL, AXP717_LDO_V_OUT_MASK, + AXP717_LDO0_OUTPUT_CONTROL, BIT(5)), + AXP_DESC(AXP717, BLDO3, "bldo3", "vin1", 500, 3500, 100, + AXP717_BLDO3_CONTROL, AXP717_LDO_V_OUT_MASK, + AXP717_LDO0_OUTPUT_CONTROL, BIT(6)), + AXP_DESC(AXP717, BLDO4, "bldo4", "vin1", 500, 3500, 100, + AXP717_BLDO4_CONTROL, AXP717_LDO_V_OUT_MASK, + AXP717_LDO0_OUTPUT_CONTROL, BIT(7)), + AXP_DESC(AXP717, CLDO1, "cldo1", "vin1", 500, 3500, 100, + AXP717_CLDO1_CONTROL, AXP717_LDO_V_OUT_MASK, + AXP717_LDO1_OUTPUT_CONTROL, BIT(0)), + AXP_DESC(AXP717, CLDO2, "cldo2", "vin1", 500, 3500, 100, + AXP717_CLDO2_CONTROL, AXP717_LDO_V_OUT_MASK, + AXP717_LDO1_OUTPUT_CONTROL, BIT(1)), + AXP_DESC(AXP717, CLDO3, "cldo3", "vin1", 500, 3500, 100, + AXP717_CLDO3_CONTROL, AXP717_LDO_V_OUT_MASK, + AXP717_LDO1_OUTPUT_CONTROL, BIT(2)), + AXP_DESC(AXP717, CLDO4, "cldo4", "vin1", 500, 3500, 100, + AXP717_CLDO4_CONTROL, AXP717_LDO_V_OUT_MASK, + AXP717_LDO1_OUTPUT_CONTROL, BIT(3)), + AXP_DESC(AXP717, CPUSLDO, "cpusldo", "vin1", 500, 1400, 50, + AXP717_CPUSLDO_CONTROL, AXP717_LDO_V_OUT_MASK, + AXP717_LDO1_OUTPUT_CONTROL, BIT(4)), +}; + /* DCDC ranges shared with AXP813 */ static const struct linear_range axp803_dcdc234_ranges[] = { REGULATOR_LINEAR_RANGE(500000, @@ -1253,6 +1332,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) step = 150; break; case AXP313A_ID: + case AXP717_ID: case AXP15060_ID: /* The DCDC PWM frequency seems to be fixed to 3 MHz. 
*/ if (dcdcfreq != 0) { @@ -1479,6 +1559,10 @@ static int axp20x_regulator_probe(struct platform_device *pdev) regulators = axp313a_regulators; nregulators = AXP313A_REG_ID_MAX; break; + case AXP717_ID: + regulators = axp717_regulators; + nregulators = AXP717_REG_ID_MAX; + break; case AXP803_ID: regulators = axp803_regulators; nregulators = AXP803_REG_ID_MAX; break; diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index fd7a41a69526..8c0a33a2e9ce 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -466,6 +466,27 @@ enum { AXP313A_REG_ID_MAX, }; +enum { + AXP717_DCDC1 = 0, + AXP717_DCDC2, + AXP717_DCDC3, + AXP717_DCDC4, + AXP717_ALDO1, + AXP717_ALDO2, + AXP717_ALDO3, + AXP717_ALDO4, + AXP717_BLDO1, + AXP717_BLDO2, + AXP717_BLDO3, + AXP717_BLDO4, + AXP717_CLDO1, + AXP717_CLDO2, + AXP717_CLDO3, + AXP717_CLDO4, + AXP717_CPUSLDO, + AXP717_REG_ID_MAX, +}; + enum { AXP806_DCDCA = 0, AXP806_DCDCB, -- cgit From e6c6fea5c314d90dbfca4983f2ea46388aab3bb0 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 5 Mar 2024 17:22:34 +0100 Subject: backlight: Match backlight device against struct fb_info.bl_dev Framebuffer drivers for devices with dedicated backlight are supposed to set struct fb_info.bl_dev to the backlight's respective device. Use the value to match backlight and framebuffer in the backlight core code. The code first tests against struct backlight_ops.check_fb. If this test succeeds, it performs the test against fbdev. So backlight drivers can override the later test as before. Fbdev's backlight support depends on CONFIG_FB_BACKLIGHT. To avoid ifdef in the code, the new helper fb_bl_device() returns the backlight device, or NULL if the config option has been disabled. The test in the backlight code will then do nothing. v4: * declare empty fb_bl_device() as static inline * export fb_bl_device() v3: * hide ifdef in fb_bl_device() (Lee) * no if-else blocks (Andy) Signed-off-by: Thomas Zimmermann Reviewed-by: Daniel Thompson Reviewed-by: Javier Martinez Canillas Link: https://lore.kernel.org/r/20240305162425.23845-2-tzimmermann@suse.de Signed-off-by: Lee Jones --- drivers/video/backlight/backlight.c | 8 ++++++-- drivers/video/fbdev/core/fb_backlight.c | 6 ++++++ include/linux/fb.h | 9 +++++++++ 3 files changed, 21 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 86e1cdc8e369..4f7973c6fcc7 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -98,7 +98,9 @@ static int fb_notifier_callback(struct notifier_block *self, { struct backlight_device *bd; struct fb_event *evdata = data; - int node = evdata->info->node; + struct fb_info *info = evdata->info; + struct backlight_device *fb_bd = fb_bl_device(info); + int node = info->node; int fb_blank = 0; /* If we aren't interested in this event, skip it immediately ... 
*/ @@ -110,7 +112,9 @@ static int fb_notifier_callback(struct notifier_block *self, if (!bd->ops) goto out; - if (bd->ops->check_fb && !bd->ops->check_fb(bd, evdata->info)) + if (bd->ops->check_fb && !bd->ops->check_fb(bd, info)) + goto out; + if (fb_bd && fb_bd != bd) goto out; fb_blank = *(int *)evdata->data; diff --git a/drivers/video/fbdev/core/fb_backlight.c b/drivers/video/fbdev/core/fb_backlight.c index e2d3b3adc870..6fdaa9f81be9 100644 --- a/drivers/video/fbdev/core/fb_backlight.c +++ b/drivers/video/fbdev/core/fb_backlight.c @@ -30,4 +30,10 @@ void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max) mutex_unlock(&fb_info->bl_curve_mutex); } EXPORT_SYMBOL_GPL(fb_bl_default_curve); + +struct backlight_device *fb_bl_device(struct fb_info *info) +{ + return info->bl_dev; +} +EXPORT_SYMBOL(fb_bl_device); #endif diff --git a/include/linux/fb.h b/include/linux/fb.h index 0dd27364d56f..75c5f800467c 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -738,6 +738,15 @@ extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev); extern void framebuffer_release(struct fb_info *info); extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max); +#if IS_ENABLED(CONFIG_FB_BACKLIGHT) +struct backlight_device *fb_bl_device(struct fb_info *info); +#else +static inline struct backlight_device *fb_bl_device(struct fb_info *info) +{ + return NULL; +} +#endif + /* fbmon.c */ #define FB_MAXTIMINGS 0 #define FB_VSYNCTIMINGS 1 -- cgit From 397b7493729288ac308c5f4ff3496eaeb1080293 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 5 Mar 2024 17:22:39 +0100 Subject: backlight: pwm-backlight: Remove struct backlight_ops.check_fb MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The internal check_fb callback from struct pwm_bl_data is never implemented. The driver's implementation of check_fb always returns true, which is the backlight core's default if no implementation has been set. So remove the code from the driver. 
v2: * reword commit message Signed-off-by: Thomas Zimmermann Cc: Uwe Kleine-König Acked-by: Uwe Kleine-König Reviewed-by: Daniel Thompson Reviewed-by: Javier Martinez Canillas Link: https://lore.kernel.org/r/20240305162425.23845-7-tzimmermann@suse.de Signed-off-by: Lee Jones --- drivers/video/backlight/pwm_bl.c | 12 ------------ include/linux/pwm_backlight.h | 1 - 2 files changed, 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index ffcebf6aa76a..61d30bc98eea 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -34,7 +33,6 @@ struct pwm_bl_data { int brightness); void (*notify_after)(struct device *, int brightness); - int (*check_fb)(struct device *, struct fb_info *); void (*exit)(struct device *); }; @@ -129,17 +127,8 @@ static int pwm_backlight_update_status(struct backlight_device *bl) return 0; } -static int pwm_backlight_check_fb(struct backlight_device *bl, - struct fb_info *info) -{ - struct pwm_bl_data *pb = bl_get_data(bl); - - return !pb->check_fb || pb->check_fb(pb->dev, info); -} - static const struct backlight_ops pwm_backlight_ops = { .update_status = pwm_backlight_update_status, - .check_fb = pwm_backlight_check_fb, }; #ifdef CONFIG_OF @@ -482,7 +471,6 @@ static int pwm_backlight_probe(struct platform_device *pdev) pb->notify = data->notify; pb->notify_after = data->notify_after; - pb->check_fb = data->check_fb; pb->exit = data->exit; pb->dev = &pdev->dev; pb->enabled = false; diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h index cdd2ac366bc7..0bf80e98d5b4 100644 --- a/include/linux/pwm_backlight.h +++ b/include/linux/pwm_backlight.h @@ -19,7 +19,6 @@ struct platform_pwm_backlight_data { int (*notify)(struct device *dev, int brightness); void (*notify_after)(struct device *dev, int brightness); void (*exit)(struct device *dev); - int (*check_fb)(struct device *dev, struct fb_info *info); }; #endif -- cgit From 0a4be7263749945a3882f7a0e2e5b1c45c31064e Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 5 Mar 2024 17:22:43 +0100 Subject: backlight: Add controls_device callback to struct backlight_ops Replace check_fb with controls_device in struct backlight_ops. The new callback interface takes a Linux device instead of a framebuffer. Resolves one of the dependencies of backlight.h on fb.h. The few drivers that had custom implementations of check_fb can easily use the framebuffer's Linux device instead. Update them accordingly. 
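As an illustration of the new interface (a minimal sketch, not part of the patch; the "mydrv" names and its platform data are made up), a driver that used to compare against info->device in check_fb now receives the display's struct device directly, mirroring the bd6107/gpio_backlight/lv5207lp conversions in the hunks below:

static bool mydrv_controls_device(struct backlight_device *bd,
				  struct device *display_dev)
{
	struct mydrv *priv = bl_get_data(bd);

	/* no display device given in platform data: assume we control it */
	return !priv->pdata->dev || priv->pdata->dev == display_dev;
}

static const struct backlight_ops mydrv_backlight_ops = {
	.options	 = BL_CORE_SUSPENDRESUME,
	.update_status	 = mydrv_update_status,
	.controls_device = mydrv_controls_device,
};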
Signed-off-by: Thomas Zimmermann Reviewed-by: Daniel Thompson Reviewed-by: Javier Martinez Canillas Link: https://lore.kernel.org/r/20240305162425.23845-11-tzimmermann@suse.de Signed-off-by: Lee Jones --- drivers/video/backlight/backlight.c | 2 +- drivers/video/backlight/bd6107.c | 12 ++++++------ drivers/video/backlight/gpio_backlight.c | 12 ++++++------ drivers/video/backlight/lv5207lp.c | 12 ++++++------ include/linux/backlight.h | 16 ++++++++-------- 5 files changed, 27 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 4f7973c6fcc7..2bd4299206ae 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -112,7 +112,7 @@ static int fb_notifier_callback(struct notifier_block *self, if (!bd->ops) goto out; - if (bd->ops->check_fb && !bd->ops->check_fb(bd, info)) + if (bd->ops->controls_device && !bd->ops->controls_device(bd, info->device)) goto out; if (fb_bd && fb_bd != bd) goto out; diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c index b1e7126380ef..6be2c67ba85c 100644 --- a/drivers/video/backlight/bd6107.c +++ b/drivers/video/backlight/bd6107.c @@ -99,18 +99,18 @@ static int bd6107_backlight_update_status(struct backlight_device *backlight) return 0; } -static int bd6107_backlight_check_fb(struct backlight_device *backlight, - struct fb_info *info) +static bool bd6107_backlight_controls_device(struct backlight_device *backlight, + struct device *display_dev) { struct bd6107 *bd = bl_get_data(backlight); - return !bd->pdata->dev || bd->pdata->dev == info->device; + return !bd->pdata->dev || bd->pdata->dev == display_dev; } static const struct backlight_ops bd6107_backlight_ops = { - .options = BL_CORE_SUSPENDRESUME, - .update_status = bd6107_backlight_update_status, - .check_fb = bd6107_backlight_check_fb, + .options = BL_CORE_SUSPENDRESUME, + .update_status = bd6107_backlight_update_status, + .controls_device = bd6107_backlight_controls_device, }; static int bd6107_probe(struct i2c_client *client) diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c index e0c8c2a3f5dc..4476c317ce29 100644 --- a/drivers/video/backlight/gpio_backlight.c +++ b/drivers/video/backlight/gpio_backlight.c @@ -30,18 +30,18 @@ static int gpio_backlight_update_status(struct backlight_device *bl) return 0; } -static int gpio_backlight_check_fb(struct backlight_device *bl, - struct fb_info *info) +static bool gpio_backlight_controls_device(struct backlight_device *bl, + struct device *display_dev) { struct gpio_backlight *gbl = bl_get_data(bl); - return !gbl->dev || gbl->dev == info->device; + return !gbl->dev || gbl->dev == display_dev; } static const struct backlight_ops gpio_backlight_ops = { - .options = BL_CORE_SUSPENDRESUME, - .update_status = gpio_backlight_update_status, - .check_fb = gpio_backlight_check_fb, + .options = BL_CORE_SUSPENDRESUME, + .update_status = gpio_backlight_update_status, + .controls_device = gpio_backlight_controls_device, }; static int gpio_backlight_probe(struct platform_device *pdev) diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c index 1f1d06b4e119..0cf00fee0f60 100644 --- a/drivers/video/backlight/lv5207lp.c +++ b/drivers/video/backlight/lv5207lp.c @@ -62,18 +62,18 @@ static int lv5207lp_backlight_update_status(struct backlight_device *backlight) return 0; } -static int lv5207lp_backlight_check_fb(struct backlight_device *backlight, 
- struct fb_info *info) +static bool lv5207lp_backlight_controls_device(struct backlight_device *backlight, + struct device *display_dev) { struct lv5207lp *lv = bl_get_data(backlight); - return !lv->pdata->dev || lv->pdata->dev == info->device; + return !lv->pdata->dev || lv->pdata->dev == display_dev; } static const struct backlight_ops lv5207lp_backlight_ops = { - .options = BL_CORE_SUSPENDRESUME, - .update_status = lv5207lp_backlight_update_status, - .check_fb = lv5207lp_backlight_check_fb, + .options = BL_CORE_SUSPENDRESUME, + .update_status = lv5207lp_backlight_update_status, + .controls_device = lv5207lp_backlight_controls_device, }; static int lv5207lp_probe(struct i2c_client *client) diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 614653e07e3a..2db4c70053c4 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -13,6 +13,7 @@ #include #include #include +#include /** * enum backlight_update_reason - what method was used to update backlight @@ -110,7 +111,6 @@ enum backlight_scale { }; struct backlight_device; -struct fb_info; /** * struct backlight_ops - backlight operations @@ -160,18 +160,18 @@ struct backlight_ops { int (*get_brightness)(struct backlight_device *); /** - * @check_fb: Check the framebuffer device. + * @controls_device: Check against the display device * - * Check if given framebuffer device is the one bound to this backlight. - * This operation is optional and if not implemented it is assumed that the - * fbdev is always the one bound to the backlight. + * Check if the backlight controls the given display device. This + * operation is optional and if not implemented it is assumed that + * the display is always the one controlled by the backlight. * * RETURNS: * - * If info is NULL or the info matches the fbdev bound to the backlight return true. - * If info does not match the fbdev bound to the backlight return false. + * If display_dev is NULL or display_dev matches the device controlled by + * the backlight, return true. Otherwise return false. */ - int (*check_fb)(struct backlight_device *bd, struct fb_info *info); + bool (*controls_device)(struct backlight_device *bd, struct device *display_dev); }; /** -- cgit From b7ad4c67ed945524ab38bd61676e684eff84fc2a Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 19 Mar 2024 10:37:21 +0100 Subject: backlight: omap1: Remove unused struct omap_backlight_config.set_power The callback set_power in struct omap_backlight_config is not implemented anywhere. Remove it from the structure and driver. 
Signed-off-by: Thomas Zimmermann Reviewed-by: Sam Ravnborg Reviewed-by: Daniel Thompson Reviewed-by: Dan Carpenter Link: https://lore.kernel.org/r/20240319093915.31778-3-tzimmermann@suse.de Signed-off-by: Lee Jones --- drivers/video/backlight/omap1_bl.c | 3 --- include/linux/platform_data/omap1_bl.h | 1 - 2 files changed, 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c index 69a49384b3de..84d148f38595 100644 --- a/drivers/video/backlight/omap1_bl.c +++ b/drivers/video/backlight/omap1_bl.c @@ -39,9 +39,6 @@ static inline void omapbl_send_enable(int enable) static void omapbl_blank(struct omap_backlight *bl, int mode) { - if (bl->pdata->set_power) - bl->pdata->set_power(bl->dev, mode); - switch (mode) { case FB_BLANK_NORMAL: case FB_BLANK_VSYNC_SUSPEND: diff --git a/include/linux/platform_data/omap1_bl.h b/include/linux/platform_data/omap1_bl.h index 5e8b17d77a5f..3d0bab31a0a9 100644 --- a/include/linux/platform_data/omap1_bl.h +++ b/include/linux/platform_data/omap1_bl.h @@ -6,7 +6,6 @@ struct omap_backlight_config { int default_intensity; - int (*set_power)(struct device *dev, int state); }; #endif -- cgit From 4551978bb50a8d59b49629deebacd73478a8b1e1 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 19 Mar 2024 10:37:25 +0100 Subject: backlight: Remove fb_blank from struct backlight_properties Remove the field fb_blank from struct backlight_properties and remove all code that still sets or reads it. Backlight blank status is now tracked exclusively in struct backlight_properties.state. The core backlight code keeps the fb_blank and state fields in sync, but doesn't do anything else with fb_blank. Several drivers initialize fb_blank to FB_BLANK_UNBLANK to enable the backlight. This is already the default for the state field. So we can delete the fb_blank code from core and drivers and rely on the state field. 
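For illustration only (not part of this patch; the mydrv_* names and priv pointer are made up), a backlight driver probe now relies solely on the power and state fields and on backlight_is_blank():

	struct backlight_properties props = {
		.type = BACKLIGHT_RAW,
		.max_brightness = 255,
		.brightness = 128,
		/* no .fb_blank initialization needed any more */
	};
	struct backlight_device *bl;

	bl = devm_backlight_device_register(dev, "mydrv-bl", dev, priv,
					    &mydrv_bl_ops, &props);
	if (IS_ERR(bl))
		return PTR_ERR(bl);

	/* blank state is derived from props.power and props.state only */
	if (backlight_is_blank(bl))
		mydrv_display_off(priv);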
Signed-off-by: Thomas Zimmermann Cc: Flavio Suligoi Cc: Nicolas Ferre Cc: Alexandre Belloni Cc: Claudiu Beznea Tested-by: Flavio Suligoi Reviewed-by: Daniel Thompson Reviewed-by: Dan Carpenter Link: https://lore.kernel.org/r/20240319093915.31778-7-tzimmermann@suse.de Signed-off-by: Lee Jones --- drivers/video/backlight/backlight.c | 2 -- drivers/video/backlight/mp3309c.c | 1 - drivers/video/backlight/omap1_bl.c | 1 - drivers/video/fbdev/atmel_lcdfb.c | 1 - .../fbdev/omap2/omapfb/displays/panel-dsi-cm.c | 1 - .../omap2/omapfb/displays/panel-sony-acx565akm.c | 1 - include/linux/backlight.h | 25 +--------------------- 7 files changed, 1 insertion(+), 31 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 86e1cdc8e369..691f1f3030e9 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -118,14 +118,12 @@ static int fb_notifier_callback(struct notifier_block *self, bd->fb_bl_on[node] = true; if (!bd->use_count++) { bd->props.state &= ~BL_CORE_FBBLANK; - bd->props.fb_blank = FB_BLANK_UNBLANK; backlight_update_status(bd); } } else if (fb_blank != FB_BLANK_UNBLANK && bd->fb_bl_on[node]) { bd->fb_bl_on[node] = false; if (!(--bd->use_count)) { bd->props.state |= BL_CORE_FBBLANK; - bd->props.fb_blank = fb_blank; backlight_update_status(bd); } } diff --git a/drivers/video/backlight/mp3309c.c b/drivers/video/backlight/mp3309c.c index c80a1481e742..7d73e85b8c14 100644 --- a/drivers/video/backlight/mp3309c.c +++ b/drivers/video/backlight/mp3309c.c @@ -363,7 +363,6 @@ static int mp3309c_probe(struct i2c_client *client) props.scale = BACKLIGHT_SCALE_LINEAR; props.type = BACKLIGHT_RAW; props.power = FB_BLANK_UNBLANK; - props.fb_blank = FB_BLANK_UNBLANK; chip->bl = devm_backlight_device_register(dev, "mp3309c", dev, chip, &mp3309c_bl_ops, &props); if (IS_ERR(chip->bl)) diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c index 636b390f78f0..e461e19231ae 100644 --- a/drivers/video/backlight/omap1_bl.c +++ b/drivers/video/backlight/omap1_bl.c @@ -139,7 +139,6 @@ static int omapbl_probe(struct platform_device *pdev) omap_cfg_reg(PWL); /* Conflicts with UART3 */ - dev->props.fb_blank = FB_BLANK_UNBLANK; dev->props.brightness = pdata->default_intensity; omapbl_update_status(dev); diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c index 9e391e5eaf9d..5574fb0361ee 100644 --- a/drivers/video/fbdev/atmel_lcdfb.c +++ b/drivers/video/fbdev/atmel_lcdfb.c @@ -153,7 +153,6 @@ static void init_backlight(struct atmel_lcdfb_info *sinfo) sinfo->backlight = bl; bl->props.power = FB_BLANK_UNBLANK; - bl->props.fb_blank = FB_BLANK_UNBLANK; bl->props.brightness = atmel_bl_get_brightness(bl); } diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c index 47683a6c0076..274bdf7b3b45 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c @@ -1215,7 +1215,6 @@ static int dsicm_probe(struct platform_device *pdev) ddata->bldev = bldev; - bldev->props.fb_blank = FB_BLANK_UNBLANK; bldev->props.power = FB_BLANK_UNBLANK; bldev->props.brightness = 255; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c index 9d3ce234a787..71d2e015960c 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c +++ 
b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c @@ -753,7 +753,6 @@ static int acx565akm_probe(struct spi_device *spi) } memset(&props, 0, sizeof(props)); - props.fb_blank = FB_BLANK_UNBLANK; props.power = FB_BLANK_UNBLANK; props.type = BACKLIGHT_RAW; diff --git a/include/linux/backlight.h b/include/linux/backlight.h index 614653e07e3a..31600b144d79 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -218,25 +218,6 @@ struct backlight_properties { */ int power; - /** - * @fb_blank: The power state from the FBIOBLANK ioctl. - * - * When the FBIOBLANK ioctl is called @fb_blank is set to the - * blank parameter and the update_status() operation is called. - * - * When the backlight device is enabled @fb_blank is set - * to FB_BLANK_UNBLANK. When the backlight device is disabled - * @fb_blank is set to FB_BLANK_POWERDOWN. - * - * Backlight drivers should avoid using this property. It has been - * replaced by state & BL_CORE_FBLANK (although most drivers should - * use backlight_is_blank() as the preferred means to get the blank - * state). - * - * fb_blank is deprecated and will be removed. - */ - int fb_blank; - /** * @type: The type of backlight supported. * @@ -366,7 +347,6 @@ static inline int backlight_enable(struct backlight_device *bd) return 0; bd->props.power = FB_BLANK_UNBLANK; - bd->props.fb_blank = FB_BLANK_UNBLANK; bd->props.state &= ~BL_CORE_FBBLANK; return backlight_update_status(bd); @@ -382,7 +362,6 @@ static inline int backlight_disable(struct backlight_device *bd) return 0; bd->props.power = FB_BLANK_POWERDOWN; - bd->props.fb_blank = FB_BLANK_POWERDOWN; bd->props.state |= BL_CORE_FBBLANK; return backlight_update_status(bd); @@ -395,15 +374,13 @@ static inline int backlight_disable(struct backlight_device *bd) * Display is expected to be blank if any of these is true:: * * 1) if power in not UNBLANK - * 2) if fb_blank is not UNBLANK - * 3) if state indicate BLANK or SUSPENDED + * 2) if state indicate BLANK or SUSPENDED * * Returns true if display is expected to be blank, false otherwise. */ static inline bool backlight_is_blank(const struct backlight_device *bd) { return bd->props.power != FB_BLANK_UNBLANK || - bd->props.fb_blank != FB_BLANK_UNBLANK || bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK); } -- cgit From 822c91e72eac568ed8d83765634f00decb45666c Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Mon, 4 Mar 2024 21:57:30 +0100 Subject: leds: trigger: Store brightness set by led_trigger_event() If a simple trigger is assigned to a LED, then the LED may be off until the next led_trigger_event() call. This may be an issue for simple triggers with rare led_trigger_event() calls, e.g. power supply charging indicators (drivers/power/supply/power_supply_leds.c). Therefore persist the brightness value of the last led_trigger_event() call and use this value if the trigger is assigned to a LED. In addition add a getter for the trigger brightness value. 
Signed-off-by: Heiner Kallweit Reviewed-by: Takashi Iwai Link: https://lore.kernel.org/r/b1358b25-3f30-458d-8240-5705ae007a8a@gmail.com Signed-off-by: Lee Jones --- drivers/leds/led-triggers.c | 6 ++++-- include/linux/leds.h | 15 +++++++++++++++ 2 files changed, 19 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index 0f5ac30053ad..b1b323b19301 100644 --- a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c @@ -194,11 +194,11 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig) spin_unlock(&trig->leddev_list_lock); led_cdev->trigger = trig; + ret = 0; if (trig->activate) ret = trig->activate(led_cdev); else - ret = 0; - + led_set_brightness(led_cdev, trig->brightness); if (ret) goto err_activate; @@ -387,6 +387,8 @@ void led_trigger_event(struct led_trigger *trig, if (!trig) return; + trig->brightness = brightness; + rcu_read_lock(); list_for_each_entry_rcu(led_cdev, &trig->led_cdevs, trig_list) led_set_brightness(led_cdev, brightness); diff --git a/include/linux/leds.h b/include/linux/leds.h index db6b114bb3d9..7964ee38b2a3 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -455,6 +455,9 @@ struct led_trigger { int (*activate)(struct led_classdev *led_cdev); void (*deactivate)(struct led_classdev *led_cdev); + /* Brightness set by led_trigger_event */ + enum led_brightness brightness; + /* LED-private triggers have this set */ struct led_hw_trigger_type *trigger_type; @@ -508,6 +511,12 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev) return led_cdev->trigger_data; } +static inline enum led_brightness +led_trigger_get_brightness(const struct led_trigger *trigger) +{ + return trigger ? trigger->brightness : LED_OFF; +} + #define module_led_trigger(__led_trigger) \ module_driver(__led_trigger, led_trigger_register, \ led_trigger_unregister) @@ -544,6 +553,12 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev) return NULL; } +static inline enum led_brightness +led_trigger_get_brightness(const struct led_trigger *trigger) +{ + return LED_OFF; +} + #endif /* CONFIG_LEDS_TRIGGERS */ /* Trigger specific enum */ -- cgit From ab2ab9e69ef9734b875ce6d43fe3f9f90135daae Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Mon, 4 Mar 2024 21:59:47 +0100 Subject: leds: trigger: audio: Remove this trigger Now that the audio trigger is fully integrated in sound/core/control_led.c, we can remove it here. 
Signed-off-by: Heiner Kallweit Reviewed-by: Takashi Iwai Link: https://lore.kernel.org/r/1e339779-6d04-4392-8ea2-5592c0fd1aa2@gmail.com Signed-off-by: Lee Jones --- arch/mips/configs/ci20_defconfig | 1 - drivers/leds/trigger/ledtrig-audio.c | 67 ------------------------------------ include/linux/leds.h | 14 -------- 3 files changed, 82 deletions(-) delete mode 100644 drivers/leds/trigger/ledtrig-audio.c (limited to 'include/linux') diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig index cdf2a782dee1..7827b2b392f6 100644 --- a/arch/mips/configs/ci20_defconfig +++ b/arch/mips/configs/ci20_defconfig @@ -152,7 +152,6 @@ CONFIG_LEDS_TRIGGER_CAMERA=m CONFIG_LEDS_TRIGGER_PANIC=y CONFIG_LEDS_TRIGGER_NETDEV=y CONFIG_LEDS_TRIGGER_PATTERN=y -CONFIG_LEDS_TRIGGER_AUDIO=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_JZ4740=y CONFIG_DMADEVICES=y diff --git a/drivers/leds/trigger/ledtrig-audio.c b/drivers/leds/trigger/ledtrig-audio.c deleted file mode 100644 index 2ecd4b760fc3..000000000000 --- a/drivers/leds/trigger/ledtrig-audio.c +++ /dev/null @@ -1,67 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -// -// Audio Mute LED trigger -// - -#include -#include -#include -#include "../leds.h" - -static enum led_brightness audio_state[NUM_AUDIO_LEDS]; - -static int ledtrig_audio_mute_activate(struct led_classdev *led_cdev) -{ - led_set_brightness_nosleep(led_cdev, audio_state[LED_AUDIO_MUTE]); - return 0; -} - -static int ledtrig_audio_micmute_activate(struct led_classdev *led_cdev) -{ - led_set_brightness_nosleep(led_cdev, audio_state[LED_AUDIO_MICMUTE]); - return 0; -} - -static struct led_trigger ledtrig_audio[NUM_AUDIO_LEDS] = { - [LED_AUDIO_MUTE] = { - .name = "audio-mute", - .activate = ledtrig_audio_mute_activate, - }, - [LED_AUDIO_MICMUTE] = { - .name = "audio-micmute", - .activate = ledtrig_audio_micmute_activate, - }, -}; - -enum led_brightness ledtrig_audio_get(enum led_audio type) -{ - return audio_state[type]; -} -EXPORT_SYMBOL_GPL(ledtrig_audio_get); - -void ledtrig_audio_set(enum led_audio type, enum led_brightness state) -{ - audio_state[type] = state; - led_trigger_event(&ledtrig_audio[type], state); -} -EXPORT_SYMBOL_GPL(ledtrig_audio_set); - -static int __init ledtrig_audio_init(void) -{ - led_trigger_register(&ledtrig_audio[LED_AUDIO_MUTE]); - led_trigger_register(&ledtrig_audio[LED_AUDIO_MICMUTE]); - return 0; -} -module_init(ledtrig_audio_init); - -static void __exit ledtrig_audio_exit(void) -{ - led_trigger_unregister(&ledtrig_audio[LED_AUDIO_MUTE]); - led_trigger_unregister(&ledtrig_audio[LED_AUDIO_MICMUTE]); -} -module_exit(ledtrig_audio_exit); - -MODULE_DESCRIPTION("LED trigger for audio mute control"); -MODULE_LICENSE("GPL v2"); -MODULE_ALIAS("ledtrig:audio-mute"); -MODULE_ALIAS("ledtrig:audio-micmute"); diff --git a/include/linux/leds.h b/include/linux/leds.h index 7964ee38b2a3..6300313c46b7 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -705,18 +705,4 @@ enum led_audio { NUM_AUDIO_LEDS }; -#if IS_ENABLED(CONFIG_LEDS_TRIGGER_AUDIO) -enum led_brightness ledtrig_audio_get(enum led_audio type); -void ledtrig_audio_set(enum led_audio type, enum led_brightness state); -#else -static inline enum led_brightness ledtrig_audio_get(enum led_audio type) -{ - return LED_OFF; -} -static inline void ledtrig_audio_set(enum led_audio type, - enum led_brightness state) -{ -} -#endif - #endif /* __LINUX_LEDS_H_INCLUDED */ -- cgit From 67bb37c05a6b56e0e1f804706145a52f655af3f1 Mon Sep 17 00:00:00 2001 From: Louis Chauvet Date: Wed, 27 Mar 2024 09:43:36 +0100 Subject: 
spi: spi-omap2-mcspi.c: revert "Toggle CS after each word" Commit 5cbc7ca987fb ("spi: spi-omap2-mcspi.c: Toggle CS after each word") introduced the toggling of CS after each word for the omap2-mcspi controller. The implementation is not respectful of the actual spi_message content, so the CS can be raised after each word even if the transfer structure asks to keep the CS active for the whole operation. As it is not used anyway in the current Linux tree, it can be safely removed. Signed-off-by: Louis Chauvet Link: https://msgid.link/r/20240327-spi-omap2-mcspi-multi-mode-v3-1-c4ac329dd5a2@bootlin.com Signed-off-by: Mark Brown --- drivers/spi/spi-omap2-mcspi.c | 15 --------------- include/linux/platform_data/spi-omap2-mcspi.h | 3 --- 2 files changed, 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index ddf1c684bcc7..601acec37eca 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -1175,13 +1175,6 @@ static int omap2_mcspi_transfer_one(struct spi_controller *ctlr, t->bits_per_word == spi->bits_per_word) par_override = 0; } - if (cd && cd->cs_per_word) { - chconf = mcspi->ctx.modulctrl; - chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE; - mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, chconf); - mcspi->ctx.modulctrl = - mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL); - } chconf = mcspi_cached_chconf0(spi); chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK; @@ -1240,14 +1233,6 @@ out: status = omap2_mcspi_setup_transfer(spi, NULL); } - if (cd && cd->cs_per_word) { - chconf = mcspi->ctx.modulctrl; - chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE; - mcspi_write_reg(ctlr, OMAP2_MCSPI_MODULCTRL, chconf); - mcspi->ctx.modulctrl = - mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL); - } - omap2_mcspi_set_enable(spi, 0); if (spi_get_csgpiod(spi, 0)) diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h index 3b400b1919a9..9e3c15b4ac91 100644 --- a/include/linux/platform_data/spi-omap2-mcspi.h +++ b/include/linux/platform_data/spi-omap2-mcspi.h @@ -16,9 +16,6 @@ struct omap2_mcspi_platform_config { struct omap2_mcspi_device_config { unsigned turbo_mode:1; - - /* toggle chip select after every word */ - unsigned cs_per_word:1; }; #endif -- cgit From 368d67dab4cc4a3ffd39fbd062b2f5796cdbb37b Mon Sep 17 00:00:00 2001 From: Abel Vesa Date: Sun, 24 Mar 2024 20:50:17 +0200 Subject: phy: Add Embedded DisplayPort and DisplayPort submodes In some cases, a DP PHY needs to be configured to work in eDP mode. So add submodes for both DP and eDP so they can be used by the controllers for specifying the mode the PHY should be configured in. 
Signed-off-by: Abel Vesa Reviewed-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20240324-x1e80100-phy-edp-compatible-refactor-v5-1-a0db5f3150bc@linaro.org Signed-off-by: Vinod Koul --- include/linux/phy/phy-dp.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/phy/phy-dp.h b/include/linux/phy/phy-dp.h index 18cad23642cd..9cce5766bc0b 100644 --- a/include/linux/phy/phy-dp.h +++ b/include/linux/phy/phy-dp.h @@ -8,6 +8,9 @@ #include +#define PHY_SUBMODE_DP 0 +#define PHY_SUBMODE_EDP 1 + /** * struct phy_configure_opts_dp - DisplayPort PHY configuration set * -- cgit From 6e9b01909a811555ff3326cf80a5847169c57806 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Tue, 26 Mar 2024 21:02:12 -0700 Subject: net: remove gfp_mask from napi_alloc_skb() __napi_alloc_skb() is napi_alloc_skb() with the added flexibility of choosing gfp_mask. This is a NAPI function, so GFP_ATOMIC is implied. The only practical choice the caller has is whether to set __GFP_NOWARN. But that's a false choice, too, allocation failures in atomic context will happen, and printing warnings in logs, effectively for a packet drop, is both too much and very likely non-actionable. This leads me to a conclusion that most uses of napi_alloc_skb() are simply misguided, and should use __GFP_NOWARN in the first place. We also have a "standard" way of reporting allocation failures via the queue stat API (qstats::rx-alloc-fail). The direct motivation for this patch is that one of the drivers used at Meta calls napi_alloc_skb() (so prior to this patch without __GFP_NOWARN), and the resulting OOM warning is the top networking warning in our fleet. Reviewed-by: Alexander Lobakin Reviewed-by: Simon Horman Reviewed-by: Eric Dumazet Link: https://lore.kernel.org/r/20240327040213.3153864-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- Documentation/mm/page_frags.rst | 2 +- Documentation/translations/zh_CN/mm/page_frags.rst | 2 +- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 4 +--- drivers/net/ethernet/intel/i40e/i40e_xsk.c | 3 +-- drivers/net/ethernet/intel/iavf/iavf_txrx.c | 4 +--- drivers/net/ethernet/intel/ice/ice_txrx.c | 3 +-- drivers/net/ethernet/intel/ice/ice_xsk.c | 3 +-- drivers/net/ethernet/intel/idpf/idpf_txrx.c | 5 ++--- drivers/net/ethernet/intel/igc/igc_main.c | 3 +-- drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 3 +-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 5 ++--- include/linux/skbuff.h | 8 +------- net/core/skbuff.c | 9 ++++----- 13 files changed, 18 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/Documentation/mm/page_frags.rst b/Documentation/mm/page_frags.rst index a81617e688a8..503ca6cdb804 100644 --- a/Documentation/mm/page_frags.rst +++ b/Documentation/mm/page_frags.rst @@ -25,7 +25,7 @@ to be disabled when executing the fragment allocation. The network stack uses two separate caches per CPU to handle fragment allocation. The netdev_alloc_cache is used by callers making use of the netdev_alloc_frag and __netdev_alloc_skb calls. The napi_alloc_cache is -used by callers of the __napi_alloc_frag and __napi_alloc_skb calls. The +used by callers of the __napi_alloc_frag and napi_alloc_skb calls. The main difference between these two calls is the context in which they may be called. 
The "netdev" prefixed functions are usable in any context as these functions will disable interrupts, while the "napi" prefixed functions are diff --git a/Documentation/translations/zh_CN/mm/page_frags.rst b/Documentation/translations/zh_CN/mm/page_frags.rst index 20bd3fafdc8c..a5b22486a913 100644 --- a/Documentation/translations/zh_CN/mm/page_frags.rst +++ b/Documentation/translations/zh_CN/mm/page_frags.rst @@ -25,7 +25,7 @@ sk_buff->head使用,或者用于skb_shared_info的 “frags” 部分。 网络堆栈在每个CPU使用两个独立的缓存来处理碎片分配。netdev_alloc_cache被使用 netdev_alloc_frag和__netdev_alloc_skb调用的调用者使用。napi_alloc_cache -被调用__napi_alloc_frag和__napi_alloc_skb的调用者使用。这两个调用的主要区别是 +被调用__napi_alloc_frag和napi_alloc_skb的调用者使用。这两个调用的主要区别是 它们可能被调用的环境。“netdev” 前缀的函数可以在任何上下文中使用,因为这些函数 将禁用中断,而 ”napi“ 前缀的函数只可以在softirq上下文中使用。 diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 0d7177083708..ac2fcc5ac595 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2144,9 +2144,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, */ /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - I40E_RX_HDR_SIZE, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, I40E_RX_HDR_SIZE); if (unlikely(!skb)) return NULL; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 11500003af0d..a85b425794df 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -301,8 +301,7 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring, net_prefetch(xdp->data_meta); /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize); if (unlikely(!skb)) goto out; diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index b71484c87a84..32bb604a1382 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -1334,9 +1334,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, net_prefetch(va); /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - IAVF_RX_HDR_SIZE, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, IAVF_RX_HDR_SIZE); if (unlikely(!skb)) return NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index 97d41d6ebf1f..8bb743f78fcb 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1051,8 +1051,7 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) } /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE); if (unlikely(!skb)) return NULL; diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 1857220d27fe..aa81d1162b81 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -555,8 +555,7 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) } net_prefetch(xdp->data_meta); - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize, - GFP_ATOMIC | 
__GFP_NOWARN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize); if (unlikely(!skb)) return NULL; diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 6dd7a66bb897..f940f650cd78 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -3005,8 +3005,7 @@ struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, /* prefetch first cache line of first page */ net_prefetch(va); /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE, - GFP_ATOMIC); + skb = napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE); if (unlikely(!skb)) { idpf_rx_put_page(rx_buf); @@ -3060,7 +3059,7 @@ static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq, struct sk_buff *skb; /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rxq->q_vector->napi, size, GFP_ATOMIC); + skb = napi_alloc_skb(&rxq->q_vector->napi, size); if (unlikely(!skb)) return NULL; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 35ad40a803cb..0cd923c8ac9b 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -2712,8 +2712,7 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring, net_prefetch(xdp->data_meta); - skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&ring->q_vector->napi, totalsize); if (unlikely(!skb)) return NULL; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index d34d715c59eb..397cb773fabb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -220,8 +220,7 @@ static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring, net_prefetch(xdp->data_meta); /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize); if (unlikely(!skb)) return NULL; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 24cd80490d19..bcdde68a099a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -5109,9 +5109,8 @@ static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch, unsigned int datasize = xdp->data_end - xdp->data; struct sk_buff *skb; - skb = __napi_alloc_skb(&ch->rxtx_napi, - xdp->data_end - xdp->data_hard_start, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&ch->rxtx_napi, + xdp->data_end - xdp->data_hard_start); if (unlikely(!skb)) return NULL; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 517e546a120a..b7f1ecdaec38 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3350,13 +3350,7 @@ static inline void *napi_alloc_frag_align(unsigned int fragsz, return __napi_alloc_frag_align(fragsz, -align); } -struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, - unsigned int length, gfp_t gfp_mask); -static inline struct sk_buff *napi_alloc_skb(struct napi_struct *napi, - unsigned int length) -{ - return __napi_alloc_skb(napi, length, GFP_ATOMIC); -} +struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int length); void napi_consume_skb(struct sk_buff *skb, int budget); void napi_skb_free_stolen_head(struct 
sk_buff *skb); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 17617c29be2d..a1be84be5d35 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -775,10 +775,9 @@ skb_fail: EXPORT_SYMBOL(__netdev_alloc_skb); /** - * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance + * napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance * @napi: napi instance this buffer was allocated for * @len: length to allocate - * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages * * Allocate a new sk_buff for use in NAPI receive. This buffer will * attempt to allocate the head from a special reserved region used @@ -787,9 +786,9 @@ EXPORT_SYMBOL(__netdev_alloc_skb); * * %NULL is returned if there is no free memory. */ -struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, - gfp_t gfp_mask) +struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len) { + gfp_t gfp_mask = GFP_ATOMIC | __GFP_NOWARN; struct napi_alloc_cache *nc; struct sk_buff *skb; bool pfmemalloc; @@ -860,7 +859,7 @@ skb_success: skb_fail: return skb; } -EXPORT_SYMBOL(__napi_alloc_skb); +EXPORT_SYMBOL(napi_alloc_skb); void skb_add_rx_frag_netmem(struct sk_buff *skb, int i, netmem_ref netmem, int off, int size, unsigned int truesize) -- cgit From ca7e324e8ad385a2da15049953c04ea7310687f7 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Wed, 27 Mar 2024 15:22:39 +0100 Subject: compiler_types: add Endianness-dependent __counted_by_{le,be} Some structures contain flexible arrays at the end and the counter for them, but the counter has explicit Endianness and thus __counted_by() can't be used directly. To increase test coverage for potential problems without breaking anything, introduce __counted_by_{le,be}() defined depending on platform's Endianness to either __counted_by() when applicable or noop otherwise. Maybe it would be a good idea to introduce such attributes on compiler level if possible, but for now let's stop on what we have. Acked-by: Kees Cook Acked-by: Gustavo A. R. Silva Signed-off-by: Alexander Lobakin Reviewed-by: Przemek Kitszel Link: https://lore.kernel.org/r/20240327142241.1745989-2-aleksander.lobakin@intel.com Signed-off-by: Jakub Kicinski --- Documentation/conf.py | 2 ++ include/linux/compiler_types.h | 11 +++++++++++ scripts/kernel-doc | 1 + 3 files changed, 14 insertions(+) (limited to 'include/linux') diff --git a/Documentation/conf.py b/Documentation/conf.py index d148f3e8dd57..0c2205d536b3 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -75,6 +75,8 @@ if major >= 3: "__rcu", "__user", "__force", + "__counted_by_le", + "__counted_by_be", # include/linux/compiler_attributes.h: "__alias", diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 2abaa3a825a9..a29ba6ef1e27 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -282,6 +282,17 @@ struct ftrace_likely_data { #define __no_sanitize_or_inline __always_inline #endif +/* + * Apply __counted_by() when the Endianness matches to increase test coverage. + */ +#ifdef __LITTLE_ENDIAN +#define __counted_by_le(member) __counted_by(member) +#define __counted_by_be(member) +#else +#define __counted_by_le(member) +#define __counted_by_be(member) __counted_by(member) +#endif + /* Do not trap wrapping arithmetic within an annotated function. 
*/ #ifdef CONFIG_UBSAN_SIGNED_WRAP # define __signed_wrap __attribute__((no_sanitize("signed-integer-overflow"))) diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 967f1abb0edb..1474e95dbe4f 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -1143,6 +1143,7 @@ sub dump_struct($$) { $members =~ s/\s*$attribute/ /gi; $members =~ s/\s*__aligned\s*\([^;]*\)/ /gos; $members =~ s/\s*__counted_by\s*\([^;]*\)/ /gos; + $members =~ s/\s*__counted_by_(le|be)\s*\([^;]*\)/ /gos; $members =~ s/\s*__packed\s*/ /gos; $members =~ s/\s*CRYPTO_MINALIGN_ATTR/ /gos; $members =~ s/\s*____cacheline_aligned_in_smp/ /gos; -- cgit From 21d9ba5bc5516e1d8a90c4a26de90855a1e4fb59 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Tue, 26 Mar 2024 14:32:07 +0100 Subject: net: phylink: add PHY_F_RXC_ALWAYS_ON to PHY dev flags Some MAC controllers (e.g. stmmac) require their connected PHY to continuously provide a receive clock signal. This can cause issues in two cases: 1. The clock signal hasn't been started yet by the time the MAC driver initializes its hardware. This can make the initialization fail, as in the case of the rzn1 GMAC1 driver. 2. The clock signal is cut during a power saving event. By the time the MAC is brought back up, the clock signal is still not active since phylink_start hasn't been called yet. This brings us back to case 1. If a PHY driver reads this flag, it should ensure that the receive clock signal is started as soon as possible, and that it isn't brought down when the PHY goes into suspend. Signed-off-by: Russell King (Oracle) [rgantois: commit log] Signed-off-by: Romain Gantois Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/20240326-rxc_bugfix-v6-1-24a74e5c761f@bootlin.com Signed-off-by: Jakub Kicinski --- drivers/net/phy/phylink.c | 10 +++++++++- include/linux/phy.h | 1 + include/linux/phylink.h | 4 ++++ 3 files changed, 14 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 503fd7c40523..2bb583543dea 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1923,6 +1923,8 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy, static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy, phy_interface_t interface) { + u32 flags = 0; + if (WARN_ON(pl->cfg_link_an_mode == MLO_AN_FIXED || (pl->cfg_link_an_mode == MLO_AN_INBAND && phy_interface_mode_is_8023z(interface) && !pl->sfp_bus))) @@ -1931,7 +1933,10 @@ static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy, if (pl->phydev) return -EBUSY; - return phy_attach_direct(pl->netdev, phy, 0, interface); + if (pl->config->mac_requires_rxc) + flags |= PHY_F_RXC_ALWAYS_ON; + + return phy_attach_direct(pl->netdev, phy, flags, interface); } /** @@ -2034,6 +2039,9 @@ int phylink_fwnode_phy_connect(struct phylink *pl, pl->link_config.interface = pl->link_interface; } + if (pl->config->mac_requires_rxc) + flags |= PHY_F_RXC_ALWAYS_ON; + ret = phy_attach_direct(pl->netdev, phy_dev, flags, pl->link_interface); phy_device_free(phy_dev); diff --git a/include/linux/phy.h b/include/linux/phy.h index 3f68b8239bb1..e6e83304558e 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -778,6 +778,7 @@ struct phy_device { /* Generic phy_device::dev_flags */ #define PHY_F_NO_IRQ 0x80000000 +#define PHY_F_RXC_ALWAYS_ON 0x40000000 static inline struct phy_device *to_phy_device(const struct device *dev) { diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 
9a57deefcb07..fef8ae66b988 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -138,6 +138,9 @@ enum phylink_op_type { * @poll_fixed_state: if true, starts link_poll, * if MAC link is at %MLO_AN_FIXED mode. * @mac_managed_pm: if true, indicate the MAC driver is responsible for PHY PM. + * @mac_requires_rxc: if true, the MAC always requires a receive clock from PHY. + * The PHY driver should start the clock signal as soon as + * possible and avoid stopping it during suspend events. * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND * @get_fixed_state: callback to execute to determine the fixed link state, * if MAC link is at %MLO_AN_FIXED mode. @@ -150,6 +153,7 @@ struct phylink_config { enum phylink_op_type type; bool poll_fixed_state; bool mac_managed_pm; + bool mac_requires_rxc; bool ovr_an_inband; void (*get_fixed_state)(struct phylink_config *config, struct phylink_link_state *state); -- cgit From dceb393a0a8e82ff05b7c1f6c911d0779d856a7a Mon Sep 17 00:00:00 2001 From: Romain Gantois Date: Tue, 26 Mar 2024 14:32:08 +0100 Subject: net: phylink: add rxc_always_on flag to phylink_pcs Some MAC drivers (e.g. stmmac) require a continuous receive clock signal to be generated by a PCS that is handled by a standalone PCS driver. Such a PCS driver does not have access to a PHY device, thus cannot check the PHY_F_RXC_ALWAYS_ON flag. They cannot check max_requires_rxc in the phylink config either, since it is a private member. Therefore, a new flag is needed to signal to the PCS that it should keep the RX clock signal up at all times. Co-developed-by: Russell King (Oracle) Signed-off-by: Russell King (Oracle) Signed-off-by: Romain Gantois Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/20240326-rxc_bugfix-v6-2-24a74e5c761f@bootlin.com Signed-off-by: Jakub Kicinski --- drivers/net/phy/phylink.c | 15 +++++++++++++++ include/linux/phylink.h | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 2bb583543dea..84a97088dfc6 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1042,6 +1042,21 @@ static void phylink_pcs_poll_start(struct phylink *pl) mod_timer(&pl->link_poll, jiffies + HZ); } +int phylink_pcs_pre_init(struct phylink *pl, struct phylink_pcs *pcs) +{ + int ret = 0; + + /* Signal to PCS driver that MAC requires RX clock for init */ + if (pl->config->mac_requires_rxc) + pcs->rxc_always_on = true; + + if (pcs->ops->pcs_pre_init) + ret = pcs->ops->pcs_pre_init(pcs); + + return ret; +} +EXPORT_SYMBOL_GPL(phylink_pcs_pre_init); + static void phylink_mac_config(struct phylink *pl, const struct phylink_link_state *state) { diff --git a/include/linux/phylink.h b/include/linux/phylink.h index fef8ae66b988..5ea6b2ad2396 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -396,6 +396,10 @@ struct phylink_pcs_ops; * @phylink: pointer to &struct phylink_config * @neg_mode: provide PCS neg mode via "mode" argument * @poll: poll the PCS for link changes + * @rxc_always_on: The MAC driver requires the reference clock + * to always be on. Standalone PCS drivers which + * do not have access to a PHY device can check + * this instead of PHY_F_RXC_ALWAYS_ON. * * This structure is designed to be embedded within the PCS private data, * and will be passed between phylink and the PCS. 
@@ -408,6 +412,7 @@ struct phylink_pcs { struct phylink *phylink; bool neg_mode; bool poll; + bool rxc_always_on; }; /** @@ -422,6 +427,8 @@ struct phylink_pcs { * @pcs_an_restart: restart 802.3z BaseX autonegotiation. * @pcs_link_up: program the PCS for the resolved link configuration * (where necessary). + * @pcs_pre_init: configure PCS components necessary for MAC hardware + * initialization e.g. RX clock for stmmac. */ struct phylink_pcs_ops { int (*pcs_validate)(struct phylink_pcs *pcs, unsigned long *supported, @@ -441,6 +448,7 @@ struct phylink_pcs_ops { void (*pcs_an_restart)(struct phylink_pcs *pcs); void (*pcs_link_up)(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, int speed, int duplex); + int (*pcs_pre_init)(struct phylink_pcs *pcs); }; #if 0 /* For kernel-doc purposes only. */ @@ -546,6 +554,34 @@ void pcs_an_restart(struct phylink_pcs *pcs); */ void pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, phy_interface_t interface, int speed, int duplex); + +/** + * pcs_pre_init() - Configure PCS components necessary for MAC initialization + * @pcs: a pointer to a &struct phylink_pcs. + * + * This function can be called by MAC drivers through the + * phylink_pcs_pre_init() wrapper, before their hardware is initialized. It + * should not be called after the link is brought up, as reconfiguring the PCS + * at this point could break the link. + * + * Some MAC devices require specific hardware initialization to be performed by + * their associated PCS device before they can properly initialize their own + * hardware. An example of this is the initialization of stmmac controllers, + * which requires an active REF_CLK signal to be provided by the PHY/PCS. + * + * By calling phylink_pcs_pre_init(), MAC drivers can ensure that the PCS is + * setup in a way that allows for successful hardware initialization. + * + * The specific configuration performed by pcs_pre_init() is dependent on the + * model of PCS and the requirements of the MAC device attached to it. PCS + * driver authors should consider whether their target device is to be used in + * conjunction with a MAC device whose driver calls phylink_pcs_pre_init(). MAC + * driver authors should document their requirements for the PCS + * pre-initialization. + * + */ +int pcs_pre_init(struct phylink_pcs *pcs); + #endif struct phylink *phylink_create(struct phylink_config *, @@ -565,6 +601,8 @@ void phylink_disconnect_phy(struct phylink *); void phylink_mac_change(struct phylink *, bool up); void phylink_pcs_change(struct phylink_pcs *, bool up); +int phylink_pcs_pre_init(struct phylink *pl, struct phylink_pcs *pcs); + void phylink_start(struct phylink *); void phylink_stop(struct phylink *); -- cgit From ba5206881843e16b74a07c37970dcc44d22f8f6f Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 28 Mar 2024 15:33:40 -0700 Subject: spi: spi.h: add missing kernel-doc for @last_cs_index_mask kernel-doc complains about last_cs_index_mask not described, so add its description. 
spi.h:778: warning: Function parameter or struct member 'last_cs_index_mask' not described in 'spi_controller' Fixes: 4d8ff6b0991d ("spi: Add multi-cs memories support in SPI core") Signed-off-by: Randy Dunlap Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20240328223340.17159-1-rdunlap@infradead.org Signed-off-by: Mark Brown --- include/linux/spi/spi.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index b589e2547439..e8e1e798924f 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -453,6 +453,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs. * @last_cs: the last chip_select that is recorded by set_cs, -1 on non chip * selected + * @last_cs_index_mask: bit mask the last chip selects that were used * @xfer_completion: used by core transfer_one_message() * @busy: message pump is busy * @running: message pump is running -- cgit From 4d4d2d4346857bf778fafaa97d6f76bb1663e3c9 Mon Sep 17 00:00:00 2001 From: Marco Pagani Date: Tue, 5 Mar 2024 20:29:26 +0100 Subject: fpga: manager: add owner module and take its refcount The current implementation of the fpga manager assumes that the low-level module registers a driver for the parent device and uses its owner pointer to take the module's refcount. This approach is problematic since it can lead to a null pointer dereference while attempting to get the manager if the parent device does not have a driver. To address this problem, add a module owner pointer to the fpga_manager struct and use it to take the module's refcount. Modify the functions for registering the manager to take an additional owner module parameter and rename them to avoid conflicts. Use the old function names for helper macros that automatically set the module that registers the manager as the owner. This ensures compatibility with existing low-level control modules and reduces the chances of registering a manager without setting the owner. Also, update the documentation to keep it consistent with the new interface for registering an fpga manager. Other changes: opportunistically move put_device() from __fpga_mgr_get() to fpga_mgr_get() and of_fpga_mgr_get() to improve code clarity since the manager device is taken in these functions. Fixes: 654ba4cc0f3e ("fpga manager: ensure lifetime with of_fpga_mgr_get") Suggested-by: Greg Kroah-Hartman Suggested-by: Xu Yilun Signed-off-by: Marco Pagani Acked-by: Xu Yilun Link: https://lore.kernel.org/r/20240305192926.84886-1-marpagan@redhat.com Signed-off-by: Xu Yilun --- Documentation/driver-api/fpga/fpga-mgr.rst | 34 ++++++++----- drivers/fpga/fpga-mgr.c | 82 ++++++++++++++++++------------ include/linux/fpga/fpga-mgr.h | 26 +++++++--- 3 files changed, 89 insertions(+), 53 deletions(-) (limited to 'include/linux') diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst index 49c0a9512653..8d2b79f696c1 100644 --- a/Documentation/driver-api/fpga/fpga-mgr.rst +++ b/Documentation/driver-api/fpga/fpga-mgr.rst @@ -24,7 +24,8 @@ How to support a new FPGA device -------------------------------- To add another FPGA manager, write a driver that implements a set of ops. 
The -probe function calls fpga_mgr_register() or fpga_mgr_register_full(), such as:: +probe function calls ``fpga_mgr_register()`` or ``fpga_mgr_register_full()``, +such as:: static const struct fpga_manager_ops socfpga_fpga_ops = { .write_init = socfpga_fpga_ops_configure_init, @@ -69,10 +70,11 @@ probe function calls fpga_mgr_register() or fpga_mgr_register_full(), such as:: } Alternatively, the probe function could call one of the resource managed -register functions, devm_fpga_mgr_register() or devm_fpga_mgr_register_full(). -When these functions are used, the parameter syntax is the same, but the call -to fpga_mgr_unregister() should be removed. In the above example, the -socfpga_fpga_remove() function would not be required. +register functions, ``devm_fpga_mgr_register()`` or +``devm_fpga_mgr_register_full()``. When these functions are used, the +parameter syntax is the same, but the call to ``fpga_mgr_unregister()`` should be +removed. In the above example, the ``socfpga_fpga_remove()`` function would not be +required. The ops will implement whatever device specific register writes are needed to do the programming sequence for this particular FPGA. These ops return 0 for @@ -125,15 +127,19 @@ API for implementing a new FPGA Manager driver * struct fpga_manager - the FPGA manager struct * struct fpga_manager_ops - Low level FPGA manager driver ops * struct fpga_manager_info - Parameter structure for fpga_mgr_register_full() -* fpga_mgr_register_full() - Create and register an FPGA manager using the +* __fpga_mgr_register_full() - Create and register an FPGA manager using the fpga_mgr_info structure to provide the full flexibility of options -* fpga_mgr_register() - Create and register an FPGA manager using standard +* __fpga_mgr_register() - Create and register an FPGA manager using standard arguments -* devm_fpga_mgr_register_full() - Resource managed version of - fpga_mgr_register_full() -* devm_fpga_mgr_register() - Resource managed version of fpga_mgr_register() +* __devm_fpga_mgr_register_full() - Resource managed version of + __fpga_mgr_register_full() +* __devm_fpga_mgr_register() - Resource managed version of __fpga_mgr_register() * fpga_mgr_unregister() - Unregister an FPGA manager +Helper macros ``fpga_mgr_register_full()``, ``fpga_mgr_register()``, +``devm_fpga_mgr_register_full()``, and ``devm_fpga_mgr_register()`` are available +to ease the registration. + .. kernel-doc:: include/linux/fpga/fpga-mgr.h :functions: fpga_mgr_states @@ -147,16 +153,16 @@ API for implementing a new FPGA Manager driver :functions: fpga_manager_info .. kernel-doc:: drivers/fpga/fpga-mgr.c - :functions: fpga_mgr_register_full + :functions: __fpga_mgr_register_full .. kernel-doc:: drivers/fpga/fpga-mgr.c - :functions: fpga_mgr_register + :functions: __fpga_mgr_register .. kernel-doc:: drivers/fpga/fpga-mgr.c - :functions: devm_fpga_mgr_register_full + :functions: __devm_fpga_mgr_register_full .. kernel-doc:: drivers/fpga/fpga-mgr.c - :functions: devm_fpga_mgr_register + :functions: __devm_fpga_mgr_register .. 
kernel-doc:: drivers/fpga/fpga-mgr.c :functions: fpga_mgr_unregister diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c index 06651389c592..0f4035b089a2 100644 --- a/drivers/fpga/fpga-mgr.c +++ b/drivers/fpga/fpga-mgr.c @@ -664,20 +664,16 @@ static struct attribute *fpga_mgr_attrs[] = { }; ATTRIBUTE_GROUPS(fpga_mgr); -static struct fpga_manager *__fpga_mgr_get(struct device *dev) +static struct fpga_manager *__fpga_mgr_get(struct device *mgr_dev) { struct fpga_manager *mgr; - mgr = to_fpga_manager(dev); + mgr = to_fpga_manager(mgr_dev); - if (!try_module_get(dev->parent->driver->owner)) - goto err_dev; + if (!try_module_get(mgr->mops_owner)) + mgr = ERR_PTR(-ENODEV); return mgr; - -err_dev: - put_device(dev); - return ERR_PTR(-ENODEV); } static int fpga_mgr_dev_match(struct device *dev, const void *data) @@ -693,12 +689,18 @@ static int fpga_mgr_dev_match(struct device *dev, const void *data) */ struct fpga_manager *fpga_mgr_get(struct device *dev) { - struct device *mgr_dev = class_find_device(&fpga_mgr_class, NULL, dev, - fpga_mgr_dev_match); + struct fpga_manager *mgr; + struct device *mgr_dev; + + mgr_dev = class_find_device(&fpga_mgr_class, NULL, dev, fpga_mgr_dev_match); if (!mgr_dev) return ERR_PTR(-ENODEV); - return __fpga_mgr_get(mgr_dev); + mgr = __fpga_mgr_get(mgr_dev); + if (IS_ERR(mgr)) + put_device(mgr_dev); + + return mgr; } EXPORT_SYMBOL_GPL(fpga_mgr_get); @@ -711,13 +713,18 @@ EXPORT_SYMBOL_GPL(fpga_mgr_get); */ struct fpga_manager *of_fpga_mgr_get(struct device_node *node) { - struct device *dev; + struct fpga_manager *mgr; + struct device *mgr_dev; - dev = class_find_device_by_of_node(&fpga_mgr_class, node); - if (!dev) + mgr_dev = class_find_device_by_of_node(&fpga_mgr_class, node); + if (!mgr_dev) return ERR_PTR(-ENODEV); - return __fpga_mgr_get(dev); + mgr = __fpga_mgr_get(mgr_dev); + if (IS_ERR(mgr)) + put_device(mgr_dev); + + return mgr; } EXPORT_SYMBOL_GPL(of_fpga_mgr_get); @@ -727,7 +734,7 @@ EXPORT_SYMBOL_GPL(of_fpga_mgr_get); */ void fpga_mgr_put(struct fpga_manager *mgr) { - module_put(mgr->dev.parent->driver->owner); + module_put(mgr->mops_owner); put_device(&mgr->dev); } EXPORT_SYMBOL_GPL(fpga_mgr_put); @@ -766,9 +773,10 @@ void fpga_mgr_unlock(struct fpga_manager *mgr) EXPORT_SYMBOL_GPL(fpga_mgr_unlock); /** - * fpga_mgr_register_full - create and register an FPGA Manager device + * __fpga_mgr_register_full - create and register an FPGA Manager device * @parent: fpga manager device from pdev * @info: parameters for fpga manager + * @owner: owner module containing the ops * * The caller of this function is responsible for calling fpga_mgr_unregister(). * Using devm_fpga_mgr_register_full() instead is recommended. 
@@ -776,7 +784,8 @@ EXPORT_SYMBOL_GPL(fpga_mgr_unlock); * Return: pointer to struct fpga_manager pointer or ERR_PTR() */ struct fpga_manager * -fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info) +__fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info, + struct module *owner) { const struct fpga_manager_ops *mops = info->mops; struct fpga_manager *mgr; @@ -804,6 +813,8 @@ fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *in mutex_init(&mgr->ref_mutex); + mgr->mops_owner = owner; + mgr->name = info->name; mgr->mops = info->mops; mgr->priv = info->priv; @@ -841,14 +852,15 @@ error_kfree: return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(fpga_mgr_register_full); +EXPORT_SYMBOL_GPL(__fpga_mgr_register_full); /** - * fpga_mgr_register - create and register an FPGA Manager device + * __fpga_mgr_register - create and register an FPGA Manager device * @parent: fpga manager device from pdev * @name: fpga manager name * @mops: pointer to structure of fpga manager ops * @priv: fpga manager private data + * @owner: owner module containing the ops * * The caller of this function is responsible for calling fpga_mgr_unregister(). * Using devm_fpga_mgr_register() instead is recommended. This simple @@ -859,8 +871,8 @@ EXPORT_SYMBOL_GPL(fpga_mgr_register_full); * Return: pointer to struct fpga_manager pointer or ERR_PTR() */ struct fpga_manager * -fpga_mgr_register(struct device *parent, const char *name, - const struct fpga_manager_ops *mops, void *priv) +__fpga_mgr_register(struct device *parent, const char *name, + const struct fpga_manager_ops *mops, void *priv, struct module *owner) { struct fpga_manager_info info = { 0 }; @@ -868,9 +880,9 @@ fpga_mgr_register(struct device *parent, const char *name, info.mops = mops; info.priv = priv; - return fpga_mgr_register_full(parent, &info); + return __fpga_mgr_register_full(parent, &info, owner); } -EXPORT_SYMBOL_GPL(fpga_mgr_register); +EXPORT_SYMBOL_GPL(__fpga_mgr_register); /** * fpga_mgr_unregister - unregister an FPGA manager @@ -900,9 +912,10 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res) } /** - * devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register() + * __devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register() * @parent: fpga manager device from pdev * @info: parameters for fpga manager + * @owner: owner module containing the ops * * Return: fpga manager pointer on success, negative error code otherwise. * @@ -910,7 +923,8 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res) * function will be called automatically when the managing device is detached. 
*/ struct fpga_manager * -devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info) +__devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info, + struct module *owner) { struct fpga_mgr_devres *dr; struct fpga_manager *mgr; @@ -919,7 +933,7 @@ devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_inf if (!dr) return ERR_PTR(-ENOMEM); - mgr = fpga_mgr_register_full(parent, info); + mgr = __fpga_mgr_register_full(parent, info, owner); if (IS_ERR(mgr)) { devres_free(dr); return mgr; @@ -930,14 +944,15 @@ devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_inf return mgr; } -EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full); +EXPORT_SYMBOL_GPL(__devm_fpga_mgr_register_full); /** - * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register() + * __devm_fpga_mgr_register - resource managed variant of fpga_mgr_register() * @parent: fpga manager device from pdev * @name: fpga manager name * @mops: pointer to structure of fpga manager ops * @priv: fpga manager private data + * @owner: owner module containing the ops * * Return: fpga manager pointer on success, negative error code otherwise. * @@ -946,8 +961,9 @@ EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full); * device is detached. */ struct fpga_manager * -devm_fpga_mgr_register(struct device *parent, const char *name, - const struct fpga_manager_ops *mops, void *priv) +__devm_fpga_mgr_register(struct device *parent, const char *name, + const struct fpga_manager_ops *mops, void *priv, + struct module *owner) { struct fpga_manager_info info = { 0 }; @@ -955,9 +971,9 @@ devm_fpga_mgr_register(struct device *parent, const char *name, info.mops = mops; info.priv = priv; - return devm_fpga_mgr_register_full(parent, &info); + return __devm_fpga_mgr_register_full(parent, &info, owner); } -EXPORT_SYMBOL_GPL(devm_fpga_mgr_register); +EXPORT_SYMBOL_GPL(__devm_fpga_mgr_register); static void fpga_mgr_dev_release(struct device *dev) { diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index 54f63459efd6..0d4fe068f3d8 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -201,6 +201,7 @@ struct fpga_manager_ops { * @state: state of fpga manager * @compat_id: FPGA manager id for compatibility check. 
* @mops: pointer to struct of fpga manager ops + * @mops_owner: module containing the mops * @priv: low level driver private date */ struct fpga_manager { @@ -210,6 +211,7 @@ struct fpga_manager { enum fpga_mgr_states state; struct fpga_compat_id *compat_id; const struct fpga_manager_ops *mops; + struct module *mops_owner; void *priv; }; @@ -230,18 +232,30 @@ struct fpga_manager *fpga_mgr_get(struct device *dev); void fpga_mgr_put(struct fpga_manager *mgr); +#define fpga_mgr_register_full(parent, info) \ + __fpga_mgr_register_full(parent, info, THIS_MODULE) struct fpga_manager * -fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info); +__fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info, + struct module *owner); +#define fpga_mgr_register(parent, name, mops, priv) \ + __fpga_mgr_register(parent, name, mops, priv, THIS_MODULE) struct fpga_manager * -fpga_mgr_register(struct device *parent, const char *name, - const struct fpga_manager_ops *mops, void *priv); +__fpga_mgr_register(struct device *parent, const char *name, + const struct fpga_manager_ops *mops, void *priv, struct module *owner); + void fpga_mgr_unregister(struct fpga_manager *mgr); +#define devm_fpga_mgr_register_full(parent, info) \ + __devm_fpga_mgr_register_full(parent, info, THIS_MODULE) struct fpga_manager * -devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info); +__devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info, + struct module *owner); +#define devm_fpga_mgr_register(parent, name, mops, priv) \ + __devm_fpga_mgr_register(parent, name, mops, priv, THIS_MODULE) struct fpga_manager * -devm_fpga_mgr_register(struct device *parent, const char *name, - const struct fpga_manager_ops *mops, void *priv); +__devm_fpga_mgr_register(struct device *parent, const char *name, + const struct fpga_manager_ops *mops, void *priv, + struct module *owner); #endif /*_LINUX_FPGA_MGR_H */ -- cgit From 1da11f822042eb6ef4b6064dc048f157a7852529 Mon Sep 17 00:00:00 2001 From: Marco Pagani Date: Fri, 22 Mar 2024 18:18:37 +0100 Subject: fpga: bridge: add owner module and take its refcount The current implementation of the fpga bridge assumes that the low-level module registers a driver for the parent device and uses its owner pointer to take the module's refcount. This approach is problematic since it can lead to a null pointer dereference while attempting to get the bridge if the parent device does not have a driver. To address this problem, add a module owner pointer to the fpga_bridge struct and use it to take the module's refcount. Modify the function for registering a bridge to take an additional owner module parameter and rename it to avoid conflicts. Use the old function name for a helper macro that automatically sets the module that registers the bridge as the owner. This ensures compatibility with existing low-level control modules and reduces the chances of registering a bridge without setting the owner. Also, update the documentation to keep it consistent with the new interface for registering an fpga bridge. Other changes: opportunistically move put_device() from __fpga_bridge_get() to fpga_bridge_get() and of_fpga_bridge_get() to improve code clarity since the bridge device is taken in these functions. 
Fixes: 21aeda950c5f ("fpga: add fpga bridge framework") Suggested-by: Greg Kroah-Hartman Suggested-by: Xu Yilun Reviewed-by: Russ Weight Signed-off-by: Marco Pagani Acked-by: Xu Yilun Link: https://lore.kernel.org/r/20240322171839.233864-1-marpagan@redhat.com Signed-off-by: Xu Yilun --- Documentation/driver-api/fpga/fpga-bridge.rst | 7 +++- drivers/fpga/fpga-bridge.c | 57 +++++++++++++++------------ include/linux/fpga/fpga-bridge.h | 10 +++-- 3 files changed, 43 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/Documentation/driver-api/fpga/fpga-bridge.rst b/Documentation/driver-api/fpga/fpga-bridge.rst index 604208534095..833f68fb0700 100644 --- a/Documentation/driver-api/fpga/fpga-bridge.rst +++ b/Documentation/driver-api/fpga/fpga-bridge.rst @@ -6,9 +6,12 @@ API to implement a new FPGA bridge * struct fpga_bridge - The FPGA Bridge structure * struct fpga_bridge_ops - Low level Bridge driver ops -* fpga_bridge_register() - Create and register a bridge +* __fpga_bridge_register() - Create and register a bridge * fpga_bridge_unregister() - Unregister a bridge +The helper macro ``fpga_bridge_register()`` automatically sets +the module that registers the FPGA bridge as the owner. + .. kernel-doc:: include/linux/fpga/fpga-bridge.h :functions: fpga_bridge @@ -16,7 +19,7 @@ API to implement a new FPGA bridge :functions: fpga_bridge_ops .. kernel-doc:: drivers/fpga/fpga-bridge.c - :functions: fpga_bridge_register + :functions: __fpga_bridge_register .. kernel-doc:: drivers/fpga/fpga-bridge.c :functions: fpga_bridge_unregister diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c index 79c473b3c7c3..8ef395b49bf8 100644 --- a/drivers/fpga/fpga-bridge.c +++ b/drivers/fpga/fpga-bridge.c @@ -55,33 +55,26 @@ int fpga_bridge_disable(struct fpga_bridge *bridge) } EXPORT_SYMBOL_GPL(fpga_bridge_disable); -static struct fpga_bridge *__fpga_bridge_get(struct device *dev, +static struct fpga_bridge *__fpga_bridge_get(struct device *bridge_dev, struct fpga_image_info *info) { struct fpga_bridge *bridge; - int ret = -ENODEV; - bridge = to_fpga_bridge(dev); + bridge = to_fpga_bridge(bridge_dev); bridge->info = info; - if (!mutex_trylock(&bridge->mutex)) { - ret = -EBUSY; - goto err_dev; - } + if (!mutex_trylock(&bridge->mutex)) + return ERR_PTR(-EBUSY); - if (!try_module_get(dev->parent->driver->owner)) - goto err_ll_mod; + if (!try_module_get(bridge->br_ops_owner)) { + mutex_unlock(&bridge->mutex); + return ERR_PTR(-ENODEV); + } dev_dbg(&bridge->dev, "get\n"); return bridge; - -err_ll_mod: - mutex_unlock(&bridge->mutex); -err_dev: - put_device(dev); - return ERR_PTR(ret); } /** @@ -98,13 +91,18 @@ err_dev: struct fpga_bridge *of_fpga_bridge_get(struct device_node *np, struct fpga_image_info *info) { - struct device *dev; + struct fpga_bridge *bridge; + struct device *bridge_dev; - dev = class_find_device_by_of_node(&fpga_bridge_class, np); - if (!dev) + bridge_dev = class_find_device_by_of_node(&fpga_bridge_class, np); + if (!bridge_dev) return ERR_PTR(-ENODEV); - return __fpga_bridge_get(dev, info); + bridge = __fpga_bridge_get(bridge_dev, info); + if (IS_ERR(bridge)) + put_device(bridge_dev); + + return bridge; } EXPORT_SYMBOL_GPL(of_fpga_bridge_get); @@ -125,6 +123,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data) struct fpga_bridge *fpga_bridge_get(struct device *dev, struct fpga_image_info *info) { + struct fpga_bridge *bridge; struct device *bridge_dev; bridge_dev = class_find_device(&fpga_bridge_class, NULL, dev, @@ -132,7 +131,11 @@ struct 
fpga_bridge *fpga_bridge_get(struct device *dev, if (!bridge_dev) return ERR_PTR(-ENODEV); - return __fpga_bridge_get(bridge_dev, info); + bridge = __fpga_bridge_get(bridge_dev, info); + if (IS_ERR(bridge)) + put_device(bridge_dev); + + return bridge; } EXPORT_SYMBOL_GPL(fpga_bridge_get); @@ -146,7 +149,7 @@ void fpga_bridge_put(struct fpga_bridge *bridge) dev_dbg(&bridge->dev, "put\n"); bridge->info = NULL; - module_put(bridge->dev.parent->driver->owner); + module_put(bridge->br_ops_owner); mutex_unlock(&bridge->mutex); put_device(&bridge->dev); } @@ -316,18 +319,19 @@ static struct attribute *fpga_bridge_attrs[] = { ATTRIBUTE_GROUPS(fpga_bridge); /** - * fpga_bridge_register - create and register an FPGA Bridge device + * __fpga_bridge_register - create and register an FPGA Bridge device * @parent: FPGA bridge device from pdev * @name: FPGA bridge name * @br_ops: pointer to structure of fpga bridge ops * @priv: FPGA bridge private data + * @owner: owner module containing the br_ops * * Return: struct fpga_bridge pointer or ERR_PTR() */ struct fpga_bridge * -fpga_bridge_register(struct device *parent, const char *name, - const struct fpga_bridge_ops *br_ops, - void *priv) +__fpga_bridge_register(struct device *parent, const char *name, + const struct fpga_bridge_ops *br_ops, + void *priv, struct module *owner) { struct fpga_bridge *bridge; int id, ret; @@ -357,6 +361,7 @@ fpga_bridge_register(struct device *parent, const char *name, bridge->name = name; bridge->br_ops = br_ops; + bridge->br_ops_owner = owner; bridge->priv = priv; bridge->dev.groups = br_ops->groups; @@ -386,7 +391,7 @@ error_kfree: return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(fpga_bridge_register); +EXPORT_SYMBOL_GPL(__fpga_bridge_register); /** * fpga_bridge_unregister - unregister an FPGA bridge diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h index 223da48a6d18..94c4edd047e5 100644 --- a/include/linux/fpga/fpga-bridge.h +++ b/include/linux/fpga/fpga-bridge.h @@ -45,6 +45,7 @@ struct fpga_bridge_info { * @dev: FPGA bridge device * @mutex: enforces exclusive reference to bridge * @br_ops: pointer to struct of FPGA bridge ops + * @br_ops_owner: module containing the br_ops * @info: fpga image specific information * @node: FPGA bridge list node * @priv: low level driver private date @@ -54,6 +55,7 @@ struct fpga_bridge { struct device dev; struct mutex mutex; /* for exclusive reference to bridge */ const struct fpga_bridge_ops *br_ops; + struct module *br_ops_owner; struct fpga_image_info *info; struct list_head node; void *priv; @@ -79,10 +81,12 @@ int of_fpga_bridge_get_to_list(struct device_node *np, struct fpga_image_info *info, struct list_head *bridge_list); +#define fpga_bridge_register(parent, name, br_ops, priv) \ + __fpga_bridge_register(parent, name, br_ops, priv, THIS_MODULE) struct fpga_bridge * -fpga_bridge_register(struct device *parent, const char *name, - const struct fpga_bridge_ops *br_ops, - void *priv); +__fpga_bridge_register(struct device *parent, const char *name, + const struct fpga_bridge_ops *br_ops, void *priv, + struct module *owner); void fpga_bridge_unregister(struct fpga_bridge *br); #endif /* _LINUX_FPGA_BRIDGE_H */ -- cgit From 63c15822b8dd02a2423cfd92232245ace3f7a11b Mon Sep 17 00:00:00 2001 From: Syed Nayyar Waris Date: Wed, 27 Mar 2024 16:23:38 +0100 Subject: lib/bitmap: add bitmap_{read,write}() The two new functions allow reading/writing values of length up to BITS_PER_LONG bits at arbitrary position in the bitmap. 
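A rough usage sketch (illustrative only, not part of the patch); the width and offset are arbitrary and chosen so the value crosses a word boundary on 64-bit, which is exactly the case the helpers hide from the caller:

	/* assumes <linux/bitmap.h>; field width and offset are made up */
	DECLARE_BITMAP(map, 128) = { 0 };
	unsigned long v;

	bitmap_write(map, 0xabc, 58, 12);	/* value is clamped to 12 bits */
	v = bitmap_read(map, 58, 12);		/* v == 0xabc */
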
The code was taken from "bitops: Introduce the for_each_set_clump macro" by Syed Nayyar Waris with a number of changes and simplifications: - instead of using roundup(), which adds an unnecessary dependency on , we calculate space as BITS_PER_LONG-offset; - indentation is reduced by not using else-clauses (suggested by checkpatch for bitmap_get_value()); - bitmap_get_value()/bitmap_set_value() are renamed to bitmap_read() and bitmap_write(); - some redundant computations are omitted. Cc: Arnd Bergmann Signed-off-by: Syed Nayyar Waris Signed-off-by: William Breathitt Gray Link: https://lore.kernel.org/lkml/fe12eedf3666f4af5138de0e70b67a07c7f40338.1592224129.git.syednwaris@gmail.com/ Suggested-by: Yury Norov Co-developed-by: Alexander Potapenko Signed-off-by: Alexander Potapenko Reviewed-by: Andy Shevchenko Acked-by: Yury Norov Signed-off-by: Yury Norov Signed-off-by: Alexander Lobakin Signed-off-by: David S. Miller --- include/linux/bitmap.h | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index aa4096126553..914c23e96f26 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -83,6 +83,10 @@ struct device; * bitmap_to_arr64(buf, src, nbits) Copy nbits from buf to u64[] dst * bitmap_get_value8(map, start) Get 8bit value from map at start * bitmap_set_value8(map, value, start) Set 8bit value to map at start + * bitmap_read(map, start, nbits) Read an nbits-sized value from + * map at start + * bitmap_write(map, value, start, nbits) Write an nbits-sized value to + * map at start * * Note, bitmap_zero() and bitmap_fill() operate over the region of * unsigned longs, that is, bits behind bitmap till the unsigned long @@ -754,6 +758,79 @@ static inline void bitmap_set_value8(unsigned long *map, unsigned long value, map[index] |= value << offset; } +/** + * bitmap_read - read a value of n-bits from the memory region + * @map: address to the bitmap memory region + * @start: bit offset of the n-bit value + * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG + * + * Returns: value of @nbits bits located at the @start bit offset within the + * @map memory region. For @nbits = 0 and @nbits > BITS_PER_LONG the return + * value is undefined. + */ +static inline unsigned long bitmap_read(const unsigned long *map, + unsigned long start, + unsigned long nbits) +{ + size_t index = BIT_WORD(start); + unsigned long offset = start % BITS_PER_LONG; + unsigned long space = BITS_PER_LONG - offset; + unsigned long value_low, value_high; + + if (unlikely(!nbits || nbits > BITS_PER_LONG)) + return 0; + + if (space >= nbits) + return (map[index] >> offset) & BITMAP_LAST_WORD_MASK(nbits); + + value_low = map[index] & BITMAP_FIRST_WORD_MASK(start); + value_high = map[index + 1] & BITMAP_LAST_WORD_MASK(start + nbits); + return (value_low >> offset) | (value_high << space); +} + +/** + * bitmap_write - write n-bit value within a memory region + * @map: address to the bitmap memory region + * @value: value to write, clamped to nbits + * @start: bit offset of the n-bit value + * @nbits: size of value in bits, nonzero, up to BITS_PER_LONG. + * + * bitmap_write() behaves as-if implemented as @nbits calls of __assign_bit(), + * i.e. bits beyond @nbits are ignored: + * + * for (bit = 0; bit < nbits; bit++) + * __assign_bit(start + bit, bitmap, val & BIT(bit)); + * + * For @nbits == 0 and @nbits > BITS_PER_LONG no writes are performed. 
+ */ +static inline void bitmap_write(unsigned long *map, unsigned long value, + unsigned long start, unsigned long nbits) +{ + size_t index; + unsigned long offset; + unsigned long space; + unsigned long mask; + bool fit; + + if (unlikely(!nbits || nbits > BITS_PER_LONG)) + return; + + mask = BITMAP_LAST_WORD_MASK(nbits); + value &= mask; + offset = start % BITS_PER_LONG; + space = BITS_PER_LONG - offset; + fit = space >= nbits; + index = BIT_WORD(start); + + map[index] &= (fit ? (~(mask << offset)) : ~BITMAP_FIRST_WORD_MASK(start)); + map[index] |= value << offset; + if (fit) + return; + + map[index + 1] &= BITMAP_FIRST_WORD_MASK(start + nbits); + map[index + 1] |= (value >> space); +} + #endif /* __ASSEMBLY__ */ #endif /* __LINUX_BITMAP_H */ -- cgit From 72cc1980a0ef3ccad0d539e7dace63d0d7d432a4 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Wed, 27 Mar 2024 16:23:41 +0100 Subject: bitops: add missing prototype check Commit 8238b4579866 ("wait_on_bit: add an acquire memory barrier") added a new bitop, test_bit_acquire(), with proper wrapping in order to try to optimize it at compile-time, but missed the list of bitops used for checking their prototypes a bit below. The functions added have consistent prototypes, so that no more changes are required and no functional changes take place. Fixes: 8238b4579866 ("wait_on_bit: add an acquire memory barrier") Reviewed-by: Przemek Kitszel Signed-off-by: Alexander Lobakin Signed-off-by: David S. Miller --- include/linux/bitops.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 2ba557e067fe..f7f5a783da2a 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -80,6 +80,7 @@ __check_bitop_pr(__test_and_set_bit); __check_bitop_pr(__test_and_clear_bit); __check_bitop_pr(__test_and_change_bit); __check_bitop_pr(test_bit); +__check_bitop_pr(test_bit_acquire); #undef __check_bitop_pr -- cgit From 7d8296b250f2eed73f1758607926d4d258dea5d4 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Wed, 27 Mar 2024 16:23:42 +0100 Subject: bitops: make BYTES_TO_BITS() treewide-available Avoid open-coding that simple expression each time by moving BYTES_TO_BITS() from the probes code to to export it to the rest of the kernel. Simplify the macro while at it. `BITS_PER_LONG / sizeof(long)` always equals to %BITS_PER_BYTE, regardless of the target architecture. Do the same for the tools ecosystem as well (incl. its version of bitops.h). The previous implementation had its implicit type of long, while the new one is int, so adjust the format literal accordingly in the perf code. Suggested-by: Andy Shevchenko Reviewed-by: Przemek Kitszel Acked-by: Yury Norov Signed-off-by: Alexander Lobakin Signed-off-by: David S. 
Miller --- include/linux/bitops.h | 2 ++ kernel/trace/trace_probe.c | 2 -- tools/include/linux/bitops.h | 2 ++ tools/perf/util/probe-finder.c | 4 +--- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bitops.h b/include/linux/bitops.h index f7f5a783da2a..e0cd09eb91cd 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -21,6 +21,8 @@ #define BITS_TO_U32(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32)) #define BITS_TO_BYTES(nr) __KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char)) +#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_BYTE) + extern unsigned int __sw_hweight8(unsigned int w); extern unsigned int __sw_hweight16(unsigned int w); extern unsigned int __sw_hweight32(unsigned int w); diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index dfe3ee6035ec..87337a0c8e03 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -1180,8 +1180,6 @@ parse_probe_arg(char *arg, const struct fetch_type *type, return ret; } -#define BYTES_TO_BITS(nb) ((BITS_PER_LONG * (nb)) / sizeof(long)) - /* Bitfield type needs to be parsed into a fetch function */ static int __parse_bitfield_probe_arg(const char *bf, const struct fetch_type *t, diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h index 7319f6ced108..272f15d0e434 100644 --- a/tools/include/linux/bitops.h +++ b/tools/include/linux/bitops.h @@ -20,6 +20,8 @@ #define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(u32)) #define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(char)) +#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_BYTE) + extern unsigned int __sw_hweight8(unsigned int w); extern unsigned int __sw_hweight16(unsigned int w); extern unsigned int __sw_hweight32(unsigned int w); diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index c8923375e30d..630e16c54ed5 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -186,8 +186,6 @@ static_var: return ret2; } -#define BYTES_TO_BITS(nb) ((nb) * BITS_PER_LONG / sizeof(long)) - static int convert_variable_type(Dwarf_Die *vr_die, struct probe_trace_arg *tvar, const char *cast, bool user_access) @@ -217,7 +215,7 @@ static int convert_variable_type(Dwarf_Die *vr_die, total = dwarf_bytesize(vr_die); if (boffs < 0 || total < 0) return -ENOENT; - ret = snprintf(buf, 16, "b%d@%d/%zd", bsize, boffs, + ret = snprintf(buf, 16, "b%d@%d/%d", bsize, boffs, BYTES_TO_BITS(total)); goto formatted; } -- cgit From 5259401ef8f4b010bc0f9740868e9147ccc45899 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Wed, 27 Mar 2024 16:23:43 +0100 Subject: bitops: let the compiler optimize {__,}assign_bit() Since commit b03fc1173c0c ("bitops: let optimize out non-atomic bitops on compile-time constants"), the compilers are able to expand inline bitmap operations to compile-time initializers when possible. However, during the round of replacement if-__set-else-__clear with __assign_bit() as per Andy's advice, bloat-o-meter showed +1024 bytes difference in object code size for one module (even one function), where the pattern: DECLARE_BITMAP(foo) = { }; // on the stack, zeroed if (a) __set_bit(const_bit_num, foo); if (b) __set_bit(another_const_bit_num, foo); ... is heavily used, although there should be no difference: the bitmap is zeroed, so the second half of __assign_bit() should be compiled-out as a no-op. 
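For illustration only (not from the patch), the same on-stack pattern written with __assign_bit(); with the macro form the compiler can fold both calls into constant bitmap words whenever a and b are known at compile time:

	DECLARE_BITMAP(foo, 64) = { };	/* on the stack, zeroed; size is arbitrary */

	__assign_bit(const_bit_num, foo, a);
	__assign_bit(another_const_bit_num, foo, b);
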
I either missed the fact that __assign_bit() has bitmap pointer marked as `volatile` (as we usually do for bitops) or was hoping that the compilers would at least try to look past the `volatile` for __always_inline functions. Anyhow, due to that attribute, the compilers were always compiling the whole expression and no mentioned compile-time optimizations were working. Convert __assign_bit() to a macro since it's a very simple if-else and all of the checks are performed inside __set_bit() and __clear_bit(), thus that wrapper has to be as transparent as possible. After that change, despite it showing only -20 bytes change for vmlinux (due to that it's still relatively unpopular), no drastic code size changes happen when replacing if-set-else-clear for onstack bitmaps with __assign_bit(), meaning the compiler now expands them to the actual operations will all the expected optimizations. Atomic assign_bit() is less affected due to its nature, but let's convert it to a macro as well to keep the code consistent and not leave a place for possible suboptimal codegen. Moreover, with certain kernel configuration it actually gives some saves (x86): do_ip_setsockopt 4154 4099 -55 Suggested-by: Yury Norov # assign_bit(), too Cc: Andy Shevchenko Reviewed-by: Przemek Kitszel Acked-by: Yury Norov Signed-off-by: Alexander Lobakin Signed-off-by: David S. Miller --- include/linux/bitops.h | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bitops.h b/include/linux/bitops.h index e0cd09eb91cd..b25dc8742124 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -275,23 +275,11 @@ static inline unsigned long fns(unsigned long word, unsigned int n) * @addr: the address to start counting from * @value: the value to assign */ -static __always_inline void assign_bit(long nr, volatile unsigned long *addr, - bool value) -{ - if (value) - set_bit(nr, addr); - else - clear_bit(nr, addr); -} +#define assign_bit(nr, addr, value) \ + ((value) ? set_bit((nr), (addr)) : clear_bit((nr), (addr))) -static __always_inline void __assign_bit(long nr, volatile unsigned long *addr, - bool value) -{ - if (value) - __set_bit(nr, addr); - else - __clear_bit(nr, addr); -} +#define __assign_bit(nr, addr, value) \ + ((value) ? __set_bit((nr), (addr)) : __clear_bit((nr), (addr))) /** * __ptr_set_bit - Set bit in a pointer's value -- cgit From 8fab6a9d72e4fa0b0607b3a8011a909cabae79f7 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Wed, 27 Mar 2024 16:23:44 +0100 Subject: linkmode: convert linkmode_{test,set,clear,mod}_bit() to macros Since commit b03fc1173c0c ("bitops: let optimize out non-atomic bitops on compile-time constants"), the non-atomic bitops are macros which can be expanded by the compilers into compile-time expressions, which will result in better optimized object code. Unfortunately, turned out that passing `volatile` to those macros discards any possibility of optimization, as the compilers then don't even try to look whether the passed bitmap is known at compilation time. In addition to that, the mentioned linkmode helpers are marked with `inline`, not `__always_inline`, meaning that it's not guaranteed some compiler won't uninline them for no reason, which will also effectively prevent them from being optimized (it's a well-known thing the compilers sometimes uninline `2 + 2`). Convert linkmode_*_bit() from inlines to macros. 
Their calling convention are 1:1 with the corresponding bitops, so that it's not even needed to enumerate and map the arguments, only the names. No changes in vmlinux' object code (compiled by LLVM for x86_64) whatsoever, but that doesn't necessarily means the change is meaningless. Reviewed-by: Przemek Kitszel Acked-by: Jakub Kicinski Acked-by: Yury Norov Signed-off-by: Alexander Lobakin Signed-off-by: David S. Miller --- include/linux/linkmode.h | 27 ++++----------------------- 1 file changed, 4 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h index 287f590ed56b..d94bfd9ac8cc 100644 --- a/include/linux/linkmode.h +++ b/include/linux/linkmode.h @@ -43,29 +43,10 @@ static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1, return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); } -static inline void linkmode_set_bit(int nr, volatile unsigned long *addr) -{ - __set_bit(nr, addr); -} - -static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr) -{ - __clear_bit(nr, addr); -} - -static inline void linkmode_mod_bit(int nr, volatile unsigned long *addr, - int set) -{ - if (set) - linkmode_set_bit(nr, addr); - else - linkmode_clear_bit(nr, addr); -} - -static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr) -{ - return test_bit(nr, addr); -} +#define linkmode_test_bit test_bit +#define linkmode_set_bit __set_bit +#define linkmode_clear_bit __clear_bit +#define linkmode_mod_bit __assign_bit static inline void linkmode_set_bit_array(const int *array, int array_size, unsigned long *addr) -- cgit From a37fbe666c016fd89e4460d0ebfcea05baba46dc Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Wed, 27 Mar 2024 16:23:49 +0100 Subject: bitmap: introduce generic optimized bitmap_size() The number of times yet another open coded `BITS_TO_LONGS(nbits) * sizeof(long)` can be spotted is huge. Some generic helper is long overdue. Add one, bitmap_size(), but with one detail. BITS_TO_LONGS() uses DIV_ROUND_UP(). The latter works well when both divident and divisor are compile-time constants or when the divisor is not a pow-of-2. When it is however, the compilers sometimes tend to generate suboptimal code (GCC 13): 48 83 c0 3f add $0x3f,%rax 48 c1 e8 06 shr $0x6,%rax 48 8d 14 c5 00 00 00 00 lea 0x0(,%rax,8),%rdx %BITS_PER_LONG is always a pow-2 (either 32 or 64), but GCC still does full division of `nbits + 63` by it and then multiplication by 8. Instead of BITS_TO_LONGS(), use ALIGN() and then divide by 8. GCC: 8d 50 3f lea 0x3f(%rax),%edx c1 ea 03 shr $0x3,%edx 81 e2 f8 ff ff 1f and $0x1ffffff8,%edx Now it shifts `nbits + 63` by 3 positions (IOW performs fast division by 8) and then masks bits[2:0]. bloat-o-meter: add/remove: 0/0 grow/shrink: 20/133 up/down: 156/-773 (-617) Clang does it better and generates the same code before/after starting from -O1, except that with the ALIGN() approach it uses %edx and thus still saves some bytes: add/remove: 0/0 grow/shrink: 9/133 up/down: 18/-538 (-520) Note that we can't expand DIV_ROUND_UP() by adding a check and using this approach there, as it's used in array declarations where expressions are not allowed. Add this helper to tools/ as well. Reviewed-by: Przemek Kitszel Acked-by: Yury Norov Signed-off-by: Alexander Lobakin Signed-off-by: David S. 
Miller --- drivers/md/dm-clone-metadata.c | 5 ----- drivers/s390/cio/idset.c | 2 +- include/linux/bitmap.h | 8 +++++--- include/linux/cpumask.h | 2 +- lib/math/prime_numbers.c | 2 -- tools/include/linux/bitmap.h | 7 ++++--- 6 files changed, 11 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/md/dm-clone-metadata.c b/drivers/md/dm-clone-metadata.c index c43d55672bce..47c1fa7aad8b 100644 --- a/drivers/md/dm-clone-metadata.c +++ b/drivers/md/dm-clone-metadata.c @@ -465,11 +465,6 @@ static void __destroy_persistent_data_structures(struct dm_clone_metadata *cmd) /*---------------------------------------------------------------------------*/ -static size_t bitmap_size(unsigned long nr_bits) -{ - return BITS_TO_LONGS(nr_bits) * sizeof(long); -} - static int __dirty_map_init(struct dirty_map *dmap, unsigned long nr_words, unsigned long nr_regions) { diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c index 0a1105a483bf..e5f28370a903 100644 --- a/drivers/s390/cio/idset.c +++ b/drivers/s390/cio/idset.c @@ -18,7 +18,7 @@ struct idset { static inline unsigned long idset_bitmap_size(int num_ssid, int num_id) { - return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long); + return bitmap_size(size_mul(num_ssid, num_id)); } static struct idset *idset_new(int num_ssid, int num_id) diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 914c23e96f26..363e0b184a45 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -226,9 +226,11 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig, #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) +#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE) + static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) { - unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + unsigned int len = bitmap_size(nbits); if (small_const_nbits(nbits)) *dst = 0; @@ -238,7 +240,7 @@ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) { - unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + unsigned int len = bitmap_size(nbits); if (small_const_nbits(nbits)) *dst = ~0UL; @@ -249,7 +251,7 @@ static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, unsigned int nbits) { - unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + unsigned int len = bitmap_size(nbits); if (small_const_nbits(nbits)) *dst = *src; diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 1c29947db848..6519f9c77709 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -853,7 +853,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) */ static inline unsigned int cpumask_size(void) { - return BITS_TO_LONGS(large_cpumask_bits) * sizeof(long); + return bitmap_size(large_cpumask_bits); } /* diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c index d42cebf7407f..d3b64b10da1c 100644 --- a/lib/math/prime_numbers.c +++ b/lib/math/prime_numbers.c @@ -6,8 +6,6 @@ #include #include -#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long)) - struct primes { struct rcu_head rcu; unsigned long last, sz; diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h index 8c6852dba04f..210c13b1b857 
100644 --- a/tools/include/linux/bitmap.h +++ b/tools/include/linux/bitmap.h @@ -26,13 +26,14 @@ bool __bitmap_intersects(const unsigned long *bitmap1, #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) +#define bitmap_size(nbits) (ALIGN(nbits, BITS_PER_LONG) / BITS_PER_BYTE) + static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) { if (small_const_nbits(nbits)) *dst = 0UL; else { - int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); - memset(dst, 0, len); + memset(dst, 0, bitmap_size(nbits)); } } @@ -84,7 +85,7 @@ static inline void bitmap_or(unsigned long *dst, const unsigned long *src1, */ static inline unsigned long *bitmap_zalloc(int nbits) { - return calloc(1, BITS_TO_LONGS(nbits) * sizeof(unsigned long)); + return calloc(1, bitmap_size(nbits)); } /* -- cgit From b44759705f7dc95d539d145b4c2edcaf079e7c33 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Wed, 27 Mar 2024 16:23:50 +0100 Subject: bitmap: make bitmap_{get,set}_value8() use bitmap_{read,write}() Now that we have generic bitmap_read() and bitmap_write(), which are inline and try to take care of non-bound-crossing and aligned cases to keep them optimized, collapse bitmap_{get,set}_value8() into simple wrappers around the former ones. bloat-o-meter shows no difference in vmlinux and -2 bytes for gpio-pca953x.ko, which says the optimization didn't suffer due to that change. The converted helpers have the value width embedded and always compile-time constant and that helps a lot. Suggested-by: Yury Norov Signed-off-by: Yury Norov Signed-off-by: Alexander Lobakin Signed-off-by: David S. Miller --- include/linux/bitmap.h | 38 +++++--------------------------------- 1 file changed, 5 insertions(+), 33 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 363e0b184a45..8c4768c44a01 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -727,39 +727,6 @@ static inline void bitmap_from_u64(unsigned long *dst, u64 mask) bitmap_from_arr64(dst, &mask, 64); } -/** - * bitmap_get_value8 - get an 8-bit value within a memory region - * @map: address to the bitmap memory region - * @start: bit offset of the 8-bit value; must be a multiple of 8 - * - * Returns the 8-bit value located at the @start bit offset within the @src - * memory region. 
- */ -static inline unsigned long bitmap_get_value8(const unsigned long *map, - unsigned long start) -{ - const size_t index = BIT_WORD(start); - const unsigned long offset = start % BITS_PER_LONG; - - return (map[index] >> offset) & 0xFF; -} - -/** - * bitmap_set_value8 - set an 8-bit value within a memory region - * @map: address to the bitmap memory region - * @value: the 8-bit value; values wider than 8 bits may clobber bitmap - * @start: bit offset of the 8-bit value; must be a multiple of 8 - */ -static inline void bitmap_set_value8(unsigned long *map, unsigned long value, - unsigned long start) -{ - const size_t index = BIT_WORD(start); - const unsigned long offset = start % BITS_PER_LONG; - - map[index] &= ~(0xFFUL << offset); - map[index] |= value << offset; -} - /** * bitmap_read - read a value of n-bits from the memory region * @map: address to the bitmap memory region @@ -833,6 +800,11 @@ static inline void bitmap_write(unsigned long *map, unsigned long value, map[index + 1] |= (value >> space); } +#define bitmap_get_value8(map, start) \ + bitmap_read(map, start, BITS_PER_BYTE) +#define bitmap_set_value8(map, value, start) \ + bitmap_write(map, value, start, BITS_PER_BYTE) + #endif /* __ASSEMBLY__ */ #endif /* __LINUX_BITMAP_H */ -- cgit From 117aef12a7b1b797bce9f66b156c65eab850b5b5 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Wed, 27 Mar 2024 16:23:52 +0100 Subject: ip_tunnel: use a separate struct to store tunnel params in the kernel Unlike IPv6 tunnels which use purely-kernel __ip6_tnl_parm structure to store params inside the kernel, IPv4 tunnel code uses the same ip_tunnel_parm which is being used to talk with the userspace. This makes it difficult to alter or add any fields or use a different format for whatever data. Define struct ip_tunnel_parm_kern, a 1:1 copy of ip_tunnel_parm for now, and use it throughout the code. Define the pieces, where the copy user <-> kernel happens, as standalone functions, and copy the data there field-by-field, so that the kernel-side structure could be easily modified later on and the users wouldn't have to care about this. Reviewed-by: Simon Horman Signed-off-by: Alexander Lobakin Signed-off-by: David S. 
Miller --- .../net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 30 ++++++----- .../net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 2 +- .../net/ethernet/mellanox/mlxsw/spectrum_span.c | 4 +- include/linux/netdevice.h | 7 +-- include/net/ip_tunnels.h | 25 +++++++-- net/ipv4/ip_gre.c | 17 +++--- net/ipv4/ip_tunnel.c | 63 +++++++++++++++++----- net/ipv4/ip_tunnel_core.c | 2 +- net/ipv4/ip_vti.c | 12 ++--- net/ipv4/ipip.c | 12 ++--- net/ipv4/ipmr.c | 2 +- net/ipv6/addrconf.c | 3 +- net/ipv6/sit.c | 33 ++++++------ 13 files changed, 137 insertions(+), 75 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 3340b4a694c3..d67df358a52f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -8,7 +8,7 @@ #include "spectrum_ipip.h" #include "reg.h" -struct ip_tunnel_parm +struct ip_tunnel_parm_kern mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev) { struct ip_tunnel *tun = netdev_priv(ol_dev); @@ -24,7 +24,8 @@ mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev) return tun->parms; } -static bool mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm *parms) +static bool +mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm_kern *parms) { return !!(parms->i_flags & TUNNEL_KEY); } @@ -34,7 +35,8 @@ static bool mlxsw_sp_ipip_parms6_has_ikey(const struct __ip6_tnl_parm *parms) return !!(parms->i_flags & TUNNEL_KEY); } -static bool mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm *parms) +static bool +mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm_kern *parms) { return !!(parms->o_flags & TUNNEL_KEY); } @@ -44,7 +46,7 @@ static bool mlxsw_sp_ipip_parms6_has_okey(const struct __ip6_tnl_parm *parms) return !!(parms->o_flags & TUNNEL_KEY); } -static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm *parms) +static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm_kern *parms) { return mlxsw_sp_ipip_parms4_has_ikey(parms) ? be32_to_cpu(parms->i_key) : 0; @@ -56,7 +58,7 @@ static u32 mlxsw_sp_ipip_parms6_ikey(const struct __ip6_tnl_parm *parms) be32_to_cpu(parms->i_key) : 0; } -static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm *parms) +static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm_kern *parms) { return mlxsw_sp_ipip_parms4_has_okey(parms) ? 
be32_to_cpu(parms->o_key) : 0; @@ -69,7 +71,7 @@ static u32 mlxsw_sp_ipip_parms6_okey(const struct __ip6_tnl_parm *parms) } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm *parms) +mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm_kern *parms) { return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.saddr }; } @@ -81,7 +83,7 @@ mlxsw_sp_ipip_parms6_saddr(const struct __ip6_tnl_parm *parms) } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm *parms) +mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm_kern *parms) { return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.daddr }; } @@ -96,7 +98,7 @@ union mlxsw_sp_l3addr mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, const struct net_device *ol_dev) { - struct ip_tunnel_parm parms4; + struct ip_tunnel_parm_kern parms4; struct __ip6_tnl_parm parms6; switch (proto) { @@ -115,7 +117,9 @@ mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev) { - struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); + struct ip_tunnel_parm_kern parms4; + + parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); return mlxsw_sp_ipip_parms4_daddr(&parms4).addr4; } @@ -124,7 +128,7 @@ static union mlxsw_sp_l3addr mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, const struct net_device *ol_dev) { - struct ip_tunnel_parm parms4; + struct ip_tunnel_parm_kern parms4; struct __ip6_tnl_parm parms6; switch (proto) { @@ -150,7 +154,7 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr) static struct mlxsw_sp_ipip_parms mlxsw_sp_ipip_netdev_parms_init_gre4(const struct net_device *ol_dev) { - struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); + struct ip_tunnel_parm_kern parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); return (struct mlxsw_sp_ipip_parms) { .proto = MLXSW_SP_L3_PROTO_IPV4, @@ -187,8 +191,8 @@ mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp, { u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb); + struct ip_tunnel_parm_kern parms; char rtdp_pl[MLXSW_REG_RTDP_LEN]; - struct ip_tunnel_parm parms; unsigned int type_check; bool has_ikey; u32 daddr4; @@ -252,7 +256,7 @@ static struct mlxsw_sp_rif_ipip_lb_config mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp, const struct net_device *ol_dev) { - struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); + struct ip_tunnel_parm_kern parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(&parms) ? 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h index a35f009da561..a66173779641 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -9,7 +9,7 @@ #include #include -struct ip_tunnel_parm +struct ip_tunnel_parm_kern mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev); struct __ip6_tnl_parm mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index af50ff9e5f26..f0cbc6b9b041 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -413,8 +413,8 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev, __be32 *saddrp, __be32 *daddrp) { struct ip_tunnel *tun = netdev_priv(to_dev); + struct ip_tunnel_parm_kern parms; struct net_device *dev = NULL; - struct ip_tunnel_parm parms; struct rtable *rt = NULL; struct flowi4 fl4; @@ -451,7 +451,7 @@ mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp, const struct net_device *to_dev, struct mlxsw_sp_span_parms *sparmsp) { - struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev); + struct ip_tunnel_parm_kern tparm = mlxsw_sp_ipip_netdev_parms4(to_dev); union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr }; union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr }; bool inherit_tos = tparm.iph.tos & 0x1; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index e41d30ebaca6..55c7cf9404a4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -59,7 +59,7 @@ struct ethtool_ops; struct kernel_hwtstamp_config; struct phy_device; struct dsa_port; -struct ip_tunnel_parm; +struct ip_tunnel_parm_kern; struct macsec_context; struct macsec_ops; struct netdev_name_node; @@ -1327,7 +1327,7 @@ struct netdev_net_notifier { * queue id bound to an AF_XDP socket. The flags field specifies if * only RX, only Tx, or both should be woken up using the flags * XDP_WAKEUP_RX and XDP_WAKEUP_TX. - * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p, + * int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm_kern *p, * int cmd); * Add, change, delete or get information on an IPv4 tunnel. 
* struct net_device *(*ndo_get_peer_dev)(struct net_device *dev); @@ -1583,7 +1583,8 @@ struct net_device_ops { int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); int (*ndo_tunnel_ctl)(struct net_device *dev, - struct ip_tunnel_parm *p, int cmd); + struct ip_tunnel_parm_kern *p, + int cmd); struct net_device * (*ndo_get_peer_dev)(struct net_device *dev); int (*ndo_fill_forward_path)(struct net_device_path_ctx *ctx, struct net_device_path *path); diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 5cd64bb2104d..20f0319ab149 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -110,6 +110,17 @@ struct ip_tunnel_prl_entry { struct metadata_dst; +/* Kernel-side copy of ip_tunnel_parm */ +struct ip_tunnel_parm_kern { + char name[IFNAMSIZ]; + int link; + __be16 i_flags; + __be16 o_flags; + __be32 i_key; + __be32 o_key; + struct iphdr iph; +}; + struct ip_tunnel { struct ip_tunnel __rcu *next; struct hlist_node hash_node; @@ -136,7 +147,7 @@ struct ip_tunnel { struct dst_cache dst_cache; - struct ip_tunnel_parm parms; + struct ip_tunnel_parm_kern parms; int mlink; int encap_hlen; /* Encap header length (FOU,GUE) */ @@ -291,7 +302,11 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, const struct iphdr *tnl_params, const u8 protocol); void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, const u8 proto, int tunnel_hlen); -int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); +int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, + int cmd); +bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp, + const void __user *data); +bool ip_tunnel_parm_to_user(void __user *data, struct ip_tunnel_parm_kern *kp); int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd); int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict); @@ -307,16 +322,16 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb, const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst, bool log_ecn_error); int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], - struct ip_tunnel_parm *p, __u32 fwmark); + struct ip_tunnel_parm_kern *p, __u32 fwmark); int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], - struct ip_tunnel_parm *p, __u32 fwmark); + struct ip_tunnel_parm_kern *p, __u32 fwmark); void ip_tunnel_setup(struct net_device *dev, unsigned int net_id); bool ip_tunnel_netlink_encap_parms(struct nlattr *data[], struct ip_tunnel_encap *encap); void ip_tunnel_netlink_parms(struct nlattr *data[], - struct ip_tunnel_parm *parms); + struct ip_tunnel_parm_kern *parms); extern const struct header_ops ip_tunnel_header_ops; __be16 ip_tunnel_parse_protocol(const struct sk_buff *skb); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 7b16c211b904..8853c065a985 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -785,7 +785,8 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu) } } -static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, +static int ipgre_tunnel_ctl(struct net_device *dev, + struct ip_tunnel_parm_kern *p, int cmd) { int err; @@ -1131,7 +1132,7 @@ static int erspan_validate(struct nlattr *tb[], struct nlattr *data[], static int ipgre_netlink_parms(struct net_device *dev, struct nlattr *data[], struct nlattr *tb[], - struct ip_tunnel_parm *parms, + struct ip_tunnel_parm_kern *parms, __u32 *fwmark) { struct ip_tunnel *t = 
netdev_priv(dev); @@ -1198,7 +1199,7 @@ static int ipgre_netlink_parms(struct net_device *dev, static int erspan_netlink_parms(struct net_device *dev, struct nlattr *data[], struct nlattr *tb[], - struct ip_tunnel_parm *parms, + struct ip_tunnel_parm_kern *parms, __u32 *fwmark) { struct ip_tunnel *t = netdev_priv(dev); @@ -1357,7 +1358,7 @@ static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { - struct ip_tunnel_parm p; + struct ip_tunnel_parm_kern p; __u32 fwmark = 0; int err; @@ -1375,7 +1376,7 @@ static int erspan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { - struct ip_tunnel_parm p; + struct ip_tunnel_parm_kern p; __u32 fwmark = 0; int err; @@ -1394,8 +1395,8 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct ip_tunnel *t = netdev_priv(dev); + struct ip_tunnel_parm_kern p; __u32 fwmark = t->fwmark; - struct ip_tunnel_parm p; int err; err = ipgre_newlink_encap_setup(dev, data); @@ -1423,8 +1424,8 @@ static int erspan_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct ip_tunnel *t = netdev_priv(dev); + struct ip_tunnel_parm_kern p; __u32 fwmark = t->fwmark; - struct ip_tunnel_parm p; int err; err = ipgre_newlink_encap_setup(dev, data); @@ -1496,7 +1497,7 @@ static size_t ipgre_get_size(const struct net_device *dev) static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct ip_tunnel *t = netdev_priv(dev); - struct ip_tunnel_parm *p = &t->parms; + struct ip_tunnel_parm_kern *p = &t->parms; __be16 o_flags = p->o_flags; if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 1b8d8ff9a237..d10f41f81463 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -56,7 +56,7 @@ static unsigned int ip_tunnel_hash(__be32 key, __be32 remote) IP_TNL_HASH_BITS); } -static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p, +static bool ip_tunnel_key_match(const struct ip_tunnel_parm_kern *p, __be16 flags, __be32 key) { if (p->i_flags & TUNNEL_KEY) { @@ -171,7 +171,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn, EXPORT_SYMBOL_GPL(ip_tunnel_lookup); static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn, - struct ip_tunnel_parm *parms) + struct ip_tunnel_parm_kern *parms) { unsigned int h; __be32 remote; @@ -206,7 +206,7 @@ static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t) } static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn, - struct ip_tunnel_parm *parms, + struct ip_tunnel_parm_kern *parms, int type) { __be32 remote = parms->iph.daddr; @@ -230,7 +230,7 @@ static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn, static struct net_device *__ip_tunnel_create(struct net *net, const struct rtnl_link_ops *ops, - struct ip_tunnel_parm *parms) + struct ip_tunnel_parm_kern *parms) { int err; struct ip_tunnel *tunnel; @@ -326,7 +326,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev) static struct ip_tunnel *ip_tunnel_create(struct net *net, struct ip_tunnel_net *itn, - struct ip_tunnel_parm *parms) + struct ip_tunnel_parm_kern *parms) { struct ip_tunnel *nt; struct net_device *dev; @@ -871,7 +871,7 @@ EXPORT_SYMBOL_GPL(ip_tunnel_xmit); static void ip_tunnel_update(struct ip_tunnel_net *itn, struct ip_tunnel *t, struct net_device *dev, - struct 
ip_tunnel_parm *p, + struct ip_tunnel_parm_kern *p, bool set_mtu, __u32 fwmark) { @@ -903,7 +903,8 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn, netdev_state_change(dev); } -int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd) +int ip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, + int cmd) { int err = 0; struct ip_tunnel *t = netdev_priv(dev); @@ -1005,16 +1006,52 @@ done: } EXPORT_SYMBOL_GPL(ip_tunnel_ctl); +bool ip_tunnel_parm_from_user(struct ip_tunnel_parm_kern *kp, + const void __user *data) +{ + struct ip_tunnel_parm p; + + if (copy_from_user(&p, data, sizeof(p))) + return false; + + strscpy(kp->name, p.name); + kp->link = p.link; + kp->i_flags = p.i_flags; + kp->o_flags = p.o_flags; + kp->i_key = p.i_key; + kp->o_key = p.o_key; + memcpy(&kp->iph, &p.iph, min(sizeof(kp->iph), sizeof(p.iph))); + + return true; +} +EXPORT_SYMBOL_GPL(ip_tunnel_parm_from_user); + +bool ip_tunnel_parm_to_user(void __user *data, struct ip_tunnel_parm_kern *kp) +{ + struct ip_tunnel_parm p; + + strscpy(p.name, kp->name); + p.link = kp->link; + p.i_flags = kp->i_flags; + p.o_flags = kp->o_flags; + p.i_key = kp->i_key; + p.o_key = kp->o_key; + memcpy(&p.iph, &kp->iph, min(sizeof(p.iph), sizeof(kp->iph))); + + return !copy_to_user(data, &p, sizeof(p)); +} +EXPORT_SYMBOL_GPL(ip_tunnel_parm_to_user); + int ip_tunnel_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) { - struct ip_tunnel_parm p; + struct ip_tunnel_parm_kern p; int err; - if (copy_from_user(&p, data, sizeof(p))) + if (!ip_tunnel_parm_from_user(&p, data)) return -EFAULT; err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, cmd); - if (!err && copy_to_user(data, &p, sizeof(p))) + if (!err && !ip_tunnel_parm_to_user(data, &p)) return -EFAULT; return err; } @@ -1093,7 +1130,7 @@ int ip_tunnel_init_net(struct net *net, unsigned int ip_tnl_net_id, struct rtnl_link_ops *ops, char *devname) { struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id); - struct ip_tunnel_parm parms; + struct ip_tunnel_parm_kern parms; unsigned int i; itn->rtnl_link_ops = ops; @@ -1171,7 +1208,7 @@ void ip_tunnel_delete_nets(struct list_head *net_list, unsigned int id, EXPORT_SYMBOL_GPL(ip_tunnel_delete_nets); int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[], - struct ip_tunnel_parm *p, __u32 fwmark) + struct ip_tunnel_parm_kern *p, __u32 fwmark) { struct ip_tunnel *nt; struct net *net = dev_net(dev); @@ -1225,7 +1262,7 @@ err_register_netdevice: EXPORT_SYMBOL_GPL(ip_tunnel_newlink); int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[], - struct ip_tunnel_parm *p, __u32 fwmark) + struct ip_tunnel_parm_kern *p, __u32 fwmark) { struct ip_tunnel *t; struct ip_tunnel *tunnel = netdev_priv(dev); diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 80ccd6661aa3..98f136b07191 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -1116,7 +1116,7 @@ bool ip_tunnel_netlink_encap_parms(struct nlattr *data[], EXPORT_SYMBOL_GPL(ip_tunnel_netlink_encap_parms); void ip_tunnel_netlink_parms(struct nlattr *data[], - struct ip_tunnel_parm *parms) + struct ip_tunnel_parm_kern *parms) { if (data[IFLA_IPTUN_LINK]) parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]); diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index ee587adb169f..7e595f619615 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -167,7 +167,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) { struct ip_tunnel *tunnel = 
netdev_priv(dev); - struct ip_tunnel_parm *parms = &tunnel->parms; + struct ip_tunnel_parm_kern *parms = &tunnel->parms; struct dst_entry *dst = skb_dst(skb); struct net_device *tdev; /* Device to other host */ int pkt_len = skb->len; @@ -373,7 +373,7 @@ static int vti4_err(struct sk_buff *skb, u32 info) } static int -vti_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd) +vti_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, int cmd) { int err = 0; @@ -531,7 +531,7 @@ static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[], } static void vti_netlink_parms(struct nlattr *data[], - struct ip_tunnel_parm *parms, + struct ip_tunnel_parm_kern *parms, __u32 *fwmark) { memset(parms, 0, sizeof(*parms)); @@ -566,7 +566,7 @@ static int vti_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { - struct ip_tunnel_parm parms; + struct ip_tunnel_parm_kern parms; __u32 fwmark = 0; vti_netlink_parms(data, &parms, &fwmark); @@ -578,8 +578,8 @@ static int vti_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct ip_tunnel *t = netdev_priv(dev); + struct ip_tunnel_parm_kern p; __u32 fwmark = t->fwmark; - struct ip_tunnel_parm p; vti_netlink_parms(data, &p, &fwmark); return ip_tunnel_changelink(dev, tb, &p, fwmark); @@ -606,7 +606,7 @@ static size_t vti_get_size(const struct net_device *dev) static int vti_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct ip_tunnel *t = netdev_priv(dev); - struct ip_tunnel_parm *p = &t->parms; + struct ip_tunnel_parm_kern *p = &t->parms; if (nla_put_u32(skb, IFLA_VTI_LINK, p->link) || nla_put_be32(skb, IFLA_VTI_IKEY, p->i_key) || diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index f2696eaadbe6..db960cd08a3b 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -330,7 +330,7 @@ static bool ipip_tunnel_ioctl_verify_protocol(u8 ipproto) } static int -ipip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd) +ipip_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, int cmd) { if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) { if (p->iph.version != 4 || @@ -405,8 +405,8 @@ static int ipip_tunnel_validate(struct nlattr *tb[], struct nlattr *data[], } static void ipip_netlink_parms(struct nlattr *data[], - struct ip_tunnel_parm *parms, bool *collect_md, - __u32 *fwmark) + struct ip_tunnel_parm_kern *parms, + bool *collect_md, __u32 *fwmark) { memset(parms, 0, sizeof(*parms)); @@ -432,8 +432,8 @@ static int ipip_newlink(struct net *src_net, struct net_device *dev, struct netlink_ext_ack *extack) { struct ip_tunnel *t = netdev_priv(dev); - struct ip_tunnel_parm p; struct ip_tunnel_encap ipencap; + struct ip_tunnel_parm_kern p; __u32 fwmark = 0; if (ip_tunnel_netlink_encap_parms(data, &ipencap)) { @@ -452,8 +452,8 @@ static int ipip_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct ip_tunnel *t = netdev_priv(dev); - struct ip_tunnel_parm p; struct ip_tunnel_encap ipencap; + struct ip_tunnel_parm_kern p; bool collect_md; __u32 fwmark = t->fwmark; @@ -510,7 +510,7 @@ static size_t ipip_get_size(const struct net_device *dev) static int ipip_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); - struct ip_tunnel_parm *parm = &tunnel->parms; + struct ip_tunnel_parm_kern *parm = &tunnel->parms; if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) || nla_put_in_addr(skb, 
IFLA_IPTUN_LOCAL, parm->iph.saddr) || diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index fd5c01c8489f..6c750bd13dd8 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -441,7 +441,7 @@ static bool ipmr_init_vif_indev(const struct net_device *dev) static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) { struct net_device *tunnel_dev, *new_dev; - struct ip_tunnel_parm p = { }; + struct ip_tunnel_parm_kern p = { }; int err; tunnel_dev = __dev_get_by_name(net, "tunl0"); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 92db9b474f2b..82ad6b8fa007 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -63,6 +63,7 @@ #include #include +#include #include #include #include @@ -2917,7 +2918,7 @@ put: static int addrconf_set_sit_dstaddr(struct net *net, struct net_device *dev, struct in6_ifreq *ireq) { - struct ip_tunnel_parm p = { }; + struct ip_tunnel_parm_kern p = { }; int err; if (!(ipv6_addr_type(&ireq->ifr6_addr) & IPV6_ADDR_COMPATv4)) diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 655c9b1a19b8..894e6a4c4be3 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -132,8 +132,8 @@ static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net, return NULL; } -static struct ip_tunnel __rcu **__ipip6_bucket(struct sit_net *sitn, - struct ip_tunnel_parm *parms) +static struct ip_tunnel __rcu ** +__ipip6_bucket(struct sit_net *sitn, struct ip_tunnel_parm_kern *parms) { __be32 remote = parms->iph.daddr; __be32 local = parms->iph.saddr; @@ -226,7 +226,8 @@ out: } static struct ip_tunnel *ipip6_tunnel_locate(struct net *net, - struct ip_tunnel_parm *parms, int create) + struct ip_tunnel_parm_kern *parms, + int create) { __be32 remote = parms->iph.daddr; __be32 local = parms->iph.saddr; @@ -1135,7 +1136,8 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev) dev->needed_headroom = t_hlen + hlen; } -static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p, +static void ipip6_tunnel_update(struct ip_tunnel *t, + struct ip_tunnel_parm_kern *p, __u32 fwmark) { struct net *net = t->net; @@ -1196,11 +1198,11 @@ static int ipip6_tunnel_get6rd(struct net_device *dev, struct ip_tunnel_parm __user *data) { struct ip_tunnel *t = netdev_priv(dev); + struct ip_tunnel_parm_kern p; struct ip_tunnel_6rd ip6rd; - struct ip_tunnel_parm p; if (dev == dev_to_sit_net(dev)->fb_tunnel_dev) { - if (copy_from_user(&p, data, sizeof(p))) + if (!ip_tunnel_parm_from_user(&p, data)) return -EFAULT; t = ipip6_tunnel_locate(t->net, &p, 0); } @@ -1251,7 +1253,7 @@ static bool ipip6_valid_ip_proto(u8 ipproto) } static int -__ipip6_tunnel_ioctl_validate(struct net *net, struct ip_tunnel_parm *p) +__ipip6_tunnel_ioctl_validate(struct net *net, struct ip_tunnel_parm_kern *p) { if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) return -EPERM; @@ -1268,7 +1270,7 @@ __ipip6_tunnel_ioctl_validate(struct net *net, struct ip_tunnel_parm *p) } static int -ipip6_tunnel_get(struct net_device *dev, struct ip_tunnel_parm *p) +ipip6_tunnel_get(struct net_device *dev, struct ip_tunnel_parm_kern *p) { struct ip_tunnel *t = netdev_priv(dev); @@ -1281,7 +1283,7 @@ ipip6_tunnel_get(struct net_device *dev, struct ip_tunnel_parm *p) } static int -ipip6_tunnel_add(struct net_device *dev, struct ip_tunnel_parm *p) +ipip6_tunnel_add(struct net_device *dev, struct ip_tunnel_parm_kern *p) { struct ip_tunnel *t = netdev_priv(dev); int err; @@ -1297,7 +1299,7 @@ ipip6_tunnel_add(struct net_device *dev, struct ip_tunnel_parm *p) } static int -ipip6_tunnel_change(struct net_device *dev, struct 
ip_tunnel_parm *p) +ipip6_tunnel_change(struct net_device *dev, struct ip_tunnel_parm_kern *p) { struct ip_tunnel *t = netdev_priv(dev); int err; @@ -1328,7 +1330,7 @@ ipip6_tunnel_change(struct net_device *dev, struct ip_tunnel_parm *p) } static int -ipip6_tunnel_del(struct net_device *dev, struct ip_tunnel_parm *p) +ipip6_tunnel_del(struct net_device *dev, struct ip_tunnel_parm_kern *p) { struct ip_tunnel *t = netdev_priv(dev); @@ -1348,7 +1350,8 @@ ipip6_tunnel_del(struct net_device *dev, struct ip_tunnel_parm *p) } static int -ipip6_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd) +ipip6_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm_kern *p, + int cmd) { switch (cmd) { case SIOCGETTUNNEL: @@ -1490,7 +1493,7 @@ static int ipip6_validate(struct nlattr *tb[], struct nlattr *data[], } static void ipip6_netlink_parms(struct nlattr *data[], - struct ip_tunnel_parm *parms, + struct ip_tunnel_parm_kern *parms, __u32 *fwmark) { memset(parms, 0, sizeof(*parms)); @@ -1599,8 +1602,8 @@ static int ipip6_changelink(struct net_device *dev, struct nlattr *tb[], struct netlink_ext_ack *extack) { struct ip_tunnel *t = netdev_priv(dev); - struct ip_tunnel_parm p; struct ip_tunnel_encap ipencap; + struct ip_tunnel_parm_kern p; struct net *net = t->net; struct sit_net *sitn = net_generic(net, sit_net_id); #ifdef CONFIG_IPV6_SIT_6RD @@ -1687,7 +1690,7 @@ static size_t ipip6_get_size(const struct net_device *dev) static int ipip6_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); - struct ip_tunnel_parm *parm = &tunnel->parms; + struct ip_tunnel_parm_kern *parm = &tunnel->parms; if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) || nla_put_in_addr(skb, IFLA_IPTUN_LOCAL, parm->iph.saddr) || -- cgit From b9495b564d91a0afe4125db64d2bc25c310bda9c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 29 Mar 2024 15:42:18 +0000 Subject: net: move kick_defer_list_purge() to net/core/dev.h kick_defer_list_purge() is defined in net/core/dev.c and used from net/core/skubff.c Because we need softnet_data, include from net/core/dev.h Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 1 - net/core/dev.h | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 55c7cf9404a4..15e70527b703 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3288,7 +3288,6 @@ static inline void dev_xmit_recursion_dec(void) __this_cpu_dec(softnet_data.xmit.recursion); } -void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu); void __netif_schedule(struct Qdisc *q); void netif_schedule_queue(struct netdev_queue *txq); diff --git a/net/core/dev.h b/net/core/dev.h index 2bcaf8eee50c..9d0f8b4587f8 100644 --- a/net/core/dev.h +++ b/net/core/dev.h @@ -4,11 +4,9 @@ #include #include +#include struct net; -struct net_device; -struct netdev_bpf; -struct netdev_phys_item_id; struct netlink_ext_ack; struct cpumask; @@ -150,4 +148,6 @@ static inline void xdp_do_check_flushed(struct napi_struct *napi) { } #endif struct napi_struct *napi_by_id(unsigned int napi_id); +void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu); + #endif -- cgit From 2fe50a4d7225cf10748775e290361896637091a9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 29 Mar 2024 15:42:19 +0000 Subject: net: move dev_xmit_recursion() helpers to net/core/dev.h Move dev_xmit_recursion() and friends to net/core/dev.h They are only used from net/core/dev.c and net/core/filter.c. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/netdevice.h | 17 ----------------- net/core/dev.h | 17 +++++++++++++++++ 2 files changed, 17 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 15e70527b703..0cd9ee83f554 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3271,23 +3271,6 @@ static inline int dev_recursion_level(void) return this_cpu_read(softnet_data.xmit.recursion); } -#define XMIT_RECURSION_LIMIT 8 -static inline bool dev_xmit_recursion(void) -{ - return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > - XMIT_RECURSION_LIMIT); -} - -static inline void dev_xmit_recursion_inc(void) -{ - __this_cpu_inc(softnet_data.xmit.recursion); -} - -static inline void dev_xmit_recursion_dec(void) -{ - __this_cpu_dec(softnet_data.xmit.recursion); -} - void __netif_schedule(struct Qdisc *q); void netif_schedule_queue(struct netdev_queue *txq); diff --git a/net/core/dev.h b/net/core/dev.h index 9d0f8b4587f8..8572d2c8dc4a 100644 --- a/net/core/dev.h +++ b/net/core/dev.h @@ -150,4 +150,21 @@ static inline void xdp_do_check_flushed(struct napi_struct *napi) { } struct napi_struct *napi_by_id(unsigned int napi_id); void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu); +#define XMIT_RECURSION_LIMIT 8 +static inline bool dev_xmit_recursion(void) +{ + return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > + XMIT_RECURSION_LIMIT); +} + +static inline void dev_xmit_recursion_inc(void) +{ + __this_cpu_inc(softnet_data.xmit.recursion); +} + +static inline void dev_xmit_recursion_dec(void) +{ + __this_cpu_dec(softnet_data.xmit.recursion); +} + #endif -- cgit From a7ae7b0b2ea014ff3ed4be812c3efa1b1d86e153 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 29 Mar 2024 15:42:21 +0000 Subject: net: make softnet_data.dropped an atomic_t If under extreme cpu backlog pressure enqueue_to_backlog() has to drop a packet, it could do this without dirtying a cache line and potentially slowing down the target cpu. 
Move sd->dropped into a separate cache line, and make it atomic. In non pressure mode, this field is not touched, no need to consume valuable space in a hot cache line. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/netdevice.h | 3 ++- net/core/dev.c | 13 +++++++++---- net/core/net-procfs.c | 3 ++- 3 files changed, 13 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 0cd9ee83f554..be9f071a340d 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3237,10 +3237,11 @@ struct softnet_data { unsigned int input_queue_tail; #endif unsigned int received_rps; - unsigned int dropped; struct sk_buff_head input_pkt_queue; struct napi_struct backlog; + atomic_t dropped ____cacheline_aligned_in_smp; + /* Another possibly contended cache line */ spinlock_t defer_lock ____cacheline_aligned_in_smp; int defer_count; diff --git a/net/core/dev.c b/net/core/dev.c index 4ad7836365e6..02c98f115243 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4800,17 +4800,22 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, struct softnet_data *sd; unsigned long flags; unsigned int qlen; + int max_backlog; reason = SKB_DROP_REASON_DEV_READY; if (!netif_running(skb->dev)) goto bad_dev; + reason = SKB_DROP_REASON_CPU_BACKLOG; sd = &per_cpu(softnet_data, cpu); + qlen = skb_queue_len_lockless(&sd->input_pkt_queue); + max_backlog = READ_ONCE(net_hotdata.max_backlog); + if (unlikely(qlen > max_backlog)) + goto cpu_backlog_drop; backlog_lock_irq_save(sd, &flags); qlen = skb_queue_len(&sd->input_pkt_queue); - if (qlen <= READ_ONCE(net_hotdata.max_backlog) && - !skb_flow_limit(skb, qlen)) { + if (qlen <= max_backlog && !skb_flow_limit(skb, qlen)) { if (qlen) { enqueue: __skb_queue_tail(&sd->input_pkt_queue, skb); @@ -4826,11 +4831,11 @@ enqueue: napi_schedule_rps(sd); goto enqueue; } - reason = SKB_DROP_REASON_CPU_BACKLOG; - sd->dropped++; backlog_unlock_irq_restore(sd, &flags); +cpu_backlog_drop: + atomic_inc(&sd->dropped); bad_dev: dev_core_stats_rx_dropped_inc(skb->dev); kfree_skb_reason(skb, reason); diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c index a97eceb84e61..fa6d3969734a 100644 --- a/net/core/net-procfs.c +++ b/net/core/net-procfs.c @@ -144,7 +144,8 @@ static int softnet_seq_show(struct seq_file *seq, void *v) seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x " "%08x %08x\n", - sd->processed, sd->dropped, sd->time_squeeze, 0, + sd->processed, atomic_read(&sd->dropped), + sd->time_squeeze, 0, 0, 0, 0, 0, /* was fastroute */ 0, /* was cpu_collision */ sd->received_rps, flow_limit_count, -- cgit From 36b83ffcf209a2e6099dae1070df6a2001dfab27 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 29 Mar 2024 15:42:23 +0000 Subject: net: rps: change input_queue_tail_incr_save() input_queue_tail_incr_save() is incrementing the sd queue_tail and save it in the flow last_qtail. Two issues here : - no lock protects the write on last_qtail, we should use appropriate annotations. - We can perform this write after releasing the per-cpu backlog lock, to decrease this lock hold duration (move away the cache line miss) Also move input_queue_head_incr() and rps helpers to include/net/rps.h, while adding rps_ prefix to better reflect their role. v2: Fixed a build issue (Jakub and kernel build bots) Signed-off-by: Eric Dumazet Signed-off-by: David S. 
Miller --- include/linux/netdevice.h | 15 --------------- include/net/rps.h | 23 +++++++++++++++++++++++ net/core/dev.c | 20 ++++++++++++-------- 3 files changed, 35 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index be9f071a340d..2c11c295cf64 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3250,21 +3250,6 @@ struct softnet_data { call_single_data_t defer_csd; }; -static inline void input_queue_head_incr(struct softnet_data *sd) -{ -#ifdef CONFIG_RPS - sd->input_queue_head++; -#endif -} - -static inline void input_queue_tail_incr_save(struct softnet_data *sd, - unsigned int *qtail) -{ -#ifdef CONFIG_RPS - *qtail = ++sd->input_queue_tail; -#endif -} - DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); static inline int dev_recursion_level(void) diff --git a/include/net/rps.h b/include/net/rps.h index 7660243e905b..10ca25731c1e 100644 --- a/include/net/rps.h +++ b/include/net/rps.h @@ -122,4 +122,27 @@ static inline void sock_rps_record_flow(const struct sock *sk) #endif } +static inline u32 rps_input_queue_tail_incr(struct softnet_data *sd) +{ +#ifdef CONFIG_RPS + return ++sd->input_queue_tail; +#else + return 0; +#endif +} + +static inline void rps_input_queue_tail_save(u32 *dest, u32 tail) +{ +#ifdef CONFIG_RPS + WRITE_ONCE(*dest, tail); +#endif +} + +static inline void rps_input_queue_head_incr(struct softnet_data *sd) +{ +#ifdef CONFIG_RPS + sd->input_queue_head++; +#endif +} + #endif /* _NET_RPS_H */ diff --git a/net/core/dev.c b/net/core/dev.c index 0a8ccb0451c3..79073bbc9a64 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4611,7 +4611,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, if (unlikely(tcpu != next_cpu) && (tcpu >= nr_cpu_ids || !cpu_online(tcpu) || ((int)(per_cpu(softnet_data, tcpu).input_queue_head - - rflow->last_qtail)) >= 0)) { + READ_ONCE(rflow->last_qtail))) >= 0)) { tcpu = next_cpu; rflow = set_rps_cpu(dev, skb, rflow, next_cpu); } @@ -4666,7 +4666,7 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, cpu = READ_ONCE(rflow->cpu); if (rflow->filter == filter_id && cpu < nr_cpu_ids && ((int)(per_cpu(softnet_data, cpu).input_queue_head - - rflow->last_qtail) < + READ_ONCE(rflow->last_qtail)) < (int)(10 * flow_table->mask))) expire = false; } @@ -4801,6 +4801,7 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, unsigned long flags; unsigned int qlen; int max_backlog; + u32 tail; reason = SKB_DROP_REASON_DEV_READY; if (!netif_running(skb->dev)) @@ -4825,8 +4826,11 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, napi_schedule_rps(sd); } __skb_queue_tail(&sd->input_pkt_queue, skb); - input_queue_tail_incr_save(sd, qtail); + tail = rps_input_queue_tail_incr(sd); backlog_unlock_irq_restore(sd, &flags); + + /* save the tail outside of the critical section */ + rps_input_queue_tail_save(qtail, tail); return NET_RX_SUCCESS; } @@ -5904,7 +5908,7 @@ static void flush_backlog(struct work_struct *work) if (skb->dev->reg_state == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->input_pkt_queue); dev_kfree_skb_irq(skb); - input_queue_head_incr(sd); + rps_input_queue_head_incr(sd); } } backlog_unlock_irq_enable(sd); @@ -5913,7 +5917,7 @@ static void flush_backlog(struct work_struct *work) if (skb->dev->reg_state == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->process_queue); kfree_skb(skb); - input_queue_head_incr(sd); + rps_input_queue_head_incr(sd); } } local_bh_enable(); @@ -6041,7 +6045,7 @@ 
static int process_backlog(struct napi_struct *napi, int quota) rcu_read_lock(); __netif_receive_skb(skb); rcu_read_unlock(); - input_queue_head_incr(sd); + rps_input_queue_head_incr(sd); if (++work >= quota) return work; @@ -11455,11 +11459,11 @@ static int dev_cpu_dead(unsigned int oldcpu) /* Process offline CPU's input_pkt_queue */ while ((skb = __skb_dequeue(&oldsd->process_queue))) { netif_rx(skb); - input_queue_head_incr(oldsd); + rps_input_queue_head_incr(oldsd); } while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { netif_rx(skb); - input_queue_head_incr(oldsd); + rps_input_queue_head_incr(oldsd); } return 0; -- cgit From d3ae5f4632c107d3c2eeb97a60fecc6a6f9d6fbe Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 29 Mar 2024 15:42:25 +0000 Subject: net: rps: move received_rps field to a better location Commit 14d898f3c1b3 ("dev: Move received_rps counter next to RPS members in softnet data") was unfortunate: received_rps is dirtied by a cpu and never read by other cpus in fast path. Its presence in the hot RPS cache line (shared by many cpus) is hurting RPS/RFS performance. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/netdevice.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 2c11c295cf64..7d12b5a9380f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3204,6 +3204,7 @@ struct softnet_data { struct softnet_data *rps_ipi_list; #endif + unsigned int received_rps; bool in_net_rx_action; bool in_napi_threaded_poll; @@ -3236,7 +3237,6 @@ struct softnet_data { unsigned int cpu; unsigned int input_queue_tail; #endif - unsigned int received_rps; struct sk_buff_head input_pkt_queue; struct napi_struct backlog; -- cgit From c32c617de8076d8fb2a16a4a2f3b5da5f3df398d Mon Sep 17 00:00:00 2001 From: Andrew Davis Date: Mon, 25 Mar 2024 15:31:24 -0500 Subject: power: supply: bq27xxx: Move temperature reading out of update loop Most of the functions that read values return a status and put the value itself in an a function parameter. Update temperature reading to match. As temp is not checked for changes as part of the update loop, remove the read of the temperature from the periodic update loop. This saves I2C/1W bandwidth. It also means we do not have to cache it, fresh values are read when requested. Signed-off-by: Andrew Davis Link: https://lore.kernel.org/r/20240325203129.150030-1-afd@ti.com Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq27xxx_battery.c | 17 ++++++++++------- include/linux/power/bq27xxx_battery.h | 1 - 2 files changed, 10 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index abca56834468..00b2a56039ac 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -1652,10 +1652,11 @@ static int bq27xxx_battery_read_energy(struct bq27xxx_device_info *di) } /* - * Return the battery temperature in tenths of degree Kelvin + * Return the battery temperature in tenths of degree Celsius * Or < 0 if something fails. 
*/ -static int bq27xxx_battery_read_temperature(struct bq27xxx_device_info *di) +static int bq27xxx_battery_read_temperature(struct bq27xxx_device_info *di, + union power_supply_propval *val) { int temp; @@ -1668,7 +1669,12 @@ static int bq27xxx_battery_read_temperature(struct bq27xxx_device_info *di) if (di->opts & BQ27XXX_O_ZERO) temp = 5 * temp / 2; - return temp; + /* Convert decidegree Kelvin to Celsius */ + temp -= 2731; + + val->intval = temp; + + return 0; } /* @@ -1851,7 +1857,6 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di) if ((cache.flags & 0xff) == 0xff) cache.flags = -1; /* read error */ if (cache.flags >= 0) { - cache.temperature = bq27xxx_battery_read_temperature(di); if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR) cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE); if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR) @@ -2038,9 +2043,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy, ret = bq27xxx_battery_capacity_level(di, val); break; case POWER_SUPPLY_PROP_TEMP: - ret = bq27xxx_simple_value(di->cache.temperature, val); - if (ret == 0) - val->intval -= 2731; /* convert decidegree k to c */ + ret = bq27xxx_battery_read_temperature(di, val); break; case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: ret = bq27xxx_simple_value(di->cache.time_to_empty, val); diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index b9e5bd2b42d3..64b2749d9562 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -47,7 +47,6 @@ struct bq27xxx_access_methods { }; struct bq27xxx_reg_cache { - int temperature; int time_to_empty; int time_to_empty_avg; int time_to_full; -- cgit From 651a620aa4d49f5647e21e55fc71bb049bc03389 Mon Sep 17 00:00:00 2001 From: Andrew Davis Date: Mon, 25 Mar 2024 15:31:25 -0500 Subject: power: supply: bq27xxx: Move time reading out of update loop Most of the functions that read values return a status and put the value itself in an a function parameter. Update time reading to match. As time is not checked for changes as part of the update loop, remove the read of the this from the periodic update loop. This saves I2C/1W bandwidth. It also means we do not have to cache it, fresh values are read when requested. Signed-off-by: Andrew Davis Link: https://lore.kernel.org/r/20240325203129.150030-2-afd@ti.com Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq27xxx_battery.c | 20 ++++++++------------ include/linux/power/bq27xxx_battery.h | 3 --- 2 files changed, 8 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 00b2a56039ac..dcfe3c10e7ed 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -1696,7 +1696,8 @@ static int bq27xxx_battery_read_cyct(struct bq27xxx_device_info *di) * Read a time register. * Return < 0 if something fails. 
*/ -static int bq27xxx_battery_read_time(struct bq27xxx_device_info *di, u8 reg) +static int bq27xxx_battery_read_time(struct bq27xxx_device_info *di, u8 reg, + union power_supply_propval *val) { int tval; @@ -1710,7 +1711,9 @@ static int bq27xxx_battery_read_time(struct bq27xxx_device_info *di, u8 reg) if (tval == 65535) return -ENODATA; - return tval * 60; + val->intval = tval * 60; + + return 0; } /* @@ -1857,13 +1860,6 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di) if ((cache.flags & 0xff) == 0xff) cache.flags = -1; /* read error */ if (cache.flags >= 0) { - if (di->regs[BQ27XXX_REG_TTE] != INVALID_REG_ADDR) - cache.time_to_empty = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE); - if (di->regs[BQ27XXX_REG_TTECP] != INVALID_REG_ADDR) - cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP); - if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR) - cache.time_to_full = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF); - cache.charge_full = bq27xxx_battery_read_fcc(di); cache.capacity = bq27xxx_battery_read_soc(di); if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR) @@ -2046,13 +2042,13 @@ static int bq27xxx_battery_get_property(struct power_supply *psy, ret = bq27xxx_battery_read_temperature(di, val); break; case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW: - ret = bq27xxx_simple_value(di->cache.time_to_empty, val); + ret = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTE, val); break; case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: - ret = bq27xxx_simple_value(di->cache.time_to_empty_avg, val); + ret = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP, val); break; case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: - ret = bq27xxx_simple_value(di->cache.time_to_full, val); + ret = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF, val); break; case POWER_SUPPLY_PROP_TECHNOLOGY: if (di->opts & BQ27XXX_O_MUL_CHEM) diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index 64b2749d9562..e89ef989a575 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -47,9 +47,6 @@ struct bq27xxx_access_methods { }; struct bq27xxx_reg_cache { - int time_to_empty; - int time_to_empty_avg; - int time_to_full; int charge_full; int cycle_count; int capacity; -- cgit From 8d846335204f25a2247e5e88e39e1604b6ecc133 Mon Sep 17 00:00:00 2001 From: Andrew Davis Date: Mon, 25 Mar 2024 15:31:26 -0500 Subject: power: supply: bq27xxx: Move charge reading out of update loop Most of the functions that read values return a status and put the value itself in an a function parameter. Update charge reading to match. As charge state is not checked for changes as part of the update loop, remove the read of this from the periodic update loop. This saves I2C/1W bandwidth. It also means we do not have to cache it, fresh values are read when requested. 
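The conversion pattern shared by this series of bq27xxx patches is easier to see in condensed form: each read helper now returns 0 or a negative errno and delivers the value through the power-supply propval, so bq27xxx_battery_get_property() fetches fresh data on demand instead of consuming a cached copy from the periodic update loop. A minimal sketch of that shape follows; the helper name and the scaling are illustrative only and not taken from the driver.

```
/* Sketch only: the status-return / out-parameter shape these patches
 * converge on.  "example" is not a real bq27xxx register helper.
 */
static int bq27xxx_battery_read_example(struct bq27xxx_device_info *di,
					union power_supply_propval *val)
{
	int raw = bq27xxx_read(di, BQ27XXX_REG_NAC, false);

	if (raw < 0) {
		dev_err(di->dev, "error reading register\n");
		return raw;
	}

	/* scale to the unit the power-supply core expects (here: µAh) */
	val->intval = raw * 1000;

	return 0;
}
```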
Signed-off-by: Andrew Davis Link: https://lore.kernel.org/r/20240325203129.150030-3-afd@ti.com Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq27xxx_battery.c | 29 +++++++++++++++++------------ include/linux/power/bq27xxx_battery.h | 1 - 2 files changed, 17 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index dcfe3c10e7ed..799ee0aa9ef7 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -1545,7 +1545,8 @@ static int bq27xxx_battery_read_soc(struct bq27xxx_device_info *di) * Return a battery charge value in µAh * Or < 0 if something fails. */ -static int bq27xxx_battery_read_charge(struct bq27xxx_device_info *di, u8 reg) +static int bq27xxx_battery_read_charge(struct bq27xxx_device_info *di, u8 reg, + union power_supply_propval *val) { int charge; @@ -1561,34 +1562,39 @@ static int bq27xxx_battery_read_charge(struct bq27xxx_device_info *di, u8 reg) else charge *= 1000; - return charge; + val->intval = charge; + + return 0; } /* * Return the battery Nominal available capacity in µAh * Or < 0 if something fails. */ -static inline int bq27xxx_battery_read_nac(struct bq27xxx_device_info *di) +static inline int bq27xxx_battery_read_nac(struct bq27xxx_device_info *di, + union power_supply_propval *val) { - return bq27xxx_battery_read_charge(di, BQ27XXX_REG_NAC); + return bq27xxx_battery_read_charge(di, BQ27XXX_REG_NAC, val); } /* * Return the battery Remaining Capacity in µAh * Or < 0 if something fails. */ -static inline int bq27xxx_battery_read_rc(struct bq27xxx_device_info *di) +static inline int bq27xxx_battery_read_rc(struct bq27xxx_device_info *di, + union power_supply_propval *val) { - return bq27xxx_battery_read_charge(di, BQ27XXX_REG_RC); + return bq27xxx_battery_read_charge(di, BQ27XXX_REG_RC, val); } /* * Return the battery Full Charge Capacity in µAh * Or < 0 if something fails. 
*/ -static inline int bq27xxx_battery_read_fcc(struct bq27xxx_device_info *di) +static inline int bq27xxx_battery_read_fcc(struct bq27xxx_device_info *di, + union power_supply_propval *val) { - return bq27xxx_battery_read_charge(di, BQ27XXX_REG_FCC); + return bq27xxx_battery_read_charge(di, BQ27XXX_REG_FCC, val); } /* @@ -1860,7 +1866,6 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di) if ((cache.flags & 0xff) == 0xff) cache.flags = -1; /* read error */ if (cache.flags >= 0) { - cache.charge_full = bq27xxx_battery_read_fcc(di); cache.capacity = bq27xxx_battery_read_soc(di); if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR) cache.energy = bq27xxx_battery_read_energy(di); @@ -2058,12 +2063,12 @@ static int bq27xxx_battery_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CHARGE_NOW: if (di->regs[BQ27XXX_REG_NAC] != INVALID_REG_ADDR) - ret = bq27xxx_simple_value(bq27xxx_battery_read_nac(di), val); + ret = bq27xxx_battery_read_nac(di, val); else - ret = bq27xxx_simple_value(bq27xxx_battery_read_rc(di), val); + ret = bq27xxx_battery_read_rc(di, val); break; case POWER_SUPPLY_PROP_CHARGE_FULL: - ret = bq27xxx_simple_value(di->cache.charge_full, val); + ret = bq27xxx_battery_read_fcc(di, val); break; case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: ret = bq27xxx_battery_read_dcap(di, val); diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index e89ef989a575..1c67fa46013b 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -47,7 +47,6 @@ struct bq27xxx_access_methods { }; struct bq27xxx_reg_cache { - int charge_full; int cycle_count; int capacity; int energy; -- cgit From 39cf1c4cd03254218a23ef955bd534e19328f618 Mon Sep 17 00:00:00 2001 From: Andrew Davis Date: Mon, 25 Mar 2024 15:31:27 -0500 Subject: power: supply: bq27xxx: Move energy reading out of update loop Most of the functions that read values return a status and put the value itself in an a function parameter. Update energy reading to match. As energy is not checked for changes as part of the update loop, remove the read of this from the periodic update loop. This saves I2C/1W bandwidth. It also means we do not have to cache it, fresh values are read when requested. Signed-off-by: Andrew Davis Link: https://lore.kernel.org/r/20240325203129.150030-4-afd@ti.com Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq27xxx_battery.c | 11 ++++++----- include/linux/power/bq27xxx_battery.h | 1 - 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 799ee0aa9ef7..eae2b9a60f40 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -1639,7 +1639,8 @@ static int bq27xxx_battery_read_dcap(struct bq27xxx_device_info *di, * Return the battery Available energy in µWh * Or < 0 if something fails. 
*/ -static int bq27xxx_battery_read_energy(struct bq27xxx_device_info *di) +static int bq27xxx_battery_read_energy(struct bq27xxx_device_info *di, + union power_supply_propval *val) { int ae; @@ -1654,7 +1655,9 @@ static int bq27xxx_battery_read_energy(struct bq27xxx_device_info *di) else ae *= 1000; - return ae; + val->intval = ae; + + return 0; } /* @@ -1867,8 +1870,6 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di) cache.flags = -1; /* read error */ if (cache.flags >= 0) { cache.capacity = bq27xxx_battery_read_soc(di); - if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR) - cache.energy = bq27xxx_battery_read_energy(di); di->cache.flags = cache.flags; cache.health = bq27xxx_battery_read_health(di); if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR) @@ -2084,7 +2085,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy, ret = bq27xxx_simple_value(di->cache.cycle_count, val); break; case POWER_SUPPLY_PROP_ENERGY_NOW: - ret = bq27xxx_simple_value(di->cache.energy, val); + ret = bq27xxx_battery_read_energy(di, val); break; case POWER_SUPPLY_PROP_POWER_AVG: ret = bq27xxx_battery_pwr_avg(di, val); diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index 1c67fa46013b..5c75abf3cf06 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -49,7 +49,6 @@ struct bq27xxx_access_methods { struct bq27xxx_reg_cache { int cycle_count; int capacity; - int energy; int flags; int health; }; -- cgit From 656489ac90f25f92190a1dd5c4e5c5293bd70323 Mon Sep 17 00:00:00 2001 From: Andrew Davis Date: Mon, 25 Mar 2024 15:31:28 -0500 Subject: power: supply: bq27xxx: Move cycle count reading out of update loop Most of the functions that read values return a status and put the value itself in an a function parameter. Update cycle count reading to match. As cycle count is not checked for changes as part of the update loop, remove the read of this from the periodic update loop. This saves I2C/1W bandwidth. It also means we do not have to cache it, fresh values are read when requested. Signed-off-by: Andrew Davis Link: https://lore.kernel.org/r/20240325203129.150030-5-afd@ti.com Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq27xxx_battery.c | 11 ++++++----- include/linux/power/bq27xxx_battery.h | 1 - 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index eae2b9a60f40..fe1b0af143ca 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -1690,7 +1690,8 @@ static int bq27xxx_battery_read_temperature(struct bq27xxx_device_info *di, * Return the battery Cycle count total * Or < 0 if something fails. 
*/ -static int bq27xxx_battery_read_cyct(struct bq27xxx_device_info *di) +static int bq27xxx_battery_read_cyct(struct bq27xxx_device_info *di, + union power_supply_propval *val) { int cyct; @@ -1698,7 +1699,9 @@ static int bq27xxx_battery_read_cyct(struct bq27xxx_device_info *di) if (cyct < 0) dev_err(di->dev, "error reading cycle count total\n"); - return cyct; + val->intval = cyct; + + return 0; } /* @@ -1872,8 +1875,6 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di) cache.capacity = bq27xxx_battery_read_soc(di); di->cache.flags = cache.flags; cache.health = bq27xxx_battery_read_health(di); - if (di->regs[BQ27XXX_REG_CYCT] != INVALID_REG_ADDR) - cache.cycle_count = bq27xxx_battery_read_cyct(di); /* * On gauges with signed current reporting the current must be @@ -2082,7 +2083,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: return -EINVAL; case POWER_SUPPLY_PROP_CYCLE_COUNT: - ret = bq27xxx_simple_value(di->cache.cycle_count, val); + ret = bq27xxx_battery_read_cyct(di, val); break; case POWER_SUPPLY_PROP_ENERGY_NOW: ret = bq27xxx_battery_read_energy(di, val); diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index 5c75abf3cf06..d743270799d7 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -47,7 +47,6 @@ struct bq27xxx_access_methods { }; struct bq27xxx_reg_cache { - int cycle_count; int capacity; int flags; int health; -- cgit From 50f0ff7c8cc4c1d10fabc4b3b3f3b9e942b08187 Mon Sep 17 00:00:00 2001 From: Andrew Davis Date: Mon, 25 Mar 2024 15:31:29 -0500 Subject: power: supply: bq27xxx: Move health reading out of update loop Most of the functions that read values return a status and put the value itself in an a function parameter. Update health reading to match. As health is not checked for changes as part of the update loop, remove the read of this from the periodic update loop. This saves I2C/1W bandwidth. It also means we do not have to cache it, fresh values are read when requested. 
Signed-off-by: Andrew Davis Link: https://lore.kernel.org/r/20240325203129.150030-6-afd@ti.com Signed-off-by: Sebastian Reichel --- drivers/power/supply/bq27xxx_battery.c | 30 ++++++++++++++++++------------ include/linux/power/bq27xxx_battery.h | 1 - 2 files changed, 18 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index fe1b0af143ca..750fda543308 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -1777,19 +1777,26 @@ static bool bq27xxx_battery_capacity_inaccurate(struct bq27xxx_device_info *di, return false; } -static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di) +static int bq27xxx_battery_read_health(struct bq27xxx_device_info *di, + union power_supply_propval *val) { + int health; + /* Unlikely but important to return first */ if (unlikely(bq27xxx_battery_overtemp(di, di->cache.flags))) - return POWER_SUPPLY_HEALTH_OVERHEAT; - if (unlikely(bq27xxx_battery_undertemp(di, di->cache.flags))) - return POWER_SUPPLY_HEALTH_COLD; - if (unlikely(bq27xxx_battery_dead(di, di->cache.flags))) - return POWER_SUPPLY_HEALTH_DEAD; - if (unlikely(bq27xxx_battery_capacity_inaccurate(di, di->cache.flags))) - return POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED; - - return POWER_SUPPLY_HEALTH_GOOD; + health = POWER_SUPPLY_HEALTH_OVERHEAT; + else if (unlikely(bq27xxx_battery_undertemp(di, di->cache.flags))) + health = POWER_SUPPLY_HEALTH_COLD; + else if (unlikely(bq27xxx_battery_dead(di, di->cache.flags))) + health = POWER_SUPPLY_HEALTH_DEAD; + else if (unlikely(bq27xxx_battery_capacity_inaccurate(di, di->cache.flags))) + health = POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED; + else + health = POWER_SUPPLY_HEALTH_GOOD; + + val->intval = health; + + return 0; } static bool bq27xxx_battery_is_full(struct bq27xxx_device_info *di, int flags) @@ -1874,7 +1881,6 @@ static void bq27xxx_battery_update_unlocked(struct bq27xxx_device_info *di) if (cache.flags >= 0) { cache.capacity = bq27xxx_battery_read_soc(di); di->cache.flags = cache.flags; - cache.health = bq27xxx_battery_read_health(di); /* * On gauges with signed current reporting the current must be @@ -2092,7 +2098,7 @@ static int bq27xxx_battery_get_property(struct power_supply *psy, ret = bq27xxx_battery_pwr_avg(di, val); break; case POWER_SUPPLY_PROP_HEALTH: - ret = bq27xxx_simple_value(di->cache.health, val); + ret = bq27xxx_battery_read_health(di, val); break; case POWER_SUPPLY_PROP_MANUFACTURER: val->strval = BQ27XXX_MANUFACTURER; diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index d743270799d7..5180dc9f1706 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h @@ -49,7 +49,6 @@ struct bq27xxx_access_methods { struct bq27xxx_reg_cache { int capacity; int flags; - int health; }; struct bq27xxx_device_info { -- cgit From 813e0ae613d6ee1b3e11f1c41f8b9e9df8ef0493 Mon Sep 17 00:00:00 2001 From: Baochen Qiang Date: Tue, 5 Mar 2024 10:13:18 +0800 Subject: bus: mhi: host: Add mhi_power_down_keep_dev() API to support system suspend/hibernation Currently, ath11k fails to resume from system suspend/hibernation on some the x86 host machines with below error message: ``` ath11k_pci 0000:06:00.0: timeout while waiting for restart complete ``` This happens because, ath11k powers down the MHI stack during suspend and that leads to destruction of the struct device associated with the MHI channels. 
And during resume, ath11k calls calling mhi_sync_power_up() to power up the MHI subsystem and that eventually calls the driver framework's device_add() API from mhi_create_devices(). But the PM framework blocks the struct device creation during device_add() and this leads to probe deferral as below: ``` mhi mhi0_IPCR: Driver qcom_mhi_qrtr force probe deferral ``` The reason for deferring device creation during resume is explained in dpm_prepare(): /* * It is unsafe if probing of devices will happen during suspend or * hibernation and system behavior will be unpredictable in this * case. So, let's prohibit device's probing here and defer their * probes instead. The normal behavior will be restored in * dpm_complete(). */ Due to the device probe deferral, qcom_mhi_qrtr_probe() API is not getting called during resume and thus MHI channels are not prepared. So this blocks the QMI messages from being transferred between ath11k and firmware, resulting in a firmware initialization failure. After consulting with Rafael, it was decided to not destroy the struct device for the MHI channels during system suspend/hibernation because the device is bound to appear again during resume. So to achieve this, a new API called mhi_power_down_keep_dev() is introduced for MHI controllers to keep the struct device when required. This API is similar to the existing mhi_power_down() API, except that it keeps the struct device associated with MHI channels instead of destroying them. Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03125-QCAHSPSWPL_V1_V2_SILICONZ_LITE-3.6510.30 Signed-off-by: Baochen Qiang Reviewed-by: Manivannan Sadhasivam Reviewed-by: Jeff Johnson Link: https://lore.kernel.org/r/20240305021320.3367-2-quic_bqiang@quicinc.com [mani: reworded the commit message and subject] Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/host/internal.h | 4 +++- drivers/bus/mhi/host/pm.c | 42 +++++++++++++++++++++++++++++++++++------ include/linux/mhi.h | 18 +++++++++++++++++- 3 files changed, 56 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h index 5fe49311b8eb..aaad40a07f69 100644 --- a/drivers/bus/mhi/host/internal.h +++ b/drivers/bus/mhi/host/internal.h @@ -80,6 +80,7 @@ enum dev_st_transition { DEV_ST_TRANSITION_FP, DEV_ST_TRANSITION_SYS_ERR, DEV_ST_TRANSITION_DISABLE, + DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE, DEV_ST_TRANSITION_MAX, }; @@ -90,7 +91,8 @@ enum dev_st_transition { dev_st_trans(MISSION_MODE, "MISSION MODE") \ dev_st_trans(FP, "FLASH PROGRAMMER") \ dev_st_trans(SYS_ERR, "SYS ERROR") \ - dev_st_trans_end(DISABLE, "DISABLE") + dev_st_trans(DISABLE, "DISABLE") \ + dev_st_trans_end(DISABLE_DESTROY_DEVICE, "DISABLE (DESTROY DEVICE)") extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX]; #define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? 
\ diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c index 8b40d3f01acc..11c0e751f223 100644 --- a/drivers/bus/mhi/host/pm.c +++ b/drivers/bus/mhi/host/pm.c @@ -468,7 +468,8 @@ error_mission_mode: } /* Handle shutdown transitions */ -static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) +static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl, + bool destroy_device) { enum mhi_pm_state cur_state; struct mhi_event *mhi_event; @@ -530,8 +531,16 @@ skip_mhi_reset: dev_dbg(dev, "Waiting for all pending threads to complete\n"); wake_up_all(&mhi_cntrl->state_event); - dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); - device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); + /* + * Only destroy the 'struct device' for channels if indicated by the + * 'destroy_device' flag. Because, during system suspend or hibernation + * state, there is no need to destroy the 'struct device' as the endpoint + * device would still be physically attached to the machine. + */ + if (destroy_device) { + dev_dbg(dev, "Reset all active channels and remove MHI devices\n"); + device_for_each_child(&mhi_cntrl->mhi_dev->dev, NULL, mhi_destroy_device); + } mutex_lock(&mhi_cntrl->pm_mutex); @@ -821,7 +830,10 @@ void mhi_pm_st_worker(struct work_struct *work) mhi_pm_sys_error_transition(mhi_cntrl); break; case DEV_ST_TRANSITION_DISABLE: - mhi_pm_disable_transition(mhi_cntrl); + mhi_pm_disable_transition(mhi_cntrl, false); + break; + case DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE: + mhi_pm_disable_transition(mhi_cntrl, true); break; default: break; @@ -1175,7 +1187,8 @@ error_exit: } EXPORT_SYMBOL_GPL(mhi_async_power_up); -void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) +static void __mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful, + bool destroy_device) { enum mhi_pm_state cur_state, transition_state; struct device *dev = &mhi_cntrl->mhi_dev->dev; @@ -1211,15 +1224,32 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) write_unlock_irq(&mhi_cntrl->pm_lock); mutex_unlock(&mhi_cntrl->pm_mutex); - mhi_queue_state_transition(mhi_cntrl, DEV_ST_TRANSITION_DISABLE); + if (destroy_device) + mhi_queue_state_transition(mhi_cntrl, + DEV_ST_TRANSITION_DISABLE_DESTROY_DEVICE); + else + mhi_queue_state_transition(mhi_cntrl, + DEV_ST_TRANSITION_DISABLE); /* Wait for shutdown to complete */ flush_work(&mhi_cntrl->st_worker); disable_irq(mhi_cntrl->irq[0]); } + +void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful) +{ + __mhi_power_down(mhi_cntrl, graceful, true); +} EXPORT_SYMBOL_GPL(mhi_power_down); +void mhi_power_down_keep_dev(struct mhi_controller *mhi_cntrl, + bool graceful) +{ + __mhi_power_down(mhi_cntrl, graceful, false); +} +EXPORT_SYMBOL_GPL(mhi_power_down_keep_dev); + int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) { int ret = mhi_async_power_up(mhi_cntrl); diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 77b8c0a26674..cde01e133a1b 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -630,12 +630,28 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl); int mhi_sync_power_up(struct mhi_controller *mhi_cntrl); /** - * mhi_power_down - Start MHI power down sequence + * mhi_power_down - Power down the MHI device and also destroy the + * 'struct device' for the channels associated with it. 
+ * See also mhi_power_down_keep_dev() which is a variant + * of this API that keeps the 'struct device' for channels + * (useful during suspend/hibernation). * @mhi_cntrl: MHI controller * @graceful: Link is still accessible, so do a graceful shutdown process */ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful); +/** + * mhi_power_down_keep_dev - Power down the MHI device but keep the 'struct + * device' for the channels associated with it. + * This is a variant of 'mhi_power_down()' and + * useful in scenarios such as suspend/hibernation + * where destroying of the 'struct device' is not + * needed. + * @mhi_cntrl: MHI controller + * @graceful: Link is still accessible, so do a graceful shutdown process + */ +void mhi_power_down_keep_dev(struct mhi_controller *mhi_cntrl, bool graceful); + /** * mhi_unprepare_after_power_down - Free any allocated memory after power down * @mhi_cntrl: MHI controller -- cgit From c9418adfbabadf7530c034a52e54b31705568f95 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 28 Mar 2024 09:41:44 +0100 Subject: block: add a bio_list_merge_init helper This is a simple combination of bio_list_merge + bio_list_init similar to list_splice_init. While it only saves a single line in a callers, it makes the move all bios from one list to another and reinitialize the original pattern a lot more obvious in the callers. Signed-off-by: Christoph Hellwig Reviewed-by: Matthew Sakai Reviewed-by: Johannes Thumshirn Reviewed-by: Damien Le Moal Link: https://lore.kernel.org/r/20240328084147.2954434-2-hch@lst.de Signed-off-by: Jens Axboe --- include/linux/bio.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'include/linux') diff --git a/include/linux/bio.h b/include/linux/bio.h index 875d792bffff..9b8a369f44bc 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -615,6 +615,13 @@ static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2) bl->tail = bl2->tail; } +static inline void bio_list_merge_init(struct bio_list *bl, + struct bio_list *bl2) +{ + bio_list_merge(bl, bl2); + bio_list_init(bl2); +} + static inline void bio_list_merge_head(struct bio_list *bl, struct bio_list *bl2) { -- cgit From 0fce55e5334d380d8a09f80ba9c9b68eeea6971d Mon Sep 17 00:00:00 2001 From: Xin Zeng Date: Wed, 6 Mar 2024 21:58:53 +0800 Subject: crypto: qat - add interface for live migration Extend the driver with a new interface to be used for VF live migration. This allows to create and destroy a qat_mig_dev object that contains a set of methods to allow to save and restore the state of QAT VF. This interface will be used by the qat-vfio-pci module. 
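Since the consumer side is not part of this patch, a rough sketch of how a caller such as qat-vfio-pci might drive the new interface is shown below. It uses only the functions declared in the qat_mig_dev.h hunk further down; the call ordering and error handling are simplified assumptions, not code from the actual vfio variant driver.

```
/* Illustrative only: snapshot a VF's state via the new interface. */
static int example_save_vf_state(struct pci_dev *parent_pf, int vf_id)
{
	struct qat_mig_dev *mdev;
	int ret;

	mdev = qat_vfmig_create(parent_pf, vf_id);
	if (IS_ERR(mdev))
		return PTR_ERR(mdev);

	ret = qat_vfmig_init(mdev);
	if (ret)
		goto out_destroy;

	ret = qat_vfmig_open(mdev);
	if (ret)
		goto out_cleanup;

	ret = qat_vfmig_suspend(mdev);		/* quiesce the VF */
	if (!ret)
		ret = qat_vfmig_save_state(mdev);	/* snapshot the quiesced VF */

	qat_vfmig_close(mdev);
out_cleanup:
	qat_vfmig_cleanup(mdev);
out_destroy:
	qat_vfmig_destroy(mdev);
	return ret;
}
```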
Signed-off-by: Xin Zeng Reviewed-by: Giovanni Cabiddu Signed-off-by: Herbert Xu --- drivers/crypto/intel/qat/qat_common/Makefile | 2 +- .../intel/qat/qat_common/adf_accel_devices.h | 17 +++ .../crypto/intel/qat/qat_common/adf_gen4_vf_mig.h | 10 ++ drivers/crypto/intel/qat/qat_common/qat_mig_dev.c | 130 +++++++++++++++++++++ include/linux/qat/qat_mig_dev.h | 31 +++++ 5 files changed, 189 insertions(+), 1 deletion(-) create mode 100644 drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h create mode 100644 drivers/crypto/intel/qat/qat_common/qat_mig_dev.c create mode 100644 include/linux/qat/qat_mig_dev.h (limited to 'include/linux') diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index ceaa685352ed..9fba31d4ac7f 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -54,6 +54,6 @@ intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \ adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \ - adf_gen2_pfvf.o adf_gen4_pfvf.o + adf_gen2_pfvf.o adf_gen4_pfvf.o qat_mig_dev.o intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 986e63ec702d..b08fea10121e 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -9,6 +9,7 @@ #include #include #include +#include #include "adf_cfg_common.h" #include "adf_rl.h" #include "adf_telemetry.h" @@ -258,6 +259,20 @@ struct adf_dc_ops { void (*build_deflate_ctx)(void *ctx); }; +struct qat_migdev_ops { + int (*init)(struct qat_mig_dev *mdev); + void (*cleanup)(struct qat_mig_dev *mdev); + void (*reset)(struct qat_mig_dev *mdev); + int (*open)(struct qat_mig_dev *mdev); + void (*close)(struct qat_mig_dev *mdev); + int (*suspend)(struct qat_mig_dev *mdev); + int (*resume)(struct qat_mig_dev *mdev); + int (*save_state)(struct qat_mig_dev *mdev); + int (*save_setup)(struct qat_mig_dev *mdev); + int (*load_state)(struct qat_mig_dev *mdev); + int (*load_setup)(struct qat_mig_dev *mdev, int size); +}; + struct adf_dev_err_mask { u32 cppagentcmdpar_mask; u32 parerr_ath_cph_mask; @@ -325,6 +340,7 @@ struct adf_hw_device_data { struct adf_dev_err_mask dev_err_mask; struct adf_rl_hw_data rl_data; struct adf_tl_hw_data tl_data; + struct qat_migdev_ops vfmig_ops; const char *fw_name; const char *fw_mmp_name; u32 fuses; @@ -381,6 +397,7 @@ struct adf_hw_device_data { #define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops) #define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops) #define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops) +#define GET_VFMIG_OPS(accel_dev) (&(accel_dev)->hw_device->vfmig_ops) #define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data #define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h new file mode 100644 index 000000000000..72216d078ee1 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_vf_mig.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef ADF_GEN4_VF_MIG_H_ +#define ADF_GEN4_VF_MIG_H_ + +#include "adf_accel_devices.h" + +void 
adf_gen4_init_vf_mig_ops(struct qat_migdev_ops *vfmig_ops); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c b/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c new file mode 100644 index 000000000000..892c2283a50e --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/qat_mig_dev.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2024 Intel Corporation */ +#include +#include +#include +#include +#include +#include "adf_accel_devices.h" +#include "adf_common_drv.h" + +struct qat_mig_dev *qat_vfmig_create(struct pci_dev *pdev, int vf_id) +{ + struct adf_accel_dev *accel_dev; + struct qat_migdev_ops *ops; + struct qat_mig_dev *mdev; + + accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + if (!accel_dev) + return ERR_PTR(-ENODEV); + + ops = GET_VFMIG_OPS(accel_dev); + if (!ops || !ops->init || !ops->cleanup || !ops->reset || !ops->open || + !ops->close || !ops->suspend || !ops->resume || !ops->save_state || + !ops->load_state || !ops->save_setup || !ops->load_setup) + return ERR_PTR(-EINVAL); + + mdev = kmalloc(sizeof(*mdev), GFP_KERNEL); + if (!mdev) + return ERR_PTR(-ENOMEM); + + mdev->vf_id = vf_id; + mdev->parent_accel_dev = accel_dev; + + return mdev; +} +EXPORT_SYMBOL_GPL(qat_vfmig_create); + +int qat_vfmig_init(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->init(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_init); + +void qat_vfmig_cleanup(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->cleanup(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_cleanup); + +void qat_vfmig_reset(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->reset(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_reset); + +int qat_vfmig_open(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->open(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_open); + +void qat_vfmig_close(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + GET_VFMIG_OPS(accel_dev)->close(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_close); + +int qat_vfmig_suspend(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->suspend(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_suspend); + +int qat_vfmig_resume(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->resume(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_resume); + +int qat_vfmig_save_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->save_state(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_save_state); + +int qat_vfmig_save_setup(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->save_setup(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_save_setup); + +int qat_vfmig_load_state(struct qat_mig_dev *mdev) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->load_state(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_load_state); + +int qat_vfmig_load_setup(struct qat_mig_dev *mdev, int size) +{ + struct adf_accel_dev *accel_dev = mdev->parent_accel_dev; + + return GET_VFMIG_OPS(accel_dev)->load_setup(mdev, size); +} 
+EXPORT_SYMBOL_GPL(qat_vfmig_load_setup); + +void qat_vfmig_destroy(struct qat_mig_dev *mdev) +{ + kfree(mdev); +} +EXPORT_SYMBOL_GPL(qat_vfmig_destroy); diff --git a/include/linux/qat/qat_mig_dev.h b/include/linux/qat/qat_mig_dev.h new file mode 100644 index 000000000000..dbbb6a063dd2 --- /dev/null +++ b/include/linux/qat/qat_mig_dev.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2024 Intel Corporation */ +#ifndef QAT_MIG_DEV_H_ +#define QAT_MIG_DEV_H_ + +struct pci_dev; + +struct qat_mig_dev { + void *parent_accel_dev; + u8 *state; + u32 setup_size; + u32 remote_setup_size; + u32 state_size; + s32 vf_id; +}; + +struct qat_mig_dev *qat_vfmig_create(struct pci_dev *pdev, int vf_id); +int qat_vfmig_init(struct qat_mig_dev *mdev); +void qat_vfmig_cleanup(struct qat_mig_dev *mdev); +void qat_vfmig_reset(struct qat_mig_dev *mdev); +int qat_vfmig_open(struct qat_mig_dev *mdev); +void qat_vfmig_close(struct qat_mig_dev *mdev); +int qat_vfmig_suspend(struct qat_mig_dev *mdev); +int qat_vfmig_resume(struct qat_mig_dev *mdev); +int qat_vfmig_save_state(struct qat_mig_dev *mdev); +int qat_vfmig_save_setup(struct qat_mig_dev *mdev); +int qat_vfmig_load_state(struct qat_mig_dev *mdev); +int qat_vfmig_load_setup(struct qat_mig_dev *mdev, int size); +void qat_vfmig_destroy(struct qat_mig_dev *mdev); + +#endif /*QAT_MIG_DEV_H_*/ -- cgit From 5bc63d3a6f466add504f283d9f743f20ca9ec334 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 29 Mar 2024 10:57:08 -0700 Subject: netlink: create a new header for internal genetlink symbols There are things in linux/genetlink.h which are only used under net/netlink/. Move them to a new local header. A new header with just 2 externs isn't great, but alternative would be to include af_netlink.h in genetlink.c which feels even worse. Link: https://lore.kernel.org/r/20240329175710.291749-2-kuba@kernel.org Signed-off-by: Jakub Kicinski --- include/linux/genetlink.h | 5 ----- net/netlink/af_netlink.c | 2 +- net/netlink/genetlink.c | 2 ++ net/netlink/genetlink.h | 11 +++++++++++ 4 files changed, 14 insertions(+), 6 deletions(-) create mode 100644 net/netlink/genetlink.h (limited to 'include/linux') diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h index c285968e437a..9dbd7ba9b858 100644 --- a/include/linux/genetlink.h +++ b/include/linux/genetlink.h @@ -4,15 +4,10 @@ #include - /* All generic netlink requests are serialized by a global lock. 
*/ extern void genl_lock(void); extern void genl_unlock(void); -/* for synchronisation between af_netlink and genetlink */ -extern atomic_t genl_sk_destructing_cnt; -extern wait_queue_head_t genl_sk_destructing_waitq; - #define MODULE_ALIAS_GENL_FAMILY(family)\ MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family) diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 7554803218a2..dc8c3c01d51b 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -59,7 +59,6 @@ #include #include #include -#include #include #include #include @@ -73,6 +72,7 @@ #include #include "af_netlink.h" +#include "genetlink.h" struct listeners { struct rcu_head rcu; diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 3b7666944b11..feb54c63a116 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -22,6 +22,8 @@ #include #include +#include "genetlink.h" + static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */ static DECLARE_RWSEM(cb_lock); diff --git a/net/netlink/genetlink.h b/net/netlink/genetlink.h new file mode 100644 index 000000000000..89bd9d2631c3 --- /dev/null +++ b/net/netlink/genetlink.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __NET_GENETLINK_H +#define __NET_GENETLINK_H + +#include + +/* for synchronisation between af_netlink and genetlink */ +extern atomic_t genl_sk_destructing_cnt; +extern wait_queue_head_t genl_sk_destructing_waitq; + +#endif /* __LINUX_GENERIC_NETLINK_H */ -- cgit From cd7209628cdb2a7edd7656c126d2455e7102e949 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 29 Mar 2024 10:57:10 -0700 Subject: genetlink: remove linux/genetlink.h genetlink.h is a shell of what used to be a combined uAPI and kernel header over a decade ago. It has fewer than 10 lines of code. Merge it into net/genetlink.h. In some ways it'd be better to keep the combined header under linux/ but it would make looking through git history harder. Acked-by: Sven Eckelmann Link: https://lore.kernel.org/r/20240329175710.291749-4-kuba@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/wireguard/main.c | 2 +- include/linux/genetlink.h | 14 -------------- include/linux/genl_magic_struct.h | 2 +- include/net/genetlink.h | 10 +++++++++- net/batman-adv/main.c | 2 +- net/batman-adv/netlink.c | 1 - net/openvswitch/datapath.c | 1 - 7 files changed, 12 insertions(+), 20 deletions(-) delete mode 100644 include/linux/genetlink.h (limited to 'include/linux') diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c index ee4da9ab8013..a00671b58701 100644 --- a/drivers/net/wireguard/main.c +++ b/drivers/net/wireguard/main.c @@ -14,7 +14,7 @@ #include #include -#include +#include #include static int __init wg_mod_init(void) diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h deleted file mode 100644 index 9dbd7ba9b858..000000000000 --- a/include/linux/genetlink.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_GENERIC_NETLINK_H -#define __LINUX_GENERIC_NETLINK_H - -#include - -/* All generic netlink requests are serialized by a global lock. 
*/ -extern void genl_lock(void); -extern void genl_unlock(void); - -#define MODULE_ALIAS_GENL_FAMILY(family)\ - MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family) - -#endif /* __LINUX_GENERIC_NETLINK_H */ diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h index a419d93789ff..621b87a87d74 100644 --- a/include/linux/genl_magic_struct.h +++ b/include/linux/genl_magic_struct.h @@ -15,8 +15,8 @@ #endif #include -#include #include +#include extern int CONCATENATE(GENL_MAGIC_FAMILY, _genl_register)(void); extern void CONCATENATE(GENL_MAGIC_FAMILY, _genl_unregister)(void); diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 9ece6e5a3ea8..9ab49bfeae78 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -2,12 +2,20 @@ #ifndef __NET_GENERIC_NETLINK_H #define __NET_GENERIC_NETLINK_H -#include +#include #include #include +#include #define GENLMSG_DEFAULT_SIZE (NLMSG_DEFAULT_SIZE - GENL_HDRLEN) +/* Non-parallel generic netlink requests are serialized by a global lock. */ +void genl_lock(void); +void genl_unlock(void); + +#define MODULE_ALIAS_GENL_FAMILY(family) \ + MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family) + /* Binding to multicast group requires %CAP_NET_ADMIN */ #define GENL_MCAST_CAP_NET_ADMIN BIT(0) /* Binding to multicast group requires %CAP_SYS_ADMIN */ diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 75119f1ffccc..8e0f44c71696 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -38,6 +37,7 @@ #include #include #include +#include #include #include #include diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index 0954757f0b8b..9362cd9d6f3d 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 11c69415c605..99d72543abd3 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -15,7 +15,6 @@ #include #include #include -#include #include #include #include -- cgit From 94cd1011002ee9aabfd5b7d96b366384b6de3707 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Fri, 29 Mar 2024 18:24:31 +0100 Subject: mmc: sdio: store owner from modules with sdio_register_driver() Modules registering driver with sdio_register_driver() might forget to set .owner field. The field is used by some of other kernel parts for reference counting (try_module_get()), so it is expected that drivers will set it. Solve the problem by moving this task away from the drivers to the core code, just like we did for platform_driver in commit 9447057eaff8 ("platform_device: use a macro instead of platform_driver_register"). Since many drivers forget to set the .owner, this effectively will fix them. Examples of fixed drivers are: ath6kl, b43, btsdio.c, ks7010, libertas, MediaTek WiFi drivers, Realtek WiFi drivers, rsi, siano, wilc1000, wl1251 and more. 
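From a driver author's point of view the change is invisible at the call site; a hypothetical function driver (all names and IDs below are made up) registers as before and simply no longer needs to set .owner, since the wrapper passes THIS_MODULE.

```
static const struct sdio_device_id foo_sdio_ids[] = {
	{ SDIO_DEVICE(0x1234, 0x5678) },	/* made-up IDs */
	{ }
};

static int foo_sdio_probe(struct sdio_func *func,
			  const struct sdio_device_id *id)
{
	return 0;
}

static void foo_sdio_remove(struct sdio_func *func)
{
}

static struct sdio_driver foo_sdio_driver = {
	.name		= "foo_sdio",
	.id_table	= foo_sdio_ids,
	.probe		= foo_sdio_probe,
	.remove		= foo_sdio_remove,
	/* no .owner needed any more */
};

static int __init foo_sdio_init(void)
{
	/* expands to __sdio_register_driver(&foo_sdio_driver, THIS_MODULE) */
	return sdio_register_driver(&foo_sdio_driver);
}
module_init(foo_sdio_init);
```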
Signed-off-by: Krzysztof Kozlowski Reviewed-by: Francesco Dolcini Link: https://lore.kernel.org/r/20240329-module-owner-sdio-v1-1-e4010b11ccaa@linaro.org Signed-off-by: Ulf Hansson --- drivers/mmc/core/sdio_bus.c | 9 ++++++--- include/linux/mmc/sdio_func.h | 5 ++++- 2 files changed, 10 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 71d885fbc228..c5fdfe2325f8 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -265,16 +265,19 @@ void sdio_unregister_bus(void) } /** - * sdio_register_driver - register a function driver + * __sdio_register_driver - register a function driver * @drv: SDIO function driver + * @owner: owning module/driver */ -int sdio_register_driver(struct sdio_driver *drv) +int __sdio_register_driver(struct sdio_driver *drv, struct module *owner) { drv->drv.name = drv->name; drv->drv.bus = &sdio_bus_type; + drv->drv.owner = owner; + return driver_register(&drv->drv); } -EXPORT_SYMBOL_GPL(sdio_register_driver); +EXPORT_SYMBOL_GPL(__sdio_register_driver); /** * sdio_unregister_driver - unregister a function driver diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h index 478855b8e406..fed1f5f4a8d3 100644 --- a/include/linux/mmc/sdio_func.h +++ b/include/linux/mmc/sdio_func.h @@ -106,7 +106,10 @@ struct sdio_driver { .class = (dev_class), \ .vendor = SDIO_ANY_ID, .device = SDIO_ANY_ID -extern int sdio_register_driver(struct sdio_driver *); +/* use a macro to avoid include chaining to get THIS_MODULE */ +#define sdio_register_driver(drv) \ + __sdio_register_driver(drv, THIS_MODULE) +extern int __sdio_register_driver(struct sdio_driver *, struct module *); extern void sdio_unregister_driver(struct sdio_driver *); /** -- cgit From ce09cbdd988887662546a1175bcfdfc6c8fdd150 Mon Sep 17 00:00:00 2001 From: Jose Fernandez Date: Mon, 1 Apr 2024 21:40:10 -0600 Subject: bpf: Improve program stats run-time calculation This patch improves the run-time calculation for program stats by capturing the duration as soon as possible after the program returns. Previously, the duration included u64_stats_t operations. While the instrumentation overhead is part of the total time spent when stats are enabled, distinguishing between the program's native execution time and the time spent due to instrumentation is crucial for accurate performance analysis. By making this change, the patch facilitates more precise optimization of BPF programs, enabling users to understand their performance in environments without stats enabled. I used a virtualized environment to measure the run-time over one minute for a basic raw_tracepoint/sys_enter program, which just increments a local counter. Although the virtualization introduced some performance degradation that could affect the results, I observed approximately a 16% decrease in average run-time reported by stats with this change (310 -> 260 nsec). 
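For reference, a rough userspace sketch of how such averages can be derived, assuming libbpf, a prog_fd obtained elsewhere, and run-time stats already enabled (e.g. via bpf_enable_stats(BPF_STATS_RUN_TIME)); the reported average is simply run_time_ns / run_cnt from bpf_prog_info:

    #include <stdio.h>
    #include <bpf/bpf.h>

    /* prog_fd: fd of an already loaded program, obtained elsewhere */
    static void print_avg_run_time(int prog_fd)
    {
            struct bpf_prog_info info = {};
            __u32 len = sizeof(info);

            if (bpf_obj_get_info_by_fd(prog_fd, &info, &len))
                    return;

            if (info.run_cnt)
                    printf("avg run time: %llu nsec over %llu runs\n",
                           (unsigned long long)(info.run_time_ns / info.run_cnt),
                           (unsigned long long)info.run_cnt);
    }
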
Signed-off-by: Jose Fernandez Signed-off-by: Daniel Borkmann Acked-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20240402034010.25060-1-josef@netflix.com --- include/linux/filter.h | 6 ++++-- kernel/bpf/trampoline.c | 3 ++- 2 files changed, 6 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/filter.h b/include/linux/filter.h index 44934b968b57..531b36090122 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -654,14 +654,16 @@ static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog, cant_migrate(); if (static_branch_unlikely(&bpf_stats_enabled_key)) { struct bpf_prog_stats *stats; - u64 start = sched_clock(); + u64 duration, start = sched_clock(); unsigned long flags; ret = dfunc(ctx, prog->insnsi, prog->bpf_func); + + duration = sched_clock() - start; stats = this_cpu_ptr(prog->stats); flags = u64_stats_update_begin_irqsave(&stats->syncp); u64_stats_inc(&stats->cnt); - u64_stats_add(&stats->nsecs, sched_clock() - start); + u64_stats_add(&stats->nsecs, duration); u64_stats_update_end_irqrestore(&stats->syncp, flags); } else { ret = dfunc(ctx, prog->insnsi, prog->bpf_func); diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index cc50607f8d8c..26ae703d3c3b 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -885,12 +885,13 @@ static void notrace update_prog_stats(struct bpf_prog *prog, * Hence check that 'start' is valid. */ start > NO_START_TIME) { + u64 duration = sched_clock() - start; unsigned long flags; stats = this_cpu_ptr(prog->stats); flags = u64_stats_update_begin_irqsave(&stats->syncp); u64_stats_inc(&stats->cnt); - u64_stats_add(&stats->nsecs, sched_clock() - start); + u64_stats_add(&stats->nsecs, duration); u64_stats_update_end_irqrestore(&stats->syncp, flags); } } -- cgit From 416bdb89605d960405178b9bf04df512d1ace1a3 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 22 Dec 2023 21:05:11 -0800 Subject: counter: linux/counter.h: fix Excess kernel-doc description warning Remove the @priv: line to prevent the kernel-doc warning: include/linux/counter.h:400: warning: Excess struct member 'priv' description in 'counter_device' Signed-off-by: Randy Dunlap Fixes: f2ee4759fb70 ("counter: remove old and now unused registration API") Link: https://lore.kernel.org/r/20231223050511.13849-1-rdunlap@infradead.org Signed-off-by: William Breathitt Gray --- include/linux/counter.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/counter.h b/include/linux/counter.h index 702e9108bbb4..b767b5c821f5 100644 --- a/include/linux/counter.h +++ b/include/linux/counter.h @@ -359,7 +359,6 @@ struct counter_ops { * @num_counts: number of Counts specified in @counts * @ext: optional array of Counter device extensions * @num_ext: number of Counter device extensions specified in @ext - * @priv: optional private data supplied by driver * @dev: internal device structure * @chrdev: internal character device structure * @events_list: list of current watching Counter events -- cgit From 2f48aba356a004d854bc6d77fbc032b2fc666911 Mon Sep 17 00:00:00 2001 From: Fabrice Gasnier Date: Wed, 6 Mar 2024 16:36:31 +0100 Subject: counter: Introduce the COUNTER_COMP_FREQUENCY() macro Now that there are two users for the "frequency" extension, introduce a new COUNTER_COMP_FREQUENCY() macro. This extension is intended to be a read-only signal attribute. 
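A minimal usage sketch (driver-side names are hypothetical; the callback prototype assumed here is the usual signal u64 read one, and only the macro itself comes from this patch):

    static int my_frequency_read(struct counter_device *counter,
                                 struct counter_signal *signal, u64 *val)
    {
            *val = my_hw_measured_hz;       /* hypothetical value, in Hz */
            return 0;
    }

    static struct counter_comp my_signal_ext[] = {
            COUNTER_COMP_FREQUENCY(my_frequency_read),
    };

    /* then, in the counter_signal definition:
     *      .ext = my_signal_ext, .num_ext = ARRAY_SIZE(my_signal_ext)
     */
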
Suggested-by: William Breathitt Gray Signed-off-by: Fabrice Gasnier Link: https://lore.kernel.org/r/20240306153631.4051115-1-fabrice.gasnier@foss.st.com Signed-off-by: William Breathitt Gray --- include/linux/counter.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/counter.h b/include/linux/counter.h index b767b5c821f5..cd35d8574ee2 100644 --- a/include/linux/counter.h +++ b/include/linux/counter.h @@ -601,6 +601,9 @@ struct counter_array { #define COUNTER_COMP_FLOOR(_read, _write) \ COUNTER_COMP_COUNT_U64("floor", _read, _write) +#define COUNTER_COMP_FREQUENCY(_read) \ + COUNTER_COMP_SIGNAL_U64("frequency", _read, NULL) + #define COUNTER_COMP_POLARITY(_read, _write, _available) \ { \ .type = COUNTER_COMP_SIGNAL_POLARITY, \ -- cgit From 1aed15275b7ce17b5ebdfc112a76e0d7165ed46b Mon Sep 17 00:00:00 2001 From: Fabrice Gasnier Date: Thu, 7 Mar 2024 14:33:06 +0100 Subject: counter: stm32-timer-cnt: add support for capture events Add support for capture events. Captured counter value for each channel can be retrieved through CCRx register. STM32 timers can have up to 4 capture channels (on input channel 1 to channel 4), hence need to check the number of channels before reading the capture data. The capture configuration is hard-coded to capture signals on both edges (non-inverted). Interrupts are used to report events independently for each channel. Reviewed-by: William Breathitt Gray Acked-by: Lee Jones Signed-off-by: Fabrice Gasnier Link: https://lore.kernel.org/r/20240307133306.383045-11-fabrice.gasnier@foss.st.com Signed-off-by: William Breathitt Gray --- drivers/counter/stm32-timer-cnt.c | 134 +++++++++++++++++++++++++++++++++++++- include/linux/mfd/stm32-timers.h | 13 ++++ 2 files changed, 144 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c index 9fcafec682b7..0664ef969f79 100644 --- a/drivers/counter/stm32-timer-cnt.c +++ b/drivers/counter/stm32-timer-cnt.c @@ -262,6 +262,40 @@ static int stm32_count_prescaler_write(struct counter_device *counter, return regmap_write(priv->regmap, TIM_PSC, psc); } +static int stm32_count_cap_read(struct counter_device *counter, + struct counter_count *count, + size_t ch, u64 *cap) +{ + struct stm32_timer_cnt *const priv = counter_priv(counter); + u32 ccrx; + + if (ch >= priv->nchannels) + return -EOPNOTSUPP; + + switch (ch) { + case 0: + regmap_read(priv->regmap, TIM_CCR1, &ccrx); + break; + case 1: + regmap_read(priv->regmap, TIM_CCR2, &ccrx); + break; + case 2: + regmap_read(priv->regmap, TIM_CCR3, &ccrx); + break; + case 3: + regmap_read(priv->regmap, TIM_CCR4, &ccrx); + break; + default: + return -EINVAL; + } + + dev_dbg(counter->parent, "CCR%zu: 0x%08x\n", ch + 1, ccrx); + + *cap = ccrx; + + return 0; +} + static int stm32_count_nb_ovf_read(struct counter_device *counter, struct counter_count *count, u64 *val) { @@ -288,6 +322,8 @@ static int stm32_count_nb_ovf_write(struct counter_device *counter, return 0; } +static DEFINE_COUNTER_ARRAY_CAPTURE(stm32_count_cap_array, 4); + static struct counter_comp stm32_count_ext[] = { COUNTER_COMP_DIRECTION(stm32_count_direction_read), COUNTER_COMP_ENABLE(stm32_count_enable_read, stm32_count_enable_write), @@ -295,6 +331,7 @@ static struct counter_comp stm32_count_ext[] = { stm32_count_ceiling_write), COUNTER_COMP_COUNT_U64("prescaler", stm32_count_prescaler_read, stm32_count_prescaler_write), + COUNTER_COMP_ARRAY_CAPTURE(stm32_count_cap_read, NULL, 
stm32_count_cap_array), COUNTER_COMP_COUNT_U64("num_overflows", stm32_count_nb_ovf_read, stm32_count_nb_ovf_write), }; @@ -353,11 +390,68 @@ static int stm32_action_read(struct counter_device *counter, } } +struct stm32_count_cc_regs { + u32 ccmr_reg; + u32 ccmr_mask; + u32 ccmr_bits; + u32 ccer_bits; +}; + +static const struct stm32_count_cc_regs stm32_cc[] = { + { TIM_CCMR1, TIM_CCMR_CC1S, TIM_CCMR_CC1S_TI1, + TIM_CCER_CC1E | TIM_CCER_CC1P | TIM_CCER_CC1NP }, + { TIM_CCMR1, TIM_CCMR_CC2S, TIM_CCMR_CC2S_TI2, + TIM_CCER_CC2E | TIM_CCER_CC2P | TIM_CCER_CC2NP }, + { TIM_CCMR2, TIM_CCMR_CC3S, TIM_CCMR_CC3S_TI3, + TIM_CCER_CC3E | TIM_CCER_CC3P | TIM_CCER_CC3NP }, + { TIM_CCMR2, TIM_CCMR_CC4S, TIM_CCMR_CC4S_TI4, + TIM_CCER_CC4E | TIM_CCER_CC4P | TIM_CCER_CC4NP }, +}; + +static int stm32_count_capture_configure(struct counter_device *counter, unsigned int ch, + bool enable) +{ + struct stm32_timer_cnt *const priv = counter_priv(counter); + const struct stm32_count_cc_regs *cc; + u32 ccmr, ccer; + + if (ch >= ARRAY_SIZE(stm32_cc) || ch >= priv->nchannels) { + dev_err(counter->parent, "invalid ch: %d\n", ch); + return -EINVAL; + } + + cc = &stm32_cc[ch]; + + /* + * configure channel in input capture mode, map channel 1 on TI1, channel2 on TI2... + * Select both edges / non-inverted to trigger a capture. + */ + if (enable) { + /* first clear possibly latched capture flag upon enabling */ + if (!regmap_test_bits(priv->regmap, TIM_CCER, cc->ccer_bits)) + regmap_write(priv->regmap, TIM_SR, ~TIM_SR_CC_IF(ch)); + regmap_update_bits(priv->regmap, cc->ccmr_reg, cc->ccmr_mask, + cc->ccmr_bits); + regmap_set_bits(priv->regmap, TIM_CCER, cc->ccer_bits); + } else { + regmap_clear_bits(priv->regmap, TIM_CCER, cc->ccer_bits); + regmap_clear_bits(priv->regmap, cc->ccmr_reg, cc->ccmr_mask); + } + + regmap_read(priv->regmap, cc->ccmr_reg, &ccmr); + regmap_read(priv->regmap, TIM_CCER, &ccer); + dev_dbg(counter->parent, "%s(%s) ch%d 0x%08x 0x%08x\n", __func__, enable ? 
"ena" : "dis", + ch, ccmr, ccer); + + return 0; +} + static int stm32_count_events_configure(struct counter_device *counter) { struct stm32_timer_cnt *const priv = counter_priv(counter); struct counter_event_node *event_node; u32 dier = 0; + int i, ret; list_for_each_entry(event_node, &counter->events_list, l) { switch (event_node->event) { @@ -367,6 +461,12 @@ static int stm32_count_events_configure(struct counter_device *counter) regmap_write(priv->regmap, TIM_SR, (u32)~TIM_SR_UIF); dier |= TIM_DIER_UIE; break; + case COUNTER_EVENT_CAPTURE: + ret = stm32_count_capture_configure(counter, event_node->channel, true); + if (ret) + return ret; + dier |= TIM_DIER_CC_IE(event_node->channel); + break; default: /* should never reach this path */ return -EINVAL; @@ -376,6 +476,15 @@ static int stm32_count_events_configure(struct counter_device *counter) /* Enable / disable all events at once, from events_list, so write all DIER bits */ regmap_write(priv->regmap, TIM_DIER, dier); + /* check for disabled capture events */ + for (i = 0 ; i < priv->nchannels; i++) { + if (!(dier & TIM_DIER_CC_IE(i))) { + ret = stm32_count_capture_configure(counter, i, false); + if (ret) + return ret; + } + } + return 0; } @@ -389,6 +498,12 @@ static int stm32_count_watch_validate(struct counter_device *counter, return -EOPNOTSUPP; switch (watch->event) { + case COUNTER_EVENT_CAPTURE: + if (watch->channel >= priv->nchannels) { + dev_err(counter->parent, "Invalid channel %d\n", watch->channel); + return -EINVAL; + } + return 0; case COUNTER_EVENT_OVERFLOW_UNDERFLOW: return 0; default: @@ -499,6 +614,7 @@ static irqreturn_t stm32_timer_cnt_isr(int irq, void *ptr) struct stm32_timer_cnt *const priv = counter_priv(counter); u32 clr = GENMASK(31, 0); /* SR flags can be cleared by writing 0 (wr 1 has no effect) */ u32 sr, dier; + int i; regmap_read(priv->regmap, TIM_SR, &sr); regmap_read(priv->regmap, TIM_DIER, &dier); @@ -506,7 +622,7 @@ static irqreturn_t stm32_timer_cnt_isr(int irq, void *ptr) * Some status bits in SR don't match with the enable bits in DIER. Only take care of * the possibly enabled bits in DIER (that matches in between SR and DIER). */ - dier &= TIM_DIER_UIE; + dier &= (TIM_DIER_UIE | TIM_DIER_CC1IE | TIM_DIER_CC2IE | TIM_DIER_CC3IE | TIM_DIER_CC4IE); sr &= dier; if (sr & TIM_SR_UIF) { @@ -519,6 +635,15 @@ static irqreturn_t stm32_timer_cnt_isr(int irq, void *ptr) clr &= ~TIM_SR_UIF; } + /* Check capture events */ + for (i = 0 ; i < priv->nchannels; i++) { + if (sr & TIM_SR_CC_IF(i)) { + counter_push_event(counter, COUNTER_EVENT_CAPTURE, i); + clr &= ~TIM_SR_CC_IF(i); + dev_dbg(counter->parent, "COUNTER_EVENT_CAPTURE, %d\n", i); + } + } + regmap_write(priv->regmap, TIM_SR, clr); return IRQ_HANDLED; @@ -633,8 +758,11 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev) } } else { for (i = 0; i < priv->nr_irqs; i++) { - /* Only take care of update IRQ for overflow events */ - if (i != STM32_TIMERS_IRQ_UP) + /* + * Only take care of update IRQ for overflow events, and cc for + * capture events. 
+ */ + if (i != STM32_TIMERS_IRQ_UP && i != STM32_TIMERS_IRQ_CC) continue; ret = devm_request_irq(&pdev->dev, ddata->irq[i], stm32_timer_cnt_isr, diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h index ca35af30745f..9eb17481b07f 100644 --- a/include/linux/mfd/stm32-timers.h +++ b/include/linux/mfd/stm32-timers.h @@ -41,6 +41,11 @@ #define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */ #define TIM_SMCR_TS (BIT(4) | BIT(5) | BIT(6)) /* Trigger selection */ #define TIM_DIER_UIE BIT(0) /* Update interrupt */ +#define TIM_DIER_CC1IE BIT(1) /* CC1 Interrupt Enable */ +#define TIM_DIER_CC2IE BIT(2) /* CC2 Interrupt Enable */ +#define TIM_DIER_CC3IE BIT(3) /* CC3 Interrupt Enable */ +#define TIM_DIER_CC4IE BIT(4) /* CC4 Interrupt Enable */ +#define TIM_DIER_CC_IE(x) BIT((x) + 1) /* CC1, CC2, CC3, CC4 interrupt enable */ #define TIM_DIER_UDE BIT(8) /* Update DMA request Enable */ #define TIM_DIER_CC1DE BIT(9) /* CC1 DMA request Enable */ #define TIM_DIER_CC2DE BIT(10) /* CC2 DMA request Enable */ @@ -49,6 +54,7 @@ #define TIM_DIER_COMDE BIT(13) /* COM DMA request Enable */ #define TIM_DIER_TDE BIT(14) /* Trigger DMA request Enable */ #define TIM_SR_UIF BIT(0) /* Update interrupt flag */ +#define TIM_SR_CC_IF(x) BIT((x) + 1) /* CC1, CC2, CC3, CC4 interrupt flag */ #define TIM_EGR_UG BIT(0) /* Update Generation */ #define TIM_CCMR_PE BIT(3) /* Channel Preload Enable */ #define TIM_CCMR_M1 (BIT(6) | BIT(5)) /* Channel PWM Mode 1 */ @@ -60,16 +66,23 @@ #define TIM_CCMR_CC1S_TI2 BIT(1) /* IC1/IC3 selects TI2/TI4 */ #define TIM_CCMR_CC2S_TI2 BIT(8) /* IC2/IC4 selects TI2/TI4 */ #define TIM_CCMR_CC2S_TI1 BIT(9) /* IC2/IC4 selects TI1/TI3 */ +#define TIM_CCMR_CC3S (BIT(0) | BIT(1)) /* Capture/compare 3 sel */ +#define TIM_CCMR_CC4S (BIT(8) | BIT(9)) /* Capture/compare 4 sel */ +#define TIM_CCMR_CC3S_TI3 BIT(0) /* IC3 selects TI3 */ +#define TIM_CCMR_CC4S_TI4 BIT(8) /* IC4 selects TI4 */ #define TIM_CCER_CC1E BIT(0) /* Capt/Comp 1 out Ena */ #define TIM_CCER_CC1P BIT(1) /* Capt/Comp 1 Polarity */ #define TIM_CCER_CC1NE BIT(2) /* Capt/Comp 1N out Ena */ #define TIM_CCER_CC1NP BIT(3) /* Capt/Comp 1N Polarity */ #define TIM_CCER_CC2E BIT(4) /* Capt/Comp 2 out Ena */ #define TIM_CCER_CC2P BIT(5) /* Capt/Comp 2 Polarity */ +#define TIM_CCER_CC2NP BIT(7) /* Capt/Comp 2N Polarity */ #define TIM_CCER_CC3E BIT(8) /* Capt/Comp 3 out Ena */ #define TIM_CCER_CC3P BIT(9) /* Capt/Comp 3 Polarity */ +#define TIM_CCER_CC3NP BIT(11) /* Capt/Comp 3N Polarity */ #define TIM_CCER_CC4E BIT(12) /* Capt/Comp 4 out Ena */ #define TIM_CCER_CC4P BIT(13) /* Capt/Comp 4 Polarity */ +#define TIM_CCER_CC4NP BIT(15) /* Capt/Comp 4N Polarity */ #define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12)) #define TIM_BDTR_BKE(x) BIT(12 + (x) * 12) /* Break input enable */ #define TIM_BDTR_BKP(x) BIT(13 + (x) * 12) /* Break input polarity */ -- cgit From 8db2509faa331865903a81a92f15c449e821b1d7 Mon Sep 17 00:00:00 2001 From: Jonathan Neuschäfer Date: Fri, 29 Mar 2024 17:26:27 +0100 Subject: rhashtable: Improve grammar MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change "a" to "an" according to the usual rules, fix an "if" that was mistyped as "in", improve grammar in "considerable slow" -> "considerably slower". 
Signed-off-by: Jonathan Neuschäfer Reviewed-by: Randy Dunlap Link: https://lore.kernel.org/r/20240329-misc-rhashtable-v1-1-5862383ff798@gmx.net Signed-off-by: Jakub Kicinski --- include/linux/rhashtable.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 5b5357c0bd8c..8463a128e2f4 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -633,7 +633,7 @@ restart: * @params: hash table parameters * * Computes the hash value for the key and traverses the bucket chain looking - * for a entry with an identical key. The first matching entry is returned. + * for an entry with an identical key. The first matching entry is returned. * * This must only be called under the RCU read lock. * @@ -655,7 +655,7 @@ static inline void *rhashtable_lookup( * @params: hash table parameters * * Computes the hash value for the key and traverses the bucket chain looking - * for a entry with an identical key. The first matching entry is returned. + * for an entry with an identical key. The first matching entry is returned. * * Only use this function when you have other mechanisms guaranteeing * that the object won't go away after the RCU read lock is released. @@ -682,7 +682,7 @@ static inline void *rhashtable_lookup_fast( * @params: hash table parameters * * Computes the hash value for the key and traverses the bucket chain looking - * for a entry with an identical key. All matching entries are returned + * for an entry with an identical key. All matching entries are returned * in a list. * * This must only be called under the RCU read lock. @@ -699,7 +699,7 @@ static inline struct rhlist_head *rhltable_lookup( } /* Internal function, please use rhashtable_insert_fast() instead. This - * function returns the existing element already in hashes in there is a clash, + * function returns the existing element already in hashes if there is a clash, * otherwise it returns an error via ERR_PTR(). */ static inline void *__rhashtable_insert_fast( @@ -1130,7 +1130,7 @@ static inline int rhashtable_remove_fast( * * Since the hash chain is single linked, the removal operation needs to * walk the bucket chain upon removal. The removal operation is thus - * considerable slow if the hash table is not correctly sized. + * considerably slower if the hash table is not correctly sized. * * Will automatically shrink the table if permitted when residency drops * below 30% -- cgit From 4a96a4e807c390a9d91b450ebe04eeb2e0ecc076 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Fri, 29 Mar 2024 17:55:06 +0100 Subject: page_pool: check for PP direct cache locality later Since we have pool->p.napi (Jakub) and pool->cpuid (Lorenzo) to check whether it's safe to use direct recycling, we can use both globally for each page instead of relying solely on @allow_direct argument. Let's assume that @allow_direct means "I'm sure it's local, don't waste time rechecking this" and when it's false, try the mentioned params to still recycle the page directly. If neither is true, we'll lose some CPU cycles, but then it surely won't be hotpath. On the other hand, paths where it's possible to use direct cache, but not possible to safely set @allow_direct, will benefit from this move. The whole propagation of @napi_safe through a dozen of skb freeing functions can now go away, which saves us some stack space. 
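From a driver's point of view the net effect is roughly the following (hypothetical helper; page_pool_put_full_page() is the existing API): passing allow_direct == false is no longer a guaranteed slow path, because the core now rechecks pool->cpuid and pool->p.napi on its own:

    /* hypothetical Rx-completion helper running in softirq context */
    static void my_drv_recycle(struct page_pool *pool, struct page *page)
    {
            /*
             * false means "not sure it's local"; the pool checks
             * pool->cpuid and pool->p.napi and still recycles directly
             * into the per-CPU cache when the context really is local.
             */
            page_pool_put_full_page(pool, page, false);
    }
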
Signed-off-by: Alexander Lobakin Link: https://lore.kernel.org/r/20240329165507.3240110-2-aleksander.lobakin@intel.com Signed-off-by: Jakub Kicinski --- include/linux/skbuff.h | 12 ++++----- net/core/page_pool.c | 31 +++++++++++++++++++--- net/core/skbuff.c | 70 ++++++++++++++++---------------------------------- net/ipv4/esp4.c | 2 +- net/ipv6/esp6.c | 2 +- 5 files changed, 58 insertions(+), 59 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index b7f1ecdaec38..03ea36a82cdd 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3510,25 +3510,25 @@ int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb, unsigned int headroom); int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb, struct bpf_prog *prog); -bool napi_pp_put_page(struct page *page, bool napi_safe); +bool napi_pp_put_page(struct page *page); static inline void -skb_page_unref(const struct sk_buff *skb, struct page *page, bool napi_safe) +skb_page_unref(const struct sk_buff *skb, struct page *page) { #ifdef CONFIG_PAGE_POOL - if (skb->pp_recycle && napi_pp_put_page(page, napi_safe)) + if (skb->pp_recycle && napi_pp_put_page(page)) return; #endif put_page(page); } static inline void -napi_frag_unref(skb_frag_t *frag, bool recycle, bool napi_safe) +napi_frag_unref(skb_frag_t *frag, bool recycle) { struct page *page = skb_frag_page(frag); #ifdef CONFIG_PAGE_POOL - if (recycle && napi_pp_put_page(page, napi_safe)) + if (recycle && napi_pp_put_page(page)) return; #endif put_page(page); @@ -3544,7 +3544,7 @@ napi_frag_unref(skb_frag_t *frag, bool recycle, bool napi_safe) */ static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) { - napi_frag_unref(frag, recycle, false); + napi_frag_unref(frag, recycle); } /** diff --git a/net/core/page_pool.c b/net/core/page_pool.c index dd364d738c00..9d56257e444b 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -690,8 +690,7 @@ __page_pool_put_page(struct page_pool *pool, struct page *page, page_pool_dma_sync_for_device(pool, page, dma_sync_size); - if (allow_direct && in_softirq() && - page_pool_recycle_in_cache(page, pool)) + if (allow_direct && page_pool_recycle_in_cache(page, pool)) return NULL; /* Page found as candidate for recycling */ @@ -716,9 +715,35 @@ __page_pool_put_page(struct page_pool *pool, struct page *page, return NULL; } +static bool page_pool_napi_local(const struct page_pool *pool) +{ + const struct napi_struct *napi; + u32 cpuid; + + if (unlikely(!in_softirq())) + return false; + + /* Allow direct recycle if we have reasons to believe that we are + * in the same context as the consumer would run, so there's + * no possible race. + * __page_pool_put_page() makes sure we're not in hardirq context + * and interrupts are enabled prior to accessing the cache. 
+ */ + cpuid = smp_processor_id(); + if (READ_ONCE(pool->cpuid) == cpuid) + return true; + + napi = READ_ONCE(pool->p.napi); + + return napi && READ_ONCE(napi->list_owner) == cpuid; +} + void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page, unsigned int dma_sync_size, bool allow_direct) { + if (!allow_direct) + allow_direct = page_pool_napi_local(pool); + page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct); if (page && !page_pool_recycle_in_ring(pool, page)) { /* Cache full, fallback to free pages */ @@ -969,7 +994,7 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *), static void page_pool_disable_direct_recycling(struct page_pool *pool) { /* Disable direct recycling based on pool->cpuid. - * Paired with READ_ONCE() in napi_pp_put_page(). + * Paired with READ_ONCE() in page_pool_napi_local(). */ WRITE_ONCE(pool->cpuid, -1); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index a1be84be5d35..2a5ce6667bbb 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1004,11 +1004,8 @@ int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb, EXPORT_SYMBOL(skb_cow_data_for_xdp); #if IS_ENABLED(CONFIG_PAGE_POOL) -bool napi_pp_put_page(struct page *page, bool napi_safe) +bool napi_pp_put_page(struct page *page) { - bool allow_direct = false; - struct page_pool *pp; - page = compound_head(page); /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation @@ -1021,39 +1018,18 @@ bool napi_pp_put_page(struct page *page, bool napi_safe) if (unlikely(!is_pp_page(page))) return false; - pp = page->pp; - - /* Allow direct recycle if we have reasons to believe that we are - * in the same context as the consumer would run, so there's - * no possible race. - * __page_pool_put_page() makes sure we're not in hardirq context - * and interrupts are enabled prior to accessing the cache. - */ - if (napi_safe || in_softirq()) { - const struct napi_struct *napi = READ_ONCE(pp->p.napi); - unsigned int cpuid = smp_processor_id(); - - allow_direct = napi && READ_ONCE(napi->list_owner) == cpuid; - allow_direct |= READ_ONCE(pp->cpuid) == cpuid; - } - - /* Driver set this to memory recycling info. Reset it on recycle. - * This will *not* work for NIC using a split-page memory model. - * The page will be returned to the pool here regardless of the - * 'flipped' fragment being in use or not. 
- */ - page_pool_put_full_page(pp, page, allow_direct); + page_pool_put_full_page(page->pp, page, false); return true; } EXPORT_SYMBOL(napi_pp_put_page); #endif -static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe) +static bool skb_pp_recycle(struct sk_buff *skb, void *data) { if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) return false; - return napi_pp_put_page(virt_to_page(data), napi_safe); + return napi_pp_put_page(virt_to_page(data)); } /** @@ -1095,12 +1071,12 @@ static void skb_kfree_head(void *head, unsigned int end_offset) kfree(head); } -static void skb_free_head(struct sk_buff *skb, bool napi_safe) +static void skb_free_head(struct sk_buff *skb) { unsigned char *head = skb->head; if (skb->head_frag) { - if (skb_pp_recycle(skb, head, napi_safe)) + if (skb_pp_recycle(skb, head)) return; skb_free_frag(head); } else { @@ -1108,8 +1084,7 @@ static void skb_free_head(struct sk_buff *skb, bool napi_safe) } } -static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason, - bool napi_safe) +static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason) { struct skb_shared_info *shinfo = skb_shinfo(skb); int i; @@ -1126,13 +1101,13 @@ static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason, } for (i = 0; i < shinfo->nr_frags; i++) - napi_frag_unref(&shinfo->frags[i], skb->pp_recycle, napi_safe); + napi_frag_unref(&shinfo->frags[i], skb->pp_recycle); free_head: if (shinfo->frag_list) kfree_skb_list_reason(shinfo->frag_list, reason); - skb_free_head(skb, napi_safe); + skb_free_head(skb); exit: /* When we clone an SKB we copy the reycling bit. The pp_recycle * bit is only set on the head though, so in order to avoid races @@ -1193,12 +1168,11 @@ void skb_release_head_state(struct sk_buff *skb) } /* Free everything but the sk_buff shell. 
*/ -static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason, - bool napi_safe) +static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason) { skb_release_head_state(skb); if (likely(skb->head)) - skb_release_data(skb, reason, napi_safe); + skb_release_data(skb, reason); } /** @@ -1212,7 +1186,7 @@ static void skb_release_all(struct sk_buff *skb, enum skb_drop_reason reason, void __kfree_skb(struct sk_buff *skb) { - skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED, false); + skb_release_all(skb, SKB_DROP_REASON_NOT_SPECIFIED); kfree_skbmem(skb); } EXPORT_SYMBOL(__kfree_skb); @@ -1269,7 +1243,7 @@ static void kfree_skb_add_bulk(struct sk_buff *skb, return; } - skb_release_all(skb, reason, false); + skb_release_all(skb, reason); sa->skb_array[sa->skb_count++] = skb; if (unlikely(sa->skb_count == KFREE_SKB_BULK_SIZE)) { @@ -1443,7 +1417,7 @@ EXPORT_SYMBOL(consume_skb); void __consume_stateless_skb(struct sk_buff *skb) { trace_consume_skb(skb, __builtin_return_address(0)); - skb_release_data(skb, SKB_CONSUMED, false); + skb_release_data(skb, SKB_CONSUMED); kfree_skbmem(skb); } @@ -1470,7 +1444,7 @@ static void napi_skb_cache_put(struct sk_buff *skb) void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason) { - skb_release_all(skb, reason, true); + skb_release_all(skb, reason); napi_skb_cache_put(skb); } @@ -1508,7 +1482,7 @@ void napi_consume_skb(struct sk_buff *skb, int budget) return; } - skb_release_all(skb, SKB_CONSUMED, !!budget); + skb_release_all(skb, SKB_CONSUMED); napi_skb_cache_put(skb); } EXPORT_SYMBOL(napi_consume_skb); @@ -1639,7 +1613,7 @@ EXPORT_SYMBOL_GPL(alloc_skb_for_msg); */ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) { - skb_release_all(dst, SKB_CONSUMED, false); + skb_release_all(dst, SKB_CONSUMED); return __skb_clone(dst, src); } EXPORT_SYMBOL_GPL(skb_morph); @@ -2271,9 +2245,9 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, if (skb_has_frag_list(skb)) skb_clone_fraglist(skb); - skb_release_data(skb, SKB_CONSUMED, false); + skb_release_data(skb, SKB_CONSUMED); } else { - skb_free_head(skb, false); + skb_free_head(skb); } off = (data + nhead) - skb->head; @@ -6574,12 +6548,12 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, skb_frag_ref(skb, i); if (skb_has_frag_list(skb)) skb_clone_fraglist(skb); - skb_release_data(skb, SKB_CONSUMED, false); + skb_release_data(skb, SKB_CONSUMED); } else { /* we can reuse existing recount- all we did was * relocate values */ - skb_free_head(skb, false); + skb_free_head(skb); } skb->head = data; @@ -6714,7 +6688,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, skb_kfree_head(data, size); return -ENOMEM; } - skb_release_data(skb, SKB_CONSUMED, false); + skb_release_data(skb, SKB_CONSUMED); skb->head = data; skb->head_frag = 0; diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index d33d12421814..3d647c9a7a21 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -114,7 +114,7 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) */ if (req->src != req->dst) for (sg = sg_next(req->src); sg; sg = sg_next(sg)) - skb_page_unref(skb, sg_page(sg), false); + skb_page_unref(skb, sg_page(sg)); } #ifdef CONFIG_INET_ESPINTCP diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 7371886d4f9f..fe8d53f5a5ee 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -131,7 +131,7 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) */ if (req->src != 
req->dst) for (sg = sg_next(req->src); sg; sg = sg_next(sg)) - skb_page_unref(skb, sg_page(sg), false); + skb_page_unref(skb, sg_page(sg)); } #ifdef CONFIG_INET6_ESPINTCP -- cgit From 0439fcff304acdedfc493dbf900dce86922fd31f Mon Sep 17 00:00:00 2001 From: Sumit Garg Date: Mon, 25 Mar 2024 16:11:01 +0100 Subject: tee: Refactor TEE subsystem header files Since commit 25559c22cef8 ("tee: add kernel internal client interface"), it has been a common include/linux/tee_drv.h header file which is shared to hold TEE subsystem internal bits along with the APIs exposed to the TEE client drivers. However, this practice is prone to TEE subsystem internal APIs abuse and especially so with the new TEE implementation drivers being added to reuse existing functionality. In order to address this split TEE subsystem internal bits as a separate header file: include/linux/tee_core.h which should be the one used by TEE implementation drivers. With that include/linux/tee_drv.h lists only APIs exposed by TEE subsystem to the TEE client drivers. Signed-off-by: Sumit Garg Signed-off-by: Balint Dobszay Signed-off-by: Jens Wiklander --- MAINTAINERS | 1 + drivers/tee/amdtee/amdtee_private.h | 2 +- drivers/tee/amdtee/call.c | 2 +- drivers/tee/amdtee/core.c | 3 +- drivers/tee/amdtee/shm_pool.c | 2 +- drivers/tee/optee/call.c | 2 +- drivers/tee/optee/core.c | 2 +- drivers/tee/optee/device.c | 2 +- drivers/tee/optee/ffa_abi.c | 2 +- drivers/tee/optee/notif.c | 2 +- drivers/tee/optee/optee_private.h | 2 +- drivers/tee/optee/rpc.c | 2 +- drivers/tee/optee/smc_abi.c | 2 +- drivers/tee/tee_core.c | 2 +- drivers/tee/tee_private.h | 35 ----- drivers/tee/tee_shm.c | 2 +- drivers/tee/tee_shm_pool.c | 2 +- include/linux/tee_core.h | 296 ++++++++++++++++++++++++++++++++++++ include/linux/tee_drv.h | 285 ++++------------------------------ 19 files changed, 343 insertions(+), 305 deletions(-) create mode 100644 include/linux/tee_core.h (limited to 'include/linux') diff --git a/MAINTAINERS b/MAINTAINERS index aa3b947fb080..fd221a7d4d1c 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -21684,6 +21684,7 @@ F: Documentation/driver-api/tee.rst F: Documentation/tee/ F: Documentation/userspace-api/tee.rst F: drivers/tee/ +F: include/linux/tee_core.h F: include/linux/tee_drv.h F: include/uapi/linux/tee.h diff --git a/drivers/tee/amdtee/amdtee_private.h b/drivers/tee/amdtee/amdtee_private.h index 6d0f7062bb87..d87050033894 100644 --- a/drivers/tee/amdtee/amdtee_private.h +++ b/drivers/tee/amdtee/amdtee_private.h @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include #include "amdtee_if.h" diff --git a/drivers/tee/amdtee/call.c b/drivers/tee/amdtee/call.c index e9b63dcb3194..4c21b02be4af 100644 --- a/drivers/tee/amdtee/call.c +++ b/drivers/tee/amdtee/call.c @@ -5,7 +5,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c index 3c15f6a9e91c..e487231d25dc 100644 --- a/drivers/tee/amdtee/core.c +++ b/drivers/tee/amdtee/core.c @@ -9,13 +9,12 @@ #include #include #include -#include +#include #include #include #include #include #include "amdtee_private.h" -#include "../tee_private.h" #include static struct amdtee_driver_data *drv_data; diff --git a/drivers/tee/amdtee/shm_pool.c b/drivers/tee/amdtee/shm_pool.c index f0303126f199..6346e0bc8a64 100644 --- a/drivers/tee/amdtee/shm_pool.c +++ b/drivers/tee/amdtee/shm_pool.c @@ -4,7 +4,7 @@ */ #include -#include +#include #include #include "amdtee_private.h" diff --git a/drivers/tee/optee/call.c 
b/drivers/tee/optee/call.c index a91e50be11be..16eb953e14bb 100644 --- a/drivers/tee/optee/call.c +++ b/drivers/tee/optee/call.c @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include #include "optee_private.h" diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index 3aed554bc8d8..f762e3a25119 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include "optee_private.h" diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c index 1892e49a8e6a..d296c70ddfdc 100644 --- a/drivers/tee/optee/device.c +++ b/drivers/tee/optee/device.c @@ -7,7 +7,7 @@ #include #include -#include +#include #include #include "optee_private.h" diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index ecb5eb079408..cee8ccb84cb8 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include "optee_private.h" #include "optee_ffa.h" diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c index 05212842b0a5..0d7878e770cd 100644 --- a/drivers/tee/optee/notif.c +++ b/drivers/tee/optee/notif.c @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include "optee_private.h" struct notif_entry { diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index 7a5243c78b55..a0698ac18993 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include "optee_msg.h" diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c index e69bc6380683..f086812f1179 100644 --- a/drivers/tee/optee/rpc.c +++ b/drivers/tee/optee/rpc.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include "optee_private.h" #include "optee_rpc_cmd.h" diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index a37f87087e5c..9c296b887dc1 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include #include "optee_private.h" diff --git a/drivers/tee/tee_core.c b/drivers/tee/tee_core.c index e59c20d74b36..82ad095d2b1c 100644 --- a/drivers/tee/tee_core.c +++ b/drivers/tee/tee_core.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/tee/tee_private.h b/drivers/tee/tee_private.h index 754e11dcb240..9bc50605227c 100644 --- a/drivers/tee/tee_private.h +++ b/drivers/tee/tee_private.h @@ -12,41 +12,6 @@ #include #include -#define TEE_DEVICE_FLAG_REGISTERED 0x1 -#define TEE_MAX_DEV_NAME_LEN 32 - -/** - * struct tee_device - TEE Device representation - * @name: name of device - * @desc: description of device - * @id: unique id of device - * @flags: represented by TEE_DEVICE_FLAG_REGISTERED above - * @dev: embedded basic device structure - * @cdev: embedded cdev - * @num_users: number of active users of this device - * @c_no_user: completion used when unregistering the device - * @mutex: mutex protecting @num_users and @idr - * @idr: register of user space shared memory objects allocated or - * registered on this device - * @pool: shared memory pool - */ -struct tee_device { - char name[TEE_MAX_DEV_NAME_LEN]; - const struct tee_desc *desc; - int id; - unsigned int flags; - - struct device dev; - struct cdev cdev; - - size_t num_users; - struct completion c_no_users; - struct mutex mutex; 
/* protects num_users and idr */ - - struct idr idr; - struct tee_shm_pool *pool; -}; - int tee_shm_get_fd(struct tee_shm *shm); bool tee_device_get(struct tee_device *teedev); diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 731d9028b67f..96a45c817427 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -8,7 +8,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/drivers/tee/tee_shm_pool.c b/drivers/tee/tee_shm_pool.c index 058bfbac657a..80004b55628d 100644 --- a/drivers/tee/tee_shm_pool.c +++ b/drivers/tee/tee_shm_pool.c @@ -6,7 +6,7 @@ #include #include #include -#include +#include #include "tee_private.h" static int pool_op_gen_alloc(struct tee_shm_pool *pool, struct tee_shm *shm, diff --git a/include/linux/tee_core.h b/include/linux/tee_core.h new file mode 100644 index 000000000000..d9b3ba8e8fa9 --- /dev/null +++ b/include/linux/tee_core.h @@ -0,0 +1,296 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2024 Linaro Limited + */ + +#ifndef __TEE_CORE_H +#define __TEE_CORE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * The file describes the API provided by the generic TEE driver to the + * specific TEE driver. + */ + +#define TEE_SHM_DYNAMIC BIT(0) /* Dynamic shared memory registered */ + /* in secure world */ +#define TEE_SHM_USER_MAPPED BIT(1) /* Memory mapped in user space */ +#define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */ +#define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */ + +#define TEE_DEVICE_FLAG_REGISTERED 0x1 +#define TEE_MAX_DEV_NAME_LEN 32 + +/** + * struct tee_device - TEE Device representation + * @name: name of device + * @desc: description of device + * @id: unique id of device + * @flags: represented by TEE_DEVICE_FLAG_REGISTERED above + * @dev: embedded basic device structure + * @cdev: embedded cdev + * @num_users: number of active users of this device + * @c_no_user: completion used when unregistering the device + * @mutex: mutex protecting @num_users and @idr + * @idr: register of user space shared memory objects allocated or + * registered on this device + * @pool: shared memory pool + */ +struct tee_device { + char name[TEE_MAX_DEV_NAME_LEN]; + const struct tee_desc *desc; + int id; + unsigned int flags; + + struct device dev; + struct cdev cdev; + + size_t num_users; + struct completion c_no_users; + struct mutex mutex; /* protects num_users and idr */ + + struct idr idr; + struct tee_shm_pool *pool; +}; + +/** + * struct tee_driver_ops - driver operations vtable + * @get_version: returns version of driver + * @open: called when the device file is opened + * @release: release this open file + * @open_session: open a new session + * @close_session: close a session + * @system_session: declare session as a system session + * @invoke_func: invoke a trusted function + * @cancel_req: request cancel of an ongoing invoke or open + * @supp_recv: called for supplicant to get a command + * @supp_send: called for supplicant to send a response + * @shm_register: register shared memory buffer in TEE + * @shm_unregister: unregister shared memory buffer in TEE + */ +struct tee_driver_ops { + void (*get_version)(struct tee_device *teedev, + struct tee_ioctl_version_data *vers); + int (*open)(struct tee_context *ctx); + void (*release)(struct tee_context *ctx); + int (*open_session)(struct tee_context *ctx, + struct tee_ioctl_open_session_arg *arg, + struct tee_param *param); + int (*close_session)(struct 
tee_context *ctx, u32 session); + int (*system_session)(struct tee_context *ctx, u32 session); + int (*invoke_func)(struct tee_context *ctx, + struct tee_ioctl_invoke_arg *arg, + struct tee_param *param); + int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session); + int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params, + struct tee_param *param); + int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params, + struct tee_param *param); + int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm, + struct page **pages, size_t num_pages, + unsigned long start); + int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm); +}; + +/** + * struct tee_desc - Describes the TEE driver to the subsystem + * @name: name of driver + * @ops: driver operations vtable + * @owner: module providing the driver + * @flags: Extra properties of driver, defined by TEE_DESC_* below + */ +#define TEE_DESC_PRIVILEGED 0x1 +struct tee_desc { + const char *name; + const struct tee_driver_ops *ops; + struct module *owner; + u32 flags; +}; + +/** + * tee_device_alloc() - Allocate a new struct tee_device instance + * @teedesc: Descriptor for this driver + * @dev: Parent device for this device + * @pool: Shared memory pool, NULL if not used + * @driver_data: Private driver data for this device + * + * Allocates a new struct tee_device instance. The device is + * removed by tee_device_unregister(). + * + * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure + */ +struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, + struct device *dev, + struct tee_shm_pool *pool, + void *driver_data); + +/** + * tee_device_register() - Registers a TEE device + * @teedev: Device to register + * + * tee_device_unregister() need to be called to remove the @teedev if + * this function fails. + * + * @returns < 0 on failure + */ +int tee_device_register(struct tee_device *teedev); + +/** + * tee_device_unregister() - Removes a TEE device + * @teedev: Device to unregister + * + * This function should be called to remove the @teedev even if + * tee_device_register() hasn't been called yet. Does nothing if + * @teedev is NULL. + */ +void tee_device_unregister(struct tee_device *teedev); + +/** + * tee_session_calc_client_uuid() - Calculates client UUID for session + * @uuid: Resulting UUID + * @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*) + * @connectuon_data: Connection data for opening session + * + * Based on connection method calculates UUIDv5 based client UUID. + * + * For group based logins verifies that calling process has specified + * credentials. 
+ * + * @return < 0 on failure + */ +int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method, + const u8 connection_data[TEE_IOCTL_UUID_LEN]); + +/** + * struct tee_shm_pool - shared memory pool + * @ops: operations + * @private_data: private data for the shared memory manager + */ +struct tee_shm_pool { + const struct tee_shm_pool_ops *ops; + void *private_data; +}; + +/** + * struct tee_shm_pool_ops - shared memory pool operations + * @alloc: called when allocating shared memory + * @free: called when freeing shared memory + * @destroy_pool: called when destroying the pool + */ +struct tee_shm_pool_ops { + int (*alloc)(struct tee_shm_pool *pool, struct tee_shm *shm, + size_t size, size_t align); + void (*free)(struct tee_shm_pool *pool, struct tee_shm *shm); + void (*destroy_pool)(struct tee_shm_pool *pool); +}; + +/* + * tee_shm_pool_alloc_res_mem() - Create a shm manager for reserved memory + * @vaddr: Virtual address of start of pool + * @paddr: Physical address of start of pool + * @size: Size in bytes of the pool + * + * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. + */ +struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr, + phys_addr_t paddr, size_t size, + int min_alloc_order); + +/** + * tee_shm_pool_free() - Free a shared memory pool + * @pool: The shared memory pool to free + * + * The must be no remaining shared memory allocated from this pool when + * this function is called. + */ +static inline void tee_shm_pool_free(struct tee_shm_pool *pool) +{ + pool->ops->destroy_pool(pool); +} + +/** + * tee_get_drvdata() - Return driver_data pointer + * @returns the driver_data pointer supplied to tee_register(). + */ +void *tee_get_drvdata(struct tee_device *teedev); + +/** + * tee_shm_alloc_priv_buf() - Allocate shared memory for private use by specific + * TEE driver + * @ctx: The TEE context for shared memory allocation + * @size: Shared memory allocation size + * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure + */ +struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size); + +/** + * tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind + * @shm: Shared memory handle + * @returns true if object is dynamic shared memory + */ +static inline bool tee_shm_is_dynamic(struct tee_shm *shm) +{ + return shm && (shm->flags & TEE_SHM_DYNAMIC); +} + +/** + * tee_shm_put() - Decrease reference count on a shared memory handle + * @shm: Shared memory handle + */ +void tee_shm_put(struct tee_shm *shm); + +/** + * tee_shm_get_id() - Get id of a shared memory object + * @shm: Shared memory handle + * @returns id + */ +static inline int tee_shm_get_id(struct tee_shm *shm) +{ + return shm->id; +} + +/** + * tee_shm_get_from_id() - Find shared memory object and increase reference + * count + * @ctx: Context owning the shared memory + * @id: Id of shared memory object + * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure + */ +struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id); + +static inline bool tee_param_is_memref(struct tee_param *param) +{ + switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: + case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: + return true; + default: + return false; + } +} + +/** + * teedev_open() - Open a struct tee_device + * @teedev: Device to open + * + * @return a pointer to struct tee_context on success or an 
ERR_PTR on failure. + */ +struct tee_context *teedev_open(struct tee_device *teedev); + +/** + * teedev_close_context() - closes a struct tee_context + * @ctx: The struct tee_context to close + */ +void teedev_close_context(struct tee_context *ctx); + +#endif /*__TEE_CORE_H*/ diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 71632e3c5f18..786b9ae6cf4d 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -1,40 +1,28 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2015-2022 Linaro Limited + * Copyright (c) 2015-2024 Linaro Limited */ #ifndef __TEE_DRV_H #define __TEE_DRV_H #include -#include #include #include #include #include #include -#include /* - * The file describes the API provided by the generic TEE driver to the - * specific TEE driver. + * The file describes the API provided by the TEE subsystem to the + * TEE client drivers. */ -#define TEE_SHM_DYNAMIC BIT(0) /* Dynamic shared memory registered */ - /* in secure world */ -#define TEE_SHM_USER_MAPPED BIT(1) /* Memory mapped in user space */ -#define TEE_SHM_POOL BIT(2) /* Memory allocated from pool */ -#define TEE_SHM_PRIV BIT(3) /* Memory private to TEE driver */ - -struct device; struct tee_device; -struct tee_shm; -struct tee_shm_pool; /** * struct tee_context - driver specific context on file pointer data * @teedev: pointer to this drivers struct tee_device - * @list_shm: List of shared memory object owned by this context * @data: driver specific context data, managed by the driver * @refcount: reference counter for this structure * @releasing: flag that indicates if context is being released right now. @@ -57,134 +45,6 @@ struct tee_context { bool cap_memref_null; }; -struct tee_param_memref { - size_t shm_offs; - size_t size; - struct tee_shm *shm; -}; - -struct tee_param_value { - u64 a; - u64 b; - u64 c; -}; - -struct tee_param { - u64 attr; - union { - struct tee_param_memref memref; - struct tee_param_value value; - } u; -}; - -/** - * struct tee_driver_ops - driver operations vtable - * @get_version: returns version of driver - * @open: called when the device file is opened - * @release: release this open file - * @open_session: open a new session - * @close_session: close a session - * @system_session: declare session as a system session - * @invoke_func: invoke a trusted function - * @cancel_req: request cancel of an ongoing invoke or open - * @supp_recv: called for supplicant to get a command - * @supp_send: called for supplicant to send a response - * @shm_register: register shared memory buffer in TEE - * @shm_unregister: unregister shared memory buffer in TEE - */ -struct tee_driver_ops { - void (*get_version)(struct tee_device *teedev, - struct tee_ioctl_version_data *vers); - int (*open)(struct tee_context *ctx); - void (*release)(struct tee_context *ctx); - int (*open_session)(struct tee_context *ctx, - struct tee_ioctl_open_session_arg *arg, - struct tee_param *param); - int (*close_session)(struct tee_context *ctx, u32 session); - int (*system_session)(struct tee_context *ctx, u32 session); - int (*invoke_func)(struct tee_context *ctx, - struct tee_ioctl_invoke_arg *arg, - struct tee_param *param); - int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session); - int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params, - struct tee_param *param); - int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params, - struct tee_param *param); - int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm, - struct page **pages, 
size_t num_pages, - unsigned long start); - int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm); -}; - -/** - * struct tee_desc - Describes the TEE driver to the subsystem - * @name: name of driver - * @ops: driver operations vtable - * @owner: module providing the driver - * @flags: Extra properties of driver, defined by TEE_DESC_* below - */ -#define TEE_DESC_PRIVILEGED 0x1 -struct tee_desc { - const char *name; - const struct tee_driver_ops *ops; - struct module *owner; - u32 flags; -}; - -/** - * tee_device_alloc() - Allocate a new struct tee_device instance - * @teedesc: Descriptor for this driver - * @dev: Parent device for this device - * @pool: Shared memory pool, NULL if not used - * @driver_data: Private driver data for this device - * - * Allocates a new struct tee_device instance. The device is - * removed by tee_device_unregister(). - * - * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure - */ -struct tee_device *tee_device_alloc(const struct tee_desc *teedesc, - struct device *dev, - struct tee_shm_pool *pool, - void *driver_data); - -/** - * tee_device_register() - Registers a TEE device - * @teedev: Device to register - * - * tee_device_unregister() need to be called to remove the @teedev if - * this function fails. - * - * @returns < 0 on failure - */ -int tee_device_register(struct tee_device *teedev); - -/** - * tee_device_unregister() - Removes a TEE device - * @teedev: Device to unregister - * - * This function should be called to remove the @teedev even if - * tee_device_register() hasn't been called yet. Does nothing if - * @teedev is NULL. - */ -void tee_device_unregister(struct tee_device *teedev); - -/** - * tee_session_calc_client_uuid() - Calculates client UUID for session - * @uuid: Resulting UUID - * @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*) - * @connectuon_data: Connection data for opening session - * - * Based on connection method calculates UUIDv5 based client UUID. - * - * For group based logins verifies that calling process has specified - * credentials. - * - * @return < 0 on failure - */ -int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method, - const u8 connection_data[TEE_IOCTL_UUID_LEN]); - /** * struct tee_shm - shared memory object * @ctx: context using the object @@ -195,15 +55,12 @@ int tee_session_calc_client_uuid(uuid_t *uuid, u32 connection_method, * @pages: locked pages from userspace * @num_pages: number of locked pages * @refcount: reference counter - * @flags: defined by TEE_SHM_* in tee_drv.h + * @flags: defined by TEE_SHM_* in tee_core.h * @id: unique id of a shared memory object on this device, shared * with user space * @sec_world_id: * secure world assigned id of this shared memory object, not * used by all drivers - * - * This pool is only supposed to be accessed directly from the TEE - * subsystem and from drivers that implements their own shm pool manager. 
*/ struct tee_shm { struct tee_context *ctx; @@ -219,87 +76,52 @@ struct tee_shm { u64 sec_world_id; }; -/** - * struct tee_shm_pool - shared memory pool - * @ops: operations - * @private_data: private data for the shared memory manager - */ -struct tee_shm_pool { - const struct tee_shm_pool_ops *ops; - void *private_data; +struct tee_param_memref { + size_t shm_offs; + size_t size; + struct tee_shm *shm; }; -/** - * struct tee_shm_pool_ops - shared memory pool operations - * @alloc: called when allocating shared memory - * @free: called when freeing shared memory - * @destroy_pool: called when destroying the pool - */ -struct tee_shm_pool_ops { - int (*alloc)(struct tee_shm_pool *pool, struct tee_shm *shm, - size_t size, size_t align); - void (*free)(struct tee_shm_pool *pool, struct tee_shm *shm); - void (*destroy_pool)(struct tee_shm_pool *pool); +struct tee_param_value { + u64 a; + u64 b; + u64 c; }; -/* - * tee_shm_pool_alloc_res_mem() - Create a shm manager for reserved memory - * @vaddr: Virtual address of start of pool - * @paddr: Physical address of start of pool - * @size: Size in bytes of the pool - * - * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. - */ -struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr, - phys_addr_t paddr, size_t size, - int min_alloc_order); +struct tee_param { + u64 attr; + union { + struct tee_param_memref memref; + struct tee_param_value value; + } u; +}; /** - * tee_shm_pool_free() - Free a shared memory pool - * @pool: The shared memory pool to free - * - * The must be no remaining shared memory allocated from this pool when - * this function is called. + * tee_shm_alloc_kernel_buf() - Allocate kernel shared memory for a + * particular TEE client driver + * @ctx: The TEE context for shared memory allocation + * @size: Shared memory allocation size + * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure */ -static inline void tee_shm_pool_free(struct tee_shm_pool *pool) -{ - pool->ops->destroy_pool(pool); -} +struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size); /** - * tee_get_drvdata() - Return driver_data pointer - * @returns the driver_data pointer supplied to tee_register(). 
+ * tee_shm_register_kernel_buf() - Register kernel shared memory for a + * particular TEE client driver + * @ctx: The TEE context for shared memory registration + * @addr: Kernel buffer address + * @length: Kernel buffer length + * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure */ -void *tee_get_drvdata(struct tee_device *teedev); - -struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size); -struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size); - struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx, void *addr, size_t length); -/** - * tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind - * @shm: Shared memory handle - * @returns true if object is dynamic shared memory - */ -static inline bool tee_shm_is_dynamic(struct tee_shm *shm) -{ - return shm && (shm->flags & TEE_SHM_DYNAMIC); -} - /** * tee_shm_free() - Free shared memory * @shm: Handle to shared memory to free */ void tee_shm_free(struct tee_shm *shm); -/** - * tee_shm_put() - Decrease reference count on a shared memory handle - * @shm: Shared memory handle - */ -void tee_shm_put(struct tee_shm *shm); - /** * tee_shm_get_va() - Get virtual address of a shared memory plus an offset * @shm: Shared memory handle @@ -352,25 +174,6 @@ static inline size_t tee_shm_get_page_offset(struct tee_shm *shm) return shm->offset; } -/** - * tee_shm_get_id() - Get id of a shared memory object - * @shm: Shared memory handle - * @returns id - */ -static inline int tee_shm_get_id(struct tee_shm *shm) -{ - return shm->id; -} - -/** - * tee_shm_get_from_id() - Find shared memory object and increase reference - * count - * @ctx: Context owning the shared memory - * @id: Id of shared memory object - * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure - */ -struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id); - /** * tee_client_open_context() - Open a TEE context * @start: if not NULL, continue search after this context @@ -470,18 +273,6 @@ int tee_client_invoke_func(struct tee_context *ctx, int tee_client_cancel_req(struct tee_context *ctx, struct tee_ioctl_cancel_arg *arg); -static inline bool tee_param_is_memref(struct tee_param *param) -{ - switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { - case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: - case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: - case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: - return true; - default: - return false; - } -} - extern const struct bus_type tee_bus_type; /** @@ -509,18 +300,4 @@ struct tee_client_driver { #define to_tee_client_driver(d) \ container_of(d, struct tee_client_driver, driver) -/** - * teedev_open() - Open a struct tee_device - * @teedev: Device to open - * - * @return a pointer to struct tee_context on success or an ERR_PTR on failure. - */ -struct tee_context *teedev_open(struct tee_device *teedev); - -/** - * teedev_close_context() - closes a struct tee_context - * @ctx: The struct tee_context to close - */ -void teedev_close_context(struct tee_context *ctx); - #endif /*__TEE_DRV_H*/ -- cgit From 464eb03c4a7cfb32cb3324249193cf6bb5b35152 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 28 Mar 2024 08:27:49 +0100 Subject: rtnetlink: add guard for RTNL The new guard/scoped_gard can be useful for the RTNL as well, so add a guard definition for it. It gets used like { guard(rtnl)(); // RTNL held until end of block } or scoped_guard(rtnl) { // RTNL held in this block } as with any other guard/scoped_guard. 
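For illustration only (example_op() is a made-up caller, not part of this patch), a function that today brackets its body with rtnl_lock()/rtnl_unlock() and needs an unlock on every error path collapses to:

	static int example_op(struct net_device *dev)
	{
		guard(rtnl)();	/* rtnl_lock() here, rtnl_unlock() on every return path */

		if (!netif_running(dev))
			return -ENETDOWN;

		/* ... work that requires the RTNL ... */
		return 0;
	}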
Signed-off-by: Johannes Berg Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- include/linux/rtnetlink.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index cdfc897f1e3c..a7da7dfc06a2 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -7,6 +7,7 @@ #include #include #include +#include #include extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); @@ -46,6 +47,8 @@ extern int rtnl_is_locked(void); extern int rtnl_lock_killable(void); extern bool refcount_dec_and_rtnl_lock(refcount_t *r); +DEFINE_LOCK_GUARD_0(rtnl, rtnl_lock(), rtnl_unlock()) + extern wait_queue_head_t netdev_unregistering_wq; extern atomic_t dev_unreg_count; extern struct rw_semaphore pernet_ops_rwsem; -- cgit From b1f81b9a535b48b2c9ca460720a2bc73fd2001de Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 28 Mar 2024 08:27:50 +0100 Subject: netdevice: add DEFINE_FREE() for dev_put For short netdev holds within a function there are still a lot of users of dev_put() rather than netdev_put(). Add DEFINE_FREE() to allow making those safer. Signed-off-by: Johannes Berg Reviewed-by: Simon Horman Signed-off-by: David S. Miller --- include/linux/netdevice.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 7d12b5a9380f..0c198620ac93 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4097,6 +4097,8 @@ static inline void dev_put(struct net_device *dev) netdev_put(dev, NULL); } +DEFINE_FREE(dev_put, struct net_device *, if (_T) dev_put(_T)) + static inline void netdev_ref_replace(struct net_device *odev, struct net_device *ndev, netdevice_tracker *tracker, -- cgit From dbcedec3a31119d7594baacc743300d127c99c56 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 7 Mar 2024 15:49:05 +0200 Subject: gpiolib: legacy: Remove unused gpio_request_array() and gpio_free_array() No more users. Signed-off-by: Andy Shevchenko Reviewed-by: Yanteng Si Signed-off-by: Bartosz Golaszewski --- Documentation/driver-api/gpio/legacy.rst | 16 --------- .../translations/zh_CN/driver-api/gpio/legacy.rst | 16 --------- Documentation/translations/zh_TW/gpio.txt | 17 ---------- drivers/gpio/gpiolib-legacy.c | 39 ---------------------- include/linux/gpio.h | 15 --------- 5 files changed, 103 deletions(-) (limited to 'include/linux') diff --git a/Documentation/driver-api/gpio/legacy.rst b/Documentation/driver-api/gpio/legacy.rst index b6505914791c..534dfe95d128 100644 --- a/Documentation/driver-api/gpio/legacy.rst +++ b/Documentation/driver-api/gpio/legacy.rst @@ -225,8 +225,6 @@ setup or driver probe/teardown code, so this is an easy constraint.):: gpio_request() ## gpio_request_one() - ## gpio_request_array() - ## gpio_free_array() gpio_free() @@ -295,14 +293,6 @@ are claimed, three additional calls are defined:: */ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label); - /* request multiple GPIOs in a single call - */ - int gpio_request_array(struct gpio *array, size_t num); - - /* release multiple GPIOs in a single call - */ - void gpio_free_array(struct gpio *array, size_t num); - where 'flags' is currently defined to specify the following properties: * GPIOF_DIR_IN - to configure direction as input @@ -341,12 +331,6 @@ A typical example of usage:: if (err) ... - err = gpio_request_array(leds_gpios, ARRAY_SIZE(leds_gpios)); - if (err) - ... 
- - gpio_free_array(leds_gpios, ARRAY_SIZE(leds_gpios)); - GPIOs mapped to IRQs -------------------- diff --git a/Documentation/translations/zh_CN/driver-api/gpio/legacy.rst b/Documentation/translations/zh_CN/driver-api/gpio/legacy.rst index aeccff777170..0faf042001d2 100644 --- a/Documentation/translations/zh_CN/driver-api/gpio/legacy.rst +++ b/Documentation/translations/zh_CN/driver-api/gpio/legacy.rst @@ -208,8 +208,6 @@ GPIO 值的命令需要等待其信息排到队首才发送命令,再获得其 gpio_request() ## gpio_request_one() - ## gpio_request_array() - ## gpio_free_array() gpio_free() @@ -272,14 +270,6 @@ gpio_request()前将这类细节配置好,例如使用引脚控制子系统的 */ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label); - /* 在单个函数中申请多个 GPIO - */ - int gpio_request_array(struct gpio *array, size_t num); - - /* 在单个函数中释放多个 GPIO - */ - void gpio_free_array(struct gpio *array, size_t num); - 这里 'flags' 当前定义可指定以下属性: * GPIOF_DIR_IN - 配置方向为输入 @@ -317,12 +307,6 @@ gpio_request()前将这类细节配置好,例如使用引脚控制子系统的 if (err) ... - err = gpio_request_array(leds_gpios, ARRAY_SIZE(leds_gpios)); - if (err) - ... - - gpio_free_array(leds_gpios, ARRAY_SIZE(leds_gpios)); - GPIO 映射到 IRQ ---------------- diff --git a/Documentation/translations/zh_TW/gpio.txt b/Documentation/translations/zh_TW/gpio.txt index b9b48012c62e..77d69d381316 100644 --- a/Documentation/translations/zh_TW/gpio.txt +++ b/Documentation/translations/zh_TW/gpio.txt @@ -215,13 +215,10 @@ GPIO 值的命令需要等待其信息排到隊首才發送命令,再獲得其 gpio_request() ## gpio_request_one() -## gpio_request_array() -## gpio_free_array() gpio_free() - 聲明和釋放 GPIO ---------------------------- 爲了有助於捕獲系統配置錯誤,定義了兩個函數。 @@ -278,14 +275,6 @@ gpio_request()前將這類細節配置好,例如使用 pinctrl 子系統的映 */ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label); - /* 在單個函數中申請多個 GPIO - */ - int gpio_request_array(struct gpio *array, size_t num); - - /* 在單個函數中釋放多個 GPIO - */ - void gpio_free_array(struct gpio *array, size_t num); - 這裡 'flags' 當前定義可指定以下屬性: * GPIOF_DIR_IN - 配置方向爲輸入 @@ -323,12 +312,6 @@ gpio_request()前將這類細節配置好,例如使用 pinctrl 子系統的映 if (err) ... - err = gpio_request_array(leds_gpios, ARRAY_SIZE(leds_gpios)); - if (err) - ... - - gpio_free_array(leds_gpios, ARRAY_SIZE(leds_gpios)); - GPIO 映射到 IRQ -------------------- diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c index b138682fec3d..616d60187f85 100644 --- a/drivers/gpio/gpiolib-legacy.c +++ b/drivers/gpio/gpiolib-legacy.c @@ -72,42 +72,3 @@ int gpio_request(unsigned gpio, const char *label) return gpiod_request(desc, label); } EXPORT_SYMBOL_GPL(gpio_request); - -/** - * gpio_request_array - request multiple GPIOs in a single call - * @array: array of the 'struct gpio' - * @num: how many GPIOs in the array - * - * **DEPRECATED** This function is deprecated and must not be used in new code. - */ -int gpio_request_array(const struct gpio *array, size_t num) -{ - int i, err; - - for (i = 0; i < num; i++, array++) { - err = gpio_request_one(array->gpio, array->flags, array->label); - if (err) - goto err_free; - } - return 0; - -err_free: - while (i--) - gpio_free((--array)->gpio); - return err; -} -EXPORT_SYMBOL_GPL(gpio_request_array); - -/** - * gpio_free_array - release multiple GPIOs in a single call - * @array: array of the 'struct gpio' - * @num: how many GPIOs in the array - * - * **DEPRECATED** This function is deprecated and must not be used in new code. 
- */ -void gpio_free_array(const struct gpio *array, size_t num) -{ - while (num--) - gpio_free((array++)->gpio); -} -EXPORT_SYMBOL_GPL(gpio_free_array); diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 7ecc25c543ce..4aaedcf424ce 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -114,8 +114,6 @@ static inline int gpio_to_irq(unsigned gpio) } int gpio_request_one(unsigned gpio, unsigned long flags, const char *label); -int gpio_request_array(const struct gpio *array, size_t num); -void gpio_free_array(const struct gpio *array, size_t num); /* CONFIG_GPIOLIB: bindings for managed devices that want to request gpios */ @@ -146,11 +144,6 @@ static inline int gpio_request_one(unsigned gpio, return -ENOSYS; } -static inline int gpio_request_array(const struct gpio *array, size_t num) -{ - return -ENOSYS; -} - static inline void gpio_free(unsigned gpio) { might_sleep(); @@ -159,14 +152,6 @@ static inline void gpio_free(unsigned gpio) WARN_ON(1); } -static inline void gpio_free_array(const struct gpio *array, size_t num) -{ - might_sleep(); - - /* GPIO can never have been requested */ - WARN_ON(1); -} - static inline int gpio_direction_input(unsigned gpio) { return -ENOSYS; -- cgit From cf4441503e20a0c0a36e0a98dcf57de3d3844c91 Mon Sep 17 00:00:00 2001 From: Balint Dobszay Date: Mon, 25 Mar 2024 16:11:02 +0100 Subject: tee: optee: Move pool_op helper functions Move the pool alloc and free helper functions from the OP-TEE driver to the TEE subsystem, since these could be reused in other TEE drivers. This patch is not supposed to change behavior, it's only reorganizing the code. Reviewed-by: Sumit Garg Suggested-by: Jens Wiklander Signed-off-by: Balint Dobszay Signed-off-by: Jens Wiklander --- drivers/tee/optee/core.c | 64 -------------------------------------- drivers/tee/optee/ffa_abi.c | 6 ++-- drivers/tee/optee/optee_private.h | 12 -------- drivers/tee/optee/smc_abi.c | 9 +++--- drivers/tee/tee_shm.c | 65 +++++++++++++++++++++++++++++++++++++++ include/linux/tee_core.h | 10 ++++++ 6 files changed, 82 insertions(+), 84 deletions(-) (limited to 'include/linux') diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c index f762e3a25119..39e688d4e974 100644 --- a/drivers/tee/optee/core.c +++ b/drivers/tee/optee/core.c @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include @@ -17,69 +16,6 @@ #include #include "optee_private.h" -int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm, - size_t size, size_t align, - int (*shm_register)(struct tee_context *ctx, - struct tee_shm *shm, - struct page **pages, - size_t num_pages, - unsigned long start)) -{ - size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE; - struct page **pages; - unsigned int i; - int rc = 0; - - /* - * Ignore alignment since this is already going to be page aligned - * and there's no need for any larger alignment. 
- */ - shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE, - GFP_KERNEL | __GFP_ZERO); - if (!shm->kaddr) - return -ENOMEM; - - shm->paddr = virt_to_phys(shm->kaddr); - shm->size = nr_pages * PAGE_SIZE; - - pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL); - if (!pages) { - rc = -ENOMEM; - goto err; - } - - for (i = 0; i < nr_pages; i++) - pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE); - - shm->pages = pages; - shm->num_pages = nr_pages; - - if (shm_register) { - rc = shm_register(shm->ctx, shm, pages, nr_pages, - (unsigned long)shm->kaddr); - if (rc) - goto err; - } - - return 0; -err: - free_pages_exact(shm->kaddr, shm->size); - shm->kaddr = NULL; - return rc; -} - -void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm, - int (*shm_unregister)(struct tee_context *ctx, - struct tee_shm *shm)) -{ - if (shm_unregister) - shm_unregister(shm->ctx, shm); - free_pages_exact(shm->kaddr, shm->size); - shm->kaddr = NULL; - kfree(shm->pages); - shm->pages = NULL; -} - static void optee_bus_scan(struct work_struct *work) { WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP)); diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index cee8ccb84cb8..3235e1c719e8 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -374,14 +374,14 @@ static int optee_ffa_shm_unregister_supp(struct tee_context *ctx, static int pool_ffa_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm, size_t size, size_t align) { - return optee_pool_op_alloc_helper(pool, shm, size, align, - optee_ffa_shm_register); + return tee_dyn_shm_alloc_helper(shm, size, align, + optee_ffa_shm_register); } static void pool_ffa_op_free(struct tee_shm_pool *pool, struct tee_shm *shm) { - optee_pool_op_free_helper(pool, shm, optee_ffa_shm_unregister); + tee_dyn_shm_free_helper(shm, optee_ffa_shm_unregister); } static void pool_ffa_op_destroy_pool(struct tee_shm_pool *pool) diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index a0698ac18993..429cc20be5cc 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -283,18 +283,6 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session); int optee_enumerate_devices(u32 func); void optee_unregister_devices(void); -int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm, - size_t size, size_t align, - int (*shm_register)(struct tee_context *ctx, - struct tee_shm *shm, - struct page **pages, - size_t num_pages, - unsigned long start)); -void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm, - int (*shm_unregister)(struct tee_context *ctx, - struct tee_shm *shm)); - - void optee_remove_common(struct optee *optee); int optee_open(struct tee_context *ctx, bool cap_memref_null); void optee_release(struct tee_context *ctx); diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index 9c296b887dc1..734c484ed0f6 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -592,19 +592,18 @@ static int pool_op_alloc(struct tee_shm_pool *pool, * to be registered with OP-TEE. 
*/ if (shm->flags & TEE_SHM_PRIV) - return optee_pool_op_alloc_helper(pool, shm, size, align, NULL); + return tee_dyn_shm_alloc_helper(shm, size, align, NULL); - return optee_pool_op_alloc_helper(pool, shm, size, align, - optee_shm_register); + return tee_dyn_shm_alloc_helper(shm, size, align, optee_shm_register); } static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm) { if (!(shm->flags & TEE_SHM_PRIV)) - optee_pool_op_free_helper(pool, shm, optee_shm_unregister); + tee_dyn_shm_free_helper(shm, optee_shm_unregister); else - optee_pool_op_free_helper(pool, shm, NULL); + tee_dyn_shm_free_helper(shm, NULL); } static void pool_op_destroy_pool(struct tee_shm_pool *pool) diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c index 96a45c817427..daf6e5cfd59a 100644 --- a/drivers/tee/tee_shm.c +++ b/drivers/tee/tee_shm.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -202,6 +203,70 @@ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size) } EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf); +int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align, + int (*shm_register)(struct tee_context *ctx, + struct tee_shm *shm, + struct page **pages, + size_t num_pages, + unsigned long start)) +{ + size_t nr_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE; + struct page **pages; + unsigned int i; + int rc = 0; + + /* + * Ignore alignment since this is already going to be page aligned + * and there's no need for any larger alignment. + */ + shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE, + GFP_KERNEL | __GFP_ZERO); + if (!shm->kaddr) + return -ENOMEM; + + shm->paddr = virt_to_phys(shm->kaddr); + shm->size = nr_pages * PAGE_SIZE; + + pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL); + if (!pages) { + rc = -ENOMEM; + goto err; + } + + for (i = 0; i < nr_pages; i++) + pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE); + + shm->pages = pages; + shm->num_pages = nr_pages; + + if (shm_register) { + rc = shm_register(shm->ctx, shm, pages, nr_pages, + (unsigned long)shm->kaddr); + if (rc) + goto err; + } + + return 0; +err: + free_pages_exact(shm->kaddr, shm->size); + shm->kaddr = NULL; + return rc; +} +EXPORT_SYMBOL_GPL(tee_dyn_shm_alloc_helper); + +void tee_dyn_shm_free_helper(struct tee_shm *shm, + int (*shm_unregister)(struct tee_context *ctx, + struct tee_shm *shm)) +{ + if (shm_unregister) + shm_unregister(shm->ctx, shm); + free_pages_exact(shm->kaddr, shm->size); + shm->kaddr = NULL; + kfree(shm->pages); + shm->pages = NULL; +} +EXPORT_SYMBOL_GPL(tee_dyn_shm_free_helper); + static struct tee_shm * register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags, int id) diff --git a/include/linux/tee_core.h b/include/linux/tee_core.h index d9b3ba8e8fa9..efd16ed52315 100644 --- a/include/linux/tee_core.h +++ b/include/linux/tee_core.h @@ -232,6 +232,16 @@ void *tee_get_drvdata(struct tee_device *teedev); */ struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size); +int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align, + int (*shm_register)(struct tee_context *ctx, + struct tee_shm *shm, + struct page **pages, + size_t num_pages, + unsigned long start)); +void tee_dyn_shm_free_helper(struct tee_shm *shm, + int (*shm_unregister)(struct tee_context *ctx, + struct tee_shm *shm)); + /** * tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind * @shm: Shared memory handle -- cgit From afde996a33ee4dbe3692e1eff28b56c820331428 Mon Sep 17 
00:00:00 2001 From: Dhruva Gole Date: Mon, 18 Mar 2024 20:46:32 +0530 Subject: PM: wakeup: make device_wakeup_disable() return void The device_wakeup_disable() call only returns an error if no dev exists, but there's not much a user can do at that point. Rather, make this function return void. Signed-off-by: Dhruva Gole Signed-off-by: Rafael J. Wysocki --- drivers/base/power/wakeup.c | 11 +++++++---- drivers/mmc/host/sdhci-pci-core.c | 2 +- include/linux/pm_wakeup.h | 5 ++--- 3 files changed, 10 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index a917219feea6..752b417e8129 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -451,16 +451,15 @@ static struct wakeup_source *device_wakeup_detach(struct device *dev) * Detach the @dev's wakeup source object from it, unregister this wakeup source * object and destroy it. */ -int device_wakeup_disable(struct device *dev) +void device_wakeup_disable(struct device *dev) { struct wakeup_source *ws; if (!dev || !dev->power.can_wakeup) - return -EINVAL; + return; ws = device_wakeup_detach(dev); wakeup_source_unregister(ws); - return 0; } EXPORT_SYMBOL_GPL(device_wakeup_disable); @@ -502,7 +501,11 @@ EXPORT_SYMBOL_GPL(device_set_wakeup_capable); */ int device_set_wakeup_enable(struct device *dev, bool enable) { - return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev); + if (enable) + return device_wakeup_enable(dev); + + device_wakeup_disable(dev); + return 0; } EXPORT_SYMBOL_GPL(device_set_wakeup_enable); diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 025b31aa712c..ef89ec382bfe 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -63,7 +63,7 @@ static int sdhci_pci_init_wakeup(struct sdhci_pci_chip *chip) if ((pm_flags & MMC_PM_KEEP_POWER) && (pm_flags & MMC_PM_WAKE_SDIO_IRQ)) return device_wakeup_enable(&chip->pdev->dev); else if (!cap_cd_wake) - return device_wakeup_disable(&chip->pdev->dev); + device_wakeup_disable(&chip->pdev->dev); return 0; } diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 6eb9adaef52b..428803eed798 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -107,7 +107,7 @@ extern void wakeup_sources_read_unlock(int idx); extern struct wakeup_source *wakeup_sources_walk_start(void); extern struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws); extern int device_wakeup_enable(struct device *dev); -extern int device_wakeup_disable(struct device *dev); +extern void device_wakeup_disable(struct device *dev); extern void device_set_wakeup_capable(struct device *dev, bool capable); extern int device_set_wakeup_enable(struct device *dev, bool enable); extern void __pm_stay_awake(struct wakeup_source *ws); @@ -154,10 +154,9 @@ static inline int device_wakeup_enable(struct device *dev) return 0; } -static inline int device_wakeup_disable(struct device *dev) +static inline void device_wakeup_disable(struct device *dev) { dev->power.should_wakeup = false; - return 0; } static inline int device_set_wakeup_enable(struct device *dev, bool enable) -- cgit From 3642c7ed52312ac2b95c9aba45c40e50bd8798ad Mon Sep 17 00:00:00 2001 From: Dhruva Gole Date: Mon, 18 Mar 2024 20:46:33 +0530 Subject: PM: wakeup: Remove unnecessary else from device_init_wakeup() Checkpatch warns that else is generally not necessary after a return condition which exists in the if part of this function. 
Hence, just to abide by what checkpatch recommends, follow it's guidelines. Signed-off-by: Dhruva Gole Signed-off-by: Rafael J. Wysocki --- include/linux/pm_wakeup.h | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 428803eed798..76cd1f9f1365 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -234,11 +234,10 @@ static inline int device_init_wakeup(struct device *dev, bool enable) if (enable) { device_set_wakeup_capable(dev, true); return device_wakeup_enable(dev); - } else { - device_wakeup_disable(dev); - device_set_wakeup_capable(dev, false); - return 0; } + device_wakeup_disable(dev); + device_set_wakeup_capable(dev, false); + return 0; } #endif /* _LINUX_PM_WAKEUP_H */ -- cgit From 4d0333798ebbfa1683cc3bc056d1b25b8c24344c Mon Sep 17 00:00:00 2001 From: Simon Trimmer Date: Mon, 25 Mar 2024 11:31:23 +0000 Subject: firmware: cs_dsp: Add locked wrappers for coeff read and write It is a common pattern for functions to take and release the DSP pwr_lock over the cs_dsp calls to read and write firmware controls. Add wrapper functions to do this sequence so that the calling code can be simplified to a single function call.. Signed-off-by: Simon Trimmer Signed-off-by: Richard Fitzgerald Reviewed-by: Takashi Iwai Link: https://msgid.link/r/20240325113127.112783-2-rf@opensource.cirrus.com Signed-off-by: Mark Brown --- drivers/firmware/cirrus/cs_dsp.c | 54 ++++++++++++++++++++++++++++++++++ include/linux/firmware/cirrus/cs_dsp.h | 4 +++ 2 files changed, 58 insertions(+) (limited to 'include/linux') diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c index 79d4254d1f9b..1263382e0d10 100644 --- a/drivers/firmware/cirrus/cs_dsp.c +++ b/drivers/firmware/cirrus/cs_dsp.c @@ -819,6 +819,33 @@ int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, } EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_write_ctrl, FW_CS_DSP); +/** + * cs_dsp_coeff_lock_and_write_ctrl() - Writes the given buffer to the given coefficient control + * @ctl: pointer to coefficient control + * @off: word offset at which data should be written + * @buf: the buffer to write to the given control + * @len: the length of the buffer in bytes + * + * Same as cs_dsp_coeff_write_ctrl() but takes pwr_lock. + * + * Return: A negative number on error, 1 when the control value changed and 0 when it has not. + */ +int cs_dsp_coeff_lock_and_write_ctrl(struct cs_dsp_coeff_ctl *ctl, + unsigned int off, const void *buf, size_t len) +{ + struct cs_dsp *dsp = ctl->dsp; + int ret; + + lockdep_assert_not_held(&dsp->pwr_lock); + + mutex_lock(&dsp->pwr_lock); + ret = cs_dsp_coeff_write_ctrl(ctl, off, buf, len); + mutex_unlock(&dsp->pwr_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(cs_dsp_coeff_lock_and_write_ctrl); + static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, unsigned int off, void *buf, size_t len) { @@ -891,6 +918,33 @@ int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, } EXPORT_SYMBOL_NS_GPL(cs_dsp_coeff_read_ctrl, FW_CS_DSP); +/** + * cs_dsp_coeff_lock_and_read_ctrl() - Reads the given coefficient control into the given buffer + * @ctl: pointer to coefficient control + * @off: word offset at which data should be read + * @buf: the buffer to store to the given control + * @len: the length of the buffer in bytes + * + * Same as cs_dsp_coeff_read_ctrl() but takes pwr_lock. + * + * Return: Zero for success, a negative number on error. 
+ */ +int cs_dsp_coeff_lock_and_read_ctrl(struct cs_dsp_coeff_ctl *ctl, + unsigned int off, void *buf, size_t len) +{ + struct cs_dsp *dsp = ctl->dsp; + int ret; + + lockdep_assert_not_held(&dsp->pwr_lock); + + mutex_lock(&dsp->pwr_lock); + ret = cs_dsp_coeff_read_ctrl(ctl, off, buf, len); + mutex_unlock(&dsp->pwr_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(cs_dsp_coeff_lock_and_read_ctrl); + static int cs_dsp_coeff_init_control_caches(struct cs_dsp *dsp) { struct cs_dsp_coeff_ctl *ctl; diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h index 29cd11d5a3cf..6097b1f701f1 100644 --- a/include/linux/firmware/cirrus/cs_dsp.h +++ b/include/linux/firmware/cirrus/cs_dsp.h @@ -239,8 +239,12 @@ void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp); int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int event_id); int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off, const void *buf, size_t len); +int cs_dsp_coeff_lock_and_write_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off, + const void *buf, size_t len); int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off, void *buf, size_t len); +int cs_dsp_coeff_lock_and_read_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off, + void *buf, size_t len); struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, int type, unsigned int alg); -- cgit From 7bdbf7446305cb65c510c16d57cde82bc76b234a Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 1 Apr 2024 19:13:02 -0700 Subject: bpf: add special internal-only MOV instruction to resolve per-CPU addrs Add a new BPF instruction for resolving absolute addresses of per-CPU data from their per-CPU offsets. This instruction is internal-only and users are not allowed to use them directly. They will only be used for internal inlining optimizations for now between BPF verifier and BPF JITs. We use a special BPF_MOV | BPF_ALU64 | BPF_X form with insn->off field set to BPF_ADDR_PERCPU = -1. I used negative offset value to distinguish them from positive ones used by user-exposed instructions. Such instruction performs a resolution of a per-CPU offset stored in a register to a valid kernel address which can be dereferenced. It is useful in any use case where absolute address of a per-CPU data has to be resolved (e.g., in inlining bpf_map_lookup_elem()). BPF disassembler is also taught to recognize them to support dumping final BPF assembly code (non-JIT'ed version). Add arch-specific way for BPF JITs to mark support for this instructions. This patch also adds support for these instructions in x86-64 BPF JIT. 
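As a rough sketch of the intended use (hypothetical code, not part of this patch; only the macros and helpers added in the hunks below are real), an inlining pass could emit the new internal instruction like this:

	static int example_emit_percpu_resolve(struct bpf_insn *insn_buf)
	{
		if (!bpf_jit_supports_percpu_insn())
			return -EOPNOTSUPP;	/* keep the regular helper call */

		/* r0 = absolute address of the per-CPU slot whose offset is in r0 */
		insn_buf[0] = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
		return 1;			/* one instruction emitted */
	}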
Signed-off-by: Andrii Nakryiko Acked-by: John Fastabend Link: https://lore.kernel.org/r/20240402021307.1012571-2-andrii@kernel.org Signed-off-by: Alexei Starovoitov --- arch/x86/net/bpf_jit_comp.c | 16 ++++++++++++++++ include/linux/filter.h | 20 ++++++++++++++++++++ kernel/bpf/core.c | 5 +++++ kernel/bpf/disasm.c | 14 ++++++++++++++ 4 files changed, 55 insertions(+) (limited to 'include/linux') diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 3b639d6f2f54..af89dd117dce 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1382,6 +1382,17 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image maybe_emit_mod(&prog, AUX_REG, dst_reg, true); EMIT3(0x0F, 0x44, add_2reg(0xC0, AUX_REG, dst_reg)); break; + } else if (insn_is_mov_percpu_addr(insn)) { + u32 off = (u32)(unsigned long)&this_cpu_off; + + /* mov , (if necessary) */ + EMIT_mov(dst_reg, src_reg); + + /* add , gs:[] */ + EMIT2(0x65, add_1mod(0x48, dst_reg)); + EMIT3(0x03, add_1reg(0x04, dst_reg), 0x25); + EMIT(off, 4); + break; } fallthrough; case BPF_ALU | BPF_MOV | BPF_X: @@ -3365,6 +3376,11 @@ bool bpf_jit_supports_subprog_tailcalls(void) return true; } +bool bpf_jit_supports_percpu_insn(void) +{ + return true; +} + void bpf_jit_free(struct bpf_prog *prog) { if (prog->jited) { diff --git a/include/linux/filter.h b/include/linux/filter.h index 531b36090122..161d5f7b64ed 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -178,6 +178,25 @@ struct ctl_table_header; .off = 0, \ .imm = 0 }) +/* Special (internal-only) form of mov, used to resolve per-CPU addrs: + * dst_reg = src_reg + + * BPF_ADDR_PERCPU is used as a special insn->off value. + */ +#define BPF_ADDR_PERCPU (-1) + +#define BPF_MOV64_PERCPU_REG(DST, SRC) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = BPF_ADDR_PERCPU, \ + .imm = 0 }) + +static inline bool insn_is_mov_percpu_addr(const struct bpf_insn *insn) +{ + return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->off == BPF_ADDR_PERCPU; +} + /* Short form of mov, dst_reg = imm32 */ #define BPF_MOV64_IMM(DST, IMM) \ @@ -972,6 +991,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); void bpf_jit_compile(struct bpf_prog *prog); bool bpf_jit_needs_zext(void); bool bpf_jit_supports_subprog_tailcalls(void); +bool bpf_jit_supports_percpu_insn(void); bool bpf_jit_supports_kfunc_call(void); bool bpf_jit_supports_far_kfunc_call(void); bool bpf_jit_supports_exceptions(void); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index ae406a2814db..7a33a3a7e63c 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2945,6 +2945,11 @@ bool __weak bpf_jit_supports_subprog_tailcalls(void) return false; } +bool __weak bpf_jit_supports_percpu_insn(void) +{ + return false; +} + bool __weak bpf_jit_supports_kfunc_call(void) { return false; diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c index bd2e2dd04740..309c4aa1b026 100644 --- a/kernel/bpf/disasm.c +++ b/kernel/bpf/disasm.c @@ -172,6 +172,17 @@ static bool is_addr_space_cast(const struct bpf_insn *insn) insn->off == BPF_ADDR_SPACE_CAST; } +/* Special (internal-only) form of mov, used to resolve per-CPU addrs: + * dst_reg = src_reg + + * BPF_ADDR_PERCPU is used as a special insn->off value. 
+ */ +#define BPF_ADDR_PERCPU (-1) + +static inline bool is_mov_percpu_addr(const struct bpf_insn *insn) +{ + return insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->off == BPF_ADDR_PERCPU; +} + void print_bpf_insn(const struct bpf_insn_cbs *cbs, const struct bpf_insn *insn, bool allow_ptr_leaks) @@ -194,6 +205,9 @@ void print_bpf_insn(const struct bpf_insn_cbs *cbs, verbose(cbs->private_data, "(%02x) r%d = addr_space_cast(r%d, %d, %d)\n", insn->code, insn->dst_reg, insn->src_reg, ((u32)insn->imm) >> 16, (u16)insn->imm); + } else if (is_mov_percpu_addr(insn)) { + verbose(cbs->private_data, "(%02x) r%d = &(void __percpu *)(r%d)\n", + insn->code, insn->dst_reg, insn->src_reg); } else if (BPF_SRC(insn->code) == BPF_X) { verbose(cbs->private_data, "(%02x) %c%d %s %s%c%d\n", insn->code, class == BPF_ALU ? 'w' : 'r', -- cgit From ada9841e3e665bb320d2eedcf33f640198e2378d Mon Sep 17 00:00:00 2001 From: Pawel Dembicki Date: Tue, 2 Apr 2024 22:11:18 +0200 Subject: net: phy: marvell: add basic support of 88E308X/88E609X family This patch implements only basic support. It covers PHY used in multiple IC: PHY: 88E3082, 88E3083 Switch: 88E6096, 88E6097 Signed-off-by: Pawel Dembicki Reviewed-by: Andrew Lunn Reviewed-by: Russell King (Oracle) Link: https://lore.kernel.org/r/20240402201123.2961909-1-paweldembicki@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/phy/marvell.c | 13 +++++++++++++ include/linux/marvell_phy.h | 1 + 2 files changed, 14 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 42ed013385bf..fae7eb57ee2c 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -3289,6 +3289,18 @@ static struct phy_driver marvell_drivers[] = { .get_strings = marvell_get_strings, .get_stats = marvell_get_stats, }, + { + .phy_id = MARVELL_PHY_ID_88E3082, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .name = "Marvell 88E308X/88E609X Family", + /* PHY_BASIC_FEATURES */ + .probe = marvell_probe, + .config_init = marvell_config_init, + .aneg_done = marvell_aneg_done, + .read_status = marvell_read_status, + .resume = genphy_resume, + .suspend = genphy_suspend, + }, { .phy_id = MARVELL_PHY_ID_88E1112, .phy_id_mask = MARVELL_PHY_ID_MASK, @@ -3742,6 +3754,7 @@ module_phy_driver(marvell_drivers); static struct mdio_device_id __maybe_unused marvell_tbl[] = { { MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK }, + { MARVELL_PHY_ID_88E3082, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1112, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1111, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E1111_FINISAR, MARVELL_PHY_ID_MASK }, diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 693eba9869e4..88254f9aec2b 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -7,6 +7,7 @@ /* Known PHY IDs */ #define MARVELL_PHY_ID_88E1101 0x01410c60 +#define MARVELL_PHY_ID_88E3082 0x01410c80 #define MARVELL_PHY_ID_88E1112 0x01410c90 #define MARVELL_PHY_ID_88E1111 0x01410cc0 #define MARVELL_PHY_ID_88E1118 0x01410e10 -- cgit From 30f8d23814ea4bb0229a78f7f8f36500c2e2d962 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Tue, 2 Apr 2024 16:30:39 +0300 Subject: net/mlx5: Convert uintX_t to uX In the kernel, the preferred types are uX. 
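The conversion is mechanical; for example the prototype changed in mlx5_core.h below goes from

	int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);

to

	int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id);

with u8/u16/u32/u64 coming from <linux/types.h>.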
Signed-off-by: Gal Pressman Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed Signed-off-by: Tariq Toukan Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20240402133043.56322-8-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 4 ++-- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/fw.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2 +- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c | 2 +- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c | 4 ++-- include/linux/mlx5/device.h | 2 +- 11 files changed, 14 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 84db05fb9389..f5a3ac40f6e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -1160,7 +1160,7 @@ void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv); void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, struct ethtool_drvinfo *drvinfo); void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, - uint32_t stringset, uint8_t *data); + u32 stringset, u8 *data); int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset); void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv, struct ethtool_stats *stats, u64 *data); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c index c7d191f66ad1..4f83e3172767 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c @@ -73,7 +73,7 @@ void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule) struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs, struct sock *sk, u32 tirn, - uint32_t flow_tag) + u32 flow_tag) { struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs); struct mlx5_flow_destination dest = {}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h index a032bff482a6..7e899c716267 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h @@ -11,14 +11,14 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs); void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs); struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs, struct sock *sk, u32 tirn, - uint32_t flow_tag); + u32 flow_tag); void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule); #else static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs) { return 0; } static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs) {} static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs, struct sock *sk, u32 tirn, - uint32_t flow_tag) + u32 flow_tag) { return ERR_PTR(-EOPNOTSUPP); } static inline void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule) {} #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h index 
c1844128effa..07a04a142a2e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h @@ -95,7 +95,7 @@ int mlx5e_ktls_init(struct mlx5e_priv *priv); void mlx5e_ktls_cleanup(struct mlx5e_priv *priv); int mlx5e_ktls_get_count(struct mlx5e_priv *priv); -void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t **data); +void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data); void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data); #else @@ -144,7 +144,7 @@ static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev) static inline int mlx5e_ktls_init(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) { } static inline int mlx5e_ktls_get_count(struct mlx5e_priv *priv) { return 0; } -static inline void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t **data) { } +static inline void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data) { } static inline void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data) { } #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c index 7bf79973128b..60be2d72eb9e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c @@ -58,7 +58,7 @@ int mlx5e_ktls_get_count(struct mlx5e_priv *priv) return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc); } -void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t **data) +void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data) { unsigned int i, n; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 55b7efe21624..a74ee698671c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -274,7 +274,7 @@ out: } static void mlx5e_rep_get_strings(struct net_device *dev, - u32 stringset, uint8_t *data) + u32 stringset, u8 *data) { struct mlx5e_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index e7faf7e73ca4..2d95a9b7b44e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c @@ -283,7 +283,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev) return 0; } -int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id) +int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id) { u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {}; int i; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 58732f44940f..c38342b9f320 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -205,7 +205,7 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev); void mlx5_cmd_disable(struct mlx5_core_dev *dev); void mlx5_cmd_set_state(struct mlx5_core_dev *dev, enum mlx5_cmdif_state cmdif_state); -int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id); +int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev); int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c 
b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c index f708b029425a..e9f6c7ed7a7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c @@ -1883,7 +1883,7 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb, static int dr_ste_v0_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { struct mlx5dr_match_misc5 *misc5 = &value->misc5; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index dd856cde188d..1d49704b9542 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -1897,7 +1897,7 @@ void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb, static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { struct mlx5dr_match_misc5 *misc5 = &value->misc5; @@ -2129,7 +2129,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, static int dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0; struct mlx5dr_match_misc *misc = &value->misc; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 01275c6e8468..da61be87a12e 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -68,7 +68,7 @@ #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8) #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32) #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) -#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld))) +#define MLX5_ADDR_OF(typ, p, fld) ((void *)((u8 *)(p) + MLX5_BYTE_OFF(typ, fld))) /* insert a value to a struct */ #define MLX5_SET(typ, p, fld, v) do { \ -- cgit From 137f3d50ad2a0f2e1ebe5181d6b32a5541786b99 Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Tue, 2 Apr 2024 16:30:41 +0300 Subject: net/mlx5: Support matching on l4_type for ttc_table Replace matching on TCP and UDP protocols with new l4_type field which is parsed by steering for ttc_table. It is enabled by the outer_l4_type or inner_l4_type bits in nic_rx or port_sel flow table capabilities and used only if pcc_ifa2 bit in HCA capabilities is set. 
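The core of the change, condensed from the mlx5_fs_ttc_set_match_proto() helper added in fs_ttc.c below (a restatement for clarity, not extra code in the patch):

	if (use_l4_type && (proto == IPPROTO_TCP || proto == IPPROTO_UDP)) {
		/* match on the parsed L4 type instead of the raw protocol number */
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, l4_type);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_type,
			 proto == IPPROTO_TCP ? MLX5_PACKET_L4_TYPE_TCP :
						MLX5_PACKET_L4_TYPE_UDP);
	} else {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, proto);
	}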
Signed-off-by: Jianbo Liu Reviewed-by: Mark Bloch Signed-off-by: Tariq Toukan Link: https://lore.kernel.org/r/20240402133043.56322-10-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 6 +- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 3 +- .../net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 8 +- .../net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c | 254 +++++++++++++++++---- .../net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h | 2 +- include/linux/mlx5/device.h | 6 + include/linux/mlx5/mlx5_ifc.h | 32 ++- 7 files changed, 244 insertions(+), 67 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 777d311d44ef..8c5b291a171f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -896,8 +896,7 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs, int tt; memset(ttc_params, 0, sizeof(*ttc_params)); - ttc_params->ns = mlx5_get_flow_namespace(fs->mdev, - MLX5_FLOW_NAMESPACE_KERNEL); + ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL; ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL; ft_attr->prio = MLX5E_NIC_PRIO; @@ -920,8 +919,7 @@ void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs, int tt; memset(ttc_params, 0, sizeof(*ttc_params)); - ttc_params->ns = mlx5_get_flow_namespace(fs->mdev, - MLX5_FLOW_NAMESPACE_KERNEL); + ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL; ft_attr->level = MLX5E_TTC_FT_LEVEL; ft_attr->prio = MLX5E_NIC_PRIO; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index f2f19fa37d91..aeb32cb27182 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -835,8 +835,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp, memset(ttc_params, 0, sizeof(*ttc_params)); - ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev, - MLX5_FLOW_NAMESPACE_KERNEL); + ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL; for (tt = 0; tt < MLX5_NUM_TT; tt++) { ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR; ttc_params->dests[tt].tir_num = diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c index 101b3bb90863..c16b462ddedf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c @@ -449,13 +449,11 @@ static void set_tt_map(struct mlx5_lag_port_sel *port_sel, static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev, struct ttc_params *ttc_params) { - struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; struct mlx5_flow_table_attr *ft_attr; int tt; - ttc_params->ns = mlx5_get_flow_namespace(dev, - MLX5_FLOW_NAMESPACE_PORT_SEL); + ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL; ft_attr = &ttc_params->ft_attr; ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC; @@ -470,13 +468,11 @@ static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev, static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev, struct ttc_params *ttc_params) { - struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_lag_port_sel *port_sel = &ldev->port_sel; struct mlx5_flow_table_attr *ft_attr; int tt; - ttc_params->ns = mlx5_get_flow_namespace(dev, - MLX5_FLOW_NAMESPACE_PORT_SEL); + ttc_params->ns_type = 
MLX5_FLOW_NAMESPACE_PORT_SEL; ft_attr = &ttc_params->ft_attr; ft_attr->level = MLX5_LAG_FT_LEVEL_TTC; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c index b78f2ba25c19..9f13cea16446 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c @@ -9,21 +9,24 @@ #include "mlx5_core.h" #include "lib/fs_ttc.h" -#define MLX5_TTC_NUM_GROUPS 3 -#define MLX5_TTC_GROUP1_SIZE (BIT(3) + MLX5_NUM_TUNNEL_TT) -#define MLX5_TTC_GROUP2_SIZE BIT(1) -#define MLX5_TTC_GROUP3_SIZE BIT(0) -#define MLX5_TTC_TABLE_SIZE (MLX5_TTC_GROUP1_SIZE +\ - MLX5_TTC_GROUP2_SIZE +\ - MLX5_TTC_GROUP3_SIZE) - -#define MLX5_INNER_TTC_NUM_GROUPS 3 -#define MLX5_INNER_TTC_GROUP1_SIZE BIT(3) -#define MLX5_INNER_TTC_GROUP2_SIZE BIT(1) -#define MLX5_INNER_TTC_GROUP3_SIZE BIT(0) -#define MLX5_INNER_TTC_TABLE_SIZE (MLX5_INNER_TTC_GROUP1_SIZE +\ - MLX5_INNER_TTC_GROUP2_SIZE +\ - MLX5_INNER_TTC_GROUP3_SIZE) +#define MLX5_TTC_MAX_NUM_GROUPS 4 +#define MLX5_TTC_GROUP_TCPUDP_SIZE (MLX5_TT_IPV6_UDP + 1) + +struct mlx5_fs_ttc_groups { + bool use_l4_type; + int num_groups; + int group_size[MLX5_TTC_MAX_NUM_GROUPS]; +}; + +static int mlx5_fs_ttc_table_size(const struct mlx5_fs_ttc_groups *groups) +{ + int i, sz = 0; + + for (i = 0; i < groups->num_groups; i++) + sz += groups->group_size[i]; + + return sz; +} /* L3/L4 traffic type classifier */ struct mlx5_ttc_table { @@ -138,6 +141,53 @@ static struct mlx5_etype_proto ttc_tunnel_rules[] = { }; +enum TTC_GROUP_TYPE { + TTC_GROUPS_DEFAULT = 0, + TTC_GROUPS_USE_L4_TYPE = 1, +}; + +static const struct mlx5_fs_ttc_groups ttc_groups[] = { + [TTC_GROUPS_DEFAULT] = { + .num_groups = 3, + .group_size = { + BIT(3) + MLX5_NUM_TUNNEL_TT, + BIT(1), + BIT(0), + }, + }, + [TTC_GROUPS_USE_L4_TYPE] = { + .use_l4_type = true, + .num_groups = 4, + .group_size = { + MLX5_TTC_GROUP_TCPUDP_SIZE, + BIT(3) + MLX5_NUM_TUNNEL_TT - MLX5_TTC_GROUP_TCPUDP_SIZE, + BIT(1), + BIT(0), + }, + }, +}; + +static const struct mlx5_fs_ttc_groups inner_ttc_groups[] = { + [TTC_GROUPS_DEFAULT] = { + .num_groups = 3, + .group_size = { + BIT(3), + BIT(1), + BIT(0), + }, + }, + [TTC_GROUPS_USE_L4_TYPE] = { + .use_l4_type = true, + .num_groups = 4, + .group_size = { + MLX5_TTC_GROUP_TCPUDP_SIZE, + BIT(3) - MLX5_TTC_GROUP_TCPUDP_SIZE, + BIT(1), + BIT(0), + }, + }, +}; + u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt) { return ttc_tunnel_rules[tt].proto; @@ -188,9 +238,29 @@ static u8 mlx5_etype_to_ipv(u16 ethertype) return 0; } +static void mlx5_fs_ttc_set_match_proto(void *headers_c, void *headers_v, + u8 proto, bool use_l4_type) +{ + int l4_type; + + if (use_l4_type && (proto == IPPROTO_TCP || proto == IPPROTO_UDP)) { + if (proto == IPPROTO_TCP) + l4_type = MLX5_PACKET_L4_TYPE_TCP; + else + l4_type = MLX5_PACKET_L4_TYPE_UDP; + + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, l4_type); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_type, l4_type); + } else { + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, proto); + } +} + static struct mlx5_flow_handle * mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, - struct mlx5_flow_destination *dest, u16 etype, u8 proto) + struct mlx5_flow_destination *dest, u16 etype, u8 proto, + bool use_l4_type) { int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(dev, @@ -207,8 +277,13 @@ mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct 
mlx5_flow_table *ft, if (proto) { spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto); + mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param, + spec->match_criteria, + outer_headers), + MLX5_ADDR_OF(fte_match_param, + spec->match_value, + outer_headers), + proto, use_l4_type); } ipv = mlx5_etype_to_ipv(etype); @@ -234,7 +309,8 @@ mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev, struct ttc_params *params, - struct mlx5_ttc_table *ttc) + struct mlx5_ttc_table *ttc, + bool use_l4_type) { struct mlx5_flow_handle **trules; struct mlx5_ttc_rule *rules; @@ -251,7 +327,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev, continue; rule->rule = mlx5_generate_ttc_rule(dev, ft, ¶ms->dests[tt], ttc_rules[tt].etype, - ttc_rules[tt].proto); + ttc_rules[tt].proto, + use_l4_type); if (IS_ERR(rule->rule)) { err = PTR_ERR(rule->rule); rule->rule = NULL; @@ -273,7 +350,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev, trules[tt] = mlx5_generate_ttc_rule(dev, ft, ¶ms->tunnel_dests[tt], ttc_tunnel_rules[tt].etype, - ttc_tunnel_rules[tt].proto); + ttc_tunnel_rules[tt].proto, + use_l4_type); if (IS_ERR(trules[tt])) { err = PTR_ERR(trules[tt]); trules[tt] = NULL; @@ -289,7 +367,8 @@ del_rules: } static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc, - bool use_ipv) + bool use_ipv, + const struct mlx5_fs_ttc_groups *groups) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); int ix = 0; @@ -297,7 +376,7 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc, int err; u8 *mc; - ttc->g = kcalloc(MLX5_TTC_NUM_GROUPS, sizeof(*ttc->g), GFP_KERNEL); + ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL); if (!ttc->g) return -ENOMEM; in = kvzalloc(inlen, GFP_KERNEL); @@ -307,16 +386,31 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc, return -ENOMEM; } - /* L4 Group */ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); if (use_ipv) MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version); else MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + + /* TCP UDP group */ + if (groups->use_l4_type) { + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.l4_type); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += groups->group_size[ttc->num_groups]; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); + if (IS_ERR(ttc->g[ttc->num_groups])) + goto err; + ttc->num_groups++; + + MLX5_SET(fte_match_param, mc, outer_headers.l4_type, 0); + } + + /* L4 Group */ + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5_TTC_GROUP1_SIZE; + ix += groups->group_size[ttc->num_groups]; MLX5_SET_CFG(in, end_flow_index, ix - 1); ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); if (IS_ERR(ttc->g[ttc->num_groups])) @@ -326,7 +420,7 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc, /* L3 Group */ MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0); MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5_TTC_GROUP2_SIZE; + ix += 
groups->group_size[ttc->num_groups]; MLX5_SET_CFG(in, end_flow_index, ix - 1); ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); if (IS_ERR(ttc->g[ttc->num_groups])) @@ -336,7 +430,7 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc, /* Any Group */ memset(in, 0, inlen); MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5_TTC_GROUP3_SIZE; + ix += groups->group_size[ttc->num_groups]; MLX5_SET_CFG(in, end_flow_index, ix - 1); ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); if (IS_ERR(ttc->g[ttc->num_groups])) @@ -358,7 +452,7 @@ static struct mlx5_flow_handle * mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, struct mlx5_flow_destination *dest, - u16 etype, u8 proto) + u16 etype, u8 proto, bool use_l4_type) { MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_handle *rule; @@ -379,8 +473,13 @@ mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev, if (proto) { spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol); - MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto); + mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param, + spec->match_criteria, + inner_headers), + MLX5_ADDR_OF(fte_match_param, + spec->match_value, + inner_headers), + proto, use_l4_type); } rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); @@ -395,7 +494,8 @@ mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev, static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev, struct ttc_params *params, - struct mlx5_ttc_table *ttc) + struct mlx5_ttc_table *ttc, + bool use_l4_type) { struct mlx5_ttc_rule *rules; struct mlx5_flow_table *ft; @@ -413,7 +513,8 @@ static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev, rule->rule = mlx5_generate_inner_ttc_rule(dev, ft, ¶ms->dests[tt], ttc_rules[tt].etype, - ttc_rules[tt].proto); + ttc_rules[tt].proto, + use_l4_type); if (IS_ERR(rule->rule)) { err = PTR_ERR(rule->rule); rule->rule = NULL; @@ -430,7 +531,8 @@ del_rules: return err; } -static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc) +static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc, + const struct mlx5_fs_ttc_groups *groups) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); int ix = 0; @@ -438,8 +540,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc) int err; u8 *mc; - ttc->g = kcalloc(MLX5_INNER_TTC_NUM_GROUPS, sizeof(*ttc->g), - GFP_KERNEL); + ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL); if (!ttc->g) return -ENOMEM; in = kvzalloc(inlen, GFP_KERNEL); @@ -449,13 +550,28 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc) return -ENOMEM; } - /* L4 Group */ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); - MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol); MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version); MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS); + + /* TCP UDP group */ + if (groups->use_l4_type) { + MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.l4_type); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += groups->group_size[ttc->num_groups]; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); + if (IS_ERR(ttc->g[ttc->num_groups])) + goto err; + ttc->num_groups++; + + MLX5_SET(fte_match_param, mc, inner_headers.l4_type, 0); + } + + /* L4 Group */ + 
MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol); MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5_INNER_TTC_GROUP1_SIZE; + ix += groups->group_size[ttc->num_groups]; MLX5_SET_CFG(in, end_flow_index, ix - 1); ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); if (IS_ERR(ttc->g[ttc->num_groups])) @@ -465,7 +581,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc) /* L3 Group */ MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0); MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5_INNER_TTC_GROUP2_SIZE; + ix += groups->group_size[ttc->num_groups]; MLX5_SET_CFG(in, end_flow_index, ix - 1); ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); if (IS_ERR(ttc->g[ttc->num_groups])) @@ -475,7 +591,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc) /* Any Group */ memset(in, 0, inlen); MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5_INNER_TTC_GROUP3_SIZE; + ix += groups->group_size[ttc->num_groups]; MLX5_SET_CFG(in, end_flow_index, ix - 1); ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); if (IS_ERR(ttc->g[ttc->num_groups])) @@ -496,27 +612,47 @@ err: struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev, struct ttc_params *params) { + const struct mlx5_fs_ttc_groups *groups; + struct mlx5_flow_namespace *ns; struct mlx5_ttc_table *ttc; + bool use_l4_type; int err; ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL); if (!ttc) return ERR_PTR(-ENOMEM); + switch (params->ns_type) { + case MLX5_FLOW_NAMESPACE_PORT_SEL: + use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) && + MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, inner_l4_type); + break; + case MLX5_FLOW_NAMESPACE_KERNEL: + use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) && + MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, inner_l4_type); + break; + default: + return ERR_PTR(-EINVAL); + } + + ns = mlx5_get_flow_namespace(dev, params->ns_type); + groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] : + &inner_ttc_groups[TTC_GROUPS_DEFAULT]; + WARN_ON_ONCE(params->ft_attr.max_fte); - params->ft_attr.max_fte = MLX5_INNER_TTC_TABLE_SIZE; - ttc->t = mlx5_create_flow_table(params->ns, ¶ms->ft_attr); + params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups); + ttc->t = mlx5_create_flow_table(ns, ¶ms->ft_attr); if (IS_ERR(ttc->t)) { err = PTR_ERR(ttc->t); kvfree(ttc); return ERR_PTR(err); } - err = mlx5_create_inner_ttc_table_groups(ttc); + err = mlx5_create_inner_ttc_table_groups(ttc, groups); if (err) goto destroy_ft; - err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc); + err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc, use_l4_type); if (err) goto destroy_ft; @@ -549,27 +685,47 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev, bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_field_support.outer_ip_version); + const struct mlx5_fs_ttc_groups *groups; + struct mlx5_flow_namespace *ns; struct mlx5_ttc_table *ttc; + bool use_l4_type; int err; ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL); if (!ttc) return ERR_PTR(-ENOMEM); + switch (params->ns_type) { + case MLX5_FLOW_NAMESPACE_PORT_SEL: + use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) && + MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, outer_l4_type); + break; + case MLX5_FLOW_NAMESPACE_KERNEL: + use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) && + MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, outer_l4_type); + break; + default: + return ERR_PTR(-EINVAL); + } + + ns = mlx5_get_flow_namespace(dev, params->ns_type); + groups = use_l4_type ? 
&ttc_groups[TTC_GROUPS_USE_L4_TYPE] : + &ttc_groups[TTC_GROUPS_DEFAULT]; + WARN_ON_ONCE(params->ft_attr.max_fte); - params->ft_attr.max_fte = MLX5_TTC_TABLE_SIZE; - ttc->t = mlx5_create_flow_table(params->ns, ¶ms->ft_attr); + params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups); + ttc->t = mlx5_create_flow_table(ns, ¶ms->ft_attr); if (IS_ERR(ttc->t)) { err = PTR_ERR(ttc->t); kvfree(ttc); return ERR_PTR(err); } - err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer); + err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer, groups); if (err) goto destroy_ft; - err = mlx5_generate_ttc_table_rules(dev, params, ttc); + err = mlx5_generate_ttc_table_rules(dev, params, ttc, use_l4_type); if (err) goto destroy_ft; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h index 85fef0cd1c07..92eea6bea310 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h @@ -40,7 +40,7 @@ struct mlx5_ttc_rule { struct mlx5_ttc_table; struct ttc_params { - struct mlx5_flow_namespace *ns; + enum mlx5_flow_namespace_type ns_type; struct mlx5_flow_table_attr ft_attr; struct mlx5_flow_destination dests[MLX5_NUM_TT]; DECLARE_BITMAP(ignore_dests, MLX5_NUM_TT); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index da61be87a12e..d7bb31d9a446 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1336,6 +1336,9 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap) +#define MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(mdev, cap) \ + MLX5_CAP_FLOWTABLE(mdev, ft_field_support_2_nic_receive.cap) + #define MLX5_CAP_ESW(mdev, cap) \ MLX5_GET(e_switch_cap, \ mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap) @@ -1359,6 +1362,9 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \ MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap) +#define MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(mdev, cap) \ + MLX5_CAP_PORT_SELECTION(mdev, ft_field_support_2_port_selection.cap) + #define MLX5_CAP_ODP(mdev, cap)\ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index c940b329a475..40f6fa138e27 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -416,7 +416,10 @@ struct mlx5_ifc_flow_table_fields_supported_bits { /* Table 2170 - Flow Table Fields Supported 2 Format */ struct mlx5_ifc_flow_table_fields_supported_2_bits { - u8 reserved_at_0[0xe]; + u8 reserved_at_0[0x2]; + u8 inner_l4_type[0x1]; + u8 outer_l4_type[0x1]; + u8 reserved_at_4[0xa]; u8 bth_opcode[0x1]; u8 reserved_at_f[0x1]; u8 tunnel_header_0_1[0x1]; @@ -525,6 +528,12 @@ union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { u8 reserved_at_0[0x80]; }; +enum { + MLX5_PACKET_L4_TYPE_NONE, + MLX5_PACKET_L4_TYPE_TCP, + MLX5_PACKET_L4_TYPE_UDP, +}; + struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 smac_47_16[0x20]; @@ -550,7 +559,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits { u8 tcp_sport[0x10]; u8 tcp_dport[0x10]; - u8 reserved_at_c0[0x10]; + u8 l4_type[0x2]; + u8 reserved_at_c2[0xe]; u8 ipv4_ihl[0x4]; u8 reserved_at_c4[0x4]; @@ -846,7 +856,11 @@ struct mlx5_ifc_flow_table_nic_cap_bits { struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer; - u8 reserved_at_e00[0x700]; + u8 reserved_at_e00[0x600]; + + struct 
mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive; + + u8 reserved_at_1480[0x80]; struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_nic_receive_rdma; @@ -876,7 +890,9 @@ struct mlx5_ifc_port_selection_cap_bits { struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection; - u8 reserved_at_400[0x7c00]; + struct mlx5_ifc_flow_table_fields_supported_2_bits ft_field_support_2_port_selection; + + u8 reserved_at_480[0x7b80]; }; enum { @@ -2004,7 +2020,13 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_3a0[0x10]; u8 max_rqt_vhca_id[0x10]; - u8 reserved_at_3c0[0x440]; + u8 reserved_at_3c0[0x20]; + + u8 reserved_at_3e0[0x10]; + u8 pcc_ifa2[0x1]; + u8 reserved_at_3f1[0xf]; + + u8 reserved_at_400[0x400]; }; enum mlx5_ifc_flow_destination_type { -- cgit From c788d79cfa6b68b1068b25e9cdff25d6035ed191 Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Tue, 2 Apr 2024 16:30:42 +0300 Subject: net/mlx5: Skip pages EQ creation for non-page supplier function Page events are not issued by device on the function if page_request_disable is set, so no need to create pages EQ. Signed-off-by: Jianbo Liu Reviewed-by: Parav Pandit Reviewed-by: Moshe Shemesh Signed-off-by: Tariq Toukan Link: https://lore.kernel.org/r/20240402133043.56322-11-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 9 ++++++++- include/linux/mlx5/mlx5_ifc.h | 4 +++- 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 40a6cb052a2d..5693986ae656 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -688,6 +688,12 @@ static int create_async_eqs(struct mlx5_core_dev *dev) if (err) goto err2; + /* Skip page eq creation when the device does not request for page requests */ + if (MLX5_CAP_GEN(dev, page_request_disable)) { + mlx5_core_dbg(dev, "Skip page EQ creation\n"); + return 0; + } + param = (struct mlx5_eq_param) { .irq = table->ctrl_irq, .nent = /* TODO: sriov max_vf + */ 1, @@ -716,7 +722,8 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *table = dev->priv.eq_table; - cleanup_async_eq(dev, &table->pages_eq, "pages"); + if (!MLX5_CAP_GEN(dev, page_request_disable)) + cleanup_async_eq(dev, &table->pages_eq, "pages"); cleanup_async_eq(dev, &table->async_eq, "async"); mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ); mlx5_cmd_use_polling(dev); diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 40f6fa138e27..cc159d8563d1 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1485,7 +1485,9 @@ enum { }; struct mlx5_ifc_cmd_hca_cap_bits { - u8 reserved_at_0[0x10]; + u8 reserved_at_0[0x6]; + u8 page_request_disable[0x1]; + u8 reserved_at_7[0x9]; u8 shared_object_to_user_object_allowed[0x1]; u8 reserved_at_13[0xe]; u8 vhca_resource_manager[0x1]; -- cgit From d2f277e26f521ccf6fb438463b41dba6123caabe Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Sun, 17 Mar 2024 20:41:45 +0200 Subject: fsnotify: rename fsnotify_{get,put}_sb_connectors() Instead of counting the number of connectors in an sb, we would like to count the number of watched objects per priority group. 
As a start, create an accessor fsnotify_sb_watched_objects() to s_fsnotify_connectors and rename the fsnotify_{get,put}_sb_connectors() helpers to fsnotify_{get,put}_sb_watchers() to better describes the counter. Increment the counter at the end of fsnotify_attach_connector_to_object() if connector was attached instead of decrementing it on race to connect. This is fine, because fsnotify_delete_sb() cannot be running in parallel to fsnotify_attach_connector_to_object() which requires a reference to a filesystem object. Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara Message-Id: <20240317184154.1200192-2-amir73il@gmail.com> --- fs/notify/fsnotify.c | 4 +-- fs/notify/mark.c | 67 ++++++++++++++++++++++------------------ include/linux/fsnotify.h | 2 +- include/linux/fsnotify_backend.h | 5 +++ 4 files changed, 45 insertions(+), 33 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 2fc105a72a8f..503e7c75e777 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -92,8 +92,8 @@ void fsnotify_sb_delete(struct super_block *sb) fsnotify_unmount_inodes(sb); fsnotify_clear_marks_by_sb(sb); /* Wait for outstanding object references from connectors */ - wait_var_event(&sb->s_fsnotify_connectors, - !atomic_long_read(&sb->s_fsnotify_connectors)); + wait_var_event(fsnotify_sb_watched_objects(sb), + !atomic_long_read(fsnotify_sb_watched_objects(sb))); } /* diff --git a/fs/notify/mark.c b/fs/notify/mark.c index d6944ff86ffa..3bcce2671bd5 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -116,10 +116,43 @@ __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn) return *fsnotify_conn_mask_p(conn); } +static void fsnotify_get_sb_watched_objects(struct super_block *sb) +{ + atomic_long_inc(fsnotify_sb_watched_objects(sb)); +} + +static void fsnotify_put_sb_watched_objects(struct super_block *sb) +{ + if (atomic_long_dec_and_test(fsnotify_sb_watched_objects(sb))) + wake_up_var(fsnotify_sb_watched_objects(sb)); +} + static void fsnotify_get_inode_ref(struct inode *inode) { ihold(inode); - atomic_long_inc(&inode->i_sb->s_fsnotify_connectors); + fsnotify_get_sb_watched_objects(inode->i_sb); +} + +static void fsnotify_put_inode_ref(struct inode *inode) +{ + fsnotify_put_sb_watched_objects(inode->i_sb); + iput(inode); +} + +static void fsnotify_get_sb_watchers(struct fsnotify_mark_connector *conn) +{ + struct super_block *sb = fsnotify_connector_sb(conn); + + if (sb) + fsnotify_get_sb_watched_objects(sb); +} + +static void fsnotify_put_sb_watchers(struct fsnotify_mark_connector *conn) +{ + struct super_block *sb = fsnotify_connector_sb(conn); + + if (sb) + fsnotify_put_sb_watched_objects(sb); } /* @@ -213,31 +246,6 @@ static void fsnotify_connector_destroy_workfn(struct work_struct *work) } } -static void fsnotify_put_inode_ref(struct inode *inode) -{ - struct super_block *sb = inode->i_sb; - - iput(inode); - if (atomic_long_dec_and_test(&sb->s_fsnotify_connectors)) - wake_up_var(&sb->s_fsnotify_connectors); -} - -static void fsnotify_get_sb_connectors(struct fsnotify_mark_connector *conn) -{ - struct super_block *sb = fsnotify_connector_sb(conn); - - if (sb) - atomic_long_inc(&sb->s_fsnotify_connectors); -} - -static void fsnotify_put_sb_connectors(struct fsnotify_mark_connector *conn) -{ - struct super_block *sb = fsnotify_connector_sb(conn); - - if (sb && atomic_long_dec_and_test(&sb->s_fsnotify_connectors)) - wake_up_var(&sb->s_fsnotify_connectors); -} - static void *fsnotify_detach_connector_from_object( struct 
fsnotify_mark_connector *conn, unsigned int *type) @@ -261,7 +269,7 @@ static void *fsnotify_detach_connector_from_object( fsnotify_conn_sb(conn)->s_fsnotify_mask = 0; } - fsnotify_put_sb_connectors(conn); + fsnotify_put_sb_watchers(conn); rcu_assign_pointer(*(conn->obj), NULL); conn->obj = NULL; conn->type = FSNOTIFY_OBJ_TYPE_DETACHED; @@ -549,8 +557,6 @@ static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp, conn->flags = 0; conn->type = obj_type; conn->obj = connp; - conn->flags = 0; - fsnotify_get_sb_connectors(conn); /* * cmpxchg() provides the barrier so that readers of *connp can see @@ -558,10 +564,11 @@ static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp, */ if (cmpxchg(connp, NULL, conn)) { /* Someone else created list structure for us */ - fsnotify_put_sb_connectors(conn); kmem_cache_free(fsnotify_mark_connector_cachep, conn); + return 0; } + fsnotify_get_sb_watchers(conn); return 0; } diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 1a9de119a0f7..e470bb67c9a3 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -20,7 +20,7 @@ /* Are there any inode/mount/sb objects that are being watched at all? */ static inline bool fsnotify_sb_has_watchers(struct super_block *sb) { - return atomic_long_read(&sb->s_fsnotify_connectors); + return atomic_long_read(fsnotify_sb_watched_objects(sb)); } /* diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 8f40c349b228..d4e3bc55d174 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -483,6 +483,11 @@ struct fsnotify_mark_connector { struct hlist_head list; }; +static inline atomic_long_t *fsnotify_sb_watched_objects(struct super_block *sb) +{ + return &sb->s_fsnotify_connectors; +} + /* * A mark is simply an object attached to an in core inode which allows an * fsnotify listener to indicate they are either no longer interested in events -- cgit From 230d97d39ee2eb9030309f04f98615aaeb420dac Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Sun, 17 Mar 2024 20:41:47 +0200 Subject: fsnotify: create a wrapper fsnotify_find_inode_mark() In preparation to passing an object pointer to fsnotify_find_mark(), add a wrapper fsnotify_find_inode_mark() and use it where possible. 
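[ Illustration only, not part of the patch: a backend lookup that previously dereferenced the inode's connector list directly would, after this change, read roughly as follows. The helper name example_find() is hypothetical. ]

static struct fsnotify_mark *example_find(struct inode *inode,
                                          struct fsnotify_group *group)
{
        /* before: fsnotify_find_mark(&inode->i_fsnotify_marks, group) */
        return fsnotify_find_inode_mark(inode, group);
}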
Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara Message-Id: <20240317184154.1200192-4-amir73il@gmail.com> --- fs/nfsd/filecache.c | 4 ++-- fs/notify/dnotify/dnotify.c | 4 ++-- fs/notify/inotify/inotify_user.c | 2 +- include/linux/fsnotify_backend.h | 7 +++++++ kernel/audit_tree.c | 2 +- kernel/audit_watch.c | 2 +- 6 files changed, 14 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index ddd3e0d9cfa6..ad9083ca144b 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -159,8 +159,8 @@ nfsd_file_mark_find_or_create(struct nfsd_file *nf, struct inode *inode) do { fsnotify_group_lock(nfsd_file_fsnotify_group); - mark = fsnotify_find_mark(&inode->i_fsnotify_marks, - nfsd_file_fsnotify_group); + mark = fsnotify_find_inode_mark(inode, + nfsd_file_fsnotify_group); if (mark) { nfm = nfsd_file_mark_get(container_of(mark, struct nfsd_file_mark, diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c index 3464fa7e8538..f3669403fabf 100644 --- a/fs/notify/dnotify/dnotify.c +++ b/fs/notify/dnotify/dnotify.c @@ -162,7 +162,7 @@ void dnotify_flush(struct file *filp, fl_owner_t id) if (!S_ISDIR(inode->i_mode)) return; - fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, dnotify_group); + fsn_mark = fsnotify_find_inode_mark(inode, dnotify_group); if (!fsn_mark) return; dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); @@ -326,7 +326,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned int arg) fsnotify_group_lock(dnotify_group); /* add the new_fsn_mark or find an old one. */ - fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, dnotify_group); + fsn_mark = fsnotify_find_inode_mark(inode, dnotify_group); if (fsn_mark) { dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); spin_lock(&fsn_mark->lock); diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 85d8fdd55329..4ffc30606e0b 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -544,7 +544,7 @@ static int inotify_update_existing_watch(struct fsnotify_group *group, int create = (arg & IN_MASK_CREATE); int ret; - fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group); + fsn_mark = fsnotify_find_inode_mark(inode, group); if (!fsn_mark) return -ENOENT; else if (create) { diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index d4e3bc55d174..992b57a7e95f 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -789,6 +789,13 @@ static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark, FSNOTIFY_OBJ_TYPE_INODE, add_flags); } +static inline struct fsnotify_mark *fsnotify_find_inode_mark( + struct inode *inode, + struct fsnotify_group *group) +{ + return fsnotify_find_mark(&inode->i_fsnotify_marks, group); +} + /* given a group and a mark, flag mark to be freed when all references are dropped */ extern void fsnotify_destroy_mark(struct fsnotify_mark *mark, struct fsnotify_group *group); diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 1b07e6f12a07..f2f38903b2fe 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -463,7 +463,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) int n; fsnotify_group_lock(audit_tree_group); - mark = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group); + mark = fsnotify_find_inode_mark(inode, audit_tree_group); if (!mark) return create_chunk(inode, tree); diff --git a/kernel/audit_watch.c 
b/kernel/audit_watch.c index 7a98cd176a12..7f358740e958 100644 --- a/kernel/audit_watch.c +++ b/kernel/audit_watch.c @@ -90,7 +90,7 @@ static inline struct audit_parent *audit_find_parent(struct inode *inode) struct audit_parent *parent = NULL; struct fsnotify_mark *entry; - entry = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_watch_group); + entry = fsnotify_find_inode_mark(inode, audit_watch_group); if (entry) parent = container_of(entry, struct audit_parent, mark); -- cgit From 687c217c6aa2c24e6ddf98cc7f86a5b986e5918d Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Sun, 17 Mar 2024 20:41:49 +0200 Subject: fsnotify: pass object pointer and type to fsnotify mark helpers Instead of passing fsnotify_connp_t, pass the pointer to the marked object. Store the object pointer in the connector and move the definition of fsnotify_connp_t to internal fsnotify subsystem API, so it is no longer used by fsnotify backends. Suggested-by: Jan Kara Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara Message-Id: <20240317184154.1200192-6-amir73il@gmail.com> --- fs/notify/fanotify/fanotify_user.c | 95 ++++++++------------------------------ fs/notify/fsnotify.h | 23 ++++----- fs/notify/mark.c | 28 ++++++----- include/linux/fsnotify_backend.h | 33 +++++-------- 4 files changed, 59 insertions(+), 120 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 04a0b31f325d..925015ee7fbd 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -1076,7 +1076,7 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark, } static int fanotify_remove_mark(struct fsnotify_group *group, - fsnotify_connp_t *connp, __u32 mask, + void *obj, unsigned int obj_type, __u32 mask, unsigned int flags, __u32 umask) { struct fsnotify_mark *fsn_mark = NULL; @@ -1084,7 +1084,7 @@ static int fanotify_remove_mark(struct fsnotify_group *group, int destroy_mark; fsnotify_group_lock(group); - fsn_mark = fsnotify_find_mark(connp, group); + fsn_mark = fsnotify_find_mark(obj, obj_type, group); if (!fsn_mark) { fsnotify_group_unlock(group); return -ENOENT; @@ -1105,30 +1105,6 @@ static int fanotify_remove_mark(struct fsnotify_group *group, return 0; } -static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group, - struct vfsmount *mnt, __u32 mask, - unsigned int flags, __u32 umask) -{ - return fanotify_remove_mark(group, &real_mount(mnt)->mnt_fsnotify_marks, - mask, flags, umask); -} - -static int fanotify_remove_sb_mark(struct fsnotify_group *group, - struct super_block *sb, __u32 mask, - unsigned int flags, __u32 umask) -{ - return fanotify_remove_mark(group, &sb->s_fsnotify_marks, mask, - flags, umask); -} - -static int fanotify_remove_inode_mark(struct fsnotify_group *group, - struct inode *inode, __u32 mask, - unsigned int flags, __u32 umask) -{ - return fanotify_remove_mark(group, &inode->i_fsnotify_marks, mask, - flags, umask); -} - static bool fanotify_mark_update_flags(struct fsnotify_mark *fsn_mark, unsigned int fan_flags) { @@ -1249,7 +1225,7 @@ static int fanotify_set_mark_fsid(struct fsnotify_group *group, } static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group, - fsnotify_connp_t *connp, + void *obj, unsigned int obj_type, unsigned int fan_flags, struct fan_fsid *fsid) @@ -1288,7 +1264,7 @@ static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group, fan_mark->fsid.val[0] = fan_mark->fsid.val[1] = 0; } - ret = 
fsnotify_add_mark_locked(mark, connp, obj_type, 0); + ret = fsnotify_add_mark_locked(mark, obj, obj_type, 0); if (ret) goto out_put_mark; @@ -1344,7 +1320,7 @@ static int fanotify_may_update_existing_mark(struct fsnotify_mark *fsn_mark, } static int fanotify_add_mark(struct fsnotify_group *group, - fsnotify_connp_t *connp, unsigned int obj_type, + void *obj, unsigned int obj_type, __u32 mask, unsigned int fan_flags, struct fan_fsid *fsid) { @@ -1353,9 +1329,9 @@ static int fanotify_add_mark(struct fsnotify_group *group, int ret = 0; fsnotify_group_lock(group); - fsn_mark = fsnotify_find_mark(connp, group); + fsn_mark = fsnotify_find_mark(obj, obj_type, group); if (!fsn_mark) { - fsn_mark = fanotify_add_new_mark(group, connp, obj_type, + fsn_mark = fanotify_add_new_mark(group, obj, obj_type, fan_flags, fsid); if (IS_ERR(fsn_mark)) { fsnotify_group_unlock(group); @@ -1392,30 +1368,6 @@ out: return ret; } -static int fanotify_add_vfsmount_mark(struct fsnotify_group *group, - struct vfsmount *mnt, __u32 mask, - unsigned int flags, struct fan_fsid *fsid) -{ - return fanotify_add_mark(group, &real_mount(mnt)->mnt_fsnotify_marks, - FSNOTIFY_OBJ_TYPE_VFSMOUNT, mask, flags, fsid); -} - -static int fanotify_add_sb_mark(struct fsnotify_group *group, - struct super_block *sb, __u32 mask, - unsigned int flags, struct fan_fsid *fsid) -{ - return fanotify_add_mark(group, &sb->s_fsnotify_marks, - FSNOTIFY_OBJ_TYPE_SB, mask, flags, fsid); -} - -static int fanotify_add_inode_mark(struct fsnotify_group *group, - struct inode *inode, __u32 mask, - unsigned int flags, struct fan_fsid *fsid) -{ - return fanotify_add_mark(group, &inode->i_fsnotify_marks, - FSNOTIFY_OBJ_TYPE_INODE, mask, flags, fsid); -} - static struct fsnotify_event *fanotify_alloc_overflow_event(void) { struct fanotify_event *oevent; @@ -1738,6 +1690,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, unsigned int mark_cmd = flags & FANOTIFY_MARK_CMD_BITS; unsigned int ignore = flags & FANOTIFY_MARK_IGNORE_BITS; unsigned int obj_type, fid_mode; + void *obj; u32 umask = 0; int ret; @@ -1896,10 +1849,16 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, } /* inode held in place by reference to path; group by fget on fd */ - if (mark_type == FAN_MARK_INODE) + if (mark_type == FAN_MARK_INODE) { inode = path.dentry->d_inode; - else + obj = inode; + } else { mnt = path.mnt; + if (mark_type == FAN_MARK_MOUNT) + obj = mnt; + else + obj = mnt->mnt_sb; + } /* * If some other task has this inode open for write we should not add @@ -1935,26 +1894,12 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, /* create/update an inode mark */ switch (mark_cmd) { case FAN_MARK_ADD: - if (mark_type == FAN_MARK_MOUNT) - ret = fanotify_add_vfsmount_mark(group, mnt, mask, - flags, fsid); - else if (mark_type == FAN_MARK_FILESYSTEM) - ret = fanotify_add_sb_mark(group, mnt->mnt_sb, mask, - flags, fsid); - else - ret = fanotify_add_inode_mark(group, inode, mask, - flags, fsid); + ret = fanotify_add_mark(group, obj, obj_type, mask, flags, + fsid); break; case FAN_MARK_REMOVE: - if (mark_type == FAN_MARK_MOUNT) - ret = fanotify_remove_vfsmount_mark(group, mnt, mask, - flags, umask); - else if (mark_type == FAN_MARK_FILESYSTEM) - ret = fanotify_remove_sb_mark(group, mnt->mnt_sb, mask, - flags, umask); - else - ret = fanotify_remove_inode_mark(group, inode, mask, - flags, umask); + ret = fanotify_remove_mark(group, obj, obj_type, mask, flags, + umask); break; default: ret = -EINVAL; diff 
--git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h index 6d9951aac449..b1fec7cd8ee4 100644 --- a/fs/notify/fsnotify.h +++ b/fs/notify/fsnotify.h @@ -9,22 +9,28 @@ #include "../mount.h" +/* + * fsnotify_connp_t is what we embed in objects which connector can be attached + * to. + */ +typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t; + static inline struct inode *fsnotify_conn_inode( struct fsnotify_mark_connector *conn) { - return container_of(conn->obj, struct inode, i_fsnotify_marks); + return conn->obj; } static inline struct mount *fsnotify_conn_mount( struct fsnotify_mark_connector *conn) { - return container_of(conn->obj, struct mount, mnt_fsnotify_marks); + return real_mount(conn->obj); } static inline struct super_block *fsnotify_conn_sb( struct fsnotify_mark_connector *conn) { - return container_of(conn->obj, struct super_block, s_fsnotify_marks); + return conn->obj; } static inline struct super_block *fsnotify_object_sb(void *obj, @@ -45,16 +51,7 @@ static inline struct super_block *fsnotify_object_sb(void *obj, static inline struct super_block *fsnotify_connector_sb( struct fsnotify_mark_connector *conn) { - switch (conn->type) { - case FSNOTIFY_OBJ_TYPE_INODE: - return fsnotify_conn_inode(conn)->i_sb; - case FSNOTIFY_OBJ_TYPE_VFSMOUNT: - return fsnotify_conn_mount(conn)->mnt.mnt_sb; - case FSNOTIFY_OBJ_TYPE_SB: - return fsnotify_conn_sb(conn); - default: - return NULL; - } + return fsnotify_object_sb(conn->obj, conn->type); } /* destroy all events sitting in this groups notification queue */ diff --git a/fs/notify/mark.c b/fs/notify/mark.c index d9977af25832..1f611e4eab53 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -265,6 +265,7 @@ static void *fsnotify_detach_connector_from_object( struct fsnotify_mark_connector *conn, unsigned int *type) { + fsnotify_connp_t *connp = fsnotify_object_connp(conn->obj, conn->type); struct inode *inode = NULL; *type = conn->type; @@ -285,7 +286,7 @@ static void *fsnotify_detach_connector_from_object( } fsnotify_put_sb_watchers(conn); - rcu_assign_pointer(*(conn->obj), NULL); + rcu_assign_pointer(*connp, NULL); conn->obj = NULL; conn->type = FSNOTIFY_OBJ_TYPE_DETACHED; @@ -560,7 +561,7 @@ int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b) } static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp, - unsigned int obj_type) + void *obj, unsigned int obj_type) { struct fsnotify_mark_connector *conn; @@ -571,7 +572,7 @@ static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp, INIT_HLIST_HEAD(&conn->list); conn->flags = 0; conn->type = obj_type; - conn->obj = connp; + conn->obj = obj; /* * cmpxchg() provides the barrier so that readers of *connp can see @@ -620,24 +621,25 @@ out: * to which group and for which inodes. These marks are ordered according to * priority, highest number first, and then by the group's location in memory. 
*/ -static int fsnotify_add_mark_list(struct fsnotify_mark *mark, - fsnotify_connp_t *connp, +static int fsnotify_add_mark_list(struct fsnotify_mark *mark, void *obj, unsigned int obj_type, int add_flags) { struct fsnotify_mark *lmark, *last = NULL; struct fsnotify_mark_connector *conn; + fsnotify_connp_t *connp; int cmp; int err = 0; if (WARN_ON(!fsnotify_valid_obj_type(obj_type))) return -EINVAL; + connp = fsnotify_object_connp(obj, obj_type); restart: spin_lock(&mark->lock); conn = fsnotify_grab_connector(connp); if (!conn) { spin_unlock(&mark->lock); - err = fsnotify_attach_connector_to_object(connp, obj_type); + err = fsnotify_attach_connector_to_object(connp, obj, obj_type); if (err) return err; goto restart; @@ -689,7 +691,7 @@ out_err: * event types should be delivered to which group. */ int fsnotify_add_mark_locked(struct fsnotify_mark *mark, - fsnotify_connp_t *connp, unsigned int obj_type, + void *obj, unsigned int obj_type, int add_flags) { struct fsnotify_group *group = mark->group; @@ -710,7 +712,7 @@ int fsnotify_add_mark_locked(struct fsnotify_mark *mark, fsnotify_get_mark(mark); /* for g_list */ spin_unlock(&mark->lock); - ret = fsnotify_add_mark_list(mark, connp, obj_type, add_flags); + ret = fsnotify_add_mark_list(mark, obj, obj_type, add_flags); if (ret) goto err; @@ -728,14 +730,14 @@ err: return ret; } -int fsnotify_add_mark(struct fsnotify_mark *mark, fsnotify_connp_t *connp, +int fsnotify_add_mark(struct fsnotify_mark *mark, void *obj, unsigned int obj_type, int add_flags) { int ret; struct fsnotify_group *group = mark->group; fsnotify_group_lock(group); - ret = fsnotify_add_mark_locked(mark, connp, obj_type, add_flags); + ret = fsnotify_add_mark_locked(mark, obj, obj_type, add_flags); fsnotify_group_unlock(group); return ret; } @@ -745,12 +747,16 @@ EXPORT_SYMBOL_GPL(fsnotify_add_mark); * Given a list of marks, find the mark associated with given group. If found * take a reference to that mark and return it, else return NULL. */ -struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp, +struct fsnotify_mark *fsnotify_find_mark(void *obj, unsigned int obj_type, struct fsnotify_group *group) { + fsnotify_connp_t *connp = fsnotify_object_connp(obj, obj_type); struct fsnotify_mark_connector *conn; struct fsnotify_mark *mark; + if (!connp) + return NULL; + conn = fsnotify_grab_connector(connp); if (!conn) return NULL; diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 992b57a7e95f..face68fcf850 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -456,13 +456,6 @@ FSNOTIFY_ITER_FUNCS(sb, SB) type < FSNOTIFY_ITER_TYPE_COUNT; \ type++) -/* - * fsnotify_connp_t is what we embed in objects which connector can be attached - * to. fsnotify_connp_t * is how we refer from connector back to object. - */ -struct fsnotify_mark_connector; -typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t; - /* * Inode/vfsmount/sb point to this structure which tracks all marks attached to * the inode/vfsmount/sb. 
The reference to inode/vfsmount/sb is held by this @@ -476,7 +469,7 @@ struct fsnotify_mark_connector { unsigned short flags; /* flags [lock] */ union { /* Object pointer [lock] */ - fsnotify_connp_t *obj; + void *obj; /* Used listing heads to free after srcu period expires */ struct fsnotify_mark_connector *destroy_next; }; @@ -763,37 +756,35 @@ extern void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn); extern void fsnotify_init_mark(struct fsnotify_mark *mark, struct fsnotify_group *group); /* Find mark belonging to given group in the list of marks */ -extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp, - struct fsnotify_group *group); +struct fsnotify_mark *fsnotify_find_mark(void *obj, unsigned int obj_type, + struct fsnotify_group *group); /* attach the mark to the object */ -extern int fsnotify_add_mark(struct fsnotify_mark *mark, - fsnotify_connp_t *connp, unsigned int obj_type, - int add_flags); -extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, - fsnotify_connp_t *connp, - unsigned int obj_type, int add_flags); +int fsnotify_add_mark(struct fsnotify_mark *mark, void *obj, + unsigned int obj_type, int add_flags); +int fsnotify_add_mark_locked(struct fsnotify_mark *mark, void *obj, + unsigned int obj_type, int add_flags); /* attach the mark to the inode */ static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark, struct inode *inode, int add_flags) { - return fsnotify_add_mark(mark, &inode->i_fsnotify_marks, - FSNOTIFY_OBJ_TYPE_INODE, add_flags); + return fsnotify_add_mark(mark, inode, FSNOTIFY_OBJ_TYPE_INODE, + add_flags); } static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark, struct inode *inode, int add_flags) { - return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks, - FSNOTIFY_OBJ_TYPE_INODE, add_flags); + return fsnotify_add_mark_locked(mark, inode, FSNOTIFY_OBJ_TYPE_INODE, + add_flags); } static inline struct fsnotify_mark *fsnotify_find_inode_mark( struct inode *inode, struct fsnotify_group *group) { - return fsnotify_find_mark(&inode->i_fsnotify_marks, group); + return fsnotify_find_mark(inode, FSNOTIFY_OBJ_TYPE_INODE, group); } /* given a group and a mark, flag mark to be freed when all references are dropped */ -- cgit From c9d4603b054f9a63c07c7012040af4c80adb2c60 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Sun, 17 Mar 2024 20:41:50 +0200 Subject: fsnotify: create helper fsnotify_update_sb_watchers() We would like to count watched objects by priority group, so we will need to update the watched object counter after adding/removing marks. Create a helper fsnotify_update_sb_watchers() and call it after attaching/detaching a mark, instead of fsnotify_{get,put}_sb_watchers() only after attaching/detaching a connector. Soon, we will use this helper to count watched objects by the highest watching priority group. 
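[ Illustrative sketch, not part of the patch: the watcher accounting is now refreshed from the mark attach/detach paths, so a connector that exists but carries no marks no longer counts as a watched object. A hypothetical attach path (example_attach_mark() is not a real function) would look like: ]

static void example_attach_mark(struct super_block *sb,
                                struct fsnotify_mark_connector *conn,
                                struct fsnotify_mark *mark)
{
        hlist_add_head_rcu(&mark->obj_list, &conn->list);  /* attach the mark */
        fsnotify_update_sb_watchers(sb, conn);              /* then recount watchers */
}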
Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara Message-Id: <20240317184154.1200192-7-amir73il@gmail.com> --- fs/notify/mark.c | 36 ++++++++++++++++++++++-------------- include/linux/fsnotify_backend.h | 1 + 2 files changed, 23 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 1f611e4eab53..647c49c20467 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -154,20 +154,23 @@ static void fsnotify_put_inode_ref(struct inode *inode) iput(inode); } -static void fsnotify_get_sb_watchers(struct fsnotify_mark_connector *conn) +/* + * Grab or drop watched objects reference depending on whether the connector + * is attached and has any marks attached. + */ +static void fsnotify_update_sb_watchers(struct super_block *sb, + struct fsnotify_mark_connector *conn) { - struct super_block *sb = fsnotify_connector_sb(conn); + bool is_watched = conn->flags & FSNOTIFY_CONN_FLAG_IS_WATCHED; + bool has_marks = conn->obj && !hlist_empty(&conn->list); - if (sb) + if (has_marks && !is_watched) { + conn->flags |= FSNOTIFY_CONN_FLAG_IS_WATCHED; fsnotify_get_sb_watched_objects(sb); -} - -static void fsnotify_put_sb_watchers(struct fsnotify_mark_connector *conn) -{ - struct super_block *sb = fsnotify_connector_sb(conn); - - if (sb) + } else if (!has_marks && is_watched) { + conn->flags &= ~FSNOTIFY_CONN_FLAG_IS_WATCHED; fsnotify_put_sb_watched_objects(sb); + } } /* @@ -266,6 +269,7 @@ static void *fsnotify_detach_connector_from_object( unsigned int *type) { fsnotify_connp_t *connp = fsnotify_object_connp(conn->obj, conn->type); + struct super_block *sb = fsnotify_connector_sb(conn); struct inode *inode = NULL; *type = conn->type; @@ -285,10 +289,10 @@ static void *fsnotify_detach_connector_from_object( fsnotify_conn_sb(conn)->s_fsnotify_mask = 0; } - fsnotify_put_sb_watchers(conn); rcu_assign_pointer(*connp, NULL); conn->obj = NULL; conn->type = FSNOTIFY_OBJ_TYPE_DETACHED; + fsnotify_update_sb_watchers(sb, conn); return inode; } @@ -340,6 +344,11 @@ void fsnotify_put_mark(struct fsnotify_mark *mark) objp = fsnotify_detach_connector_from_object(conn, &type); free_conn = true; } else { + struct super_block *sb = fsnotify_connector_sb(conn); + + /* Update watched objects after detaching mark */ + if (sb) + fsnotify_update_sb_watchers(sb, conn); objp = __fsnotify_recalc_mask(conn); type = conn->type; } @@ -581,10 +590,7 @@ static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp, if (cmpxchg(connp, NULL, conn)) { /* Someone else created list structure for us */ kmem_cache_free(fsnotify_mark_connector_cachep, conn); - return 0; } - - fsnotify_get_sb_watchers(conn); return 0; } @@ -624,6 +630,7 @@ out: static int fsnotify_add_mark_list(struct fsnotify_mark *mark, void *obj, unsigned int obj_type, int add_flags) { + struct super_block *sb = fsnotify_object_sb(obj, obj_type); struct fsnotify_mark *lmark, *last = NULL; struct fsnotify_mark_connector *conn; fsnotify_connp_t *connp; @@ -673,6 +680,7 @@ restart: /* mark should be the last entry. 
last is the current last entry */ hlist_add_behind_rcu(&mark->obj_list, &last->obj_list); added: + fsnotify_update_sb_watchers(sb, conn); /* * Since connector is attached to object using cmpxchg() we are * guaranteed that connector initialization is fully visible by anyone diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index face68fcf850..83004d9e07a3 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -465,6 +465,7 @@ FSNOTIFY_ITER_FUNCS(sb, SB) struct fsnotify_mark_connector { spinlock_t lock; unsigned short type; /* Type of object [lock] */ +#define FSNOTIFY_CONN_FLAG_IS_WATCHED 0x01 #define FSNOTIFY_CONN_FLAG_HAS_IREF 0x02 unsigned short flags; /* flags [lock] */ union { -- cgit From 07a3b8d0bf726a1e49b050bbc6bd72f031e505fe Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Sun, 17 Mar 2024 20:41:51 +0200 Subject: fsnotify: lazy attach fsnotify_sb_info state to sb Define a container struct fsnotify_sb_info to hold per-sb state, including the reference to sb marks connector. Allocate the fsnotify_sb_info state before attaching connector to any object on the sb and free it only when killing sb. This state is going to be used for storing per priority watched objects counters. Suggested-by: Jan Kara Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara Message-Id: <20240317184154.1200192-8-amir73il@gmail.com> --- fs/notify/fsnotify.c | 16 +++++++++++++--- fs/notify/fsnotify.h | 9 ++++++++- fs/notify/mark.c | 32 +++++++++++++++++++++++++++++++- include/linux/fs.h | 8 ++++---- include/linux/fsnotify_backend.h | 17 +++++++++++++++++ 5 files changed, 73 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 503e7c75e777..fb3f36bc6ea9 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -89,11 +89,18 @@ static void fsnotify_unmount_inodes(struct super_block *sb) void fsnotify_sb_delete(struct super_block *sb) { + struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb); + + /* Were any marks ever added to any object on this sb? */ + if (!sbinfo) + return; + fsnotify_unmount_inodes(sb); fsnotify_clear_marks_by_sb(sb); /* Wait for outstanding object references from connectors */ wait_var_event(fsnotify_sb_watched_objects(sb), !atomic_long_read(fsnotify_sb_watched_objects(sb))); + kfree(sbinfo); } /* @@ -489,6 +496,7 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, { const struct path *path = fsnotify_data_path(data, data_type); struct super_block *sb = fsnotify_data_sb(data, data_type); + struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb); struct fsnotify_iter_info iter_info = {}; struct mount *mnt = NULL; struct inode *inode2 = NULL; @@ -525,7 +533,7 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, * SRCU because we have no references to any objects and do not * need SRCU to keep them "alive". 
*/ - if (!sb->s_fsnotify_marks && + if ((!sbinfo || !sbinfo->sb_marks) && (!mnt || !mnt->mnt_fsnotify_marks) && (!inode || !inode->i_fsnotify_marks) && (!inode2 || !inode2->i_fsnotify_marks)) @@ -552,8 +560,10 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); - iter_info.marks[FSNOTIFY_ITER_TYPE_SB] = - fsnotify_first_mark(&sb->s_fsnotify_marks); + if (sbinfo) { + iter_info.marks[FSNOTIFY_ITER_TYPE_SB] = + fsnotify_first_mark(&sbinfo->sb_marks); + } if (mnt) { iter_info.marks[FSNOTIFY_ITER_TYPE_VFSMOUNT] = fsnotify_first_mark(&mnt->mnt_fsnotify_marks); diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h index b1fec7cd8ee4..2d059f789ee3 100644 --- a/fs/notify/fsnotify.h +++ b/fs/notify/fsnotify.h @@ -54,6 +54,13 @@ static inline struct super_block *fsnotify_connector_sb( return fsnotify_object_sb(conn->obj, conn->type); } +static inline fsnotify_connp_t *fsnotify_sb_marks(struct super_block *sb) +{ + struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb); + + return sbinfo ? &sbinfo->sb_marks : NULL; +} + /* destroy all events sitting in this groups notification queue */ extern void fsnotify_flush_notify(struct fsnotify_group *group); @@ -79,7 +86,7 @@ static inline void fsnotify_clear_marks_by_mount(struct vfsmount *mnt) /* run the list of all marks associated with sb and destroy them */ static inline void fsnotify_clear_marks_by_sb(struct super_block *sb) { - fsnotify_destroy_marks(&sb->s_fsnotify_marks); + fsnotify_destroy_marks(fsnotify_sb_marks(sb)); } /* diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 647c49c20467..b2f5d8c9cce1 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -106,7 +106,7 @@ static fsnotify_connp_t *fsnotify_object_connp(void *obj, case FSNOTIFY_OBJ_TYPE_VFSMOUNT: return &real_mount(obj)->mnt_fsnotify_marks; case FSNOTIFY_OBJ_TYPE_SB: - return &((struct super_block *)obj)->s_fsnotify_marks; + return fsnotify_sb_marks(obj); default: return NULL; } @@ -569,6 +569,26 @@ int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b) return -1; } +static int fsnotify_attach_info_to_sb(struct super_block *sb) +{ + struct fsnotify_sb_info *sbinfo; + + /* sb info is freed on fsnotify_sb_delete() */ + sbinfo = kzalloc(sizeof(*sbinfo), GFP_KERNEL); + if (!sbinfo) + return -ENOMEM; + + /* + * cmpxchg() provides the barrier so that callers of fsnotify_sb_info() + * will observe an initialized structure + */ + if (cmpxchg(&sb->s_fsnotify_info, NULL, sbinfo)) { + /* Someone else created sbinfo for us */ + kfree(sbinfo); + } + return 0; +} + static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp, void *obj, unsigned int obj_type) { @@ -640,6 +660,16 @@ static int fsnotify_add_mark_list(struct fsnotify_mark *mark, void *obj, if (WARN_ON(!fsnotify_valid_obj_type(obj_type))) return -EINVAL; + /* + * Attach the sb info before attaching a connector to any object on sb. + * The sb info will remain attached as long as sb lives. 
+ */ + if (!fsnotify_sb_info(sb)) { + err = fsnotify_attach_info_to_sb(sb); + if (err) + return err; + } + connp = fsnotify_object_connp(obj, obj_type); restart: spin_lock(&mark->lock); diff --git a/include/linux/fs.h b/include/linux/fs.h index 00fc429b0af0..7f40b592f711 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -73,6 +73,8 @@ struct fscrypt_inode_info; struct fscrypt_operations; struct fsverity_info; struct fsverity_operations; +struct fsnotify_mark_connector; +struct fsnotify_sb_info; struct fs_context; struct fs_parameter_spec; struct fileattr; @@ -620,8 +622,6 @@ is_uncached_acl(struct posix_acl *acl) #define IOP_XATTR 0x0008 #define IOP_DEFAULT_READLINK 0x0010 -struct fsnotify_mark_connector; - /* * Keep mostly read-only and often accessed (especially for * the RCU path lookup and 'stat' data) fields at the beginning @@ -1249,7 +1249,7 @@ struct super_block { /* * Keep s_fs_info, s_time_gran, s_fsnotify_mask, and - * s_fsnotify_marks together for cache efficiency. They are frequently + * s_fsnotify_info together for cache efficiency. They are frequently * accessed and rarely modified. */ void *s_fs_info; /* Filesystem private info */ @@ -1261,7 +1261,7 @@ struct super_block { time64_t s_time_max; #ifdef CONFIG_FSNOTIFY __u32 s_fsnotify_mask; - struct fsnotify_mark_connector __rcu *s_fsnotify_marks; + struct fsnotify_sb_info *s_fsnotify_info; #endif /* diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 83004d9e07a3..c9f2b2f6b493 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -477,6 +477,23 @@ struct fsnotify_mark_connector { struct hlist_head list; }; +/* + * Container for per-sb fsnotify state (sb marks and more). + * Attached lazily on first marked object on the sb and freed when killing sb. + */ +struct fsnotify_sb_info { + struct fsnotify_mark_connector __rcu *sb_marks; +}; + +static inline struct fsnotify_sb_info *fsnotify_sb_info(struct super_block *sb) +{ +#ifdef CONFIG_FSNOTIFY + return READ_ONCE(sb->s_fsnotify_info); +#else + return NULL; +#endif +} + static inline atomic_long_t *fsnotify_sb_watched_objects(struct super_block *sb) { return &sb->s_fsnotify_connectors; -- cgit From cb5d4f48c10445c97a22af0bd8b9cf0ed6cc8036 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Sun, 17 Mar 2024 20:41:52 +0200 Subject: fsnotify: move s_fsnotify_connectors into fsnotify_sb_info Move the s_fsnotify_connectors counter into the per-sb fsnotify state. Suggested-by: Christian Brauner Signed-off-by: Amir Goldstein Reviewed-by: Christian Brauner Signed-off-by: Jan Kara Message-Id: <20240317184154.1200192-9-amir73il@gmail.com> --- include/linux/fs.h | 6 ------ include/linux/fsnotify.h | 8 +++++++- include/linux/fsnotify_backend.h | 7 ++++++- 3 files changed, 13 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index 7f40b592f711..c36c2f8fdbe3 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1302,12 +1302,6 @@ struct super_block { /* Number of inodes with nlink == 0 but still referenced */ atomic_long_t s_remove_count; - /* - * Number of inode/mount/sb objects that are being watched, note that - * inodes objects are currently double-accounted. 
- */ - atomic_long_t s_fsnotify_connectors; - /* Read-only state of the superblock is being changed */ int s_readonly_remount; diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index e470bb67c9a3..48dc65702415 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -20,7 +20,13 @@ /* Are there any inode/mount/sb objects that are being watched at all? */ static inline bool fsnotify_sb_has_watchers(struct super_block *sb) { - return atomic_long_read(fsnotify_sb_watched_objects(sb)); + struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb); + + /* Were any marks ever added to any object on this sb? */ + if (!sbinfo) + return false; + + return atomic_long_read(&sbinfo->watched_objects); } /* diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index c9f2b2f6b493..ec592aeadfa3 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -483,6 +483,11 @@ struct fsnotify_mark_connector { */ struct fsnotify_sb_info { struct fsnotify_mark_connector __rcu *sb_marks; + /* + * Number of inode/mount/sb objects that are being watched in this sb. + * Note that inodes objects are currently double-accounted. + */ + atomic_long_t watched_objects; }; static inline struct fsnotify_sb_info *fsnotify_sb_info(struct super_block *sb) @@ -496,7 +501,7 @@ static inline struct fsnotify_sb_info *fsnotify_sb_info(struct super_block *sb) static inline atomic_long_t *fsnotify_sb_watched_objects(struct super_block *sb) { - return &sb->s_fsnotify_connectors; + return &fsnotify_sb_info(sb)->watched_objects; } /* -- cgit From 477cf917dd02853ba78a73cdeb6548889e5f8cd7 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Sun, 17 Mar 2024 20:41:53 +0200 Subject: fsnotify: use an enum for group priority constants And use meaningful names for the constants. Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara Message-Id: <20240317184154.1200192-10-amir73il@gmail.com> --- fs/notify/fanotify/fanotify_user.c | 11 +++++------ include/linux/fsnotify_backend.h | 20 ++++++++++++-------- 2 files changed, 17 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 925015ee7fbd..483a6a1255fb 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -1516,13 +1516,13 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) INIT_LIST_HEAD(&group->fanotify_data.access_list); switch (class) { case FAN_CLASS_NOTIF: - group->priority = FS_PRIO_0; + group->priority = FSNOTIFY_PRIO_NORMAL; break; case FAN_CLASS_CONTENT: - group->priority = FS_PRIO_1; + group->priority = FSNOTIFY_PRIO_CONTENT; break; case FAN_CLASS_PRE_CONTENT: - group->priority = FS_PRIO_2; + group->priority = FSNOTIFY_PRIO_PRE_CONTENT; break; default: fd = -EINVAL; @@ -1774,12 +1774,11 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, goto fput_and_out; /* - * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not - * allowed to set permissions events. + * Permission events require minimum priority FAN_CLASS_CONTENT.
*/ ret = -EINVAL; if (mask & FANOTIFY_PERM_EVENTS && - group->priority == FS_PRIO_0) + group->priority < FSNOTIFY_PRIO_CONTENT) goto fput_and_out; if (mask & FAN_FS_ERROR && diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index ec592aeadfa3..fc38587d8564 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -176,6 +176,17 @@ struct fsnotify_event { struct list_head list; }; +/* + * fsnotify group priorities. + * Events are sent in order from highest priority to lowest priority. + */ +enum fsnotify_group_prio { + FSNOTIFY_PRIO_NORMAL = 0, /* normal notifiers, no permissions */ + FSNOTIFY_PRIO_CONTENT, /* fanotify permission events */ + FSNOTIFY_PRIO_PRE_CONTENT, /* fanotify pre-content events */ + __FSNOTIFY_PRIO_NUM +}; + /* * A group is a "thing" that wants to receive notification about filesystem * events. The mask holds the subset of event types this group cares about. @@ -201,14 +212,7 @@ struct fsnotify_group { wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */ unsigned int q_len; /* events on the queue */ unsigned int max_events; /* maximum events allowed on the list */ - /* - * Valid fsnotify group priorities. Events are send in order from highest - * priority to lowest priority. We default to the lowest priority. - */ - #define FS_PRIO_0 0 /* normal notifiers, no permissions */ - #define FS_PRIO_1 1 /* fanotify content based access control */ - #define FS_PRIO_2 2 /* fanotify pre-content access */ - unsigned int priority; + enum fsnotify_group_prio priority; /* priority for sending events */ bool shutdown; /* group is being shut down, don't queue more events */ #define FSNOTIFY_GROUP_USER 0x01 /* user allocated group */ -- cgit From a5e57b4d370c6d320e5bfb0c919fe00aee29e039 Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Sun, 17 Mar 2024 20:41:54 +0200 Subject: fsnotify: optimize the case of no permission event watchers Commit e43de7f0862b ("fsnotify: optimize the case of no marks of any type") optimized the case where there are no fsnotify watchers on any of the filesystem's objects. It is quite common for a system to have a single local filesystem and it is quite common for the system to have some inotify watches on some config files or directories, so the optimization of no marks at all is often not in effect. Permission event watchers, which require a high priority group, are more rare, so optimizing the case of no marks of high priority groups can improve performance for more systems, especially for performance-sensitive I/O workloads. Count per-sb watched objects by high priority groups and use that to optimize out the call to __fsnotify_parent() and fsnotify() in fsnotify permission hooks.
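[ Illustrative sketch, not part of the patch: with the per-priority counters, a permission hook can bail out early when no group of priority >= FSNOTIFY_PRIO_CONTENT watches anything on the sb. The helper example_skip_perm_events() is hypothetical; the real check added by this patch lives in fsnotify_file(). ]

static inline bool example_skip_perm_events(struct super_block *sb, __u32 mask)
{
        return (mask & ALL_FSNOTIFY_PERM_EVENTS) &&
               !fsnotify_sb_has_priority_watchers(sb, FSNOTIFY_PRIO_CONTENT);
}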
Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara Message-Id: <20240317184154.1200192-11-amir73il@gmail.com> --- fs/notify/fsnotify.c | 3 +++ fs/notify/mark.c | 30 +++++++++++++++++++++++++++--- include/linux/fsnotify.h | 19 ++++++++++++++++--- include/linux/fsnotify_backend.h | 11 ++++++++--- 4 files changed, 54 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index fb3f36bc6ea9..2ae965ef37e8 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -100,6 +100,9 @@ void fsnotify_sb_delete(struct super_block *sb) /* Wait for outstanding object references from connectors */ wait_var_event(fsnotify_sb_watched_objects(sb), !atomic_long_read(fsnotify_sb_watched_objects(sb))); + WARN_ON(fsnotify_sb_has_priority_watchers(sb, FSNOTIFY_PRIO_CONTENT)); + WARN_ON(fsnotify_sb_has_priority_watchers(sb, + FSNOTIFY_PRIO_PRE_CONTENT)); kfree(sbinfo); } diff --git a/fs/notify/mark.c b/fs/notify/mark.c index b2f5d8c9cce1..c3eefa70633c 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -161,13 +161,36 @@ static void fsnotify_put_inode_ref(struct inode *inode) static void fsnotify_update_sb_watchers(struct super_block *sb, struct fsnotify_mark_connector *conn) { + struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb); bool is_watched = conn->flags & FSNOTIFY_CONN_FLAG_IS_WATCHED; - bool has_marks = conn->obj && !hlist_empty(&conn->list); + struct fsnotify_mark *first_mark = NULL; + unsigned int highest_prio = 0; - if (has_marks && !is_watched) { + if (conn->obj) + first_mark = hlist_entry_safe(conn->list.first, + struct fsnotify_mark, obj_list); + if (first_mark) + highest_prio = first_mark->group->priority; + if (WARN_ON(highest_prio >= __FSNOTIFY_PRIO_NUM)) + highest_prio = 0; + + /* + * If the highest priority of group watching this object is prio, + * then watched object has a reference on counters [0..prio]. + * Update priority >= 1 watched objects counters. + */ + for (unsigned int p = conn->prio + 1; p <= highest_prio; p++) + atomic_long_inc(&sbinfo->watched_objects[p]); + for (unsigned int p = conn->prio; p > highest_prio; p--) + atomic_long_dec(&sbinfo->watched_objects[p]); + conn->prio = highest_prio; + + /* Update priority >= 0 (a.k.a total) watched objects counter */ + BUILD_BUG_ON(FSNOTIFY_PRIO_NORMAL != 0); + if (first_mark && !is_watched) { conn->flags |= FSNOTIFY_CONN_FLAG_IS_WATCHED; fsnotify_get_sb_watched_objects(sb); - } else if (!has_marks && is_watched) { + } else if (!first_mark && is_watched) { conn->flags &= ~FSNOTIFY_CONN_FLAG_IS_WATCHED; fsnotify_put_sb_watched_objects(sb); } @@ -600,6 +623,7 @@ static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp, spin_lock_init(&conn->lock); INIT_HLIST_HEAD(&conn->list); conn->flags = 0; + conn->prio = 0; conn->type = obj_type; conn->obj = obj; diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 48dc65702415..4da80e92f804 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -17,8 +17,9 @@ #include #include -/* Are there any inode/mount/sb objects that are being watched at all? */ -static inline bool fsnotify_sb_has_watchers(struct super_block *sb) +/* Are there any inode/mount/sb objects watched with priority prio or above? 
*/ +static inline bool fsnotify_sb_has_priority_watchers(struct super_block *sb, + int prio) { struct fsnotify_sb_info *sbinfo = fsnotify_sb_info(sb); @@ -26,7 +27,13 @@ static inline bool fsnotify_sb_has_watchers(struct super_block *sb) if (!sbinfo) return false; - return atomic_long_read(&sbinfo->watched_objects); + return atomic_long_read(&sbinfo->watched_objects[prio]); +} + +/* Are there any inode/mount/sb objects that are being watched at all? */ +static inline bool fsnotify_sb_has_watchers(struct super_block *sb) +{ + return fsnotify_sb_has_priority_watchers(sb, 0); } /* @@ -109,6 +116,12 @@ static inline int fsnotify_file(struct file *file, __u32 mask) return 0; path = &file->f_path; + /* Permission events require group prio >= FSNOTIFY_PRIO_CONTENT */ + if (mask & ALL_FSNOTIFY_PERM_EVENTS && + !fsnotify_sb_has_priority_watchers(path->dentry->d_sb, + FSNOTIFY_PRIO_CONTENT)) + return 0; + return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH); } diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index fc38587d8564..7f1ab8264e41 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -468,7 +468,8 @@ FSNOTIFY_ITER_FUNCS(sb, SB) */ struct fsnotify_mark_connector { spinlock_t lock; - unsigned short type; /* Type of object [lock] */ + unsigned char type; /* Type of object [lock] */ + unsigned char prio; /* Highest priority group */ #define FSNOTIFY_CONN_FLAG_IS_WATCHED 0x01 #define FSNOTIFY_CONN_FLAG_HAS_IREF 0x02 unsigned short flags; /* flags [lock] */ @@ -490,8 +491,12 @@ struct fsnotify_sb_info { /* * Number of inode/mount/sb objects that are being watched in this sb. * Note that inodes objects are currently double-accounted. + * + * The value in watched_objects[prio] is the number of objects that are + * watched by groups of priority >= prio, so watched_objects[0] is the + * total number of watched objects in this sb. */ - atomic_long_t watched_objects; + atomic_long_t watched_objects[__FSNOTIFY_PRIO_NUM]; }; static inline struct fsnotify_sb_info *fsnotify_sb_info(struct super_block *sb) @@ -505,7 +510,7 @@ static inline struct fsnotify_sb_info *fsnotify_sb_info(struct super_block *sb) static inline atomic_long_t *fsnotify_sb_watched_objects(struct super_block *sb) { - return &fsnotify_sb_info(sb)->watched_objects; + return &fsnotify_sb_info(sb)->watched_objects[0]; } /* -- cgit From 59401c3c08e1a306e29a8d6c826685e2c5c6c794 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Tue, 26 Mar 2024 09:01:18 +0000 Subject: soundwire: remove unused sdw_bus_conf structure This is redundant with sdw_bus_params, and was never used. 
Signed-off-by: Pierre-Louis Bossart Reviewed-by: Rander Wang Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20240326090122.1051806-4-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- include/linux/soundwire/sdw.h | 15 --------------- 1 file changed, 15 deletions(-) (limited to 'include/linux') diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 66f814b63a43..e5d0aa58e7f6 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -542,21 +542,6 @@ enum sdw_reg_bank { SDW_BANK1, }; -/** - * struct sdw_bus_conf: Bus configuration - * - * @clk_freq: Clock frequency, in Hz - * @num_rows: Number of rows in frame - * @num_cols: Number of columns in frame - * @bank: Next register bank - */ -struct sdw_bus_conf { - unsigned int clk_freq; - unsigned int num_rows; - unsigned int num_cols; - unsigned int bank; -}; - /** * struct sdw_prepare_ch: Prepare/De-prepare Data Port channel * -- cgit From bc13cf3f6e63dd708ccd160a28e6bb696af7e9f6 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Tue, 26 Mar 2024 09:01:20 +0000 Subject: soundwire: clarify maximum allowed address The existing code sets the maximum address at 0x80000000, which is not completely accurate. The last 2 Gbytes are indeed reserved, but so are the 896 Mbytes just before. The maximum address which can be used with paging or BRA is 0x47FFFFFF per Table 131 of the SoundWire 1.2.1 specification. Signed-off-by: Pierre-Louis Bossart Reviewed-by: Rander Wang Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20240326090122.1051806-6-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- include/linux/soundwire/sdw_registers.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h index 138bec908c40..658b10fa5b20 100644 --- a/include/linux/soundwire/sdw_registers.h +++ b/include/linux/soundwire/sdw_registers.h @@ -13,7 +13,7 @@ #define SDW_REG_NO_PAGE 0x00008000 #define SDW_REG_OPTIONAL_PAGE 0x00010000 -#define SDW_REG_MAX 0x80000000 +#define SDW_REG_MAX 0x48000000 #define SDW_DPN_SIZE 0x100 #define SDW_BANK1_OFFSET 0x10 -- cgit From d0a69cd0369a390cc1c100e52e78a273695a170c Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Tue, 26 Mar 2024 09:20:26 +0000 Subject: soundwire: intel: add more values for SYNCPRD Starting with MeteorLake, the input to the SoundWire IP can be 24.576 MHz (aka Audio Cardinal Clock) or 96 MHz (Audio PLL). 
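For reference, each SYNCPRD value is simply the number of input clock cycles per global-sync period minus one; assuming the 4 kHz cadence behind SDW_CADENCE_GSYNC_KHZ, the new defines work out as shown in this sketch:

    /* syncprd = mclk_khz / SDW_CADENCE_GSYNC_KHZ - 1, with a 4 kHz cadence:
     *   24 MHz     -> 24000 / 4 - 1 = 5999
     *   24.576 MHz -> 24576 / 4 - 1 = 6143
     *   38.4 MHz   -> 38400 / 4 - 1 = 9599
     *   96 MHz     -> 96000 / 4 - 1 = 23999
     */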
Signed-off-by: Pierre-Louis Bossart Reviewed-by: Rander Wang Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20240326092030.1062802-4-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- include/linux/soundwire/sdw_intel.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index 00bb22d96ae5..fa40b85d5019 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -34,8 +34,10 @@ /* SYNC */ #define SDW_SHIM_SYNC 0xC -#define SDW_SHIM_SYNC_SYNCPRD_VAL_24 (24000 / SDW_CADENCE_GSYNC_KHZ - 1) -#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 (38400 / SDW_CADENCE_GSYNC_KHZ - 1) +#define SDW_SHIM_SYNC_SYNCPRD_VAL_24 (24000 / SDW_CADENCE_GSYNC_KHZ - 1) +#define SDW_SHIM_SYNC_SYNCPRD_VAL_24_576 (24576 / SDW_CADENCE_GSYNC_KHZ - 1) +#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 (38400 / SDW_CADENCE_GSYNC_KHZ - 1) +#define SDW_SHIM_SYNC_SYNCPRD_VAL_96 (96000 / SDW_CADENCE_GSYNC_KHZ - 1) #define SDW_SHIM_SYNC_SYNCPRD GENMASK(14, 0) #define SDW_SHIM_SYNC_SYNCCPU BIT(15) #define SDW_SHIM_SYNC_CMDSYNC_MASK GENMASK(19, 16) -- cgit From 09ee49e3de6bcecc57028682c673d180ec2d436b Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Tue, 26 Mar 2024 09:20:27 +0000 Subject: soundwire: intel: add support for MeteorLake additional clocks In the MeteorLake hardware, the SoundWire link clock can be selected from the Xtal, audio cardinal clock (24.576 MHz) or the 96 MHz audio PLL. This patch adds the clock selection in a backwards-compatible manner, using the ACPI firmware as the source of information and checking its compatibility with hardware capabilities. Signed-off-by: Pierre-Louis Bossart Reviewed-by: Rander Wang Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20240326092030.1062802-5-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- drivers/soundwire/intel.c | 43 +++++++++++++++++++++++++++++------ include/linux/soundwire/sdw_intel.h | 5 +++++ 2 files changed, 42 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index 839268175079..01e1a0f3ec39 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -345,8 +345,10 @@ static int intel_link_power_up(struct sdw_intel *sdw) u32 spa_mask, cpa_mask; u32 link_control; int ret = 0; + u32 clock_source; u32 syncprd; u32 sync_reg; + bool lcap_mlcs; mutex_lock(sdw->link_res->shim_lock); @@ -358,12 +360,35 @@ static int intel_link_power_up(struct sdw_intel *sdw) /* * is only dependent on the oscillator clock provided to * the IP, so adjust based on _DSD properties reported in DSDT * tables. The values reported are based on either 24MHz - * (CNL/CML) or 38.4 MHz (ICL/TGL+). + * (CNL/CML) or 38.4 MHz (ICL/TGL+). On MeteorLake additional + * frequencies are available with the MLCS clock source selection.
*/ - if (prop->mclk_freq % 6000000) - syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4; - else - syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24; + lcap_mlcs = intel_readl(shim, SDW_SHIM_LCAP) & SDW_SHIM_LCAP_MLCS_MASK; + + if (prop->mclk_freq % 6000000) { + if (prop->mclk_freq % 2400000) { + if (lcap_mlcs) { + syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24_576; + clock_source = SDW_SHIM_MLCS_CARDINAL_CLK; + } else { + dev_err(sdw->cdns.dev, "%s: invalid clock configuration, mclk %d lcap_mlcs %d\n", + __func__, prop->mclk_freq, lcap_mlcs); + ret = -EINVAL; + goto out; + } + } else { + syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_38_4; + clock_source = SDW_SHIM_MLCS_XTAL_CLK; + } + } else { + if (lcap_mlcs) { + syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_96; + clock_source = SDW_SHIM_MLCS_AUDIO_PLL_CLK; + } else { + syncprd = SDW_SHIM_SYNC_SYNCPRD_VAL_24; + clock_source = SDW_SHIM_MLCS_XTAL_CLK; + } + } if (!*shim_mask) { dev_dbg(sdw->cdns.dev, "powering up all links\n"); @@ -403,6 +428,13 @@ static int intel_link_power_up(struct sdw_intel *sdw) "Failed to set SHIM_SYNC: %d\n", ret); goto out; } + + /* update link clock if needed */ + if (lcap_mlcs) { + link_control = intel_readl(shim, SDW_SHIM_LCTL); + u32p_replace_bits(&link_control, clock_source, SDW_SHIM_LCTL_MLCS_MASK); + intel_writel(shim, SDW_SHIM_LCTL, link_control); + } } *shim_mask |= BIT(link_id); @@ -1087,4 +1119,3 @@ const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops = { .sync_check_cmdsync_unlocked = intel_check_cmdsync_unlocked, }; EXPORT_SYMBOL_NS(sdw_intel_cnl_hw_ops, SOUNDWIRE_INTEL); - diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index fa40b85d5019..8e78417156e3 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -22,6 +22,7 @@ /* LCAP */ #define SDW_SHIM_LCAP 0x0 #define SDW_SHIM_LCAP_LCOUNT_MASK GENMASK(2, 0) +#define SDW_SHIM_LCAP_MLCS_MASK BIT(8) /* LCTL */ #define SDW_SHIM_LCTL 0x4 @@ -30,6 +31,10 @@ #define SDW_SHIM_LCTL_SPA_MASK GENMASK(3, 0) #define SDW_SHIM_LCTL_CPA BIT(8) #define SDW_SHIM_LCTL_CPA_MASK GENMASK(11, 8) +#define SDW_SHIM_LCTL_MLCS_MASK GENMASK(29, 27) +#define SDW_SHIM_MLCS_XTAL_CLK 0x0 +#define SDW_SHIM_MLCS_CARDINAL_CLK 0x1 +#define SDW_SHIM_MLCS_AUDIO_PLL_CLK 0x2 /* SYNC */ #define SDW_SHIM_SYNC 0xC -- cgit From 68d6f4f3fbd9b1baae53e7cf33fb3362b5a21494 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Mon, 25 Mar 2024 19:34:01 -0600 Subject: fs: Annotate struct file_handle with __counted_by() and use struct_size() Prepare for the coming implementation by GCC and Clang of the __counted_by attribute. Flexible array members annotated with __counted_by can have their accesses bounds-checked at run-time via CONFIG_UBSAN_BOUNDS (for array indexing) and CONFIG_FORTIFY_SOURCE (for strcpy/memcpy-family functions). While there, use struct_size() helper, instead of the open-coded version. [brauner@kernel.org: contains a fix by Edward for an OOB access] Reported-by: syzbot+4139435cb1b34cf759c2@syzkaller.appspotmail.com Signed-off-by: Edward Adam Davis Link: https://lore.kernel.org/r/tencent_A7845DD769577306D813742365E976E3A205@qq.com Signed-off-by: Gustavo A. R. 
Silva Link: https://lore.kernel.org/r/ZgImCXTdGDTeBvSS@neat Reviewed-by: Jan Kara Signed-off-by: Christian Brauner --- fs/fhandle.c | 6 +++--- include/linux/fs.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/fs/fhandle.c b/fs/fhandle.c index 57a12614addf..8a7f86c2139a 100644 --- a/fs/fhandle.c +++ b/fs/fhandle.c @@ -36,7 +36,7 @@ static long do_sys_name_to_handle(const struct path *path, if (f_handle.handle_bytes > MAX_HANDLE_SZ) return -EINVAL; - handle = kzalloc(sizeof(struct file_handle) + f_handle.handle_bytes, + handle = kzalloc(struct_size(handle, f_handle, f_handle.handle_bytes), GFP_KERNEL); if (!handle) return -ENOMEM; @@ -71,7 +71,7 @@ static long do_sys_name_to_handle(const struct path *path, /* copy the mount id */ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) || copy_to_user(ufh, handle, - sizeof(struct file_handle) + handle_bytes)) + struct_size(handle, f_handle, handle_bytes))) retval = -EFAULT; kfree(handle); return retval; @@ -192,7 +192,7 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh, retval = -EINVAL; goto out_err; } - handle = kmalloc(sizeof(struct file_handle) + f_handle.handle_bytes, + handle = kmalloc(struct_size(handle, f_handle, f_handle.handle_bytes), GFP_KERNEL); if (!handle) { retval = -ENOMEM; diff --git a/include/linux/fs.h b/include/linux/fs.h index 00fc429b0af0..914245085597 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1033,7 +1033,7 @@ struct file_handle { __u32 handle_bytes; int handle_type; /* file identifier */ - unsigned char f_handle[]; + unsigned char f_handle[] __counted_by(handle_bytes); }; static inline struct file *get_file(struct file *f) -- cgit From 5c9668cfc6d7c7de30fa57fa0a6c0ddc93462969 Mon Sep 17 00:00:00 2001 From: Gatien Chevallier Date: Fri, 5 Jan 2024 14:03:56 +0100 Subject: firewall: introduce stm32_firewall framework Introduce a STM32 firewall framework that offers to firewall consumers different firewall services such as the ability to check their access rights against their firewall controller(s). The STM32 firewall framework offers a generic API for STM32 firewall controllers that is defined in their drivers to best fit the specificity of each firewall. 
There are various types of firewalls: -Peripheral firewalls that filter accesses to peripherals -Memory firewalls that filter accesses to memories or memory regions -No type for undefined type of firewall Signed-off-by: Gatien Chevallier Signed-off-by: Alexandre Torgue --- MAINTAINERS | 5 + arch/arm/mach-stm32/Kconfig | 1 + arch/arm64/Kconfig.platforms | 1 + drivers/bus/Kconfig | 10 + drivers/bus/Makefile | 1 + drivers/bus/stm32_firewall.c | 294 ++++++++++++++++++++++++++++++ drivers/bus/stm32_firewall.h | 83 +++++++++ include/linux/bus/stm32_firewall_device.h | 142 +++++++++++++++ 8 files changed, 537 insertions(+) create mode 100644 drivers/bus/stm32_firewall.c create mode 100644 drivers/bus/stm32_firewall.h create mode 100644 include/linux/bus/stm32_firewall_device.h (limited to 'include/linux') diff --git a/MAINTAINERS b/MAINTAINERS index aa3b947fb080..b896bd80d361 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -20811,6 +20811,11 @@ T: git git://linuxtv.org/media_tree.git F: Documentation/devicetree/bindings/media/i2c/st,st-mipid02.yaml F: drivers/media/i2c/st-mipid02.c +ST STM32 FIREWALL +M: Gatien Chevallier +S: Maintained +F: drivers/bus/stm32_firewall.c + ST STM32 I2C/SMBUS DRIVER M: Pierre-Yves MORDRET M: Alain Volmat diff --git a/arch/arm/mach-stm32/Kconfig b/arch/arm/mach-stm32/Kconfig index 98145031586f..ae21a9f78f9c 100644 --- a/arch/arm/mach-stm32/Kconfig +++ b/arch/arm/mach-stm32/Kconfig @@ -12,6 +12,7 @@ menuconfig ARCH_STM32 select PINCTRL select RESET_CONTROLLER select STM32_EXTI + select STM32_FIREWALL help Support for STMicroelectronics STM32 processors. diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 24335565bad5..13ed0a483617 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -305,6 +305,7 @@ config ARCH_STM32 select ARM_SMC_MBOX select ARM_SCMI_PROTOCOL select COMMON_CLK_SCMI + select STM32_FIREWALL help This enables support for ARMv8 based STMicroelectronics STM32 family, including: diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index d5e7fa9173a1..64cd2ee03aa3 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -163,6 +163,16 @@ config QCOM_SSC_BLOCK_BUS i2c/spi/uart controllers, a hexagon core, and a clock controller which provides clocks for the above. +config STM32_FIREWALL + bool "STM32 Firewall framework" + depends on (ARCH_STM32 || COMPILE_TEST) && OF + select OF_DYNAMIC + help + Say y to enable STM32 firewall framework and its services. Firewall + controllers will be able to register to the framework. Access for + hardware resources linked to a firewall controller can be requested + through this STM32 framework. 
+ config SUN50I_DE2_BUS bool "Allwinner A64 DE2 Bus Driver" default ARM64 diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index d90eed189a65..fc0511450ec2 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -26,6 +26,7 @@ obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o obj-$(CONFIG_QCOM_SSC_BLOCK_BUS) += qcom-ssc-block-bus.o +obj-$(CONFIG_STM32_FIREWALL) += stm32_firewall.o obj-$(CONFIG_SUN50I_DE2_BUS) += sun50i-de2.o obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o obj-$(CONFIG_OF) += simple-pm-bus.o diff --git a/drivers/bus/stm32_firewall.c b/drivers/bus/stm32_firewall.c new file mode 100644 index 000000000000..decb79449047 --- /dev/null +++ b/drivers/bus/stm32_firewall.c @@ -0,0 +1,294 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023, STMicroelectronics - All Rights Reserved + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "stm32_firewall.h" + +/* Corresponds to STM32_FIREWALL_MAX_EXTRA_ARGS + firewall ID */ +#define STM32_FIREWALL_MAX_ARGS (STM32_FIREWALL_MAX_EXTRA_ARGS + 1) + +static LIST_HEAD(firewall_controller_list); +static DEFINE_MUTEX(firewall_controller_list_lock); + +/* Firewall device API */ + +int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall, + unsigned int nb_firewall) +{ + struct stm32_firewall_controller *ctrl; + struct of_phandle_iterator it; + unsigned int i, j = 0; + int err; + + if (!firewall || !nb_firewall) + return -EINVAL; + + /* Parse property with phandle parsed out */ + of_for_each_phandle(&it, err, np, "access-controllers", "#access-controller-cells", 0) { + struct of_phandle_args provider_args; + struct device_node *provider = it.node; + const char *fw_entry; + bool match = false; + + if (err) { + pr_err("Unable to get access-controllers property for node %s\n, err: %d", + np->full_name, err); + of_node_put(provider); + return err; + } + + if (j > nb_firewall) { + pr_err("Too many firewall controllers"); + of_node_put(provider); + return -EINVAL; + } + + provider_args.args_count = of_phandle_iterator_args(&it, provider_args.args, + STM32_FIREWALL_MAX_ARGS); + + /* Check if the parsed phandle corresponds to a registered firewall controller */ + mutex_lock(&firewall_controller_list_lock); + list_for_each_entry(ctrl, &firewall_controller_list, entry) { + if (ctrl->dev->of_node->phandle == it.phandle) { + match = true; + firewall[j].firewall_ctrl = ctrl; + break; + } + } + mutex_unlock(&firewall_controller_list_lock); + + if (!match) { + firewall[j].firewall_ctrl = NULL; + pr_err("No firewall controller registered for %s\n", np->full_name); + of_node_put(provider); + return -ENODEV; + } + + err = of_property_read_string_index(np, "access-controller-names", j, &fw_entry); + if (err == 0) + firewall[j].entry = fw_entry; + + /* Handle the case when there are no arguments given along with the phandle */ + if (provider_args.args_count < 0 || + provider_args.args_count > STM32_FIREWALL_MAX_ARGS) { + of_node_put(provider); + return -EINVAL; + } else if (provider_args.args_count == 0) { + firewall[j].extra_args_size = 0; + firewall[j].firewall_id = U32_MAX; + j++; + continue; + } + + /* The firewall ID is always the first argument */ + firewall[j].firewall_id = provider_args.args[0]; + + /* Extra args start at the second argument */ + for (i = 0; i < provider_args.args_count - 1; i++) + 
firewall[j].extra_args[i] = provider_args.args[i + 1]; + + /* Remove the firewall ID arg that is not an extra argument */ + firewall[j].extra_args_size = provider_args.args_count - 1; + + j++; + } + + return 0; +} +EXPORT_SYMBOL_GPL(stm32_firewall_get_firewall); + +int stm32_firewall_grant_access(struct stm32_firewall *firewall) +{ + struct stm32_firewall_controller *firewall_controller; + + if (!firewall || firewall->firewall_id == U32_MAX) + return -EINVAL; + + firewall_controller = firewall->firewall_ctrl; + + if (!firewall_controller) + return -ENODEV; + + return firewall_controller->grant_access(firewall_controller, firewall->firewall_id); +} +EXPORT_SYMBOL_GPL(stm32_firewall_grant_access); + +int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id) +{ + struct stm32_firewall_controller *firewall_controller; + + if (!firewall || subsystem_id == U32_MAX || firewall->firewall_id == U32_MAX) + return -EINVAL; + + firewall_controller = firewall->firewall_ctrl; + + if (!firewall_controller) + return -ENODEV; + + return firewall_controller->grant_access(firewall_controller, subsystem_id); +} +EXPORT_SYMBOL_GPL(stm32_firewall_grant_access_by_id); + +void stm32_firewall_release_access(struct stm32_firewall *firewall) +{ + struct stm32_firewall_controller *firewall_controller; + + if (!firewall || firewall->firewall_id == U32_MAX) { + pr_debug("Incorrect arguments when releasing a firewall access\n"); + return; + } + + firewall_controller = firewall->firewall_ctrl; + + if (!firewall_controller) { + pr_debug("No firewall controller to release\n"); + return; + } + + firewall_controller->release_access(firewall_controller, firewall->firewall_id); +} +EXPORT_SYMBOL_GPL(stm32_firewall_release_access); + +void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id) +{ + struct stm32_firewall_controller *firewall_controller; + + if (!firewall || subsystem_id == U32_MAX || firewall->firewall_id == U32_MAX) { + pr_debug("Incorrect arguments when releasing a firewall access"); + return; + } + + firewall_controller = firewall->firewall_ctrl; + + if (!firewall_controller) { + pr_debug("No firewall controller to release"); + return; + } + + firewall_controller->release_access(firewall_controller, subsystem_id); +} +EXPORT_SYMBOL_GPL(stm32_firewall_release_access_by_id); + +/* Firewall controller API */ + +int stm32_firewall_controller_register(struct stm32_firewall_controller *firewall_controller) +{ + struct stm32_firewall_controller *ctrl; + + if (!firewall_controller) + return -ENODEV; + + pr_info("Registering %s firewall controller\n", firewall_controller->name); + + mutex_lock(&firewall_controller_list_lock); + list_for_each_entry(ctrl, &firewall_controller_list, entry) { + if (ctrl == firewall_controller) { + pr_debug("%s firewall controller already registered\n", + firewall_controller->name); + mutex_unlock(&firewall_controller_list_lock); + return 0; + } + } + list_add_tail(&firewall_controller->entry, &firewall_controller_list); + mutex_unlock(&firewall_controller_list_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(stm32_firewall_controller_register); + +void stm32_firewall_controller_unregister(struct stm32_firewall_controller *firewall_controller) +{ + struct stm32_firewall_controller *ctrl; + bool controller_removed = false; + + if (!firewall_controller) { + pr_debug("Null reference while unregistering firewall controller\n"); + return; + } + + mutex_lock(&firewall_controller_list_lock); + list_for_each_entry(ctrl, 
&firewall_controller_list, entry) { + if (ctrl == firewall_controller) { + controller_removed = true; + list_del_init(&ctrl->entry); + break; + } + } + mutex_unlock(&firewall_controller_list_lock); + + if (!controller_removed) + pr_debug("There was no firewall controller named %s to unregister\n", + firewall_controller->name); +} +EXPORT_SYMBOL_GPL(stm32_firewall_controller_unregister); + +int stm32_firewall_populate_bus(struct stm32_firewall_controller *firewall_controller) +{ + struct stm32_firewall *firewalls; + struct device_node *child; + struct device *parent; + unsigned int i; + int len; + int err; + + parent = firewall_controller->dev; + + dev_dbg(parent, "Populating %s system bus\n", dev_name(firewall_controller->dev)); + + for_each_available_child_of_node(dev_of_node(parent), child) { + /* The access-controllers property is mandatory for firewall bus devices */ + len = of_count_phandle_with_args(child, "access-controllers", + "#access-controller-cells"); + if (len <= 0) { + of_node_put(child); + return -EINVAL; + } + + firewalls = kcalloc(len, sizeof(*firewalls), GFP_KERNEL); + if (!firewalls) { + of_node_put(child); + return -ENOMEM; + } + + err = stm32_firewall_get_firewall(child, firewalls, (unsigned int)len); + if (err) { + kfree(firewalls); + of_node_put(child); + return err; + } + + for (i = 0; i < len; i++) { + if (firewall_controller->grant_access(firewall_controller, + firewalls[i].firewall_id)) { + /* + * Peripheral access not allowed or not defined. + * Mark the node as populated so platform bus won't probe it + */ + of_detach_node(child); + dev_err(parent, "%s: Device driver will not be probed\n", + child->full_name); + } + } + + kfree(firewalls); + } + + return 0; +} +EXPORT_SYMBOL_GPL(stm32_firewall_populate_bus); diff --git a/drivers/bus/stm32_firewall.h b/drivers/bus/stm32_firewall.h new file mode 100644 index 000000000000..e5fac85fe346 --- /dev/null +++ b/drivers/bus/stm32_firewall.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023, STMicroelectronics - All Rights Reserved + */ + +#ifndef _STM32_FIREWALL_H +#define _STM32_FIREWALL_H + +#include +#include +#include +#include +#include + +/** + * STM32_PERIPHERAL_FIREWALL: This type of firewall protects peripherals + * STM32_MEMORY_FIREWALL: This type of firewall protects memories/subsets of memory + * zones + * STM32_NOTYPE_FIREWALL: Undefined firewall type + */ + +#define STM32_PERIPHERAL_FIREWALL BIT(1) +#define STM32_MEMORY_FIREWALL BIT(2) +#define STM32_NOTYPE_FIREWALL BIT(3) + +/** + * struct stm32_firewall_controller - Information on firewall controller supplying services + * + * @name: Name of the firewall controller + * @dev: Device reference of the firewall controller + * @mmio: Base address of the firewall controller + * @entry: List entry of the firewall controller list + * @type: Type of firewall + * @max_entries: Number of entries covered by the firewall + * @grant_access: Callback used to grant access for a device access against a + * firewall controller + * @release_access: Callback used to release resources taken by a device when access was + * granted + * @grant_memory_range_access: Callback used to grant access for a device to a given memory region + */ +struct stm32_firewall_controller { + const char *name; + struct device *dev; + void __iomem *mmio; + struct list_head entry; + unsigned int type; + unsigned int max_entries; + + int (*grant_access)(struct stm32_firewall_controller *ctrl, u32 id); + void (*release_access)(struct stm32_firewall_controller 
*ctrl, u32 id); + int (*grant_memory_range_access)(struct stm32_firewall_controller *ctrl, phys_addr_t paddr, + size_t size); +}; + +/** + * stm32_firewall_controller_register - Register a firewall controller to the STM32 firewall + * framework + * @firewall_controller: Firewall controller to register + * + * Returns 0 in case of success or -ENODEV if no controller was given. + */ +int stm32_firewall_controller_register(struct stm32_firewall_controller *firewall_controller); + +/** + * stm32_firewall_controller_unregister - Unregister a firewall controller from the STM32 + * firewall framework + * @firewall_controller: Firewall controller to unregister + */ +void stm32_firewall_controller_unregister(struct stm32_firewall_controller *firewall_controller); + +/** + * stm32_firewall_populate_bus - Populate device tree nodes that have a correct firewall + * configuration. This is used at boot-time only, as a sanity check + * between device tree and firewalls hardware configurations to + * prevent a kernel crash when a device driver is not granted access + * + * @firewall_controller: Firewall controller which nodes will be populated or not + * + * Returns 0 in case of success or appropriate errno code if error occurred. + */ +int stm32_firewall_populate_bus(struct stm32_firewall_controller *firewall_controller); + +#endif /* _STM32_FIREWALL_H */ diff --git a/include/linux/bus/stm32_firewall_device.h b/include/linux/bus/stm32_firewall_device.h new file mode 100644 index 000000000000..18e0a2fc3816 --- /dev/null +++ b/include/linux/bus/stm32_firewall_device.h @@ -0,0 +1,142 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023, STMicroelectronics - All Rights Reserved + */ + +#ifndef STM32_FIREWALL_DEVICE_H +#define STM32_FIREWALL_DEVICE_H + +#include +#include +#include + +#define STM32_FIREWALL_MAX_EXTRA_ARGS 5 + +/* Opaque reference to stm32_firewall_controller */ +struct stm32_firewall_controller; + +/** + * struct stm32_firewall - Information on a device's firewall. Each device can have more than one + * firewall. + * + * @firewall_ctrl: Pointer referencing a firewall controller of the device. It is + * opaque so a device cannot manipulate the controller's ops or access + * the controller's data + * @extra_args: Extra arguments that are implementation dependent + * @entry: Name of the firewall entry + * @extra_args_size: Number of extra arguments + * @firewall_id: Firewall ID associated the device for this firewall controller + */ +struct stm32_firewall { + struct stm32_firewall_controller *firewall_ctrl; + u32 extra_args[STM32_FIREWALL_MAX_EXTRA_ARGS]; + const char *entry; + size_t extra_args_size; + u32 firewall_id; +}; + +#if IS_ENABLED(CONFIG_STM32_FIREWALL) +/** + * stm32_firewall_get_firewall - Get the firewall(s) associated to given device. + * The firewall controller reference is always the first argument + * of each of the access-controller property entries. + * The firewall ID is always the second argument of each of the + * access-controller property entries. + * If there's no argument linked to the phandle, then the firewall ID + * field is set to U32_MAX, which is an invalid ID. + * + * @np: Device node to parse + * @firewall: Array of firewall references + * @nb_firewall: Number of firewall references to get. Must be at least 1. + * + * Returns 0 on success, -ENODEV if there's no match with a firewall controller or appropriate errno + * code if error occurred. 
+ */ +int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall, + unsigned int nb_firewall); + +/** + * stm32_firewall_grant_access - Request firewall access rights and grant access. + * + * @firewall: Firewall reference containing the ID to check against its firewall + * controller + * + * Returns 0 if access is granted, -EACCES if access is denied, -ENODEV if firewall is null or + * appropriate errno code if error occurred + */ +int stm32_firewall_grant_access(struct stm32_firewall *firewall); + +/** + * stm32_firewall_release_access - Release access granted from a call to + * stm32_firewall_grant_access(). + * + * @firewall: Firewall reference containing the ID to check against its firewall + * controller + */ +void stm32_firewall_release_access(struct stm32_firewall *firewall); + +/** + * stm32_firewall_grant_access_by_id - Request firewall access rights of a given device + * based on a specific firewall ID + * + * Warnings: + * There is no way to ensure that the given ID will correspond to the firewall referenced in the + * device node if the ID did not come from stm32_firewall_get_firewall(). In that case, this + * function must be used with caution. + * This function should be used for subsystem resources that do not have the same firewall ID + * as their parent. + * U32_MAX is an invalid ID. + * + * @firewall: Firewall reference containing the firewall controller + * @subsystem_id: Firewall ID of the subsystem resource + * + * Returns 0 if access is granted, -EACCES if access is denied, -ENODEV if firewall is null or + * appropriate errno code if error occurred + */ +int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id); + +/** + * stm32_firewall_release_access_by_id - Release access granted from a call to + * stm32_firewall_grant_access_by_id(). + * + * Warnings: + * There is no way to ensure that the given ID will correspond to the firewall referenced in the + * device node if the ID did not come from stm32_firewall_get_firewall(). In that case, this + * function must be used with caution. + * This function should be used for subsystem resources that do not have the same firewall ID + * as their parent. + * U32_MAX is an invalid ID. + * + * @firewall: Firewall reference containing the firewall controller + * @subsystem_id: Firewall ID of the subsystem resource + */ +void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id); + +#else /* CONFIG_STM32_FIREWALL */ + +int stm32_firewall_get_firewall(struct device_node *np, struct stm32_firewall *firewall, + unsigned int nb_firewall); +{ + return -ENODEV; +} + +int stm32_firewall_grant_access(struct stm32_firewall *firewall) +{ + return -ENODEV; +} + +void stm32_firewall_release_access(struct stm32_firewall *firewall) +{ +} + +int stm32_firewall_grant_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id) +{ + return -ENODEV; +} + +void stm32_firewall_release_access_by_id(struct stm32_firewall *firewall, u32 subsystem_id) +{ +} + +#endif /* CONFIG_STM32_FIREWALL */ +#endif /* STM32_FIREWALL_DEVICE_H */ -- cgit From 0a525621b7e5b49202b19d8f75382c6778fdd0c1 Mon Sep 17 00:00:00 2001 From: Philo Lu Date: Fri, 5 Apr 2024 10:55:34 +0800 Subject: bpf: store both map ptr and state in bpf_insn_aux_data Currently, bpf_insn_aux_data->map_ptr_state is used to store either map_ptr or its poison state (i.e., BPF_MAP_PTR_POISON). Thus BPF_MAP_PTR_POISON must be checked before reading map_ptr. 
In certain cases, we may need valid map_ptr even in case of poison state. This will be explained in next patch with bpf_for_each_map_elem() helper. This patch changes map_ptr_state into a new struct including both map pointer and its state (poison/unpriv). It's in the same union with struct bpf_loop_inline_state, so there is no extra memory overhead. Besides, macros BPF_MAP_PTR_UNPRIV/BPF_MAP_PTR_POISON/BPF_MAP_PTR are no longer needed. This patch does not change any existing functionality. Signed-off-by: Philo Lu Acked-by: Yonghong Song Link: https://lore.kernel.org/r/20240405025536.18113-2-lulie@linux.alibaba.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 9 ++++++++- kernel/bpf/verifier.c | 36 ++++++++++++++++-------------------- 2 files changed, 24 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 7cb1b75eee38..36d19cd32eb5 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -502,6 +502,13 @@ struct bpf_loop_inline_state { u32 callback_subprogno; /* valid when fit_for_inline is true */ }; +/* pointer and state for maps */ +struct bpf_map_ptr_state { + struct bpf_map *map_ptr; + bool poison; + bool unpriv; +}; + /* Possible states for alu_state member. */ #define BPF_ALU_SANITIZE_SRC (1U << 0) #define BPF_ALU_SANITIZE_DST (1U << 1) @@ -514,7 +521,7 @@ struct bpf_loop_inline_state { struct bpf_insn_aux_data { union { enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ - unsigned long map_ptr_state; /* pointer/poison value for maps */ + struct bpf_map_ptr_state map_ptr_state; s32 call_imm; /* saved imm field of call insn */ u32 alu_limit; /* limit for add/sub register with pointer */ struct { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 4353cb09c35b..62fedb1f3312 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -190,11 +190,6 @@ struct bpf_verifier_stack_elem { #define BPF_MAP_KEY_POISON (1ULL << 63) #define BPF_MAP_KEY_SEEN (1ULL << 62) -#define BPF_MAP_PTR_UNPRIV 1UL -#define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \ - POISON_POINTER_DELTA)) -#define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV)) - #define BPF_GLOBAL_PERCPU_MA_MAX_SIZE 512 static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx); @@ -209,21 +204,22 @@ static bool is_trusted_reg(const struct bpf_reg_state *reg); static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux) { - return BPF_MAP_PTR(aux->map_ptr_state) == BPF_MAP_PTR_POISON; + return aux->map_ptr_state.poison; } static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux) { - return aux->map_ptr_state & BPF_MAP_PTR_UNPRIV; + return aux->map_ptr_state.unpriv; } static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux, - const struct bpf_map *map, bool unpriv) + struct bpf_map *map, + bool unpriv, bool poison) { - BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV); unpriv |= bpf_map_ptr_unpriv(aux); - aux->map_ptr_state = (unsigned long)map | - (unpriv ? 
BPF_MAP_PTR_UNPRIV : 0UL); + aux->map_ptr_state.unpriv = unpriv; + aux->map_ptr_state.poison = poison; + aux->map_ptr_state.map_ptr = map; } static bool bpf_map_key_poisoned(const struct bpf_insn_aux_data *aux) @@ -9660,7 +9656,7 @@ static int set_map_elem_callback_state(struct bpf_verifier_env *env, return -EINVAL; } - map = BPF_MAP_PTR(insn_aux->map_ptr_state); + map = insn_aux->map_ptr_state.map_ptr; if (!map->ops->map_set_for_each_callback_args || !map->ops->map_for_each_callback) { verbose(env, "callback function not allowed for map\n"); @@ -10019,12 +10015,12 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, return -EACCES; } - if (!BPF_MAP_PTR(aux->map_ptr_state)) + if (!aux->map_ptr_state.map_ptr) + bpf_map_ptr_store(aux, meta->map_ptr, + !meta->map_ptr->bypass_spec_v1, false); + else if (aux->map_ptr_state.map_ptr != meta->map_ptr) bpf_map_ptr_store(aux, meta->map_ptr, - !meta->map_ptr->bypass_spec_v1); - else if (BPF_MAP_PTR(aux->map_ptr_state) != meta->map_ptr) - bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, - !meta->map_ptr->bypass_spec_v1); + !meta->map_ptr->bypass_spec_v1, true); return 0; } @@ -19840,7 +19836,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env) !bpf_map_ptr_unpriv(aux)) { struct bpf_jit_poke_descriptor desc = { .reason = BPF_POKE_REASON_TAIL_CALL, - .tail_call.map = BPF_MAP_PTR(aux->map_ptr_state), + .tail_call.map = aux->map_ptr_state.map_ptr, .tail_call.key = bpf_map_key_immediate(aux), .insn_idx = i + delta, }; @@ -19869,7 +19865,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env) return -EINVAL; } - map_ptr = BPF_MAP_PTR(aux->map_ptr_state); + map_ptr = aux->map_ptr_state.map_ptr; insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, map_ptr->max_entries, 2); insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, @@ -19977,7 +19973,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env) if (bpf_map_ptr_poisoned(aux)) goto patch_call_imm; - map_ptr = BPF_MAP_PTR(aux->map_ptr_state); + map_ptr = aux->map_ptr_state.map_ptr; ops = map_ptr->ops; if (insn->imm == BPF_FUNC_map_lookup_elem && ops->map_gen_lookup) { -- cgit From 4aafb8ab2a626ac714931ec7894cf16df948b359 Mon Sep 17 00:00:00 2001 From: Cosmin Ratiu Date: Thu, 4 Apr 2024 20:33:54 +0300 Subject: net/mlx5e: Support FEC settings for 100G/lane modes This consists of: 1. Expose the 100G/lane capability bit in the PCAM reg. 2. Expose the per link mode FEC capability masks in the PPLM reg. 3. Set the overrides according to ethtool parameters. FEC for new modes is set if and only if the PCAM 100G/lane capability is advertised and the capability mask for a given link mode reports that it can accept the requested FEC mode. 
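A hedged sketch of that gating condition, using the PCAM feature bit and per-link-mode override capability fields added below; the helper itself is hypothetical and not part of the patch:

    /* Illustrative only: a requested FEC policy may be applied to a
     * 100G-per-lane link mode when the PCAM capability is advertised and
     * the link mode's override capability mask accepts the requested mode.
     */
    static bool mlx5e_fec_100g_per_lane_allowed(struct mlx5_core_dev *mdev,
                                                u16 fec_override_cap,
                                                u16 fec_policy)
    {
            return MLX5_CAP_PCAM_FEATURE(mdev, fec_100G_per_lane_in_pplm) &&
                   (fec_override_cap & fec_policy);
    }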
Signed-off-by: Cosmin Ratiu Reviewed-by: Gal Pressman Signed-off-by: Tariq Toukan Link: https://lore.kernel.org/r/20240404173357.123307-3-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 36 +++++++++++++++++++++-- include/linux/mlx5/mlx5_ifc.h | 20 +++++++++++-- 2 files changed, 52 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index b4681a93807d..b4efc780e297 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -292,10 +292,15 @@ enum mlx5e_fec_supported_link_mode { MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X, MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X, MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X, + MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X, + MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X, + MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X, + MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X, MLX5E_MAX_FEC_SUPPORTED_LINK_MODE, }; #define MLX5E_FEC_FIRST_50G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X +#define MLX5E_FEC_FIRST_100G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X #define MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, policy, write, link) \ do { \ @@ -313,7 +318,10 @@ static bool mlx5e_is_fec_supported_link_mode(struct mlx5_core_dev *dev, enum mlx5e_fec_supported_link_mode link_mode) { return link_mode < MLX5E_FEC_FIRST_50G_PER_LANE_MODE || - MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm); + (link_mode < MLX5E_FEC_FIRST_100G_PER_LANE_MODE && + MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm)) || + (link_mode >= MLX5E_FEC_FIRST_100G_PER_LANE_MODE && + MLX5_CAP_PCAM_FEATURE(dev, fec_100G_per_lane_in_pplm)); } /* get/set FEC admin field for a given speed */ @@ -348,6 +356,18 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write, case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X: MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_8x); break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X: + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g_1x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X: + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_2x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X: + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_4x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X: + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 800g_8x); + break; default: return -EINVAL; } @@ -389,6 +409,18 @@ static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap, case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X: *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_8x); break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X: + *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 100g_1x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X: + *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 200g_2x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X: + *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_4x); + break; + case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X: + *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 800g_8x); + break; default: return -EINVAL; } @@ -501,7 +533,7 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy) /* RS fec in ethtool is mapped to MLX5E_FEC_RS_528_514 * to link modes up to 25G per lane and to * MLX5E_FEC_RS_544_514 in the new link modes based on - * 50 G per lane + * 50G or 100G per lane */ if (conf_fec == (1 << MLX5E_FEC_RS_528_514) && i >= 
MLX5E_FEC_FIRST_50G_PER_LANE_MODE) diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index cc159d8563d1..35ffc9b9f241 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -9817,7 +9817,21 @@ struct mlx5_ifc_pplm_reg_bits { u8 fec_override_admin_100g_2x[0x10]; u8 fec_override_admin_50g_1x[0x10]; - u8 reserved_at_140[0x140]; + u8 fec_override_cap_800g_8x[0x10]; + u8 fec_override_cap_400g_4x[0x10]; + + u8 fec_override_cap_200g_2x[0x10]; + u8 fec_override_cap_100g_1x[0x10]; + + u8 reserved_at_180[0xa0]; + + u8 fec_override_admin_800g_8x[0x10]; + u8 fec_override_admin_400g_4x[0x10]; + + u8 fec_override_admin_200g_2x[0x10]; + u8 fec_override_admin_100g_1x[0x10]; + + u8 reserved_at_260[0x20]; }; struct mlx5_ifc_ppcnt_reg_bits { @@ -10189,7 +10203,9 @@ struct mlx5_ifc_mtutc_reg_bits { }; struct mlx5_ifc_pcam_enhanced_features_bits { - u8 reserved_at_0[0x68]; + u8 reserved_at_0[0x48]; + u8 fec_100G_per_lane_in_pplm[0x1]; + u8 reserved_at_49[0x1f]; u8 fec_50G_per_lane_in_pplm[0x1]; u8 reserved_at_69[0x4]; u8 rx_icrc_encapsulated_counter[0x1]; -- cgit From 0e9c127729be19adcdf2f5cc1f3450a4322fdf3a Mon Sep 17 00:00:00 2001 From: Rahul Rameshbabu Date: Wed, 3 Apr 2024 14:28:39 -0700 Subject: ethtool: add interface to read Tx hardware timestamping statistics Multiple network devices that support hardware timestamping appear to have common behavior with regards to timestamp handling. Implement common Tx hardware timestamping statistics in a tx_stats struct_group. Common Rx hardware timestamping statistics can subsequently be implemented in a rx_stats struct_group for ethtool_ts_stats. Signed-off-by: Rahul Rameshbabu Reviewed-by: Dragos Tatulea Link: https://lore.kernel.org/r/20240403212931.128541-2-rrameshbabu@nvidia.com Signed-off-by: Jakub Kicinski --- Documentation/netlink/specs/ethtool.yaml | 17 +++++++++ Documentation/networking/ethtool-netlink.rst | 9 +++++ include/linux/ethtool.h | 27 ++++++++++++++- include/uapi/linux/ethtool_netlink.h | 14 ++++++++ net/ethtool/tsinfo.c | 52 +++++++++++++++++++++++++++- 5 files changed, 117 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml index d0e4a47e0f21..b76916394ec9 100644 --- a/Documentation/netlink/specs/ethtool.yaml +++ b/Documentation/netlink/specs/ethtool.yaml @@ -565,6 +565,18 @@ attribute-sets: - name: tx-lpi-timer type: u32 + - + name: ts-stat + attributes: + - + name: tx-pkts + type: uint + - + name: tx-lost + type: uint + - + name: tx-err + type: uint - name: tsinfo attributes: @@ -587,6 +599,10 @@ attribute-sets: - name: phc-index type: u32 + - + name: stats + type: nest + nested-attributes: ts-stat - name: cable-result attributes: @@ -1394,6 +1410,7 @@ operations: - tx-types - rx-filters - phc-index + - stats dump: *tsinfo-get-op - name: cable-test-act diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index d583d9abf2f8..08d330b0f50f 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -1237,12 +1237,21 @@ Kernel response contents: ``ETHTOOL_A_TSINFO_TX_TYPES`` bitset supported Tx types ``ETHTOOL_A_TSINFO_RX_FILTERS`` bitset supported Rx filters ``ETHTOOL_A_TSINFO_PHC_INDEX`` u32 PTP hw clock index + ``ETHTOOL_A_TSINFO_STATS`` nested HW timestamping statistics ===================================== ====== ========================== ``ETHTOOL_A_TSINFO_PHC_INDEX`` is absent if 
there is no associated PHC (there is no special value for this case). The bitset attributes are omitted if they would be empty (no bit set). +Additional hardware timestamping statistics response contents: + + ===================================== ====== =================================== + ``ETHTOOL_A_TS_STAT_TX_PKTS`` u64 Packets with Tx HW timestamps + ``ETHTOOL_A_TS_STAT_TX_LOST`` u64 Tx HW timestamp not arrived count + ``ETHTOOL_A_TS_STAT_TX_ERR`` u64 HW error request Tx timestamp count + ===================================== ====== =================================== + CABLE_TEST ========== diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 9901e563f706..6fd9107d3cc0 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -480,6 +480,26 @@ struct ethtool_rmon_stats { ); }; +/** + * struct ethtool_ts_stats - HW timestamping statistics + * @pkts: Number of packets successfully timestamped by the hardware. + * @lost: Number of hardware timestamping requests where the timestamping + * information from the hardware never arrived for submission with + * the skb. + * @err: Number of arbitrary timestamp generation error events that the + * hardware encountered, exclusive of @lost statistics. Cases such + * as resource exhaustion, unavailability, firmware errors, and + * detected illogical timestamp values not submitted with the skb + * are inclusive to this counter. + */ +struct ethtool_ts_stats { + struct_group(tx_stats, + u64 pkts; + u64 lost; + u64 err; + ); +}; + #define ETH_MODULE_EEPROM_PAGE_LEN 128 #define ETH_MODULE_MAX_I2C_ADDRESS 0x7f @@ -755,7 +775,10 @@ struct ethtool_rxfh_param { * @get_ts_info: Get the time stamping and PTP hardware clock capabilities. * It may be called with RCU, or rtnl or reference on the device. * Drivers supporting transmit time stamps in software should set this to - * ethtool_op_get_ts_info(). + * ethtool_op_get_ts_info(). Drivers must not zero statistics which they + * don't report. The stats structure is initialized to ETHTOOL_STAT_NOT_SET + * indicating driver does not report statistics. + * @get_ts_stats: Query the device hardware timestamping statistics. * @get_module_info: Get the size and type of the eeprom contained within * a plug-in module. 
* @get_module_eeprom: Get the eeprom information from the plug-in module @@ -898,6 +921,8 @@ struct ethtool_ops { struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); + void (*get_ts_stats)(struct net_device *dev, + struct ethtool_ts_stats *ts_stats); int (*get_module_info)(struct net_device *, struct ethtool_modinfo *); int (*get_module_eeprom)(struct net_device *, diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index accbb1a231df..708272026d80 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -478,12 +478,26 @@ enum { ETHTOOL_A_TSINFO_TX_TYPES, /* bitset */ ETHTOOL_A_TSINFO_RX_FILTERS, /* bitset */ ETHTOOL_A_TSINFO_PHC_INDEX, /* u32 */ + ETHTOOL_A_TSINFO_STATS, /* nest - _A_TSINFO_STAT */ /* add new constants above here */ __ETHTOOL_A_TSINFO_CNT, ETHTOOL_A_TSINFO_MAX = (__ETHTOOL_A_TSINFO_CNT - 1) }; +enum { + ETHTOOL_A_TS_STAT_UNSPEC, + + ETHTOOL_A_TS_STAT_TX_PKTS, /* u64 */ + ETHTOOL_A_TS_STAT_TX_LOST, /* u64 */ + ETHTOOL_A_TS_STAT_TX_ERR, /* u64 */ + + /* add new constants above here */ + __ETHTOOL_A_TS_STAT_CNT, + ETHTOOL_A_TS_STAT_MAX = (__ETHTOOL_A_TS_STAT_CNT - 1) + +}; + /* PHC VCLOCKS */ enum { diff --git a/net/ethtool/tsinfo.c b/net/ethtool/tsinfo.c index 9daed0aab162..be2755c8d8fd 100644 --- a/net/ethtool/tsinfo.c +++ b/net/ethtool/tsinfo.c @@ -13,14 +13,18 @@ struct tsinfo_req_info { struct tsinfo_reply_data { struct ethnl_reply_data base; struct ethtool_ts_info ts_info; + struct ethtool_ts_stats stats; }; #define TSINFO_REPDATA(__reply_base) \ container_of(__reply_base, struct tsinfo_reply_data, base) +#define ETHTOOL_TS_STAT_CNT \ + (__ETHTOOL_A_TS_STAT_CNT - (ETHTOOL_A_TS_STAT_UNSPEC + 1)) + const struct nla_policy ethnl_tsinfo_get_policy[] = { [ETHTOOL_A_TSINFO_HEADER] = - NLA_POLICY_NESTED(ethnl_header_policy), + NLA_POLICY_NESTED(ethnl_header_policy_stats), }; static int tsinfo_prepare_data(const struct ethnl_req_info *req_base, @@ -34,6 +38,12 @@ static int tsinfo_prepare_data(const struct ethnl_req_info *req_base, ret = ethnl_ops_begin(dev); if (ret < 0) return ret; + if (req_base->flags & ETHTOOL_FLAG_STATS && + dev->ethtool_ops->get_ts_stats) { + ethtool_stats_init((u64 *)&data->stats, + sizeof(data->stats) / sizeof(u64)); + dev->ethtool_ops->get_ts_stats(dev, &data->stats); + } ret = __ethtool_get_ts_info(dev, &data->ts_info); ethnl_ops_complete(dev); @@ -79,10 +89,47 @@ static int tsinfo_reply_size(const struct ethnl_req_info *req_base, } if (ts_info->phc_index >= 0) len += nla_total_size(sizeof(u32)); /* _TSINFO_PHC_INDEX */ + if (req_base->flags & ETHTOOL_FLAG_STATS) + len += nla_total_size(0) + /* _TSINFO_STATS */ + nla_total_size_64bit(sizeof(u64)) * ETHTOOL_TS_STAT_CNT; return len; } +static int tsinfo_put_stat(struct sk_buff *skb, u64 val, u16 attrtype) +{ + if (val == ETHTOOL_STAT_NOT_SET) + return 0; + if (nla_put_uint(skb, attrtype, val)) + return -EMSGSIZE; + return 0; +} + +static int tsinfo_put_stats(struct sk_buff *skb, + const struct ethtool_ts_stats *stats) +{ + struct nlattr *nest; + + nest = nla_nest_start(skb, ETHTOOL_A_TSINFO_STATS); + if (!nest) + return -EMSGSIZE; + + if (tsinfo_put_stat(skb, stats->tx_stats.pkts, + ETHTOOL_A_TS_STAT_TX_PKTS) || + tsinfo_put_stat(skb, stats->tx_stats.lost, + ETHTOOL_A_TS_STAT_TX_LOST) || + tsinfo_put_stat(skb, stats->tx_stats.err, + ETHTOOL_A_TS_STAT_TX_ERR)) + goto err_cancel; + + nla_nest_end(skb, nest); + return 0; + 
+err_cancel: + nla_nest_cancel(skb, nest); + return -EMSGSIZE; +} + static int tsinfo_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base, const struct ethnl_reply_data *reply_base) @@ -119,6 +166,9 @@ static int tsinfo_fill_reply(struct sk_buff *skb, if (ts_info->phc_index >= 0 && nla_put_u32(skb, ETHTOOL_A_TSINFO_PHC_INDEX, ts_info->phc_index)) return -EMSGSIZE; + if (req_base->flags & ETHTOOL_FLAG_STATS && + tsinfo_put_stats(skb, &data->stats)) + return -EMSGSIZE; return 0; } -- cgit From 9f06f87fef689d28588cde8c7ebb00a67da34026 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 3 Apr 2024 13:21:39 -0700 Subject: net: skbuff: generalize the skb->decrypted bit The ->decrypted bit can be reused for other crypto protocols. Remove the direct dependency on TLS, add helpers to clean up the ifdefs leaking out everywhere. Signed-off-by: Jakub Kicinski Reviewed-by: David Ahern Signed-off-by: David S. Miller --- include/linux/skbuff.h | 15 ++++++++++++--- include/net/sock.h | 4 +--- net/Kconfig | 3 +++ net/core/sock.c | 5 ++--- net/ipv4/tcp_input.c | 12 +++--------- net/ipv4/tcp_ipv4.c | 4 +--- net/ipv4/tcp_offload.c | 4 +--- net/tls/Kconfig | 1 + 8 files changed, 24 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 03ea36a82cdd..7dfb906d92f7 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -992,7 +992,7 @@ struct sk_buff { #ifdef CONFIG_NETFILTER_SKIP_EGRESS __u8 nf_skip_egress:1; #endif -#ifdef CONFIG_TLS_DEVICE +#ifdef CONFIG_SKB_DECRYPTED __u8 decrypted:1; #endif __u8 slow_gro:1; @@ -1615,17 +1615,26 @@ static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) static inline int skb_cmp_decrypted(const struct sk_buff *skb1, const struct sk_buff *skb2) { -#ifdef CONFIG_TLS_DEVICE +#ifdef CONFIG_SKB_DECRYPTED return skb2->decrypted - skb1->decrypted; #else return 0; #endif } +static inline bool skb_is_decrypted(const struct sk_buff *skb) +{ +#ifdef CONFIG_SKB_DECRYPTED + return skb->decrypted; +#else + return false; +#endif +} + static inline void skb_copy_decrypted(struct sk_buff *to, const struct sk_buff *from) { -#ifdef CONFIG_TLS_DEVICE +#ifdef CONFIG_SKB_DECRYPTED to->decrypted = from->decrypted; #endif } diff --git a/include/net/sock.h b/include/net/sock.h index 2253eefe2848..a495330c5c49 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -2835,12 +2835,10 @@ static inline struct sk_buff *sk_validate_xmit_skb(struct sk_buff *skb, if (sk && sk_fullsock(sk) && sk->sk_validate_xmit_skb) { skb = sk->sk_validate_xmit_skb(sk, dev, skb); -#ifdef CONFIG_TLS_DEVICE - } else if (unlikely(skb->decrypted)) { + } else if (unlikely(skb_is_decrypted(skb))) { pr_warn_ratelimited("unencrypted skb with no associated socket - dropping\n"); kfree_skb(skb); skb = NULL; -#endif } #endif diff --git a/net/Kconfig b/net/Kconfig index 3e57ccf0da27..d5ab791f7afa 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -60,6 +60,9 @@ config NET_XGRESS config NET_REDIRECT bool +config SKB_DECRYPTED + bool + config SKB_EXTENSIONS bool diff --git a/net/core/sock.c b/net/core/sock.c index 5ed411231fc7..fe9195186c13 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2526,13 +2526,12 @@ EXPORT_SYMBOL(skb_set_owner_w); static bool can_skb_orphan_partial(const struct sk_buff *skb) { -#ifdef CONFIG_TLS_DEVICE /* Drivers depend on in-order delivery for crypto offload, * partial orphan breaks out-of-order-OK logic. 
*/ - if (skb->decrypted) + if (skb_is_decrypted(skb)) return false; -#endif + return (skb->destructor == sock_wfree || (IS_ENABLED(CONFIG_INET) && skb->destructor == tcp_wfree)); } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 8d44ab5671ea..1f28a2561795 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4805,10 +4805,8 @@ static bool tcp_try_coalesce(struct sock *sk, if (!mptcp_skb_can_collapse(to, from)) return false; -#ifdef CONFIG_TLS_DEVICE - if (from->decrypted != to->decrypted) + if (skb_cmp_decrypted(from, to)) return false; -#endif if (!skb_try_coalesce(to, from, fragstolen, &delta)) return false; @@ -5377,9 +5375,7 @@ restart: break; memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); -#ifdef CONFIG_TLS_DEVICE - nskb->decrypted = skb->decrypted; -#endif + skb_copy_decrypted(nskb, skb); TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start; if (list) __skb_queue_before(list, skb, nskb); @@ -5409,10 +5405,8 @@ restart: !mptcp_skb_can_collapse(nskb, skb) || (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) goto end; -#ifdef CONFIG_TLS_DEVICE - if (skb->decrypted != nskb->decrypted) + if (skb_cmp_decrypted(skb, nskb)) goto end; -#endif } } } diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 56b75efcfd12..52963c3bb8ca 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -2044,10 +2044,8 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb, TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) || ((TCP_SKB_CB(tail)->tcp_flags ^ TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) || -#ifdef CONFIG_TLS_DEVICE - tail->decrypted != skb->decrypted || -#endif !mptcp_skb_can_collapse(tail, skb) || + skb_cmp_decrypted(tail, skb) || thtail->doff != th->doff || memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th))) goto no_coalesce; diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index ebe4722bb020..fab0973f995b 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -265,9 +265,7 @@ found: flush |= (len - 1) >= mss; flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); -#ifdef CONFIG_TLS_DEVICE - flush |= p->decrypted ^ skb->decrypted; -#endif + flush |= skb_cmp_decrypted(p, skb); if (flush || skb_gro_receive(p, skb)) { mss = 1; diff --git a/net/tls/Kconfig b/net/tls/Kconfig index 0cdc1f7b6b08..ce8d56a19187 100644 --- a/net/tls/Kconfig +++ b/net/tls/Kconfig @@ -20,6 +20,7 @@ config TLS config TLS_DEVICE bool "Transport Layer Security HW offload" depends on TLS + select SKB_DECRYPTED select SOCK_VALIDATE_XMIT select SOCK_RX_QUEUE_MAPPING default n -- cgit From 6916e461e7933d3d003441291c543938f2ccb371 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 4 Apr 2024 11:29:51 +0200 Subject: net: phy: Introduce ethernet link topology representation Link topologies containing multiple network PHYs attached to the same net_device can be found when using a PHY as a media converter for use with an SFP connector, on which an SFP transceiver containing a PHY can be used. With the current model, the transceiver's PHY can't be used for operations such as cable testing, timestamping, macsec offload, etc. The reason being that most of the logic for these configuration, coming from either ethtool netlink or ioctls tend to use netdev->phydev, which in multi-phy systems will reference the PHY closest to the MAC. Introduce a numbering scheme allowing to enumerate PHY devices that belong to any netdev, which can in turn allow userspace to take more precise decisions with regard to each PHY's configuration. 
The numbering is maintained per-netdev, in a phy_device_list. The numbering works similarly to a netdevice's ifindex, with identifiers that are only recycled once INT_MAX has been reached. This prevents races that could occur between PHY listing and SFP transceiver removal/insertion. The identifiers are assigned at phy_attach time, as the numbering depends on the netdevice the phy is attached to. The PHY index can be re-used for PHYs that are persistent. Signed-off-by: Maxime Chevallier Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- MAINTAINERS | 2 + drivers/net/phy/Makefile | 2 +- drivers/net/phy/phy_device.c | 7 +++ drivers/net/phy/phy_link_topology.c | 105 +++++++++++++++++++++++++++++++++ include/linux/netdevice.h | 4 +- include/linux/phy.h | 4 ++ include/linux/phy_link_topology.h | 72 ++++++++++++++++++++++ include/linux/phy_link_topology_core.h | 25 ++++++++ include/uapi/linux/ethtool.h | 16 +++++ net/core/dev.c | 9 +++ 10 files changed, 244 insertions(+), 2 deletions(-) create mode 100644 drivers/net/phy/phy_link_topology.c create mode 100644 include/linux/phy_link_topology.h create mode 100644 include/linux/phy_link_topology_core.h (limited to 'include/linux') diff --git a/MAINTAINERS b/MAINTAINERS index 046598b471b7..4745ea94d463 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8015,6 +8015,8 @@ F: include/linux/mii.h F: include/linux/of_net.h F: include/linux/phy.h F: include/linux/phy_fixed.h +F: include/linux/phy_link_topology.h +F: include/linux/phy_link_topology_core.h F: include/linux/phylib_stubs.h F: include/linux/platform_data/mdio-bcm-unimac.h F: include/linux/platform_data/mdio-gpio.h diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 202ed7f450da..1d8be374915f 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -2,7 +2,7 @@ # Makefile for Linux PHY drivers libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \ - linkmode.o + linkmode.o phy_link_topology.o mdio-bus-y += mdio_bus.o mdio_device.o ifdef CONFIG_MDIO_DEVICE diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 6c6ec9475709..452fc8b3406d 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include @@ -1511,6 +1512,11 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, if (phydev->sfp_bus_attached) dev->sfp_bus = phydev->sfp_bus; + + err = phy_link_topo_add_phy(dev->link_topo, phydev, + PHY_UPSTREAM_MAC, dev); + if (err) + goto error; } /* Some Ethernet drivers try to connect to a PHY device before @@ -1938,6 +1944,7 @@ void phy_detach(struct phy_device *phydev) if (dev) { phydev->attached_dev->phydev = NULL; phydev->attached_dev = NULL; + phy_link_topo_del_phy(dev->link_topo, phydev); } phydev->phylink = NULL; diff --git a/drivers/net/phy/phy_link_topology.c b/drivers/net/phy/phy_link_topology.c new file mode 100644 index 000000000000..985941c5c558 --- /dev/null +++ b/drivers/net/phy/phy_link_topology.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Infrastructure to handle all PHY devices connected to a given netdev, + * either directly or indirectly attached. 
+ * + * Copyright (c) 2023 Maxime Chevallier + */ + +#include +#include +#include +#include +#include + +struct phy_link_topology *phy_link_topo_create(struct net_device *dev) +{ + struct phy_link_topology *topo; + + topo = kzalloc(sizeof(*topo), GFP_KERNEL); + if (!topo) + return ERR_PTR(-ENOMEM); + + xa_init_flags(&topo->phys, XA_FLAGS_ALLOC1); + topo->next_phy_index = 1; + + return topo; +} + +void phy_link_topo_destroy(struct phy_link_topology *topo) +{ + if (!topo) + return; + + xa_destroy(&topo->phys); + kfree(topo); +} + +int phy_link_topo_add_phy(struct phy_link_topology *topo, + struct phy_device *phy, + enum phy_upstream upt, void *upstream) +{ + struct phy_device_node *pdn; + int ret; + + pdn = kzalloc(sizeof(*pdn), GFP_KERNEL); + if (!pdn) + return -ENOMEM; + + pdn->phy = phy; + switch (upt) { + case PHY_UPSTREAM_MAC: + pdn->upstream.netdev = (struct net_device *)upstream; + if (phy_on_sfp(phy)) + pdn->parent_sfp_bus = pdn->upstream.netdev->sfp_bus; + break; + case PHY_UPSTREAM_PHY: + pdn->upstream.phydev = (struct phy_device *)upstream; + if (phy_on_sfp(phy)) + pdn->parent_sfp_bus = pdn->upstream.phydev->sfp_bus; + break; + default: + ret = -EINVAL; + goto err; + } + pdn->upstream_type = upt; + + /* Attempt to re-use a previously allocated phy_index */ + if (phy->phyindex) { + ret = xa_insert(&topo->phys, phy->phyindex, pdn, GFP_KERNEL); + + /* Errors could be either -ENOMEM or -EBUSY. If the phy has an + * index, and there's another entry at the same index, this is + * unexpected and we still error-out + */ + if (ret) + goto err; + return 0; + } + + ret = xa_alloc_cyclic(&topo->phys, &phy->phyindex, pdn, xa_limit_32b, + &topo->next_phy_index, GFP_KERNEL); + if (ret) + goto err; + + return 0; + +err: + kfree(pdn); + return ret; +} +EXPORT_SYMBOL_GPL(phy_link_topo_add_phy); + +void phy_link_topo_del_phy(struct phy_link_topology *topo, + struct phy_device *phy) +{ + struct phy_device_node *pdn = xa_erase(&topo->phys, phy->phyindex); + + /* We delete the PHY from the topology, however we don't re-set the + * phy->phyindex field. If the PHY isn't gone, we can re-assign it the + * same index next time it's added back to the topology + */ + + kfree(pdn); +} +EXPORT_SYMBOL_GPL(phy_link_topo_del_phy); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 0c198620ac93..d45f330d083d 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -40,7 +40,6 @@ #include #endif #include - #include #include #include @@ -52,6 +51,7 @@ #include #include #include +#include struct netpoll_info; struct device; @@ -1974,6 +1974,7 @@ enum netdev_reg_state { * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp * * @priomap: XXX: need comments on this one + * @link_topo: Physical link topology tracking attached PHYs * @phydev: Physical device may attach itself * for hardware timestamping * @sfp_bus: attached &struct sfp_bus structure. @@ -2364,6 +2365,7 @@ struct net_device { #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) struct netprio_map __rcu *priomap; #endif + struct phy_link_topology *link_topo; struct phy_device *phydev; struct sfp_bus *sfp_bus; struct lock_class_key *qdisc_tx_busylock; diff --git a/include/linux/phy.h b/include/linux/phy.h index e6e83304558e..8c848c79b1fd 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -550,6 +550,9 @@ struct macsec_ops; * @drv: Pointer to the driver for this PHY instance * @devlink: Create a link between phy dev and mac dev, if the external phy * used by current mac interface is managed by another mac interface. 
+ * @phyindex: Unique id across the phy's parent tree of phys to address the PHY + * from userspace, similar to ifindex. A zero index means the PHY + * wasn't assigned an id yet. * @phy_id: UID for this device found during discovery * @c45_ids: 802.3-c45 Device Identifiers if is_c45. * @is_c45: Set to true if this PHY uses clause 45 addressing. @@ -650,6 +653,7 @@ struct phy_device { struct device_link *devlink; + u32 phyindex; u32 phy_id; struct phy_c45_device_ids c45_ids; diff --git a/include/linux/phy_link_topology.h b/include/linux/phy_link_topology.h new file mode 100644 index 000000000000..6b79feb607e7 --- /dev/null +++ b/include/linux/phy_link_topology.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * PHY device list allow maintaining a list of PHY devices that are + * part of a netdevice's link topology. PHYs can for example be chained, + * as is the case when using a PHY that exposes an SFP module, on which an + * SFP transceiver that embeds a PHY is connected. + * + * This list can then be used by userspace to leverage individual PHY + * capabilities. + */ +#ifndef __PHY_LINK_TOPOLOGY_H +#define __PHY_LINK_TOPOLOGY_H + +#include +#include + +struct xarray; +struct phy_device; +struct net_device; +struct sfp_bus; + +struct phy_device_node { + enum phy_upstream upstream_type; + + union { + struct net_device *netdev; + struct phy_device *phydev; + } upstream; + + struct sfp_bus *parent_sfp_bus; + + struct phy_device *phy; +}; + +struct phy_link_topology { + struct xarray phys; + u32 next_phy_index; +}; + +static inline struct phy_device * +phy_link_topo_get_phy(struct phy_link_topology *topo, u32 phyindex) +{ + struct phy_device_node *pdn = xa_load(&topo->phys, phyindex); + + if (pdn) + return pdn->phy; + + return NULL; +} + +#if IS_REACHABLE(CONFIG_PHYLIB) +int phy_link_topo_add_phy(struct phy_link_topology *topo, + struct phy_device *phy, + enum phy_upstream upt, void *upstream); + +void phy_link_topo_del_phy(struct phy_link_topology *lt, struct phy_device *phy); + +#else +static inline int phy_link_topo_add_phy(struct phy_link_topology *topo, + struct phy_device *phy, + enum phy_upstream upt, void *upstream) +{ + return 0; +} + +static inline void phy_link_topo_del_phy(struct phy_link_topology *topo, + struct phy_device *phy) +{ +} +#endif + +#endif /* __PHY_LINK_TOPOLOGY_H */ diff --git a/include/linux/phy_link_topology_core.h b/include/linux/phy_link_topology_core.h new file mode 100644 index 000000000000..0a6479055745 --- /dev/null +++ b/include/linux/phy_link_topology_core.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PHY_LINK_TOPOLOGY_CORE_H +#define __PHY_LINK_TOPOLOGY_CORE_H + +struct phy_link_topology; + +#if IS_REACHABLE(CONFIG_PHYLIB) + +struct phy_link_topology *phy_link_topo_create(struct net_device *dev); +void phy_link_topo_destroy(struct phy_link_topology *topo); + +#else + +static inline struct phy_link_topology *phy_link_topo_create(struct net_device *dev) +{ + return NULL; +} + +static inline void phy_link_topo_destroy(struct phy_link_topology *topo) +{ +} + +#endif + +#endif /* __PHY_LINK_TOPOLOGY_CORE_H */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 11fc18988bc2..95c2f09f0d0a 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -2268,4 +2268,20 @@ struct ethtool_link_settings { * __u32 map_lp_advertising[link_mode_masks_nwords]; */ }; + +/** + * enum phy_upstream - Represents the upstream component a given PHY device + * is connected to, as in what is on the 
other end of the MII bus. Most PHYs + * will be attached to an Ethernet MAC controller, but in some cases, there's + * an intermediate PHY used as a media-converter, which will driver another + * MII interface as its output. + * @PHY_UPSTREAM_MAC: Upstream component is a MAC (a switch port, + * or ethernet controller) + * @PHY_UPSTREAM_PHY: Upstream component is a PHY (likely a media converter) + */ +enum phy_upstream { + PHY_UPSTREAM_MAC, + PHY_UPSTREAM_PHY, +}; + #endif /* _UAPI_LINUX_ETHTOOL_H */ diff --git a/net/core/dev.c b/net/core/dev.c index 92f5bddbc2de..854a3a28a8d8 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -158,6 +158,7 @@ #include #include #include +#include #include "dev.h" #include "net-sysfs.h" @@ -10962,6 +10963,12 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, #ifdef CONFIG_NET_SCHED hash_init(dev->qdisc_hash); #endif + dev->link_topo = phy_link_topo_create(dev); + if (IS_ERR(dev->link_topo)) { + dev->link_topo = NULL; + goto free_all; + } + dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; setup(dev); @@ -11050,6 +11057,8 @@ void free_netdev(struct net_device *dev) free_percpu(dev->xdp_bulkq); dev->xdp_bulkq = NULL; + phy_link_topo_destroy(dev->link_topo); + /* Compatibility with error handling in drivers */ if (dev->reg_state == NETREG_UNINITIALIZED) { netdev_freemem(dev); -- cgit From 0ec5ed6c130e3906ba4ec82d740444a21504fbbf Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 4 Apr 2024 11:29:52 +0200 Subject: net: sfp: pass the phy_device when disconnecting an sfp module's PHY Pass the phy_device as a parameter to the sfp upstream .disconnect_phy operation. This is preparatory work to help track phy devices across a net_device's link. Signed-off-by: Maxime Chevallier Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/phylink.c | 3 ++- drivers/net/phy/sfp-bus.c | 4 ++-- include/linux/sfp.h | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 84a97088dfc6..0e692a3bcf1a 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -3408,7 +3408,8 @@ static int phylink_sfp_connect_phy(void *upstream, struct phy_device *phy) return ret; } -static void phylink_sfp_disconnect_phy(void *upstream) +static void phylink_sfp_disconnect_phy(void *upstream, + struct phy_device *phydev) { phylink_disconnect_phy(upstream); } diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index db39dec7f247..e05013aeecc3 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -486,7 +486,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus) bus->socket_ops->stop(bus->sfp); bus->socket_ops->detach(bus->sfp); if (bus->phydev && ops && ops->disconnect_phy) - ops->disconnect_phy(bus->upstream); + ops->disconnect_phy(bus->upstream, bus->phydev); } bus->registered = false; } @@ -742,7 +742,7 @@ void sfp_remove_phy(struct sfp_bus *bus) const struct sfp_upstream_ops *ops = sfp_get_upstream_ops(bus); if (ops && ops->disconnect_phy) - ops->disconnect_phy(bus->upstream); + ops->disconnect_phy(bus->upstream, bus->phydev); bus->phydev = NULL; } EXPORT_SYMBOL_GPL(sfp_remove_phy); diff --git a/include/linux/sfp.h b/include/linux/sfp.h index 9346cd44814d..0573e53b0c11 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -544,7 +544,7 @@ struct sfp_upstream_ops { void (*link_down)(void *priv); void (*link_up)(void *priv); int (*connect_phy)(void *priv, struct phy_device *); - void (*disconnect_phy)(void *priv); + void (*disconnect_phy)(void *priv, struct phy_device *); }; #if IS_ENABLED(CONFIG_SFP) -- cgit From e75e4e074c4475a3a6145593ecf2dcaf3995fa50 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 4 Apr 2024 11:29:53 +0200 Subject: net: phy: add helpers to handle sfp phy connect/disconnect There are a few PHY drivers that can handle SFP modules through their sfp_upstream_ops. Introduce Phylib helpers to keep track of connected SFP PHYs in a netdevice's namespace, by adding the SFP PHY to the upstream PHY's netdev's namespace. By doing so, these SFP PHYs can be enumerated and exposed to users, which will be able to use their capabilities. Signed-off-by: Maxime Chevallier Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/marvell-88x2222.c | 2 ++ drivers/net/phy/marvell.c | 2 ++ drivers/net/phy/marvell10g.c | 2 ++ drivers/net/phy/phy_device.c | 48 +++++++++++++++++++++++++++++++++++++++ drivers/net/phy/qcom/at803x.c | 2 ++ drivers/net/phy/qcom/qca807x.c | 2 ++ include/linux/phy.h | 2 ++ 7 files changed, 60 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/phy/marvell-88x2222.c b/drivers/net/phy/marvell-88x2222.c index b88398e6872b..0b777cdd7078 100644 --- a/drivers/net/phy/marvell-88x2222.c +++ b/drivers/net/phy/marvell-88x2222.c @@ -553,6 +553,8 @@ static const struct sfp_upstream_ops sfp_phy_ops = { .link_down = mv2222_sfp_link_down, .attach = phy_sfp_attach, .detach = phy_sfp_detach, + .connect_phy = phy_sfp_connect_phy, + .disconnect_phy = phy_sfp_disconnect_phy, }; static int mv2222_probe(struct phy_device *phydev) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 0003276452f6..860dc4001d41 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -3550,6 +3550,8 @@ static const struct sfp_upstream_ops m88e1510_sfp_ops = { .module_remove = m88e1510_sfp_remove, .attach = phy_sfp_attach, .detach = phy_sfp_detach, + .connect_phy = phy_sfp_connect_phy, + .disconnect_phy = phy_sfp_disconnect_phy, }; static int m88e1510_probe(struct phy_device *phydev) diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index ad43e280930c..6642eb642d4b 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -503,6 +503,8 @@ static int mv3310_sfp_insert(void *upstream, const struct sfp_eeprom_id *id) static const struct sfp_upstream_ops mv3310_sfp_ops = { .attach = phy_sfp_attach, .detach = phy_sfp_detach, + .connect_phy = phy_sfp_connect_phy, + .disconnect_phy = phy_sfp_disconnect_phy, .module_insert = mv3310_sfp_insert, }; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 452fc8b3406d..616bd7ba46cb 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -277,6 +277,14 @@ static void phy_mdio_device_remove(struct mdio_device *mdiodev) static struct phy_driver genphy_driver; +static struct phy_link_topology *phy_get_link_topology(struct phy_device *phydev) +{ + if (phydev->attached_dev) + return phydev->attached_dev->link_topo; + + return NULL; +} + static LIST_HEAD(phy_fixup_list); static DEFINE_MUTEX(phy_fixup_lock); @@ -1370,6 +1378,46 @@ phy_standalone_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(phy_standalone); +/** + * phy_sfp_connect_phy - Connect the SFP module's PHY to the upstream PHY + * @upstream: pointer to the upstream phy device + * @phy: pointer to the SFP module's phy device + * + * This helper allows keeping track of PHY devices on the link. It adds the + * SFP module's phy to the phy namespace of the upstream phy + */ +int phy_sfp_connect_phy(void *upstream, struct phy_device *phy) +{ + struct phy_device *phydev = upstream; + struct phy_link_topology *topo = phy_get_link_topology(phydev); + + if (topo) + return phy_link_topo_add_phy(topo, phy, PHY_UPSTREAM_PHY, phydev); + + return 0; +} +EXPORT_SYMBOL(phy_sfp_connect_phy); + +/** + * phy_sfp_disconnect_phy - Disconnect the SFP module's PHY from the upstream PHY + * @upstream: pointer to the upstream phy device + * @phy: pointer to the SFP module's phy device + * + * This helper allows keeping track of PHY devices on the link. It removes the + * SFP module's phy to the phy namespace of the upstream phy. 
As the module phy + * will be destroyed, re-inserting the same module will add a new phy with a + * new index. + */ +void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy) +{ + struct phy_device *phydev = upstream; + struct phy_link_topology *topo = phy_get_link_topology(phydev); + + if (topo) + phy_link_topo_del_phy(topo, phy); +} +EXPORT_SYMBOL(phy_sfp_disconnect_phy); + /** * phy_sfp_attach - attach the SFP bus to the PHY upstream network device * @upstream: pointer to the phy device diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c index c8f83e5f78ab..105602581a03 100644 --- a/drivers/net/phy/qcom/at803x.c +++ b/drivers/net/phy/qcom/at803x.c @@ -770,6 +770,8 @@ static const struct sfp_upstream_ops at8031_sfp_ops = { .attach = phy_sfp_attach, .detach = phy_sfp_detach, .module_insert = at8031_sfp_insert, + .connect_phy = phy_sfp_connect_phy, + .disconnect_phy = phy_sfp_disconnect_phy, }; static int at8031_parse_dt(struct phy_device *phydev) diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c index 672c6929119a..5eb0ab1cb70e 100644 --- a/drivers/net/phy/qcom/qca807x.c +++ b/drivers/net/phy/qcom/qca807x.c @@ -699,6 +699,8 @@ static const struct sfp_upstream_ops qca807x_sfp_ops = { .detach = phy_sfp_detach, .module_insert = qca807x_sfp_insert, .module_remove = qca807x_sfp_remove, + .connect_phy = phy_sfp_connect_phy, + .disconnect_phy = phy_sfp_disconnect_phy, }; static int qca807x_probe(struct phy_device *phydev) diff --git a/include/linux/phy.h b/include/linux/phy.h index 8c848c79b1fd..3ddfe7fe781a 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1758,6 +1758,8 @@ int phy_suspend(struct phy_device *phydev); int phy_resume(struct phy_device *phydev); int __phy_resume(struct phy_device *phydev); int phy_loopback(struct phy_device *phydev, bool enable); +int phy_sfp_connect_phy(void *upstream, struct phy_device *phy); +void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy); void phy_sfp_attach(void *upstream, struct sfp_bus *bus); void phy_sfp_detach(void *upstream, struct sfp_bus *bus); int phy_sfp_probe(struct phy_device *phydev, -- cgit From fdd353965b52de7580795824de7010a73ff98f30 Mon Sep 17 00:00:00 2001 From: Maxime Chevallier Date: Thu, 4 Apr 2024 11:29:54 +0200 Subject: net: sfp: Add helper to return the SFP bus name Knowing the bus name is helpful when we want to expose the link topology to userspace, add a helper to return the SFP bus name. Signed-off-by: Maxime Chevallier Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/phy/sfp-bus.c | 11 +++++++++++ include/linux/sfp.h | 6 ++++++ 2 files changed, 17 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index e05013aeecc3..413021619afe 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -859,3 +859,14 @@ void sfp_unregister_socket(struct sfp_bus *bus) sfp_bus_put(bus); } EXPORT_SYMBOL_GPL(sfp_unregister_socket); + +const char *sfp_get_name(struct sfp_bus *bus) +{ + ASSERT_RTNL(); + + if (bus->sfp_dev) + return dev_name(bus->sfp_dev); + + return NULL; +} +EXPORT_SYMBOL_GPL(sfp_get_name); diff --git a/include/linux/sfp.h b/include/linux/sfp.h index 0573e53b0c11..55c0ab17c9e2 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -570,6 +570,7 @@ struct sfp_bus *sfp_bus_find_fwnode(const struct fwnode_handle *fwnode); int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream, const struct sfp_upstream_ops *ops); void sfp_bus_del_upstream(struct sfp_bus *bus); +const char *sfp_get_name(struct sfp_bus *bus); #else static inline int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, @@ -648,6 +649,11 @@ static inline int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream, static inline void sfp_bus_del_upstream(struct sfp_bus *bus) { } + +static inline const char *sfp_get_name(struct sfp_bus *bus) +{ + return NULL; +} #endif #endif -- cgit From e82051193a171f393d2a165a7ce18d8a2e2b4837 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 7 Apr 2024 02:42:36 -0400 Subject: new helper: copy_to_iter_full() ... and convert copy_linear_skb() to using that. Signed-off-by: Al Viro --- include/linux/uio.h | 10 ++++++++++ include/net/udp.h | 9 +-------- 2 files changed, 11 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/uio.h b/include/linux/uio.h index 00cebe2b70de..7020adedfa08 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -205,6 +205,16 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) return 0; } +static __always_inline __must_check +bool copy_to_iter_full(const void *addr, size_t bytes, struct iov_iter *i) +{ + size_t copied = copy_to_iter(addr, bytes, i); + if (likely(copied == bytes)) + return true; + iov_iter_revert(i, copied); + return false; +} + static __always_inline __must_check bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i) { diff --git a/include/net/udp.h b/include/net/udp.h index 488a6d2babcc..c4e05b14b648 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -379,14 +379,7 @@ static inline bool udp_skb_is_linear(struct sk_buff *skb) static inline int copy_linear_skb(struct sk_buff *skb, int len, int off, struct iov_iter *to) { - int n; - - n = copy_to_iter(skb->data + off, len, to); - if (n == len) - return 0; - - iov_iter_revert(to, n); - return -EFAULT; + return copy_to_iter_full(skb->data + off, len, to) ? 0 : -EFAULT; } /* -- cgit From 210a03c9d51aa0e6e6f06980116e3256da8d4c48 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Thu, 28 Mar 2024 13:27:24 +0100 Subject: fs: claw back a few FMODE_* bits There's a bunch of flags that are purely based on what the file operations support while also never being conditionally set or unset. IOW, they're not subject to change for individual files. Imho, such flags don't need to live in f_mode they might as well live in the fops structs itself. And the fops struct already has that lonely mmap_supported_flags member. 
We might as well turn that into a generic fop_flags member and move a few flags from FMODE_* space into FOP_* space. That gets us four FMODE_* bits back and the ability for new static flags that are about file ops to not have to live in FMODE_* space but in their own FOP_* space. It's not the most beautiful thing ever but it gets the job done. Yes, there'll be an additional pointer chase but hopefully that won't matter for these flags. I suspect there's a few more we can move into there and that we can also redirect a bunch of new flag suggestions that follow this pattern into the fop_flags field instead of f_mode. Link: https://lore.kernel.org/r/20240328-gewendet-spargel-aa60a030ef74@brauner Reviewed-by: Christoph Hellwig Reviewed-by: Jan Kara Reviewed-by: Jens Axboe Signed-off-by: Christian Brauner --- block/bdev.c | 2 +- block/fops.c | 1 + drivers/dax/device.c | 2 +- fs/btrfs/file.c | 4 ++-- fs/ext4/file.c | 6 +++--- fs/f2fs/file.c | 3 ++- fs/read_write.c | 2 +- fs/xfs/xfs_file.c | 8 +++++--- include/linux/fs.h | 22 ++++++++++++---------- io_uring/io_uring.c | 2 +- io_uring/rw.c | 9 +++++---- mm/mmap.c | 4 +++- 12 files changed, 37 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/block/bdev.c b/block/bdev.c index 7a5f611c3d2e..1322dfe32c5d 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -904,7 +904,7 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder, disk_unblock_events(disk); bdev_file->f_flags |= O_LARGEFILE; - bdev_file->f_mode |= FMODE_BUF_RASYNC | FMODE_CAN_ODIRECT; + bdev_file->f_mode |= FMODE_CAN_ODIRECT; if (bdev_nowait(bdev)) bdev_file->f_mode |= FMODE_NOWAIT; bdev_file->f_mapping = bdev->bd_inode->i_mapping; diff --git a/block/fops.c b/block/fops.c index 679d9b752fe8..af6c244314af 100644 --- a/block/fops.c +++ b/block/fops.c @@ -863,6 +863,7 @@ const struct file_operations def_blk_fops = { .splice_read = filemap_splice_read, .splice_write = iter_file_splice_write, .fallocate = blkdev_fallocate, + .fop_flags = FOP_BUFFER_RASYNC, }; static __init int blkdev_init(void) diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 93ebedc5ec8c..c24ef4d3cf31 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -377,7 +377,7 @@ static const struct file_operations dax_fops = { .release = dax_release, .get_unmapped_area = dax_get_unmapped_area, .mmap = dax_mmap, - .mmap_supported_flags = MAP_SYNC, + .fop_flags = FOP_MMAP_SYNC, }; static void dev_dax_cdev_del(void *cdev) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index f9d76072398d..1640c46f2153 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -3719,8 +3719,7 @@ static int btrfs_file_open(struct inode *inode, struct file *filp) { int ret; - filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC | - FMODE_CAN_ODIRECT; + filp->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT; ret = fsverity_file_open(inode, filp); if (ret) @@ -3850,6 +3849,7 @@ const struct file_operations btrfs_file_operations = { .compat_ioctl = btrfs_compat_ioctl, #endif .remap_file_range = btrfs_remap_file_range, + .fop_flags = FOP_BUFFER_RASYNC | FOP_BUFFER_WASYNC, }; int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end) diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 54d6ff22585c..28c51b0cc4db 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -885,8 +885,7 @@ static int ext4_file_open(struct inode *inode, struct file *filp) return ret; } - filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | - FMODE_DIO_PARALLEL_WRITE; + filp->f_mode |= FMODE_NOWAIT; return 
dquot_file_open(inode, filp); } @@ -938,7 +937,6 @@ const struct file_operations ext4_file_operations = { .compat_ioctl = ext4_compat_ioctl, #endif .mmap = ext4_file_mmap, - .mmap_supported_flags = MAP_SYNC, .open = ext4_file_open, .release = ext4_release_file, .fsync = ext4_sync_file, @@ -946,6 +944,8 @@ const struct file_operations ext4_file_operations = { .splice_read = ext4_file_splice_read, .splice_write = iter_file_splice_write, .fallocate = ext4_fallocate, + .fop_flags = FOP_MMAP_SYNC | FOP_BUFFER_RASYNC | + FOP_DIO_PARALLEL_WRITE, }; const struct inode_operations ext4_file_inode_operations = { diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 1761ad125f97..2b65e09822d4 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -569,7 +569,7 @@ static int f2fs_file_open(struct inode *inode, struct file *filp) if (err) return err; - filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC; + filp->f_mode |= FMODE_NOWAIT; filp->f_mode |= FMODE_CAN_ODIRECT; return dquot_file_open(inode, filp); @@ -5045,4 +5045,5 @@ const struct file_operations f2fs_file_operations = { .splice_read = f2fs_file_splice_read, .splice_write = iter_file_splice_write, .fadvise = f2fs_file_fadvise, + .fop_flags = FOP_BUFFER_RASYNC, }; diff --git a/fs/read_write.c b/fs/read_write.c index d4c036e82b6c..2115d1f40bd5 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -1685,7 +1685,7 @@ int generic_write_checks_count(struct kiocb *iocb, loff_t *count) if ((iocb->ki_flags & IOCB_NOWAIT) && !((iocb->ki_flags & IOCB_DIRECT) || - (file->f_mode & FMODE_BUF_WASYNC))) + (file->f_op->fop_flags & FOP_BUFFER_WASYNC))) return -EINVAL; return generic_write_check_limits(iocb->ki_filp, iocb->ki_pos, count); diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 632653e00906..147439ad3581 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -1230,8 +1230,7 @@ xfs_file_open( { if (xfs_is_shutdown(XFS_M(inode->i_sb))) return -EIO; - file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC | FMODE_BUF_WASYNC | - FMODE_DIO_PARALLEL_WRITE | FMODE_CAN_ODIRECT; + file->f_mode |= FMODE_NOWAIT | FMODE_CAN_ODIRECT; return generic_file_open(inode, file); } @@ -1490,7 +1489,6 @@ const struct file_operations xfs_file_operations = { .compat_ioctl = xfs_file_compat_ioctl, #endif .mmap = xfs_file_mmap, - .mmap_supported_flags = MAP_SYNC, .open = xfs_file_open, .release = xfs_file_release, .fsync = xfs_file_fsync, @@ -1498,6 +1496,8 @@ const struct file_operations xfs_file_operations = { .fallocate = xfs_file_fallocate, .fadvise = xfs_file_fadvise, .remap_file_range = xfs_file_remap_range, + .fop_flags = FOP_MMAP_SYNC | FOP_BUFFER_RASYNC | FOP_BUFFER_WASYNC | + FOP_DIO_PARALLEL_WRITE, }; const struct file_operations xfs_dir_file_operations = { @@ -1510,4 +1510,6 @@ const struct file_operations xfs_dir_file_operations = { .compat_ioctl = xfs_file_compat_ioctl, #endif .fsync = xfs_dir_fsync, + .fop_flags = FOP_MMAP_SYNC | FOP_BUFFER_RASYNC | FOP_BUFFER_WASYNC | + FOP_DIO_PARALLEL_WRITE, }; diff --git a/include/linux/fs.h b/include/linux/fs.h index 914245085597..53cddf0131ca 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -163,9 +163,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, #define FMODE_NOREUSE ((__force fmode_t)0x800000) -/* File supports non-exclusive O_DIRECT writes from multiple threads */ -#define FMODE_DIO_PARALLEL_WRITE ((__force fmode_t)0x1000000) - /* File is embedded in backing_file object */ #define FMODE_BACKING ((__force fmode_t)0x2000000) @@ -181,12 +178,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, 
loff_t offset, /* File does not contribute to nr_files count */ #define FMODE_NOACCOUNT ((__force fmode_t)0x20000000) -/* File supports async buffered reads */ -#define FMODE_BUF_RASYNC ((__force fmode_t)0x40000000) - -/* File supports async nowait buffered writes */ -#define FMODE_BUF_WASYNC ((__force fmode_t)0x80000000) - /* * Attribute flags. These should be or-ed together to figure out what * has been changed! @@ -2001,8 +1992,11 @@ struct iov_iter; struct io_uring_cmd; struct offset_ctx; +typedef unsigned int __bitwise fop_flags_t; + struct file_operations { struct module *owner; + fop_flags_t fop_flags; loff_t (*llseek) (struct file *, loff_t, int); ssize_t (*read) (struct file *, char __user *, size_t, loff_t *); ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); @@ -2015,7 +2009,6 @@ struct file_operations { long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); long (*compat_ioctl) (struct file *, unsigned int, unsigned long); int (*mmap) (struct file *, struct vm_area_struct *); - unsigned long mmap_supported_flags; int (*open) (struct inode *, struct file *); int (*flush) (struct file *, fl_owner_t id); int (*release) (struct inode *, struct file *); @@ -2046,6 +2039,15 @@ struct file_operations { unsigned int poll_flags); } __randomize_layout; +/* Supports async buffered reads */ +#define FOP_BUFFER_RASYNC ((__force fop_flags_t)(1 << 0)) +/* Supports async buffered writes */ +#define FOP_BUFFER_WASYNC ((__force fop_flags_t)(1 << 1)) +/* Supports synchronous page faults for mappings */ +#define FOP_MMAP_SYNC ((__force fop_flags_t)(1 << 2)) +/* Supports non-exclusive O_DIRECT writes from multiple threads */ +#define FOP_DIO_PARALLEL_WRITE ((__force fop_flags_t)(1 << 3)) + /* Wrap a directory iterator that needs exclusive inode access */ int wrap_directory_iterator(struct file *, struct dir_context *, int (*) (struct file *, struct dir_context *)); diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 5d4b448fdc50..d73c9ad2d2f8 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -471,7 +471,7 @@ static void io_prep_async_work(struct io_kiocb *req) /* don't serialize this request if the fs doesn't need it */ if (should_hash && (req->file->f_flags & O_DIRECT) && - (req->file->f_mode & FMODE_DIO_PARALLEL_WRITE)) + (req->file->f_op->fop_flags & FOP_DIO_PARALLEL_WRITE)) should_hash = false; if (should_hash || (ctx->flags & IORING_SETUP_IOPOLL)) io_wq_hash_work(&req->work, file_inode(req->file)); diff --git a/io_uring/rw.c b/io_uring/rw.c index 0585ebcc9773..d9dfde1142a1 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -683,7 +683,8 @@ static bool io_rw_should_retry(struct io_kiocb *req) * just use poll if we can, and don't attempt if the fs doesn't * support callback based unlocks */ - if (io_file_can_poll(req) || !(req->file->f_mode & FMODE_BUF_RASYNC)) + if (io_file_can_poll(req) || + !(req->file->f_op->fop_flags & FOP_BUFFER_RASYNC)) return false; wait->wait.func = io_async_buf_func; @@ -1022,10 +1023,10 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags) if (unlikely(!io_file_supports_nowait(req))) goto copy_iov; - /* File path supports NOWAIT for non-direct_IO only for block devices. */ + /* Check if we can support NOWAIT. 
*/ if (!(kiocb->ki_flags & IOCB_DIRECT) && - !(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) && - (req->flags & REQ_F_ISREG)) + !(req->file->f_op->fop_flags & FOP_BUFFER_WASYNC) && + (req->flags & REQ_F_ISREG)) goto copy_iov; kiocb->ki_flags |= IOCB_NOWAIT; diff --git a/mm/mmap.c b/mm/mmap.c index 6dbda99a47da..3490af70f259 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1294,7 +1294,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr, if (!file_mmap_ok(file, inode, pgoff, len)) return -EOVERFLOW; - flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags; + flags_mask = LEGACY_MAP_MASK; + if (file->f_op->fop_flags & FOP_MMAP_SYNC) + flags_mask |= MAP_SYNC; switch (flags & MAP_TYPE) { case MAP_SHARED: -- cgit From 0a4f544d83990e4b7326020a802d64956fa366de Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Sun, 7 Apr 2024 15:16:55 +0200 Subject: fs: use bit shifts for FMODE_* flags Make it easier to see what bits are still available. Link: https://lore.kernel.org/r/20240406061604.GA538574@ZenIV Signed-off-by: Christian Brauner --- include/linux/fs.h | 57 +++++++++++++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index 53cddf0131ca..2ba98930c4c4 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -110,21 +110,24 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, */ /* file is open for reading */ -#define FMODE_READ ((__force fmode_t)0x1) +#define FMODE_READ ((__force fmode_t)(1 << 0)) /* file is open for writing */ -#define FMODE_WRITE ((__force fmode_t)0x2) +#define FMODE_WRITE ((__force fmode_t)(1 << 1)) /* file is seekable */ -#define FMODE_LSEEK ((__force fmode_t)0x4) +#define FMODE_LSEEK ((__force fmode_t)(1 << 2)) /* file can be accessed using pread */ -#define FMODE_PREAD ((__force fmode_t)0x8) +#define FMODE_PREAD ((__force fmode_t)(1 << 3)) /* file can be accessed using pwrite */ -#define FMODE_PWRITE ((__force fmode_t)0x10) +#define FMODE_PWRITE ((__force fmode_t)(1 << 4)) /* File is opened for execution with sys_execve / sys_uselib */ -#define FMODE_EXEC ((__force fmode_t)0x20) +#define FMODE_EXEC ((__force fmode_t)(1 << 5)) + +/* FMODE_* bits 6 to 8 */ + /* 32bit hashes as llseek() offset (for directories) */ -#define FMODE_32BITHASH ((__force fmode_t)0x200) +#define FMODE_32BITHASH ((__force fmode_t)(1 << 9)) /* 64bit hashes as llseek() offset (for directories) */ -#define FMODE_64BITHASH ((__force fmode_t)0x400) +#define FMODE_64BITHASH ((__force fmode_t)(1 << 10)) /* * Don't update ctime and mtime. @@ -132,51 +135,53 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, * Currently a special hack for the XFS open_by_handle ioctl, but we'll * hopefully graduate it to a proper O_CMTIME flag supported by open(2) soon. */ -#define FMODE_NOCMTIME ((__force fmode_t)0x800) +#define FMODE_NOCMTIME ((__force fmode_t)(1 << 11)) /* Expect random access pattern */ -#define FMODE_RANDOM ((__force fmode_t)0x1000) +#define FMODE_RANDOM ((__force fmode_t)(1 << 12)) /* File is huge (eg. 
/dev/mem): treat loff_t as unsigned */ -#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)0x2000) +#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)(1 << 13)) /* File is opened with O_PATH; almost nothing can be done with it */ -#define FMODE_PATH ((__force fmode_t)0x4000) +#define FMODE_PATH ((__force fmode_t)(1 << 14)) /* File needs atomic accesses to f_pos */ -#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000) +#define FMODE_ATOMIC_POS ((__force fmode_t)(1 << 15)) /* Write access to underlying fs */ -#define FMODE_WRITER ((__force fmode_t)0x10000) +#define FMODE_WRITER ((__force fmode_t)(1 << 16)) /* Has read method(s) */ -#define FMODE_CAN_READ ((__force fmode_t)0x20000) +#define FMODE_CAN_READ ((__force fmode_t)(1 << 17)) /* Has write method(s) */ -#define FMODE_CAN_WRITE ((__force fmode_t)0x40000) +#define FMODE_CAN_WRITE ((__force fmode_t)(1 << 18)) -#define FMODE_OPENED ((__force fmode_t)0x80000) -#define FMODE_CREATED ((__force fmode_t)0x100000) +#define FMODE_OPENED ((__force fmode_t)(1 << 19)) +#define FMODE_CREATED ((__force fmode_t)(1 << 20)) /* File is stream-like */ -#define FMODE_STREAM ((__force fmode_t)0x200000) +#define FMODE_STREAM ((__force fmode_t)(1 << 21)) /* File supports DIRECT IO */ -#define FMODE_CAN_ODIRECT ((__force fmode_t)0x400000) +#define FMODE_CAN_ODIRECT ((__force fmode_t)(1 << 22)) + +#define FMODE_NOREUSE ((__force fmode_t)(1 << 23)) -#define FMODE_NOREUSE ((__force fmode_t)0x800000) +/* FMODE_* bit 24 */ /* File is embedded in backing_file object */ -#define FMODE_BACKING ((__force fmode_t)0x2000000) +#define FMODE_BACKING ((__force fmode_t)(1 << 25)) /* File was opened by fanotify and shouldn't generate fanotify events */ -#define FMODE_NONOTIFY ((__force fmode_t)0x4000000) +#define FMODE_NONOTIFY ((__force fmode_t)(1 << 26)) /* File is capable of returning -EAGAIN if I/O will block */ -#define FMODE_NOWAIT ((__force fmode_t)0x8000000) +#define FMODE_NOWAIT ((__force fmode_t)(1 << 27)) /* File represents mount that needs unmounting */ -#define FMODE_NEED_UNMOUNT ((__force fmode_t)0x10000000) +#define FMODE_NEED_UNMOUNT ((__force fmode_t)(1 << 28)) /* File does not contribute to nr_files count */ -#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000) +#define FMODE_NOACCOUNT ((__force fmode_t)(1 << 29)) /* * Attribute flags. These should be or-ed together to figure out what -- cgit From 6648e613226e18897231ab5e42ffc29e63fa3365 Mon Sep 17 00:00:00 2001 From: Jason Xing Date: Thu, 4 Apr 2024 10:10:01 +0800 Subject: bpf, skmsg: Fix NULL pointer dereference in sk_psock_skb_ingress_enqueue Fix NULL pointer data-races in sk_psock_skb_ingress_enqueue() which syzbot reported [1]. 
[1] BUG: KCSAN: data-race in sk_psock_drop / sk_psock_skb_ingress_enqueue write to 0xffff88814b3278b8 of 8 bytes by task 10724 on cpu 1: sk_psock_stop_verdict net/core/skmsg.c:1257 [inline] sk_psock_drop+0x13e/0x1f0 net/core/skmsg.c:843 sk_psock_put include/linux/skmsg.h:459 [inline] sock_map_close+0x1a7/0x260 net/core/sock_map.c:1648 unix_release+0x4b/0x80 net/unix/af_unix.c:1048 __sock_release net/socket.c:659 [inline] sock_close+0x68/0x150 net/socket.c:1421 __fput+0x2c1/0x660 fs/file_table.c:422 __fput_sync+0x44/0x60 fs/file_table.c:507 __do_sys_close fs/open.c:1556 [inline] __se_sys_close+0x101/0x1b0 fs/open.c:1541 __x64_sys_close+0x1f/0x30 fs/open.c:1541 do_syscall_64+0xd3/0x1d0 entry_SYSCALL_64_after_hwframe+0x6d/0x75 read to 0xffff88814b3278b8 of 8 bytes by task 10713 on cpu 0: sk_psock_data_ready include/linux/skmsg.h:464 [inline] sk_psock_skb_ingress_enqueue+0x32d/0x390 net/core/skmsg.c:555 sk_psock_skb_ingress_self+0x185/0x1e0 net/core/skmsg.c:606 sk_psock_verdict_apply net/core/skmsg.c:1008 [inline] sk_psock_verdict_recv+0x3e4/0x4a0 net/core/skmsg.c:1202 unix_read_skb net/unix/af_unix.c:2546 [inline] unix_stream_read_skb+0x9e/0xf0 net/unix/af_unix.c:2682 sk_psock_verdict_data_ready+0x77/0x220 net/core/skmsg.c:1223 unix_stream_sendmsg+0x527/0x860 net/unix/af_unix.c:2339 sock_sendmsg_nosec net/socket.c:730 [inline] __sock_sendmsg+0x140/0x180 net/socket.c:745 ____sys_sendmsg+0x312/0x410 net/socket.c:2584 ___sys_sendmsg net/socket.c:2638 [inline] __sys_sendmsg+0x1e9/0x280 net/socket.c:2667 __do_sys_sendmsg net/socket.c:2676 [inline] __se_sys_sendmsg net/socket.c:2674 [inline] __x64_sys_sendmsg+0x46/0x50 net/socket.c:2674 do_syscall_64+0xd3/0x1d0 entry_SYSCALL_64_after_hwframe+0x6d/0x75 value changed: 0xffffffff83d7feb0 -> 0x0000000000000000 Reported by Kernel Concurrency Sanitizer on: CPU: 0 PID: 10713 Comm: syz-executor.4 Tainted: G W 6.8.0-syzkaller-08951-gfe46a7dd189e #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 02/29/2024 Prior to this, commit 4cd12c6065df ("bpf, sockmap: Fix NULL pointer dereference in sk_psock_verdict_data_ready()") fixed one NULL pointer similarly due to no protection of saved_data_ready. Here is another different caller causing the same issue because of the same reason. So we should protect it with sk_callback_lock read lock because the writer side in the sk_psock_drop() uses "write_lock_bh(&sk->sk_callback_lock);". To avoid errors that could happen in future, I move those two pairs of lock into the sk_psock_data_ready(), which is suggested by John Fastabend. 
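For reference only, not taken from the patch: the resulting reader/writer pairing on sk_callback_lock looks roughly like the sketch below. The reader side is what sk_psock_data_ready() becomes in the hunk that follows; the writer side mirrors the existing sk_psock_drop()/sk_psock_stop_verdict() path quoted in the report above. The example_* function names are hypothetical:

#include <net/sock.h>
#include <linux/skmsg.h>

/* reader: the saved callback cannot be cleared while we dereference it */
static void example_guarded_data_ready(struct sock *sk, struct sk_psock *psock)
{
	read_lock_bh(&sk->sk_callback_lock);
	if (psock->saved_data_ready)
		psock->saved_data_ready(sk);
	else
		sk->sk_data_ready(sk);
	read_unlock_bh(&sk->sk_callback_lock);
}

/* writer (drop path): restore the original callback and clear the saved one */
static void example_guarded_teardown(struct sock *sk, struct sk_psock *psock)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}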
Fixes: 604326b41a6f ("bpf, sockmap: convert to generic sk_msg interface") Reported-by: syzbot+aa8c8ec2538929f18f2d@syzkaller.appspotmail.com Signed-off-by: Jason Xing Signed-off-by: Daniel Borkmann Reviewed-by: John Fastabend Closes: https://syzkaller.appspot.com/bug?extid=aa8c8ec2538929f18f2d Link: https://lore.kernel.org/all/20240329134037.92124-1-kerneljasonxing@gmail.com Link: https://lore.kernel.org/bpf/20240404021001.94815-1-kerneljasonxing@gmail.com --- include/linux/skmsg.h | 2 ++ net/core/skmsg.c | 5 +---- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index e65ec3fd2799..a509caf823d6 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -461,10 +461,12 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock) static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock) { + read_lock_bh(&sk->sk_callback_lock); if (psock->saved_data_ready) psock->saved_data_ready(sk); else sk->sk_data_ready(sk); + read_unlock_bh(&sk->sk_callback_lock); } static inline void psock_set_prog(struct bpf_prog **pprog, diff --git a/net/core/skmsg.c b/net/core/skmsg.c index 4d75ef9d24bf..fd20aae30be2 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -1226,11 +1226,8 @@ static void sk_psock_verdict_data_ready(struct sock *sk) rcu_read_lock(); psock = sk_psock(sk); - if (psock) { - read_lock_bh(&sk->sk_callback_lock); + if (psock) sk_psock_data_ready(sk, psock); - read_unlock_bh(&sk->sk_callback_lock); - } rcu_read_unlock(); } } -- cgit From d2c3a7eb1afada8cfb5fde41489913ea5733a319 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 5 Apr 2024 10:29:26 +0000 Subject: tcp: more struct tcp_sock adjustments tp->recvmsg_inq is used from tcp recvmsg() thus should be in tcp_sock_read_rx group. tp->tcp_clock_cache and tp->tcp_mstamp are written both in rx and tx paths, thus are better placed in tcp_sock_write_txrx group. Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller --- include/linux/tcp.h | 6 +++--- net/ipv4/tcp.c | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 55399ee2a57e..6a5e08b937b3 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -244,6 +244,7 @@ struct tcp_sock { /* OOO segments go in this rbtree. Socket lock must be held. 
*/ struct rb_root out_of_order_queue; u32 snd_ssthresh; /* Slow start size threshold */ + u8 recvmsg_inq : 1;/* Indicate # of bytes in queue upon recvmsg */ __cacheline_group_end(tcp_sock_read_rx); /* TX read-write hotpath cache lines */ @@ -266,8 +267,6 @@ struct tcp_sock { u32 mdev_us; /* medium deviation */ u32 rtt_seq; /* sequence number to update rttvar */ u64 tcp_wstamp_ns; /* departure time for next sent data packet */ - u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */ - u64 tcp_mstamp; /* most recent packet received/sent */ struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */ struct sk_buff *highest_sack; /* skb just after the highest * skb with SACKed bit set @@ -284,6 +283,8 @@ struct tcp_sock { * 0x5?10 << 16 + snd_wnd in net byte order */ __be32 pred_flags; + u64 tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */ + u64 tcp_mstamp; /* most recent packet received/sent */ u32 rcv_nxt; /* What we want to receive next */ u32 snd_nxt; /* Next sequence we send */ u32 snd_una; /* First byte we want an ack for */ @@ -370,7 +371,6 @@ struct tcp_sock { tlp_retrans:1, /* TLP is a retransmission */ unused:5; u8 thin_lto : 1,/* Use linear timeouts for thin streams */ - recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */ fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */ fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */ fastopen_client_fail:2, /* reason why fastopen failed */ diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 92ee60492314..664c8ecb076b 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -4648,16 +4648,16 @@ static void __init tcp_struct_check(void) CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, lsndtime); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, mdev_us); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_wstamp_ns); - CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_clock_cache); - CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tcp_mstamp); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, rtt_seq); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, tsorted_sent_queue); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, highest_sack); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_tx, ecn_flags); - CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 105); + CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_tx, 89); /* TXRX read-write hotpath cache lines */ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, pred_flags); + CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_clock_cache); + CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, tcp_mstamp); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_nxt); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_nxt); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, snd_una); @@ -4670,7 +4670,7 @@ static void __init tcp_struct_check(void) CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, app_limited); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rcv_wnd); CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_txrx, rx_opt); - CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 76); + CACHELINE_ASSERT_GROUP_SIZE(struct tcp_sock, tcp_sock_write_txrx, 92); /* RX 
read-write hotpath cache lines */ CACHELINE_ASSERT_GROUP_MEMBER(struct tcp_sock, tcp_sock_write_rx, bytes_received); -- cgit From 5e5e51422cd189bc1b627f619f0f99324e6e4de9 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Mon, 25 Mar 2024 08:40:08 +0200 Subject: math64: Tidy up mul_u64_u32_shr() Put together declaration and initialization of local variables. Suggested-by: Thomas Gleixner Signed-off-by: Adrian Hunter Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240325064023.2997-5-adrian.hunter@intel.com --- include/linux/math64.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/math64.h b/include/linux/math64.h index bf74478926d4..fd13622b2056 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -179,16 +179,12 @@ static __always_inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift) #ifndef mul_u64_u32_shr static __always_inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) { - u32 ah, al; + u32 ah = a >> 32, al = a; u64 ret; - al = a; - ah = a >> 32; - ret = mul_u32_u32(al, mul) >> shift; if (ah) ret += mul_u32_u32(ah, mul) << (32 - shift); - return ret; } #endif /* mul_u64_u32_shr */ -- cgit From 1beb35ec615f676d49d68b6dc23c7418ba8ff145 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Mon, 25 Mar 2024 08:40:09 +0200 Subject: vdso, math64: Provide mul_u64_u32_add_u64_shr() Provide mul_u64_u32_add_u64_shr() which is a calculation that will be used by timekeeping and VDSO. Place #include after #include to allow architecture-specific overrides, at least for the kernel. Signed-off-by: Adrian Hunter Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240325064023.2997-6-adrian.hunter@intel.com --- include/linux/math64.h | 2 +- include/vdso/math64.h | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/math64.h b/include/linux/math64.h index fd13622b2056..d34def7f9a8c 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -4,8 +4,8 @@ #include #include -#include #include +#include #if BITS_PER_LONG == 64 diff --git a/include/vdso/math64.h b/include/vdso/math64.h index 7da703ee5561..22ae212f8b28 100644 --- a/include/vdso/math64.h +++ b/include/vdso/math64.h @@ -21,4 +21,42 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) return ret; } +#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) + +#ifndef mul_u64_u32_add_u64_shr +static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift) +{ + return (u64)((((unsigned __int128)a * mul) + b) >> shift); +} +#endif /* mul_u64_u32_add_u64_shr */ + +#else + +#ifndef mul_u64_u32_add_u64_shr +#ifndef mul_u32_u32 +static inline u64 mul_u32_u32(u32 a, u32 b) +{ + return (u64)a * b; +} +#define mul_u32_u32 mul_u32_u32 +#endif +static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift) +{ + u32 ah = a >> 32, al = a; + bool ovf; + u64 ret; + + ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret); + ret >>= shift; + if (ovf && shift) + ret += 1ULL << (64 - shift); + if (ah) + ret += mul_u32_u32(ah, mul) << (32 - shift); + + return ret; +} +#endif /* mul_u64_u32_add_u64_shr */ + +#endif + #endif /* __VDSO_MATH64_H */ -- cgit From 70ee853eec5693fefd8348a2b049d9cb83362e58 Mon Sep 17 00:00:00 2001 From: Richard Fitzgerald Date: Mon, 8 Apr 2024 11:18:00 +0100 Subject: regmap: Add regmap_read_bypassed() Add a 
regmap_read_bypassed() to allow reads from the hardware registers while the regmap is in cache-only mode. A typical use for this is to keep the cache in cache-only mode until the hardware has reached a valid state, but one or more status registers must be polled to determine when this state is reached. For example, firmware download on the cs35l56 can take several seconds if there are multiple amps sharing limited bus bandwidth. This is too long to block in probe() so it is done as a background task. The device must be soft-reset to reboot the firmware and during this time the registers are not accessible, so the cache should be in cache-only. But the driver must poll a register to detect when reboot has completed. Signed-off-by: Richard Fitzgerald Fixes: 8a731fd37f8b ("ASoC: cs35l56: Move utility functions to shared file") Link: https://msgid.link/r/20240408101803.43183-2-rf@opensource.cirrus.com Signed-off-by: Mark Brown --- drivers/base/regmap/regmap.c | 37 +++++++++++++++++++++++++++++++++++++ include/linux/regmap.h | 8 ++++++++ 2 files changed, 45 insertions(+) (limited to 'include/linux') diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 5cb425f6f02d..0a34dd3c4f38 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -2838,6 +2838,43 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val) } EXPORT_SYMBOL_GPL(regmap_read); +/** + * regmap_read_bypassed() - Read a value from a single register direct + * from the device, bypassing the cache + * + * @map: Register map to read from + * @reg: Register to be read from + * @val: Pointer to store read value + * + * A value of zero will be returned on success, a negative errno will + * be returned in error cases. + */ +int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val) +{ + int ret; + bool bypass, cache_only; + + if (!IS_ALIGNED(reg, map->reg_stride)) + return -EINVAL; + + map->lock(map->lock_arg); + + bypass = map->cache_bypass; + cache_only = map->cache_only; + map->cache_bypass = true; + map->cache_only = false; + + ret = _regmap_read(map, reg, val); + + map->cache_bypass = bypass; + map->cache_only = cache_only; + + map->unlock(map->lock_arg); + + return ret; +} +EXPORT_SYMBOL_GPL(regmap_read_bypassed); + /** * regmap_raw_read() - Read raw data from the device * diff --git a/include/linux/regmap.h b/include/linux/regmap.h index b743241cfb7c..d470303b1bbb 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1230,6 +1230,7 @@ int regmap_multi_reg_write_bypassed(struct regmap *map, int regmap_raw_write_async(struct regmap *map, unsigned int reg, const void *val, size_t val_len); int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val); +int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val); int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, size_t val_len); int regmap_noinc_read(struct regmap *map, unsigned int reg, @@ -1739,6 +1740,13 @@ static inline int regmap_read(struct regmap *map, unsigned int reg, return -EINVAL; } +static inline int regmap_read_bypassed(struct regmap *map, unsigned int reg, + unsigned int *val) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + static inline int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, size_t val_len) { -- cgit From daeeb032f42d066a49e07b7f6effc9f51b7a5479 Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Tue, 2 Apr 2024 20:56:43 +0200 Subject: thermal: core: Move threshold out of struct thermal_trip The threshold field in struct thermal_trip is only used internally by the thermal core and it is better to prevent drivers from misusing it. It also takes some space unnecessarily in the trip tables passed by drivers to the core during thermal zone registration. For this reason, introduce struct thermal_trip_desc as a wrapper around struct thermal_trip, move the threshold field directly into it and make the thermal core store struct thermal_trip_desc objects in the internal thermal zone trip tables. Adjust all of the code using trip tables in the thermal core accordingly. No intentional functional impact. Signed-off-by: Rafael J. Wysocki Reviewed-by: Lukasz Luba --- drivers/thermal/gov_fair_share.c | 7 ++++-- drivers/thermal/gov_power_allocator.c | 6 +++-- drivers/thermal/thermal_core.c | 46 ++++++++++++++++++++--------------- drivers/thermal/thermal_core.h | 7 ++++-- drivers/thermal/thermal_debugfs.c | 6 +++-- drivers/thermal/thermal_helpers.c | 8 +++--- drivers/thermal/thermal_netlink.c | 6 +++-- drivers/thermal/thermal_sysfs.c | 20 +++++++-------- drivers/thermal/thermal_trip.c | 15 ++++++------ include/linux/thermal.h | 9 ++++--- 10 files changed, 78 insertions(+), 52 deletions(-) (limited to 'include/linux') diff --git a/drivers/thermal/gov_fair_share.c b/drivers/thermal/gov_fair_share.c index 4da25a0009d7..6ef8cfde7749 100644 --- a/drivers/thermal/gov_fair_share.c +++ b/drivers/thermal/gov_fair_share.c @@ -17,10 +17,13 @@ static int get_trip_level(struct thermal_zone_device *tz) { - const struct thermal_trip *trip, *level_trip = NULL; + const struct thermal_trip *level_trip = NULL; + const struct thermal_trip_desc *td; int trip_level = -1; - for_each_trip(tz, trip) { + for_each_trip_desc(tz, td) { + const struct thermal_trip *trip = &td->trip; + if (trip->temperature >= tz->temperature) continue; diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c index e25e48d76aa7..ac1d02193a1b 100644 --- a/drivers/thermal/gov_power_allocator.c +++ b/drivers/thermal/gov_power_allocator.c @@ -496,9 +496,11 @@ static void get_governor_trips(struct thermal_zone_device *tz, const struct thermal_trip *first_passive = NULL; const struct thermal_trip *last_passive = NULL; const struct thermal_trip *last_active = NULL; - const struct thermal_trip *trip; + const struct thermal_trip_desc *td; + + for_each_trip_desc(tz, td) { + const struct thermal_trip *trip = &td->trip; - for_each_trip(tz, trip) { switch (trip->type) { case THERMAL_TRIP_PASSIVE: if (!first_passive) { diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 34a31bc72023..fc6ff0a4faa1 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -361,17 +361,19 @@ static void handle_critical_trips(struct thermal_zone_device *tz, } static void handle_thermal_trip(struct thermal_zone_device *tz, - struct thermal_trip *trip) + struct thermal_trip_desc *td) { + const struct thermal_trip *trip = &td->trip; + if (trip->temperature == THERMAL_TEMP_INVALID) return; if (tz->last_temperature == THERMAL_TEMP_INVALID) { /* Initialization. 
*/ - trip->threshold = trip->temperature; - if (tz->temperature >= trip->threshold) - trip->threshold -= trip->hysteresis; - } else if (tz->last_temperature < trip->threshold) { + td->threshold = trip->temperature; + if (tz->temperature >= td->threshold) + td->threshold -= trip->hysteresis; + } else if (tz->last_temperature < td->threshold) { /* * The trip threshold is equal to the trip temperature, unless * the latter has changed in the meantime. In either case, @@ -382,9 +384,9 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, if (tz->temperature >= trip->temperature) { thermal_notify_tz_trip_up(tz, trip); thermal_debug_tz_trip_up(tz, trip); - trip->threshold = trip->temperature - trip->hysteresis; + td->threshold = trip->temperature - trip->hysteresis; } else { - trip->threshold = trip->temperature; + td->threshold = trip->temperature; } } else { /* @@ -400,9 +402,9 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, if (tz->temperature < trip->temperature - trip->hysteresis) { thermal_notify_tz_trip_down(tz, trip); thermal_debug_tz_trip_down(tz, trip); - trip->threshold = trip->temperature; + td->threshold = trip->temperature; } else { - trip->threshold = trip->temperature - trip->hysteresis; + td->threshold = trip->temperature - trip->hysteresis; } } @@ -458,7 +460,7 @@ static void thermal_zone_device_init(struct thermal_zone_device *tz) void __thermal_zone_device_update(struct thermal_zone_device *tz, enum thermal_notify_event event) { - struct thermal_trip *trip; + struct thermal_trip_desc *td; if (tz->suspended) return; @@ -472,8 +474,8 @@ void __thermal_zone_device_update(struct thermal_zone_device *tz, tz->notify_event = event; - for_each_trip(tz, trip) - handle_thermal_trip(tz, trip); + for_each_trip_desc(tz, td) + handle_thermal_trip(tz, td); monitor_thermal_zone(tz); } @@ -766,7 +768,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, if (trip_index < 0 || trip_index >= tz->num_trips) return -EINVAL; - return thermal_bind_cdev_to_trip(tz, &tz->trips[trip_index], cdev, + return thermal_bind_cdev_to_trip(tz, &tz->trips[trip_index].trip, cdev, upper, lower, weight); } EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device); @@ -825,7 +827,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz, if (trip_index < 0 || trip_index >= tz->num_trips) return -EINVAL; - return thermal_unbind_cdev_from_trip(tz, &tz->trips[trip_index], cdev); + return thermal_unbind_cdev_from_trip(tz, &tz->trips[trip_index].trip, cdev); } EXPORT_SYMBOL_GPL(thermal_zone_unbind_cooling_device); @@ -1221,16 +1223,19 @@ static void thermal_set_delay_jiffies(unsigned long *delay_jiffies, int delay_ms int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp) { - int i, ret = -EINVAL; + const struct thermal_trip_desc *td; + int ret = -EINVAL; if (tz->ops.get_crit_temp) return tz->ops.get_crit_temp(tz, temp); mutex_lock(&tz->lock); - for (i = 0; i < tz->num_trips; i++) { - if (tz->trips[i].type == THERMAL_TRIP_CRITICAL) { - *temp = tz->trips[i].temperature; + for_each_trip_desc(tz, td) { + const struct thermal_trip *trip = &td->trip; + + if (trip->type == THERMAL_TRIP_CRITICAL) { + *temp = trip->temperature; ret = 0; break; } @@ -1274,7 +1279,9 @@ thermal_zone_device_register_with_trips(const char *type, const struct thermal_zone_params *tzp, int passive_delay, int polling_delay) { + const struct thermal_trip *trip = trips; struct thermal_zone_device *tz; + struct thermal_trip_desc *td; int id; int result; struct thermal_governor 
*governor; @@ -1339,7 +1346,8 @@ thermal_zone_device_register_with_trips(const char *type, tz->device.class = thermal_class; tz->devdata = devdata; tz->num_trips = num_trips; - memcpy(tz->trips, trips, num_trips * sizeof(*trips)); + for_each_trip_desc(tz, td) + td->trip = *trip++; thermal_set_delay_jiffies(&tz->passive_delay_jiffies, passive_delay); thermal_set_delay_jiffies(&tz->polling_delay_jiffies, polling_delay); diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 0d8a42bb7ce8..4ad9c671bc13 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -120,8 +120,11 @@ void thermal_governor_update_tz(struct thermal_zone_device *tz, enum thermal_notify_event reason); /* Helpers */ -#define for_each_trip(__tz, __trip) \ - for (__trip = __tz->trips; __trip - __tz->trips < __tz->num_trips; __trip++) +#define for_each_trip_desc(__tz, __td) \ + for (__td = __tz->trips; __td - __tz->trips < __tz->num_trips; __td++) + +#define trip_to_trip_desc(__trip) \ + container_of(__trip, struct thermal_trip_desc, trip) void __thermal_zone_set_trips(struct thermal_zone_device *tz); int thermal_zone_trip_id(const struct thermal_zone_device *tz, diff --git a/drivers/thermal/thermal_debugfs.c b/drivers/thermal/thermal_debugfs.c index c617e8b9f0dd..78c4cb30e91d 100644 --- a/drivers/thermal/thermal_debugfs.c +++ b/drivers/thermal/thermal_debugfs.c @@ -744,7 +744,7 @@ static void tze_seq_stop(struct seq_file *s, void *v) static int tze_seq_show(struct seq_file *s, void *v) { struct thermal_zone_device *tz = s->private; - struct thermal_trip *trip; + struct thermal_trip_desc *td; struct tz_episode *tze; const char *type; int trip_id; @@ -757,7 +757,9 @@ static int tze_seq_show(struct seq_file *s, void *v) seq_printf(s, "| trip | type | temp(°mC) | hyst(°mC) | duration | avg(°mC) | min(°mC) | max(°mC) |\n"); - for_each_trip(tz, trip) { + for_each_trip_desc(tz, td) { + const struct thermal_trip *trip = &td->trip; + /* * There is no possible mitigation happening at the * critical trip point, so the stats will be always diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c index c5a057b59c42..d9f4e26ec125 100644 --- a/drivers/thermal/thermal_helpers.c +++ b/drivers/thermal/thermal_helpers.c @@ -50,7 +50,7 @@ get_thermal_instance(struct thermal_zone_device *tz, mutex_lock(&tz->lock); mutex_lock(&cdev->lock); - trip = &tz->trips[trip_index]; + trip = &tz->trips[trip_index].trip; list_for_each_entry(pos, &tz->thermal_instances, tz_node) { if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { @@ -82,7 +82,7 @@ EXPORT_SYMBOL(get_thermal_instance); */ int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp) { - const struct thermal_trip *trip; + const struct thermal_trip_desc *td; int crit_temp = INT_MAX; int ret = -EINVAL; @@ -91,7 +91,9 @@ int __thermal_zone_get_temp(struct thermal_zone_device *tz, int *temp) ret = tz->ops.get_temp(tz, temp); if (IS_ENABLED(CONFIG_THERMAL_EMULATION) && tz->emul_temperature) { - for_each_trip(tz, trip) { + for_each_trip_desc(tz, td) { + const struct thermal_trip *trip = &td->trip; + if (trip->type == THERMAL_TRIP_CRITICAL) { crit_temp = trip->temperature; break; diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c index 76a231a29654..0ef6368da766 100644 --- a/drivers/thermal/thermal_netlink.c +++ b/drivers/thermal/thermal_netlink.c @@ -445,7 +445,7 @@ out_cancel_nest: static int thermal_genl_cmd_tz_get_trip(struct param *p) { struct sk_buff *msg = 
p->msg; - const struct thermal_trip *trip; + const struct thermal_trip_desc *td; struct thermal_zone_device *tz; struct nlattr *start_trip; int id; @@ -465,7 +465,9 @@ static int thermal_genl_cmd_tz_get_trip(struct param *p) mutex_lock(&tz->lock); - for_each_trip(tz, trip) { + for_each_trip_desc(tz, td) { + const struct thermal_trip *trip = &td->trip; + if (nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, thermal_zone_trip_id(tz, trip)) || nla_put_u32(msg, THERMAL_GENL_ATTR_TZ_TRIP_TYPE, trip->type) || diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c index 5b533fa40437..88211ccdfbd6 100644 --- a/drivers/thermal/thermal_sysfs.c +++ b/drivers/thermal/thermal_sysfs.c @@ -88,7 +88,7 @@ trip_point_type_show(struct device *dev, struct device_attribute *attr, if (sscanf(attr->attr.name, "trip_point_%d_type", &trip_id) != 1) return -EINVAL; - switch (tz->trips[trip_id].type) { + switch (tz->trips[trip_id].trip.type) { case THERMAL_TRIP_CRITICAL: return sprintf(buf, "critical\n"); case THERMAL_TRIP_HOT: @@ -120,7 +120,7 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr, mutex_lock(&tz->lock); - trip = &tz->trips[trip_id]; + trip = &tz->trips[trip_id].trip; if (temp != trip->temperature) { if (tz->ops.set_trip_temp) { @@ -150,7 +150,7 @@ trip_point_temp_show(struct device *dev, struct device_attribute *attr, if (sscanf(attr->attr.name, "trip_point_%d_temp", &trip_id) != 1) return -EINVAL; - return sprintf(buf, "%d\n", tz->trips[trip_id].temperature); + return sprintf(buf, "%d\n", tz->trips[trip_id].trip.temperature); } static ssize_t @@ -171,7 +171,7 @@ trip_point_hyst_store(struct device *dev, struct device_attribute *attr, mutex_lock(&tz->lock); - trip = &tz->trips[trip_id]; + trip = &tz->trips[trip_id].trip; if (hyst != trip->hysteresis) { trip->hysteresis = hyst; @@ -194,7 +194,7 @@ trip_point_hyst_show(struct device *dev, struct device_attribute *attr, if (sscanf(attr->attr.name, "trip_point_%d_hyst", &trip_id) != 1) return -EINVAL; - return sprintf(buf, "%d\n", tz->trips[trip_id].hysteresis); + return sprintf(buf, "%d\n", tz->trips[trip_id].trip.hysteresis); } static ssize_t @@ -393,7 +393,7 @@ static const struct attribute_group *thermal_zone_attribute_groups[] = { */ static int create_trip_attrs(struct thermal_zone_device *tz) { - const struct thermal_trip *trip; + const struct thermal_trip_desc *td; struct attribute **attrs; /* This function works only for zones with at least one trip */ @@ -429,8 +429,8 @@ static int create_trip_attrs(struct thermal_zone_device *tz) return -ENOMEM; } - for_each_trip(tz, trip) { - int indx = thermal_zone_trip_id(tz, trip); + for_each_trip_desc(tz, td) { + int indx = thermal_zone_trip_id(tz, &td->trip); /* create trip type attribute */ snprintf(tz->trip_type_attrs[indx].name, THERMAL_NAME_LENGTH, @@ -452,7 +452,7 @@ static int create_trip_attrs(struct thermal_zone_device *tz) tz->trip_temp_attrs[indx].name; tz->trip_temp_attrs[indx].attr.attr.mode = S_IRUGO; tz->trip_temp_attrs[indx].attr.show = trip_point_temp_show; - if (trip->flags & THERMAL_TRIP_FLAG_RW_TEMP) { + if (td->trip.flags & THERMAL_TRIP_FLAG_RW_TEMP) { tz->trip_temp_attrs[indx].attr.attr.mode |= S_IWUSR; tz->trip_temp_attrs[indx].attr.store = trip_point_temp_store; @@ -467,7 +467,7 @@ static int create_trip_attrs(struct thermal_zone_device *tz) tz->trip_hyst_attrs[indx].name; tz->trip_hyst_attrs[indx].attr.attr.mode = S_IRUGO; tz->trip_hyst_attrs[indx].attr.show = trip_point_hyst_show; - if (trip->flags & THERMAL_TRIP_FLAG_RW_HYST) { + if 
(td->trip.flags & THERMAL_TRIP_FLAG_RW_HYST) { tz->trip_hyst_attrs[indx].attr.attr.mode |= S_IWUSR; tz->trip_hyst_attrs[indx].attr.store = trip_point_hyst_store; diff --git a/drivers/thermal/thermal_trip.c b/drivers/thermal/thermal_trip.c index 497abf0d47ca..7cf43b697272 100644 --- a/drivers/thermal/thermal_trip.c +++ b/drivers/thermal/thermal_trip.c @@ -13,11 +13,11 @@ int for_each_thermal_trip(struct thermal_zone_device *tz, int (*cb)(struct thermal_trip *, void *), void *data) { - struct thermal_trip *trip; + struct thermal_trip_desc *td; int ret; - for_each_trip(tz, trip) { - ret = cb(trip, data); + for_each_trip_desc(tz, td) { + ret = cb(&td->trip, data); if (ret) return ret; } @@ -63,7 +63,7 @@ EXPORT_SYMBOL_GPL(thermal_zone_get_num_trips); */ void __thermal_zone_set_trips(struct thermal_zone_device *tz) { - const struct thermal_trip *trip; + const struct thermal_trip_desc *td; int low = -INT_MAX, high = INT_MAX; int ret; @@ -72,7 +72,8 @@ void __thermal_zone_set_trips(struct thermal_zone_device *tz) if (!tz->ops.set_trips) return; - for_each_trip(tz, trip) { + for_each_trip_desc(tz, td) { + const struct thermal_trip *trip = &td->trip; int trip_low; trip_low = trip->temperature - trip->hysteresis; @@ -110,7 +111,7 @@ int __thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id, if (!tz || trip_id < 0 || trip_id >= tz->num_trips || !trip) return -EINVAL; - *trip = tz->trips[trip_id]; + *trip = tz->trips[trip_id].trip; return 0; } EXPORT_SYMBOL_GPL(__thermal_zone_get_trip); @@ -135,7 +136,7 @@ int thermal_zone_trip_id(const struct thermal_zone_device *tz, * Assume the trip to be located within the bounds of the thermal * zone's trips[] table. */ - return trip - tz->trips; + return trip_to_trip_desc(trip) - tz->trips; } void thermal_zone_trip_updated(struct thermal_zone_device *tz, const struct thermal_trip *trip) diff --git a/include/linux/thermal.h b/include/linux/thermal.h index c33f50177f51..67bd13303349 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -61,7 +61,6 @@ enum thermal_notify_event { * struct thermal_trip - representation of a point in temperature domain * @temperature: temperature value in miliCelsius * @hysteresis: relative hysteresis in miliCelsius - * @threshold: trip crossing notification threshold miliCelsius * @type: trip point type * @priv: pointer to driver data associated with this trip * @flags: flags representing binary properties of the trip @@ -69,12 +68,16 @@ enum thermal_notify_event { struct thermal_trip { int temperature; int hysteresis; - int threshold; enum thermal_trip_type type; u8 flags; void *priv; }; +struct thermal_trip_desc { + struct thermal_trip trip; + int threshold; +}; + #define THERMAL_TRIP_FLAG_RW_TEMP BIT(0) #define THERMAL_TRIP_FLAG_RW_HYST BIT(1) @@ -203,7 +206,7 @@ struct thermal_zone_device { #ifdef CONFIG_THERMAL_DEBUGFS struct thermal_debugfs *debugfs; #endif - struct thermal_trip trips[] __counted_by(num_trips); + struct thermal_trip_desc trips[] __counted_by(num_trips); }; /** -- cgit From b1ae92dcfa8ed7d5044c525c3a868fcb4d0f9c0e Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Tue, 2 Apr 2024 20:57:57 +0200 Subject: thermal: core: Make struct thermal_zone_device definition internal Move the definitions of struct thermal_trip_desc and struct thermal_zone_device to an internal header file in the thermal core, as they don't need to be accessible to any code other than the thermal core and so they don't need to be present in a global header. No intentional functional impact. 
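For illustration only (a hypothetical driver-side snippet, not part of this patch): with the structure layout now private to drivers/thermal/, code outside the core is expected to go through the public trip helpers rather than dereference tz->trips, e.g. walking the trip points with for_each_thermal_trip():

  #include <linux/thermal.h>

  static int dump_trip(struct thermal_trip *trip, void *data)
  {
          pr_info("trip type %d: temp=%d mC hyst=%d mC\n",
                  trip->type, trip->temperature, trip->hysteresis);
          return 0;       /* a non-zero return stops the iteration */
  }

  /* ... somewhere with a valid struct thermal_zone_device *tz ... */
  for_each_thermal_trip(tz, dump_trip, NULL);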
Signed-off-by: Rafael J. Wysocki Reviewed-by: Lukasz Luba Acked-by: Daniel Lezcano --- drivers/thermal/thermal_core.h | 85 ++++++++++++++++++++++++++++++++++++ drivers/thermal/thermal_trace.h | 2 + drivers/thermal/thermal_trace_ipa.h | 2 + include/linux/thermal.h | 87 +------------------------------------ 4 files changed, 91 insertions(+), 85 deletions(-) (limited to 'include/linux') diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 4ad9c671bc13..5dd3be585e9e 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -15,6 +15,91 @@ #include "thermal_netlink.h" #include "thermal_debugfs.h" +struct thermal_trip_desc { + struct thermal_trip trip; + int threshold; +}; + +/** + * struct thermal_zone_device - structure for a thermal zone + * @id: unique id number for each thermal zone + * @type: the thermal zone device type + * @device: &struct device for this thermal zone + * @removal: removal completion + * @trip_temp_attrs: attributes for trip points for sysfs: trip temperature + * @trip_type_attrs: attributes for trip points for sysfs: trip type + * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis + * @mode: current mode of this thermal zone + * @devdata: private pointer for device private data + * @num_trips: number of trip points the thermal zone supports + * @passive_delay_jiffies: number of jiffies to wait between polls when + * performing passive cooling. + * @polling_delay_jiffies: number of jiffies to wait between polls when + * checking whether trip points have been crossed (0 for + * interrupt driven systems) + * @temperature: current temperature. This is only for core code, + * drivers should use thermal_zone_get_temp() to get the + * current temperature + * @last_temperature: previous temperature read + * @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION + * @passive: 1 if you've crossed a passive trip point, 0 otherwise. + * @prev_low_trip: the low current temperature if you've crossed a passive + trip point. + * @prev_high_trip: the above current temperature if you've crossed a + passive trip point. + * @need_update: if equals 1, thermal_zone_device_update needs to be invoked. 
+ * @ops: operations this &thermal_zone_device supports + * @tzp: thermal zone parameters + * @governor: pointer to the governor for this thermal zone + * @governor_data: private pointer for governor data + * @thermal_instances: list of &struct thermal_instance of this thermal zone + * @ida: &struct ida to generate unique id for this zone's cooling + * devices + * @lock: lock to protect thermal_instances list + * @node: node in thermal_tz_list (in thermal_core.c) + * @poll_queue: delayed work for polling + * @notify_event: Last notification event + * @suspended: thermal zone suspend indicator + * @trips: array of struct thermal_trip objects + */ +struct thermal_zone_device { + int id; + char type[THERMAL_NAME_LENGTH]; + struct device device; + struct completion removal; + struct attribute_group trips_attribute_group; + struct thermal_attr *trip_temp_attrs; + struct thermal_attr *trip_type_attrs; + struct thermal_attr *trip_hyst_attrs; + enum thermal_device_mode mode; + void *devdata; + int num_trips; + unsigned long passive_delay_jiffies; + unsigned long polling_delay_jiffies; + int temperature; + int last_temperature; + int emul_temperature; + int passive; + int prev_low_trip; + int prev_high_trip; + atomic_t need_update; + struct thermal_zone_device_ops ops; + struct thermal_zone_params *tzp; + struct thermal_governor *governor; + void *governor_data; + struct list_head thermal_instances; + struct ida ida; + struct mutex lock; + struct list_head node; + struct delayed_work poll_queue; + enum thermal_notify_event notify_event; + bool suspended; +#ifdef CONFIG_THERMAL_DEBUGFS + struct thermal_debugfs *debugfs; +#endif + struct thermal_trip_desc trips[] __counted_by(num_trips); +}; + /* Default Thermal Governor */ #if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE) #define DEFAULT_THERMAL_GOVERNOR "step_wise" diff --git a/drivers/thermal/thermal_trace.h b/drivers/thermal/thermal_trace.h index 459c8ce6cf3b..88a962f560f2 100644 --- a/drivers/thermal/thermal_trace.h +++ b/drivers/thermal/thermal_trace.h @@ -9,6 +9,8 @@ #include #include +#include "thermal_core.h" + TRACE_DEFINE_ENUM(THERMAL_TRIP_CRITICAL); TRACE_DEFINE_ENUM(THERMAL_TRIP_HOT); TRACE_DEFINE_ENUM(THERMAL_TRIP_PASSIVE); diff --git a/drivers/thermal/thermal_trace_ipa.h b/drivers/thermal/thermal_trace_ipa.h index b16b5dd863d9..a82821eebc88 100644 --- a/drivers/thermal/thermal_trace_ipa.h +++ b/drivers/thermal/thermal_trace_ipa.h @@ -7,6 +7,8 @@ #include +#include "thermal_core.h" + TRACE_EVENT(thermal_power_allocator, TP_PROTO(struct thermal_zone_device *tz, u32 total_req_power, u32 total_granted_power, int num_actors, u32 power_range, diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 67bd13303349..62b174528cfe 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -73,17 +73,14 @@ struct thermal_trip { void *priv; }; -struct thermal_trip_desc { - struct thermal_trip trip; - int threshold; -}; - #define THERMAL_TRIP_FLAG_RW_TEMP BIT(0) #define THERMAL_TRIP_FLAG_RW_HYST BIT(1) #define THERMAL_TRIP_FLAG_RW (THERMAL_TRIP_FLAG_RW_TEMP | \ THERMAL_TRIP_FLAG_RW_HYST) +struct thermal_zone_device; + struct thermal_zone_device_ops { int (*bind) (struct thermal_zone_device *, struct thermal_cooling_device *); @@ -129,86 +126,6 @@ struct thermal_cooling_device { #endif }; -/** - * struct thermal_zone_device - structure for a thermal zone - * @id: unique id number for each thermal zone - * @type: the thermal zone device type - * @device: &struct device for this thermal zone - * @removal: removal completion - * 
@trip_temp_attrs: attributes for trip points for sysfs: trip temperature - * @trip_type_attrs: attributes for trip points for sysfs: trip type - * @trip_hyst_attrs: attributes for trip points for sysfs: trip hysteresis - * @mode: current mode of this thermal zone - * @devdata: private pointer for device private data - * @num_trips: number of trip points the thermal zone supports - * @passive_delay_jiffies: number of jiffies to wait between polls when - * performing passive cooling. - * @polling_delay_jiffies: number of jiffies to wait between polls when - * checking whether trip points have been crossed (0 for - * interrupt driven systems) - * @temperature: current temperature. This is only for core code, - * drivers should use thermal_zone_get_temp() to get the - * current temperature - * @last_temperature: previous temperature read - * @emul_temperature: emulated temperature when using CONFIG_THERMAL_EMULATION - * @passive: 1 if you've crossed a passive trip point, 0 otherwise. - * @prev_low_trip: the low current temperature if you've crossed a passive - trip point. - * @prev_high_trip: the above current temperature if you've crossed a - passive trip point. - * @need_update: if equals 1, thermal_zone_device_update needs to be invoked. - * @ops: operations this &thermal_zone_device supports - * @tzp: thermal zone parameters - * @governor: pointer to the governor for this thermal zone - * @governor_data: private pointer for governor data - * @thermal_instances: list of &struct thermal_instance of this thermal zone - * @ida: &struct ida to generate unique id for this zone's cooling - * devices - * @lock: lock to protect thermal_instances list - * @node: node in thermal_tz_list (in thermal_core.c) - * @poll_queue: delayed work for polling - * @notify_event: Last notification event - * @suspended: thermal zone suspend indicator - * @trips: array of struct thermal_trip objects - */ -struct thermal_zone_device { - int id; - char type[THERMAL_NAME_LENGTH]; - struct device device; - struct completion removal; - struct attribute_group trips_attribute_group; - struct thermal_attr *trip_temp_attrs; - struct thermal_attr *trip_type_attrs; - struct thermal_attr *trip_hyst_attrs; - enum thermal_device_mode mode; - void *devdata; - int num_trips; - unsigned long passive_delay_jiffies; - unsigned long polling_delay_jiffies; - int temperature; - int last_temperature; - int emul_temperature; - int passive; - int prev_low_trip; - int prev_high_trip; - atomic_t need_update; - struct thermal_zone_device_ops ops; - struct thermal_zone_params *tzp; - struct thermal_governor *governor; - void *governor_data; - struct list_head thermal_instances; - struct ida ida; - struct mutex lock; - struct list_head node; - struct delayed_work poll_queue; - enum thermal_notify_event notify_event; - bool suspended; -#ifdef CONFIG_THERMAL_DEBUGFS - struct thermal_debugfs *debugfs; -#endif - struct thermal_trip_desc trips[] __counted_by(num_trips); -}; - /** * struct thermal_governor - structure that holds thermal governor information * @name: name of the governor -- cgit From 0444d574fbc3d3af0e426f7c2b72f5830269f096 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Thu, 4 Apr 2024 21:27:07 +0200 Subject: thermal: core: Relocate the struct thermal_governor definition Notice that struct thermal_governor is only used by the thermal core and so move its definition to thermal_core.h. No functional impact. Signed-off-by: Rafael J. 
Wysocki Acked-by: Daniel Lezcano Reviewed-by: Lukasz Luba --- drivers/thermal/thermal_core.h | 25 +++++++++++++++++++++++++ include/linux/thermal.h | 25 ------------------------- 2 files changed, 25 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 9d3ef1e0645f..b461d9583834 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -22,6 +22,31 @@ struct thermal_trip_desc { int threshold; }; +/** + * struct thermal_governor - structure that holds thermal governor information + * @name: name of the governor + * @bind_to_tz: callback called when binding to a thermal zone. If it + * returns 0, the governor is bound to the thermal zone, + * otherwise it fails. + * @unbind_from_tz: callback called when a governor is unbound from a + * thermal zone. + * @throttle: callback called for every trip point even if temperature is + * below the trip point temperature + * @update_tz: callback called when thermal zone internals have changed, e.g. + * thermal cooling instance was added/removed + * @governor_list: node in thermal_governor_list (in thermal_core.c) + */ +struct thermal_governor { + const char *name; + int (*bind_to_tz)(struct thermal_zone_device *tz); + void (*unbind_from_tz)(struct thermal_zone_device *tz); + int (*throttle)(struct thermal_zone_device *tz, + const struct thermal_trip *trip); + void (*update_tz)(struct thermal_zone_device *tz, + enum thermal_notify_event reason); + struct list_head governor_list; +}; + /** * struct thermal_zone_device - structure for a thermal zone + * @id: unique id number for each thermal zone diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 62b174528cfe..f1155c0439c4 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -126,31 +126,6 @@ struct thermal_cooling_device { #endif }; -/** - * struct thermal_governor - structure that holds thermal governor information - * @name: name of the governor - * @bind_to_tz: callback called when binding to a thermal zone. If it - * returns 0, the governor is bound to the thermal zone, - * otherwise it fails. - * @unbind_from_tz: callback called when a governor is unbound from a - * thermal zone. - * @throttle: callback called for every trip point even if temperature is - * below the trip point temperature - * @update_tz: callback called when thermal zone internals have changed, e.g. - * thermal cooling instance was added/removed - * @governor_list: node in thermal_governor_list (in thermal_core.c) - */ -struct thermal_governor { - const char *name; - int (*bind_to_tz)(struct thermal_zone_device *tz); - void (*unbind_from_tz)(struct thermal_zone_device *tz); - int (*throttle)(struct thermal_zone_device *tz, - const struct thermal_trip *trip); - void (*update_tz)(struct thermal_zone_device *tz, - enum thermal_notify_event reason); - struct list_head governor_list; -}; - /* Structure to define Thermal Zone parameters */ struct thermal_zone_params { const char *governor_name; -- cgit From e3ac0f367d5806af09d2070bb7951af2f59d1f52 Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 3 Apr 2024 16:49:04 +0100 Subject: OPP: OF: Export dev_pm_opp_calc_power() for usage from EM There are device drivers which can modify voltage values for OPPs. It could be due to chip binning, and those drivers have specific chip knowledge about it. This adjustment can happen after Energy Model is registered, thus EM can have stale data about power.
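For the helper exported below, a rough usage sketch (hypothetical caller, assuming the device already has an OPP table registered; the helper rounds the frequency up to an existing OPP and reports the matching power):

  #include <linux/device.h>
  #include <linux/pm_opp.h>

  /* hypothetical: called after the driver has adjusted its OPP voltages */
  static void report_opp_power(struct device *dev)
  {
          unsigned long freq_khz = 1200000;   /* updated to the ceiled OPP frequency */
          unsigned long power_uw;
          int ret;

          ret = dev_pm_opp_calc_power(dev, &power_uw, &freq_khz);
          if (ret)
                  dev_warn(dev, "power recalculation failed: %d\n", ret);
          else
                  dev_info(dev, "%lu kHz -> %lu uW\n", freq_khz, power_uw);
  }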
Export dev_pm_opp_calc_power(), which can be used by Energy Model to calculate new power with the new voltage for OPPs. Acked-by: Viresh Kumar Reviewed-by: Dietmar Eggemann Signed-off-by: Lukasz Luba Signed-off-by: Rafael J. Wysocki --- drivers/opp/of.c | 17 ++++++++++++----- include/linux/pm_opp.h | 8 ++++++++ 2 files changed, 20 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/opp/of.c b/drivers/opp/of.c index f9f0b22bccbb..282eb5966fd0 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -1494,20 +1494,26 @@ _get_dt_power(struct device *dev, unsigned long *uW, unsigned long *kHz) return 0; } -/* - * Callback function provided to the Energy Model framework upon registration. +/** + * dev_pm_opp_calc_power() - Calculate power value for device with EM + * @dev : Device for which an Energy Model has to be registered + * @uW : New power value that is calculated + * @kHz : Frequency for which the new power is calculated + * * This computes the power estimated by @dev at @kHz if it is the frequency * of an existing OPP, or at the frequency of the first OPP above @kHz otherwise * (see dev_pm_opp_find_freq_ceil()). This function updates @kHz to the ceiled * frequency and @uW to the associated power. The power is estimated as * P = C * V^2 * f with C being the device's capacitance and V and f * respectively the voltage and frequency of the OPP. + * It is also used as a callback function provided to the Energy Model + * framework upon registration. * * Returns -EINVAL if the power calculation failed because of missing * parameters, 0 otherwise. */ -static int __maybe_unused _get_power(struct device *dev, unsigned long *uW, - unsigned long *kHz) +int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW, + unsigned long *kHz) { struct dev_pm_opp *opp; struct device_node *np; @@ -1544,6 +1550,7 @@ static int __maybe_unused _get_power(struct device *dev, unsigned long *uW, return 0; } +EXPORT_SYMBOL_GPL(dev_pm_opp_calc_power); static bool _of_has_opp_microwatt_property(struct device *dev) { @@ -1619,7 +1626,7 @@ int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus) goto failed; } - EM_SET_ACTIVE_POWER_CB(em_cb, _get_power); + EM_SET_ACTIVE_POWER_CB(em_cb, dev_pm_opp_calc_power); register_em: ret = em_dev_register_perf_domain(dev, nr_opp, &em_cb, cpus, true); diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 065a47382302..dd7c8441af42 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -476,6 +476,8 @@ struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); int of_get_required_opp_performance_state(struct device_node *np, int index); int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_table *opp_table); int dev_pm_opp_of_register_em(struct device *dev, struct cpumask *cpus); +int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW, + unsigned long *kHz); static inline void dev_pm_opp_of_unregister_em(struct device *dev) { em_dev_unregister_perf_domain(dev); @@ -539,6 +541,12 @@ static inline void dev_pm_opp_of_unregister_em(struct device *dev) { } +static inline int dev_pm_opp_calc_power(struct device *dev, unsigned long *uW, + unsigned long *kHz) +{ + return -EOPNOTSUPP; +} + static inline int of_get_required_opp_performance_state(struct device_node *np, int index) { return -EOPNOTSUPP; -- cgit From cf61d53b026805e8222ca28ac2795611eb7fa547 Mon Sep 17 00:00:00 2001 From: Lukasz Luba Date: Wed, 3 Apr 2024 16:49:06 +0100 Subject: PM: EM: Add em_dev_update_chip_binning() Add a
function that makes it easy to modify the EM after the new voltage information is available. The device drivers for the chip can adjust the voltage values after setup. The voltage for the same frequency in OPP can be different due to chip binning. The voltage impacts the power usage and the EM power values can be updated to reflect that. Reviewed-by: Dietmar Eggemann Signed-off-by: Lukasz Luba Signed-off-by: Rafael J. Wysocki --- include/linux/energy_model.h | 5 +++++ kernel/power/energy_model.c | 48 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) (limited to 'include/linux') diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 70cd7258cd29..1ff52020cf75 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -172,6 +172,7 @@ struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd); void em_table_free(struct em_perf_table __rcu *table); int em_dev_compute_costs(struct device *dev, struct em_perf_state *table, int nr_states); +int em_dev_update_chip_binning(struct device *dev); /** * em_pd_get_efficient_state() - Get an efficient performance state from the EM @@ -386,6 +387,10 @@ int em_dev_compute_costs(struct device *dev, struct em_perf_state *table, { return -EINVAL; } +static inline int em_dev_update_chip_binning(struct device *dev) +{ + return -EINVAL; +} #endif #endif diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c index 6960dd7393b2..927cc55ba0b3 100644 --- a/kernel/power/energy_model.c +++ b/kernel/power/energy_model.c @@ -808,3 +808,51 @@ static void em_update_workfn(struct work_struct *work) { em_check_capacity_update(); } + +/** + * em_dev_update_chip_binning() - Update Energy Model after the new voltage + * information is present in the OPPs. + * @dev : Device for which the Energy Model has to be updated. + * + * This function allows to update easily the EM with new values available in + * the OPP framework and DT. It can be used after the chip has been properly + * verified by device drivers and the voltages adjusted for the 'chip binning'. + */ +int em_dev_update_chip_binning(struct device *dev) +{ + struct em_perf_table __rcu *em_table; + struct em_perf_domain *pd; + int i, ret; + + if (IS_ERR_OR_NULL(dev)) + return -EINVAL; + + pd = em_pd_get(dev); + if (!pd) { + dev_warn(dev, "Couldn't find Energy Model\n"); + return -EINVAL; + } + + em_table = em_table_dup(pd); + if (!em_table) { + dev_warn(dev, "EM: allocation failed\n"); + return -ENOMEM; + } + + /* Update power values which might change due to new voltage in OPPs */ + for (i = 0; i < pd->nr_perf_states; i++) { + unsigned long freq = em_table->state[i].frequency; + unsigned long power; + + ret = dev_pm_opp_calc_power(dev, &power, &freq); + if (ret) { + em_table_free(em_table); + return ret; + } + + em_table->state[i].power = power; + } + + return em_recalc_and_update(dev, pd, em_table); +} +EXPORT_SYMBOL_GPL(em_dev_update_chip_binning); -- cgit From f81d13df1aa8b02fa8c6ac4b396dcda92fdfdac0 Mon Sep 17 00:00:00 2001 From: "Luke D. Jones" Date: Thu, 4 Apr 2024 13:16:44 +1300 Subject: platform/x86: asus-wmi: add support for 2024 ROG Mini-LED MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Support the 2024 mini-led backlight and adjust the related functions to select the relevant dev-id. Also add `available_mini_led_mode` to the platform sysfs since the available mini-led levels can be different. Reviewed-by: Ilpo Järvinen Signed-off-by: Luke D.
Jones Link: https://lore.kernel.org/r/20240404001652.86207-2-luke@ljones.dev Signed-off-by: Hans de Goede --- Documentation/ABI/testing/sysfs-platform-asus-wmi | 8 ++ drivers/platform/x86/asus-wmi.c | 96 ++++++++++++++++++++--- include/linux/platform_data/x86/asus-wmi.h | 1 + 3 files changed, 95 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/Documentation/ABI/testing/sysfs-platform-asus-wmi b/Documentation/ABI/testing/sysfs-platform-asus-wmi index 8a7e25bde085..ef1ac1a20a71 100644 --- a/Documentation/ABI/testing/sysfs-platform-asus-wmi +++ b/Documentation/ABI/testing/sysfs-platform-asus-wmi @@ -126,6 +126,14 @@ Description: Change the mini-LED mode: * 0 - Single-zone, * 1 - Multi-zone + * 2 - Multi-zone strong (available on newer generation mini-led) + +What: /sys/devices/platform//available_mini_led_mode +Date: Apr 2024 +KernelVersion: 6.10 +Contact: "Luke Jones" +Description: + List the available mini-led modes. What: /sys/devices/platform//ppt_pl1_spl Date: Jun 2023 diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index df4c103459da..e8fbd7f9d343 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -126,6 +126,17 @@ module_param(fnlock_default, bool, 0444); #define ASUS_SCREENPAD_BRIGHT_MAX 255 #define ASUS_SCREENPAD_BRIGHT_DEFAULT 60 +#define ASUS_MINI_LED_MODE_MASK 0x03 +/* Standard modes for devices with only on/off */ +#define ASUS_MINI_LED_OFF 0x00 +#define ASUS_MINI_LED_ON 0x01 +/* New mode on some devices, define here to clarify remapping later */ +#define ASUS_MINI_LED_STRONG_MODE 0x02 +/* New modes for devices with 3 mini-led mode types */ +#define ASUS_MINI_LED_2024_WEAK 0x00 +#define ASUS_MINI_LED_2024_STRONG 0x01 +#define ASUS_MINI_LED_2024_OFF 0x02 + /* Controls the power state of the USB0 hub on ROG Ally which input is on */ #define ASUS_USB0_PWR_EC0_CSEE "\\_SB.PCI0.SBRG.EC0.CSEE" /* 300ms so far seems to produce a reliable result on AC and battery */ @@ -288,7 +299,7 @@ struct asus_wmi { bool battery_rsoc_available; bool panel_overdrive_available; - bool mini_led_mode_available; + u32 mini_led_dev_id; struct hotplug_slot hotplug_slot; struct mutex hotplug_lock; @@ -2108,13 +2119,33 @@ static ssize_t mini_led_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { struct asus_wmi *asus = dev_get_drvdata(dev); - int result; + u32 value; + int err; - result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_MINI_LED_MODE); - if (result < 0) - return result; + err = asus_wmi_get_devstate(asus, asus->mini_led_dev_id, &value); + if (err < 0) + return err; + value = value & ASUS_MINI_LED_MODE_MASK; - return sysfs_emit(buf, "%d\n", result); + /* + * Remap the mode values to match previous generation mini-led. The last gen + * WMI 0 == off, while on this version WMI 2 ==off (flipped). 
+ */ + if (asus->mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE2) { + switch (value) { + case ASUS_MINI_LED_2024_WEAK: + value = ASUS_MINI_LED_ON; + break; + case ASUS_MINI_LED_2024_STRONG: + value = ASUS_MINI_LED_STRONG_MODE; + break; + case ASUS_MINI_LED_2024_OFF: + value = ASUS_MINI_LED_OFF; + break; + } + } + + return sysfs_emit(buf, "%d\n", value); } static ssize_t mini_led_mode_store(struct device *dev, @@ -2130,11 +2161,32 @@ static ssize_t mini_led_mode_store(struct device *dev, if (result) return result; - if (mode > 1) + if (asus->mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE && + mode > ASUS_MINI_LED_ON) + return -EINVAL; + if (asus->mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE2 && + mode > ASUS_MINI_LED_STRONG_MODE) return -EINVAL; - err = asus_wmi_set_devstate(ASUS_WMI_DEVID_MINI_LED_MODE, mode, &result); + /* + * Remap the mode values so expected behaviour is the same as the last + * generation of mini-LED with 0 == off, 1 == on. + */ + if (asus->mini_led_dev_id == ASUS_WMI_DEVID_MINI_LED_MODE2) { + switch (mode) { + case ASUS_MINI_LED_OFF: + mode = ASUS_MINI_LED_2024_OFF; + break; + case ASUS_MINI_LED_ON: + mode = ASUS_MINI_LED_2024_WEAK; + break; + case ASUS_MINI_LED_STRONG_MODE: + mode = ASUS_MINI_LED_2024_STRONG; + break; + } + } + err = asus_wmi_set_devstate(asus->mini_led_dev_id, mode, &result); if (err) { pr_warn("Failed to set mini-LED: %d\n", err); return err; @@ -2151,6 +2203,23 @@ static ssize_t mini_led_mode_store(struct device *dev, } static DEVICE_ATTR_RW(mini_led_mode); +static ssize_t available_mini_led_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct asus_wmi *asus = dev_get_drvdata(dev); + + switch (asus->mini_led_dev_id) { + case ASUS_WMI_DEVID_MINI_LED_MODE: + return sysfs_emit(buf, "0 1\n"); + case ASUS_WMI_DEVID_MINI_LED_MODE2: + return sysfs_emit(buf, "0 1 2\n"); + } + + return sysfs_emit(buf, "0\n"); +} + +static DEVICE_ATTR_RO(available_mini_led_mode); + /* Quirks *********************************************************************/ static void asus_wmi_set_xusb2pr(struct asus_wmi *asus) @@ -4139,6 +4208,7 @@ static struct attribute *platform_attributes[] = { &dev_attr_nv_temp_target.attr, &dev_attr_panel_od.attr, &dev_attr_mini_led_mode.attr, + &dev_attr_available_mini_led_mode.attr, NULL }; @@ -4191,7 +4261,9 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, else if (attr == &dev_attr_panel_od.attr) ok = asus->panel_overdrive_available; else if (attr == &dev_attr_mini_led_mode.attr) - ok = asus->mini_led_mode_available; + ok = asus->mini_led_dev_id != 0; + else if (attr == &dev_attr_available_mini_led_mode.attr) + ok = asus->mini_led_dev_id != 0; if (devid != -1) ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0); @@ -4444,10 +4516,14 @@ static int asus_wmi_add(struct platform_device *pdev) asus->nv_dyn_boost_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_NV_DYN_BOOST); asus->nv_temp_tgt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_NV_THERM_TARGET); asus->panel_overdrive_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PANEL_OD); - asus->mini_led_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE); asus->ally_mcu_usb_switch = acpi_has_method(NULL, ASUS_USB0_PWR_EC0_CSEE) && dmi_match(DMI_BOARD_NAME, "RC71L"); + if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE)) + asus->mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE; + else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE2)) + asus->mini_led_dev_id = 
ASUS_WMI_DEVID_MINI_LED_MODE2; + err = fan_boost_mode_check_present(asus); if (err) goto fail_fan_boost_mode; diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index ab1c7deff118..9cadce10ad9a 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -71,6 +71,7 @@ #define ASUS_WMI_DEVID_LID_FLIP 0x00060062 #define ASUS_WMI_DEVID_LID_FLIP_ROG 0x00060077 #define ASUS_WMI_DEVID_MINI_LED_MODE 0x0005001E +#define ASUS_WMI_DEVID_MINI_LED_MODE2 0x0005002E /* Storage */ #define ASUS_WMI_DEVID_CARDREADER 0x00080013 -- cgit From eb3bac90549a226df204a57eac45bbe5d6b4a0cf Mon Sep 17 00:00:00 2001 From: "Luke D. Jones" Date: Thu, 4 Apr 2024 13:16:45 +1300 Subject: platform/x86: asus-wmi: add support for Vivobook GPU MUX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for the Vivobook dgpu MUX available on the ASUS Vivobook and some of the other ranges (Zen). This MUX functions exactly the same as the existing ROG MUX support, so the existing functionality now detects which MUX is available and uses that for control. Reviewed-by: Ilpo Järvinen Signed-off-by: Luke D. Jones Link: https://lore.kernel.org/r/20240404001652.86207-3-luke@ljones.dev Signed-off-by: Hans de Goede --- drivers/platform/x86/asus-wmi.c | 22 +++++++++++++--------- include/linux/platform_data/x86/asus-wmi.h | 1 + 2 files changed, 14 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index e8fbd7f9d343..51e4df081046 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -270,7 +270,7 @@ struct asus_wmi { bool egpu_enable_available; bool egpu_connect_available; bool dgpu_disable_available; - bool gpu_mux_mode_available; + u32 gpu_mux_dev; /* Tunables provided by ASUS for gaming laptops */ bool ppt_pl2_sppt_available; @@ -693,8 +693,8 @@ static ssize_t dgpu_disable_store(struct device *dev, if (disable > 1) return -EINVAL; - if (asus->gpu_mux_mode_available) { - result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX); + if (asus->gpu_mux_dev) { + result = asus_wmi_get_devstate_simple(asus, asus->gpu_mux_dev); if (result < 0) /* An error here may signal greater failure of GPU handling */ return result; @@ -759,8 +759,8 @@ static ssize_t egpu_enable_store(struct device *dev, return err; } - if (asus->gpu_mux_mode_available) { - result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX); + if (asus->gpu_mux_dev) { + result = asus_wmi_get_devstate_simple(asus, asus->gpu_mux_dev); if (result < 0) { /* An error here may signal greater failure of GPU handling */ pr_warn("Failed to get gpu mux status: %d\n", result); @@ -813,7 +813,7 @@ static ssize_t gpu_mux_mode_show(struct device *dev, struct asus_wmi *asus = dev_get_drvdata(dev); int result; - result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_GPU_MUX); + result = asus_wmi_get_devstate_simple(asus, asus->gpu_mux_dev); if (result < 0) return result; @@ -859,7 +859,7 @@ static ssize_t gpu_mux_mode_store(struct device *dev, } } - err = asus_wmi_set_devstate(ASUS_WMI_DEVID_GPU_MUX, optimus, &result); + err = asus_wmi_set_devstate(asus->gpu_mux_dev, optimus, &result); if (err) { dev_err(dev, "Failed to set GPU MUX mode: %d\n", err); return err; @@ -4239,7 +4239,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, else if (attr == &dev_attr_dgpu_disable.attr) ok =
asus->dgpu_disable_available; else if (attr == &dev_attr_gpu_mux_mode.attr) - ok = asus->gpu_mux_mode_available; + ok = asus->gpu_mux_dev != 0; else if (attr == &dev_attr_fan_boost_mode.attr) ok = asus->fan_boost_mode_available; else if (attr == &dev_attr_throttle_thermal_policy.attr) @@ -4505,7 +4505,6 @@ static int asus_wmi_add(struct platform_device *pdev) asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU); asus->egpu_connect_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU_CONNECTED); asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU); - asus->gpu_mux_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX); asus->kbd_rgb_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE); asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE); asus->ppt_pl2_sppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_PL2_SPPT); @@ -4524,6 +4523,11 @@ static int asus_wmi_add(struct platform_device *pdev) else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_MINI_LED_MODE2)) asus->mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE2; + if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX)) + asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX; + else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX_VIVO)) + asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX_VIVO; + err = fan_boost_mode_check_present(asus); if (err) goto fail_fan_boost_mode; diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index 9cadce10ad9a..b48b024dd844 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -128,6 +128,7 @@ /* gpu mux switch, 0 = dGPU, 1 = Optimus */ #define ASUS_WMI_DEVID_GPU_MUX 0x00090016 +#define ASUS_WMI_DEVID_GPU_MUX_VIVO 0x00090026 /* TUF laptop RGB modes/colours */ #define ASUS_WMI_DEVID_TUF_RGB_MODE 0x00100056 -- cgit From ae834a549ec1d4cf372ffba1af1c14148807af55 Mon Sep 17 00:00:00 2001 From: "Luke D. Jones" Date: Thu, 4 Apr 2024 13:16:46 +1300 Subject: platform/x86: asus-wmi: add support variant of TUF RGB MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds support for a second TUF RGB wmi call that some versions of the TUF laptop come with. Also adjusts existing support to select whichever is available. Reviewed-by: Ilpo Järvinen Signed-off-by: Luke D. 
Jones Link: https://lore.kernel.org/r/20240404001652.86207-4-luke@ljones.dev Signed-off-by: Hans de Goede --- drivers/platform/x86/asus-wmi.c | 13 +++++++++---- include/linux/platform_data/x86/asus-wmi.h | 1 + 2 files changed, 10 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 51e4df081046..113a80ae0efe 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -281,7 +281,7 @@ struct asus_wmi { bool nv_dyn_boost_available; bool nv_temp_tgt_available; - bool kbd_rgb_mode_available; + u32 kbd_rgb_dev; bool kbd_rgb_state_available; bool throttle_thermal_policy_available; @@ -881,6 +881,7 @@ static ssize_t kbd_rgb_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + struct asus_wmi *asus = dev_get_drvdata(dev); u32 cmd, mode, r, g, b, speed; int err; @@ -917,7 +918,7 @@ static ssize_t kbd_rgb_mode_store(struct device *dev, speed = 0xeb; } - err = asus_wmi_evaluate_method3(ASUS_WMI_METHODID_DEVS, ASUS_WMI_DEVID_TUF_RGB_MODE, + err = asus_wmi_evaluate_method3(ASUS_WMI_METHODID_DEVS, asus->kbd_rgb_dev, cmd | (mode << 8) | (r << 16) | (g << 24), b | (speed << 8), NULL); if (err) return err; @@ -1560,7 +1561,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus) { int rv = 0, num_rgb_groups = 0, led_val; - if (asus->kbd_rgb_mode_available) + if (asus->kbd_rgb_dev) kbd_rgb_mode_groups[num_rgb_groups++] = &kbd_rgb_mode_group; if (asus->kbd_rgb_state_available) kbd_rgb_mode_groups[num_rgb_groups++] = &kbd_rgb_state_group; @@ -4505,7 +4506,6 @@ static int asus_wmi_add(struct platform_device *pdev) asus->egpu_enable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU); asus->egpu_connect_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_EGPU_CONNECTED); asus->dgpu_disable_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_DGPU); - asus->kbd_rgb_mode_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE); asus->kbd_rgb_state_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_STATE); asus->ppt_pl2_sppt_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_PL2_SPPT); asus->ppt_pl1_spl_available = asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_PPT_PL1_SPL); @@ -4528,6 +4528,11 @@ static int asus_wmi_add(struct platform_device *pdev) else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX_VIVO)) asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX_VIVO; + if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE)) + asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE; + else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE2)) + asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE2; + err = fan_boost_mode_check_present(asus); if (err) goto fail_fan_boost_mode; diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index b48b024dd844..3e9a01467c67 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -132,6 +132,7 @@ /* TUF laptop RGB modes/colours */ #define ASUS_WMI_DEVID_TUF_RGB_MODE 0x00100056 +#define ASUS_WMI_DEVID_TUF_RGB_MODE2 0x0010005A /* TUF laptop RGB power/state */ #define ASUS_WMI_DEVID_TUF_RGB_STATE 0x00100057 -- cgit From e0ae0ecce4869f841ea7cb20fca1b2c865c13339 Mon Sep 17 00:00:00 2001 From: "Luke D. 
Jones" Date: Thu, 4 Apr 2024 13:16:47 +1300 Subject: platform/x86: asus-wmi: support toggling POST sound MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for toggling the BIOS POST sound on some ASUS laptops. Reviewed-by: Ilpo Järvinen Signed-off-by: Luke D. Jones Link: https://lore.kernel.org/r/20240404001652.86207-5-luke@ljones.dev Signed-off-by: Hans de Goede --- Documentation/ABI/testing/sysfs-platform-asus-wmi | 9 ++++ drivers/platform/x86/asus-wmi.c | 51 +++++++++++++++++++++++ include/linux/platform_data/x86/asus-wmi.h | 3 ++ 3 files changed, 63 insertions(+) (limited to 'include/linux') diff --git a/Documentation/ABI/testing/sysfs-platform-asus-wmi b/Documentation/ABI/testing/sysfs-platform-asus-wmi index ef1ac1a20a71..72933527d2e4 100644 --- a/Documentation/ABI/testing/sysfs-platform-asus-wmi +++ b/Documentation/ABI/testing/sysfs-platform-asus-wmi @@ -194,3 +194,12 @@ Contact: "Luke Jones" Description: Set the target temperature limit of the Nvidia dGPU: * min=75, max=87 + +What: /sys/devices/platform//boot_sound +Date: Apr 2024 +KernelVersion: 6.10 +Contact: "Luke Jones" +Description: + Set if the BIOS POST sound is played on boot. + * 0 - False, + * 1 - True diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 113a80ae0efe..1dbe473294f7 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -2115,6 +2115,54 @@ static ssize_t panel_od_store(struct device *dev, } static DEVICE_ATTR_RW(panel_od); +/* Bootup sound ***************************************************************/ + +static ssize_t boot_sound_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct asus_wmi *asus = dev_get_drvdata(dev); + int result; + + result = asus_wmi_get_devstate_simple(asus, ASUS_WMI_DEVID_BOOT_SOUND); + if (result < 0) + return result; + + return sysfs_emit(buf, "%d\n", result); +} + +static ssize_t boot_sound_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int result, err; + u32 snd; + + struct asus_wmi *asus = dev_get_drvdata(dev); + + result = kstrtou32(buf, 10, &snd); + if (result) + return result; + + if (snd > 1) + return -EINVAL; + + err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BOOT_SOUND, snd, &result); + if (err) { + pr_warn("Failed to set boot sound: %d\n", err); + return err; + } + + if (result > 1) { + pr_warn("Failed to set panel boot sound (result): 0x%x\n", result); + return -EIO; + } + + sysfs_notify(&asus->platform_device->dev.kobj, NULL, "boot_sound"); + + return count; +} +static DEVICE_ATTR_RW(boot_sound); + /* Mini-LED mode **************************************************************/ static ssize_t mini_led_mode_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -4207,6 +4255,7 @@ static struct attribute *platform_attributes[] = { &dev_attr_ppt_platform_sppt.attr, &dev_attr_nv_dynamic_boost.attr, &dev_attr_nv_temp_target.attr, + &dev_attr_boot_sound.attr, &dev_attr_panel_od.attr, &dev_attr_mini_led_mode.attr, &dev_attr_available_mini_led_mode.attr, @@ -4259,6 +4308,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, ok = asus->nv_dyn_boost_available; else if (attr == &dev_attr_nv_temp_target.attr) ok = asus->nv_temp_tgt_available; + else if (attr == &dev_attr_boot_sound.attr) + devid = ASUS_WMI_DEVID_BOOT_SOUND; else if (attr == &dev_attr_panel_od.attr) ok = asus->panel_overdrive_available; else if (attr == &dev_attr_mini_led_mode.attr) diff --git 
a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index 3e9a01467c67..3eb5cd6773ad 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -137,6 +137,9 @@ /* TUF laptop RGB power/state */ #define ASUS_WMI_DEVID_TUF_RGB_STATE 0x00100057 +/* Bootup sound control */ +#define ASUS_WMI_DEVID_BOOT_SOUND 0x00130022 + /* DSTS masks */ #define ASUS_WMI_DSTS_STATUS_BIT 0x00000001 #define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002 -- cgit From 2125c0034c5dfd61171b494bd309bb7637bff6eb Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 4 Apr 2024 09:47:48 -0400 Subject: cgroup/cpuset: Make cpuset hotplug processing synchronous Since commit 3a5a6d0c2b03 ("cpuset: don't nest cgroup_mutex inside get_online_cpus()"), cpuset hotplug was done asynchronously via a work function. This is to avoid recursive locking of cgroup_mutex. Since then, the cgroup locking scheme has changed quite a bit. A cpuset_mutex was introduced to protect cpuset specific operations. The cpuset_mutex is then replaced by a cpuset_rwsem. With commit d74b27d63a8b ("cgroup/cpuset: Change cpuset_rwsem and hotplug lock order"), cpu_hotplug_lock is acquired before cpuset_rwsem. Later on, cpuset_rwsem is reverted back to cpuset_mutex. All these locking changes allow the hotplug code to call into cpuset core directly. The following commits were also merged due to the asynchronous nature of cpuset hotplug processing. - commit b22afcdf04c9 ("cpu/hotplug: Cure the cpusets trainwreck") - commit 50e76632339d ("sched/cpuset/pm: Fix cpuset vs. suspend-resume bugs") - commit 28b89b9e6f7b ("cpuset: handle race between CPU hotplug and cpuset_hotplug_work") Clean up all these bandages by making cpuset hotplug processing synchronous again with the exception that the call to cgroup_transfer_tasks() to transfer tasks out of an empty cgroup v1 cpuset, if necessary, will still be done via a work function due to the existing cgroup_mutex -> cpu_hotplug_lock dependency. It is possible to reverse that dependency, but that will require updating a number of different cgroup controllers. This special hotplug code path should be rarely taken anyway. As all the cpuset states will be updated by the end of the hotplug operation, we can revert most of the above commits except commit 50e76632339d ("sched/cpuset/pm: Fix cpuset vs. suspend-resume bugs") which is partially reverted. Also remove some cpus_read_lock trylock attempts in the cpuset partition code as they are no longer necessary since the cpu_hotplug_lock is now held for the whole duration of the cpuset hotplug code path.
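A simplified sketch of the one path that stays deferred (hypothetical names, mirroring the cpuset_remove_tasks_struct added by this patch; the real code also pins the cpuset with css_tryget_online() before scheduling the work):

  #include <linux/slab.h>
  #include <linux/workqueue.h>

  struct deferred_removal {
          struct work_struct work;
          struct cpuset *cs;      /* processed outside cpuset_mutex */
  };

  static void deferred_removal_workfn(struct work_struct *work)
  {
          struct deferred_removal *s =
                  container_of(work, struct deferred_removal, work);

          remove_tasks_in_empty_cpuset(s->cs);    /* may take cgroup_mutex */
          css_put(&s->cs->css);
          kfree(s);
  }

  static void defer_removal(struct cpuset *cs)
  {
          struct deferred_removal *s = kzalloc(sizeof(*s), GFP_KERNEL);

          if (!s)
                  return;

          s->cs = cs;     /* caller already holds a css reference */
          INIT_WORK(&s->work, deferred_removal_workfn);
          schedule_work(&s->work);
  }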
Signed-off-by: Waiman Long Tested-by: Valentin Schneider Signed-off-by: Tejun Heo --- include/linux/cpuset.h | 3 -- kernel/cgroup/cpuset.c | 141 ++++++++++++++++++++----------------------------- kernel/cpu.c | 48 ----------------- kernel/power/process.c | 2 - 4 files changed, 56 insertions(+), 138 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 0ce6ff0d9c9a..de4cf0ee96f7 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -70,7 +70,6 @@ extern int cpuset_init(void); extern void cpuset_init_smp(void); extern void cpuset_force_rebuild(void); extern void cpuset_update_active_cpus(void); -extern void cpuset_wait_for_hotplug(void); extern void inc_dl_tasks_cs(struct task_struct *task); extern void dec_dl_tasks_cs(struct task_struct *task); extern void cpuset_lock(void); @@ -185,8 +184,6 @@ static inline void cpuset_update_active_cpus(void) partition_sched_domains(1, NULL, NULL); } -static inline void cpuset_wait_for_hotplug(void) { } - static inline void inc_dl_tasks_cs(struct task_struct *task) { } static inline void dec_dl_tasks_cs(struct task_struct *task) { } static inline void cpuset_lock(void) { } diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 4237c8748715..d8d3439eda4e 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -201,6 +201,14 @@ struct cpuset { struct list_head remote_sibling; }; +/* + * Legacy hierarchy call to cgroup_transfer_tasks() is handled asynchrously + */ +struct cpuset_remove_tasks_struct { + struct work_struct work; + struct cpuset *cs; +}; + /* * Exclusive CPUs distributed out to sub-partitions of top_cpuset */ @@ -449,12 +457,6 @@ static DEFINE_SPINLOCK(callback_lock); static struct workqueue_struct *cpuset_migrate_mm_wq; -/* - * CPU / memory hotplug is handled asynchronously. - */ -static void cpuset_hotplug_workfn(struct work_struct *work); -static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn); - static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq); static inline void check_insane_mems_config(nodemask_t *nodes) @@ -540,22 +542,10 @@ static void guarantee_online_cpus(struct task_struct *tsk, rcu_read_lock(); cs = task_cs(tsk); - while (!cpumask_intersects(cs->effective_cpus, pmask)) { + while (!cpumask_intersects(cs->effective_cpus, pmask)) cs = parent_cs(cs); - if (unlikely(!cs)) { - /* - * The top cpuset doesn't have any online cpu as a - * consequence of a race between cpuset_hotplug_work - * and cpu hotplug notifier. But we know the top - * cpuset's effective_cpus is on its way to be - * identical to cpu_online_mask. - */ - goto out_unlock; - } - } - cpumask_and(pmask, pmask, cs->effective_cpus); -out_unlock: + cpumask_and(pmask, pmask, cs->effective_cpus); rcu_read_unlock(); } @@ -1217,7 +1207,7 @@ static void rebuild_sched_domains_locked(void) /* * If we have raced with CPU hotplug, return early to avoid * passing doms with offlined cpu to partition_sched_domains(). - * Anyways, cpuset_hotplug_workfn() will rebuild sched domains. + * Anyways, cpuset_handle_hotplug() will rebuild sched domains. 
* * With no CPUs in any subpartitions, top_cpuset's effective CPUs * should be the same as the active CPUs, so checking only top_cpuset @@ -1260,12 +1250,17 @@ static void rebuild_sched_domains_locked(void) } #endif /* CONFIG_SMP */ -void rebuild_sched_domains(void) +static void rebuild_sched_domains_cpuslocked(void) { - cpus_read_lock(); mutex_lock(&cpuset_mutex); rebuild_sched_domains_locked(); mutex_unlock(&cpuset_mutex); +} + +void rebuild_sched_domains(void) +{ + cpus_read_lock(); + rebuild_sched_domains_cpuslocked(); cpus_read_unlock(); } @@ -2079,14 +2074,11 @@ write_error: /* * For partcmd_update without newmask, it is being called from - * cpuset_hotplug_workfn() where cpus_read_lock() wasn't taken. - * Update the load balance flag and scheduling domain if - * cpus_read_trylock() is successful. + * cpuset_handle_hotplug(). Update the load balance flag and + * scheduling domain accordingly. */ - if ((cmd == partcmd_update) && !newmask && cpus_read_trylock()) { + if ((cmd == partcmd_update) && !newmask) update_partition_sd_lb(cs, old_prs); - cpus_read_unlock(); - } notify_partition_change(cs, old_prs); return 0; @@ -3599,8 +3591,8 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, * proceeding, so that we don't end up keep removing tasks added * after execution capability is restored. * - * cpuset_hotplug_work calls back into cgroup core via - * cgroup_transfer_tasks() and waiting for it from a cgroupfs + * cpuset_handle_hotplug may call back into cgroup core asynchronously + * via cgroup_transfer_tasks() and waiting for it from a cgroupfs * operation like this one can lead to a deadlock through kernfs * active_ref protection. Let's break the protection. Losing the * protection is okay as we check whether @cs is online after @@ -3609,7 +3601,6 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, */ css_get(&cs->css); kernfs_break_active_protection(of->kn); - flush_work(&cpuset_hotplug_work); cpus_read_lock(); mutex_lock(&cpuset_mutex); @@ -4354,6 +4345,16 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs) } } +static void cpuset_migrate_tasks_workfn(struct work_struct *work) +{ + struct cpuset_remove_tasks_struct *s; + + s = container_of(work, struct cpuset_remove_tasks_struct, work); + remove_tasks_in_empty_cpuset(s->cs); + css_put(&s->cs->css); + kfree(s); +} + static void hotplug_update_tasks_legacy(struct cpuset *cs, struct cpumask *new_cpus, nodemask_t *new_mems, @@ -4383,12 +4384,21 @@ hotplug_update_tasks_legacy(struct cpuset *cs, /* * Move tasks to the nearest ancestor with execution resources, * This is full cgroup operation which will also call back into - * cpuset. Should be done outside any lock. + * cpuset. Execute it asynchronously using workqueue. */ - if (is_empty) { - mutex_unlock(&cpuset_mutex); - remove_tasks_in_empty_cpuset(cs); - mutex_lock(&cpuset_mutex); + if (is_empty && cs->css.cgroup->nr_populated_csets && + css_tryget_online(&cs->css)) { + struct cpuset_remove_tasks_struct *s; + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (WARN_ON_ONCE(!s)) { + css_put(&cs->css); + return; + } + + s->cs = cs; + INIT_WORK(&s->work, cpuset_migrate_tasks_workfn); + schedule_work(&s->work); } } @@ -4421,30 +4431,6 @@ void cpuset_force_rebuild(void) force_rebuild = true; } -/* - * Attempt to acquire a cpus_read_lock while a hotplug operation may be in - * progress. 
- * Return: true if successful, false otherwise - * - * To avoid circular lock dependency between cpuset_mutex and cpus_read_lock, - * cpus_read_trylock() is used here to acquire the lock. - */ -static bool cpuset_hotplug_cpus_read_trylock(void) -{ - int retries = 0; - - while (!cpus_read_trylock()) { - /* - * CPU hotplug still in progress. Retry 5 times - * with a 10ms wait before bailing out. - */ - if (++retries > 5) - return false; - msleep(10); - } - return true; -} - /** * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug * @cs: cpuset in interest @@ -4493,13 +4479,11 @@ retry: compute_partition_effective_cpumask(cs, &new_cpus); if (remote && cpumask_empty(&new_cpus) && - partition_is_populated(cs, NULL) && - cpuset_hotplug_cpus_read_trylock()) { + partition_is_populated(cs, NULL)) { remote_partition_disable(cs, tmp); compute_effective_cpumask(&new_cpus, cs, parent); remote = false; cpuset_force_rebuild(); - cpus_read_unlock(); } /* @@ -4519,18 +4503,8 @@ retry: else if (is_partition_valid(parent) && is_partition_invalid(cs)) partcmd = partcmd_update; - /* - * cpus_read_lock needs to be held before calling - * update_parent_effective_cpumask(). To avoid circular lock - * dependency between cpuset_mutex and cpus_read_lock, - * cpus_read_trylock() is used here to acquire the lock. - */ if (partcmd >= 0) { - if (!cpuset_hotplug_cpus_read_trylock()) - goto update_tasks; - update_parent_effective_cpumask(cs, partcmd, NULL, tmp); - cpus_read_unlock(); if ((partcmd == partcmd_invalidate) || is_partition_valid(cs)) { compute_partition_effective_cpumask(cs, &new_cpus); cpuset_force_rebuild(); @@ -4558,8 +4532,7 @@ unlock: } /** - * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset - * @work: unused + * cpuset_handle_hotplug - handle CPU/memory hot{,un}plug for a cpuset * * This function is called after either CPU or memory configuration has * changed and updates cpuset accordingly. The top_cpuset is always @@ -4573,8 +4546,10 @@ unlock: * * Note that CPU offlining during suspend is ignored. We don't modify * cpusets across suspend/resume cycles at all. + * + * CPU / memory hotplug is handled synchronously. */ -static void cpuset_hotplug_workfn(struct work_struct *work) +static void cpuset_handle_hotplug(void) { static cpumask_t new_cpus; static nodemask_t new_mems; @@ -4585,6 +4560,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work) if (on_dfl && !alloc_cpumasks(NULL, &tmp)) ptmp = &tmp; + lockdep_assert_cpus_held(); mutex_lock(&cpuset_mutex); /* fetch the available cpus/mems and find out which changed how */ @@ -4666,7 +4642,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work) /* rebuild sched domains if cpus_allowed has changed */ if (cpus_updated || force_rebuild) { force_rebuild = false; - rebuild_sched_domains(); + rebuild_sched_domains_cpuslocked(); } free_cpumasks(NULL, ptmp); @@ -4679,12 +4655,7 @@ void cpuset_update_active_cpus(void) * inside cgroup synchronization. Bounce actual hotplug processing * to a work item to avoid reverse locking order. 
*/ - schedule_work(&cpuset_hotplug_work); -} - -void cpuset_wait_for_hotplug(void) -{ - flush_work(&cpuset_hotplug_work); + cpuset_handle_hotplug(); } /* @@ -4695,7 +4666,7 @@ void cpuset_wait_for_hotplug(void) static int cpuset_track_online_nodes(struct notifier_block *self, unsigned long action, void *arg) { - schedule_work(&cpuset_hotplug_work); + cpuset_handle_hotplug(); return NOTIFY_OK; } diff --git a/kernel/cpu.c b/kernel/cpu.c index 8f6affd051f7..49ce3f309688 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1208,52 +1208,6 @@ void __init cpuhp_threads_init(void) kthread_unpark(this_cpu_read(cpuhp_state.thread)); } -/* - * - * Serialize hotplug trainwrecks outside of the cpu_hotplug_lock - * protected region. - * - * The operation is still serialized against concurrent CPU hotplug via - * cpu_add_remove_lock, i.e. CPU map protection. But it is _not_ - * serialized against other hotplug related activity like adding or - * removing of state callbacks and state instances, which invoke either the - * startup or the teardown callback of the affected state. - * - * This is required for subsystems which are unfixable vs. CPU hotplug and - * evade lock inversion problems by scheduling work which has to be - * completed _before_ cpu_up()/_cpu_down() returns. - * - * Don't even think about adding anything to this for any new code or even - * drivers. It's only purpose is to keep existing lock order trainwrecks - * working. - * - * For cpu_down() there might be valid reasons to finish cleanups which are - * not required to be done under cpu_hotplug_lock, but that's a different - * story and would be not invoked via this. - */ -static void cpu_up_down_serialize_trainwrecks(bool tasks_frozen) -{ - /* - * cpusets delegate hotplug operations to a worker to "solve" the - * lock order problems. Wait for the worker, but only if tasks are - * _not_ frozen (suspend, hibernate) as that would wait forever. - * - * The wait is required because otherwise the hotplug operation - * returns with inconsistent state, which could even be observed in - * user space when a new CPU is brought up. The CPU plug uevent - * would be delivered and user space reacting on it would fail to - * move tasks to the newly plugged CPU up to the point where the - * work has finished because up to that point the newly plugged CPU - * is not assignable in cpusets/cgroups. On unplug that's not - * necessarily a visible issue, but it is still inconsistent state, - * which is the real problem which needs to be "fixed". This can't - * prevent the transient state between scheduling the work and - * returning from waiting for it. 
- */ - if (!tasks_frozen) - cpuset_wait_for_hotplug(); -} - #ifdef CONFIG_HOTPLUG_CPU #ifndef arch_clear_mm_cpumask_cpu #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm)) @@ -1494,7 +1448,6 @@ out: */ lockup_detector_cleanup(); arch_smt_update(); - cpu_up_down_serialize_trainwrecks(tasks_frozen); return ret; } @@ -1728,7 +1681,6 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target) out: cpus_write_unlock(); arch_smt_update(); - cpu_up_down_serialize_trainwrecks(tasks_frozen); return ret; } diff --git a/kernel/power/process.c b/kernel/power/process.c index cae81a87cc91..66ac067d9ae6 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c @@ -194,8 +194,6 @@ void thaw_processes(void) __usermodehelper_set_disable_depth(UMH_FREEZING); thaw_workqueues(); - cpuset_wait_for_hotplug(); - read_lock(&tasklist_lock); for_each_process_thread(g, p) { /* No other threads should have PF_SUSPEND_TASK set */ -- cgit From 3f9f231236ce7e48780d8a4f1f8cb9fae2df1e4e Mon Sep 17 00:00:00 2001 From: Li Nan Date: Wed, 17 Jan 2024 11:19:45 +0800 Subject: md: Fix overflow in is_mddev_idle UBSAN reports this problem: UBSAN: Undefined behaviour in drivers/md/md.c:8175:15 signed integer overflow: -2147483291 - 2072033152 cannot be represented in type 'int' Call trace: dump_backtrace+0x0/0x310 show_stack+0x28/0x38 dump_stack+0xec/0x15c ubsan_epilogue+0x18/0x84 handle_overflow+0x14c/0x19c __ubsan_handle_sub_overflow+0x34/0x44 is_mddev_idle+0x338/0x3d8 md_do_sync+0x1bb8/0x1cf8 md_thread+0x220/0x288 kthread+0x1d8/0x1e0 ret_from_fork+0x10/0x18 'curr_events' will overflow when stat accum or 'sync_io' is greater than INT_MAX. Fix it by changing sync_io, last_events and curr_events to 64bit. Signed-off-by: Li Nan Reviewed-by: Yu Kuai Link: https://lore.kernel.org/r/20240117031946.2324519-2-linan666@huaweicloud.com Signed-off-by: Song Liu --- drivers/md/md.c | 7 ++++--- drivers/md/md.h | 4 ++-- include/linux/blkdev.h | 2 +- 3 files changed, 7 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/md/md.c b/drivers/md/md.c index fd574b015d78..f28e41f9ac4a 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -8577,14 +8577,15 @@ static int is_mddev_idle(struct mddev *mddev, int init) { struct md_rdev *rdev; int idle; - int curr_events; + long long curr_events; idle = 1; rcu_read_lock(); rdev_for_each_rcu(rdev, mddev) { struct gendisk *disk = rdev->bdev->bd_disk; - curr_events = (int)part_stat_read_accum(disk->part0, sectors) - - atomic_read(&disk->sync_io); + curr_events = + (long long)part_stat_read_accum(disk->part0, sectors) - + atomic64_read(&disk->sync_io); /* sync IO will cause sync_io to increase before the disk_stats * as sync_io is counted when a request starts, and * disk_stats is counted when it completes. 
diff --git a/drivers/md/md.h b/drivers/md/md.h index 097d9dbd69b8..d0db98c0d33b 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -51,7 +51,7 @@ struct md_rdev { sector_t sectors; /* Device size (in 512bytes sectors) */ struct mddev *mddev; /* RAID array if running */ - int last_events; /* IO event timestamp */ + long long last_events; /* IO event timestamp */ /* * If meta_bdev is non-NULL, it means that a separate device is @@ -621,7 +621,7 @@ extern void mddev_unlock(struct mddev *mddev); static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) { - atomic_add(nr_sectors, &bdev->bd_disk->sync_io); + atomic64_add(nr_sectors, &bdev->bd_disk->sync_io); } static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c3e8f7cf96be..d0fa4f1c29a2 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -172,7 +172,7 @@ struct gendisk { struct list_head slave_bdevs; #endif struct timer_rand_state *random; - atomic_t sync_io; /* RAID */ + atomic64_t sync_io; /* RAID */ struct disk_events *ev; #ifdef CONFIG_BLK_DEV_ZONED -- cgit From e1900d7ba9c9fd9edb214c43d2826876a5a35057 Mon Sep 17 00:00:00 2001 From: Diogo Ivo Date: Wed, 3 Apr 2024 11:48:12 +0100 Subject: eth: Move IPv4/IPv6 multicast address bases to their own symbols As these addresses can be useful outside of checking if an address is a multicast address (for example in device drivers) make them accessible to users of etherdevice.h to avoid code duplication. Signed-off-by: Diogo Ivo Reviewed-by: MD Danish Anwar Signed-off-by: Paolo Abeni --- include/linux/etherdevice.h | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 224645f17c33..8d6daf828427 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -71,6 +71,12 @@ static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; #define eth_stp_addr eth_reserved_addr_base +static const u8 eth_ipv4_mcast_addr_base[ETH_ALEN] __aligned(2) = +{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; + +static const u8 eth_ipv6_mcast_addr_base[ETH_ALEN] __aligned(2) = +{ 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; + /** * is_link_local_ether_addr - Determine if given Ethernet address is link-local * @addr: Pointer to a six-byte array containing the Ethernet address @@ -430,18 +436,16 @@ static inline bool ether_addr_equal_masked(const u8 *addr1, const u8 *addr2, static inline bool ether_addr_is_ipv4_mcast(const u8 *addr) { - u8 base[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 }; u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 }; - return ether_addr_equal_masked(addr, base, mask); + return ether_addr_equal_masked(addr, eth_ipv4_mcast_addr_base, mask); } static inline bool ether_addr_is_ipv6_mcast(const u8 *addr) { - u8 base[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 }; u8 mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }; - return ether_addr_equal_masked(addr, base, mask); + return ether_addr_equal_masked(addr, eth_ipv6_mcast_addr_base, mask); } static inline bool ether_addr_is_ip_mcast(const u8 *addr) -- cgit From 886b94d25a8eba4c42634dddc3cbfd6391a24d25 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 9 Apr 2024 10:41:33 +0200 Subject: fs: Add FOP_HUGE_PAGES Instead of checking for specific file_operations, add a bit to file_operations which denotes a file that only 
contain hugetlb pages. This lets us make hugetlbfs_file_operations static, and removes is_file_shm_hugepages() completely. Signed-off-by: Matthew Wilcox (Oracle) Link: https://lore.kernel.org/r/20240407201122.3783877-1-willy@infradead.org Signed-off-by: Christian Brauner --- fs/hugetlbfs/inode.c | 5 +++-- include/linux/fs.h | 2 ++ include/linux/hugetlb.h | 8 ++------ include/linux/shm.h | 5 ----- ipc/shm.c | 10 +++------- 5 files changed, 10 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 6502c7e776d1..34ac73cc36b1 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -40,7 +40,7 @@ #include static const struct address_space_operations hugetlbfs_aops; -const struct file_operations hugetlbfs_file_operations; +static const struct file_operations hugetlbfs_file_operations; static const struct inode_operations hugetlbfs_dir_inode_operations; static const struct inode_operations hugetlbfs_inode_operations; @@ -1301,13 +1301,14 @@ static void init_once(void *foo) inode_init_once(&ei->vfs_inode); } -const struct file_operations hugetlbfs_file_operations = { +static const struct file_operations hugetlbfs_file_operations = { .read_iter = hugetlbfs_read_iter, .mmap = hugetlbfs_file_mmap, .fsync = noop_fsync, .get_unmapped_area = hugetlb_get_unmapped_area, .llseek = default_llseek, .fallocate = hugetlbfs_fallocate, + .fop_flags = FOP_HUGE_PAGES, }; static const struct inode_operations hugetlbfs_dir_inode_operations = { diff --git a/include/linux/fs.h b/include/linux/fs.h index 2ba98930c4c4..b053f98393e6 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2052,6 +2052,8 @@ struct file_operations { #define FOP_MMAP_SYNC ((__force fop_flags_t)(1 << 2)) /* Supports non-exclusive O_DIRECT writes from multiple threads */ #define FOP_DIO_PARALLEL_WRITE ((__force fop_flags_t)(1 << 3)) +/* Contains huge pages */ +#define FOP_HUGE_PAGES ((__force fop_flags_t)(1 << 4)) /* Wrap a directory iterator that needs exclusive inode access */ int wrap_directory_iterator(struct file *, struct dir_context *, diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 77b30a8c6076..b06f7c426d38 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -554,17 +554,13 @@ static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); } -extern const struct file_operations hugetlbfs_file_operations; extern const struct vm_operations_struct hugetlb_vm_ops; struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, int creat_flags, int page_size_log); -static inline bool is_file_hugepages(struct file *file) +static inline bool is_file_hugepages(const struct file *file) { - if (file->f_op == &hugetlbfs_file_operations) - return true; - - return is_file_shm_hugepages(file); + return file->f_op->fop_flags & FOP_HUGE_PAGES; } static inline struct hstate *hstate_inode(struct inode *i) diff --git a/include/linux/shm.h b/include/linux/shm.h index c55bef0538e5..1d3d3ae958fb 100644 --- a/include/linux/shm.h +++ b/include/linux/shm.h @@ -16,7 +16,6 @@ struct sysv_shm { long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr, unsigned long shmlba); -bool is_file_shm_hugepages(struct file *file); void exit_shm(struct task_struct *task); #define shm_init_task(task) INIT_LIST_HEAD(&(task)->sysvshm.shm_clist) #else @@ -30,10 +29,6 @@ static inline long do_shmat(int shmid, char __user *shmaddr, { return 
-ENOSYS; } -static inline bool is_file_shm_hugepages(struct file *file) -{ - return false; -} static inline void exit_shm(struct task_struct *task) { } diff --git a/ipc/shm.c b/ipc/shm.c index a89f001a8bf0..3e3071252dac 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -662,8 +662,8 @@ static const struct file_operations shm_file_operations = { }; /* - * shm_file_operations_huge is now identical to shm_file_operations, - * but we keep it distinct for the sake of is_file_shm_hugepages(). + * shm_file_operations_huge is now identical to shm_file_operations + * except for fop_flags */ static const struct file_operations shm_file_operations_huge = { .mmap = shm_mmap, @@ -672,13 +672,9 @@ static const struct file_operations shm_file_operations_huge = { .get_unmapped_area = shm_get_unmapped_area, .llseek = noop_llseek, .fallocate = shm_fallocate, + .fop_flags = FOP_HUGE_PAGES, }; -bool is_file_shm_hugepages(struct file *file) -{ - return file->f_op == &shm_file_operations_huge; -} - static const struct vm_operations_struct shm_vm_ops = { .open = shm_open, /* callback for a new vm-area open */ .close = shm_close, /* callback for when the vm-area is released */ -- cgit From c342b42fa47f4257fccfeadc8e32c51b1be17a1f Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Fri, 23 Feb 2024 15:16:20 -0800 Subject: rcu-tasks: Make Tasks RCU wait idly for grace-period delays Currently, all waits for grace periods sleep at TASK_UNINTERRUPTIBLE, regardless of RCU flavor. This has worked well, but there have been cases where a longer-than-average Tasks RCU grace period has triggered softlockup splats, many of them, before the Tasks RCU CPU stall warning appears. These softlockup splats unnecessarily consume console bandwidth and complicate diagnosis of the underlying problem. Plus a long but not pathologically long Tasks RCU grace period might trigger a few softlockup splats before completing normally, which generates noise for no good reason. This commit therefore causes Tasks RCU grace periods to sleep at TASK_IDLE priority. If there really is a persistent problem, the eventual Tasks RCU CPU stall warning will flag it, and without the extra noise. Reported-by: Breno Leitao Signed-off-by: Paul E. McKenney Signed-off-by: Uladzislau Rezki (Sony) --- include/linux/rcupdate_wait.h | 18 +++++++++--------- kernel/rcu/tasks.h | 6 +++++- kernel/rcu/update.c | 4 ++-- 3 files changed, 16 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rcupdate_wait.h b/include/linux/rcupdate_wait.h index d07f0848802e..303ab9bee155 100644 --- a/include/linux/rcupdate_wait.h +++ b/include/linux/rcupdate_wait.h @@ -19,18 +19,18 @@ struct rcu_synchronize { }; void wakeme_after_rcu(struct rcu_head *head); -void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, +void __wait_rcu_gp(bool checktiny, unsigned int state, int n, call_rcu_func_t *crcu_array, struct rcu_synchronize *rs_array); -#define _wait_rcu_gp(checktiny, ...) \ -do { \ - call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ - struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ - __wait_rcu_gp(checktiny, ARRAY_SIZE(__crcu_array), \ - __crcu_array, __rs_array); \ +#define _wait_rcu_gp(checktiny, state, ...) \ +do { \ + call_rcu_func_t __crcu_array[] = { __VA_ARGS__ }; \ + struct rcu_synchronize __rs_array[ARRAY_SIZE(__crcu_array)]; \ + __wait_rcu_gp(checktiny, state, ARRAY_SIZE(__crcu_array), __crcu_array, __rs_array); \ } while (0) -#define wait_rcu_gp(...) _wait_rcu_gp(false, __VA_ARGS__) +#define wait_rcu_gp(...) 
_wait_rcu_gp(false, TASK_UNINTERRUPTIBLE, __VA_ARGS__) +#define wait_rcu_gp_state(state, ...) _wait_rcu_gp(false, state, __VA_ARGS__) /** * synchronize_rcu_mult - Wait concurrently for multiple grace periods @@ -54,7 +54,7 @@ do { \ * grace period. */ #define synchronize_rcu_mult(...) \ - _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), __VA_ARGS__) + _wait_rcu_gp(IS_ENABLED(CONFIG_TINY_RCU), TASK_UNINTERRUPTIBLE, __VA_ARGS__) static inline void cond_resched_rcu(void) { diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 147b5945d67a..82e458ea0728 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -74,6 +74,7 @@ struct rcu_tasks_percpu { * @holdouts_func: This flavor's holdout-list scan function (optional). * @postgp_func: This flavor's post-grace-period function (optional). * @call_func: This flavor's call_rcu()-equivalent function. + * @wait_state: Task state for synchronous grace-period waits (default TASK_UNINTERRUPTIBLE). * @rtpcpu: This flavor's rcu_tasks_percpu structure. * @percpu_enqueue_shift: Shift down CPU ID this much when enqueuing callbacks. * @percpu_enqueue_lim: Number of per-CPU callback queues in use for enqueuing. @@ -107,6 +108,7 @@ struct rcu_tasks { holdouts_func_t holdouts_func; postgp_func_t postgp_func; call_rcu_func_t call_func; + unsigned int wait_state; struct rcu_tasks_percpu __percpu *rtpcpu; int percpu_enqueue_shift; int percpu_enqueue_lim; @@ -134,6 +136,7 @@ static struct rcu_tasks rt_name = \ .tasks_gp_mutex = __MUTEX_INITIALIZER(rt_name.tasks_gp_mutex), \ .gp_func = gp, \ .call_func = call, \ + .wait_state = TASK_UNINTERRUPTIBLE, \ .rtpcpu = &rt_name ## __percpu, \ .lazy_jiffies = DIV_ROUND_UP(HZ, 4), \ .name = n, \ @@ -638,7 +641,7 @@ static void synchronize_rcu_tasks_generic(struct rcu_tasks *rtp) // If the grace-period kthread is running, use it. if (READ_ONCE(rtp->kthread_ptr)) { - wait_rcu_gp(rtp->call_func); + wait_rcu_gp_state(rtp->wait_state, rtp->call_func); return; } rcu_tasks_one_gp(rtp, true); @@ -1160,6 +1163,7 @@ static int __init rcu_spawn_tasks_kthread(void) rcu_tasks.postscan_func = rcu_tasks_postscan; rcu_tasks.holdouts_func = check_all_holdout_tasks; rcu_tasks.postgp_func = rcu_tasks_postgp; + rcu_tasks.wait_state = TASK_IDLE; rcu_spawn_tasks_kthread_generic(&rcu_tasks); return 0; } diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 46aaaa9fe339..f8436969e0c8 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -408,7 +408,7 @@ void wakeme_after_rcu(struct rcu_head *head) } EXPORT_SYMBOL_GPL(wakeme_after_rcu); -void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, +void __wait_rcu_gp(bool checktiny, unsigned int state, int n, call_rcu_func_t *crcu_array, struct rcu_synchronize *rs_array) { int i; @@ -440,7 +440,7 @@ void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array, if (crcu_array[j] == crcu_array[i]) break; if (j == i) { - wait_for_completion(&rs_array[i].completion); + wait_for_completion_state(&rs_array[i].completion, state); destroy_rcu_head_on_stack(&rs_array[i].head); } } -- cgit From 779087fe2fecf8b8aa7bdd69982f9c0c8266927b Mon Sep 17 00:00:00 2001 From: "Jiri Slaby (SUSE)" Date: Fri, 5 Apr 2024 08:08:12 +0200 Subject: kfifo: drop __kfifo_dma_out_finish_r() It is the same as __kfifo_skip_r(), so: * drop __kfifo_dma_out_finish_r() completely, and * replace its (only) use by __kfifo_skip_r(). 
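For context, the two helpers really are interchangeable. A paraphrase is shown below: the __kfifo_skip_r() body is quoted from memory of lib/kfifo.c and should be checked against the tree, while the removed function is visible verbatim in the diff that follows:

void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
{
	unsigned int n = __kfifo_peek_n(fifo, recsize);

	fifo->out += n + recsize;	/* skip record header and payload */
}

/* Removed below -- the same operation, byte for byte: */
void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize)
{
	unsigned int len = __kfifo_peek_n(fifo, recsize);

	fifo->out += len + recsize;
}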
Signed-off-by: Jiri Slaby (SUSE) Cc: Stefani Seibold Cc: Andrew Morton Link: https://lore.kernel.org/r/20240405060826.2521-2-jirislaby@kernel.org Signed-off-by: Greg Kroah-Hartman --- include/linux/kfifo.h | 4 +--- lib/kfifo.c | 8 -------- 2 files changed, 1 insertion(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 0b35a41440ff..bc7a1f5bb0ce 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -797,7 +797,7 @@ __kfifo_int_must_check_helper( \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ if (__recsize) \ - __kfifo_dma_out_finish_r(__kfifo, __recsize); \ + __kfifo_skip_r(__kfifo, __recsize); \ else \ __kfifo->out += __len / sizeof(*__tmp->type); \ }) @@ -879,8 +879,6 @@ extern void __kfifo_dma_in_finish_r(struct __kfifo *fifo, extern unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, struct scatterlist *sgl, int nents, unsigned int len, size_t recsize); -extern void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize); - extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize); extern void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize); diff --git a/lib/kfifo.c b/lib/kfifo.c index 12f5a347aa13..958099cc4914 100644 --- a/lib/kfifo.c +++ b/lib/kfifo.c @@ -582,11 +582,3 @@ unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, } EXPORT_SYMBOL(__kfifo_dma_out_prepare_r); -void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize) -{ - unsigned int len; - - len = __kfifo_peek_n(fifo, recsize); - fifo->out += len + recsize; -} -EXPORT_SYMBOL(__kfifo_dma_out_finish_r); -- cgit From 76738194be605c9fb279f87d8a6b0b95904bfb60 Mon Sep 17 00:00:00 2001 From: "Jiri Slaby (SUSE)" Date: Fri, 5 Apr 2024 08:08:13 +0200 Subject: kfifo: introduce and use kfifo_skip_count() kfifo_skip_count() is an extended version of kfifo_skip(), accepting also count. This will be useful in the serial code later. Now, it can be used to implement both kfifo_skip() and kfifo_dma_out_finish(). In the latter, 'len' only needs to be divided by 'type' size (as it was until now). And stop using statement expressions when the return value is cast to 'void'. Use classic 'do {} while (0)' instead. Note: perhaps we should skip 'count' records for the 'recsize' case, but the original (kfifo_dma_out_finish()) used to skip only one record. So this is kept unchanged and 'count' is still ignored in the recsize case. 
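A hypothetical usage sketch (fifo name and contents made up for illustration): dropping two elements of a byte fifo without copying them out. With a record fifo (recsize != 0) the count is ignored and a single record is skipped, as the note above explains.

#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/printk.h>

static DEFINE_KFIFO(demo_fifo, unsigned char, 128);

static void demo_skip(void)
{
	static const unsigned char data[] = { 'a', 'b', 'c', 'd' };
	unsigned char c;

	kfifo_in(&demo_fifo, data, ARRAY_SIZE(data));
	kfifo_skip_count(&demo_fifo, 2);	/* discard 'a' and 'b' */

	if (kfifo_get(&demo_fifo, &c))
		pr_info("next element: %c\n", c);	/* prints 'c' */
}

In the non-record case this just advances the internal out index by count, which is exactly how kfifo_dma_out_finish() is reimplemented in the diff below after dividing len by the element size.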
Signed-off-by: Jiri Slaby (SUSE) Cc: Stefani Seibold Cc: Andrew Morton Link: https://lore.kernel.org/r/20240405060826.2521-3-jirislaby@kernel.org Signed-off-by: Greg Kroah-Hartman --- include/linux/kfifo.h | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index bc7a1f5bb0ce..8f3369ec528b 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -304,19 +304,25 @@ __kfifo_uint_must_check_helper( \ ) /** - * kfifo_skip - skip output data + * kfifo_skip_count - skip output data * @fifo: address of the fifo to be used + * @count: count of data to skip */ -#define kfifo_skip(fifo) \ -(void)({ \ +#define kfifo_skip_count(fifo, count) do { \ typeof((fifo) + 1) __tmp = (fifo); \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ if (__recsize) \ __kfifo_skip_r(__kfifo, __recsize); \ else \ - __kfifo->out++; \ -}) + __kfifo->out += (count); \ +} while(0) + +/** + * kfifo_skip - skip output data + * @fifo: address of the fifo to be used + */ +#define kfifo_skip(fifo) kfifo_skip_count(fifo, 1) /** * kfifo_peek_len - gets the size of the next fifo record @@ -790,17 +796,10 @@ __kfifo_int_must_check_helper( \ * Note that with only one concurrent reader and one concurrent * writer, you don't need extra locking to use these macros. */ -#define kfifo_dma_out_finish(fifo, len) \ -(void)({ \ - typeof((fifo) + 1) __tmp = (fifo); \ - unsigned int __len = (len); \ - const size_t __recsize = sizeof(*__tmp->rectype); \ - struct __kfifo *__kfifo = &__tmp->kfifo; \ - if (__recsize) \ - __kfifo_skip_r(__kfifo, __recsize); \ - else \ - __kfifo->out += __len / sizeof(*__tmp->type); \ -}) +#define kfifo_dma_out_finish(fifo, len) do { \ + typeof((fifo) + 1) ___tmp = (fifo); \ + kfifo_skip_count(___tmp, (len) / sizeof(*___tmp->type)); \ +} while (0) /** * kfifo_out_peek - gets some data from the fifo -- cgit From 4edd7e96a1f159f43bd1cb82616f81eaddd54262 Mon Sep 17 00:00:00 2001 From: "Jiri Slaby (SUSE)" Date: Fri, 5 Apr 2024 08:08:14 +0200 Subject: kfifo: add kfifo_out_linear{,_ptr}() These are helpers which are going to be used in the serial layer. We need a wrapper around kfifo which provides us with a tail (sometimes "tail" offset, sometimes a pointer) to the kfifo data. And which returns count of available data -- but not larger than to the end of the buffer (hence _linear in the names). I.e. something like CIRC_CNT_TO_END() in the legacy circ_buf. This patch adds such two helpers. Signed-off-by: Jiri Slaby (SUSE) Cc: Stefani Seibold Cc: Andrew Morton Link: https://lore.kernel.org/r/20240405060826.2521-4-jirislaby@kernel.org Signed-off-by: Greg Kroah-Hartman --- include/linux/kfifo.h | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++ lib/kfifo.c | 26 +++++++++++++++++++++ 2 files changed, 89 insertions(+) (limited to 'include/linux') diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 8f3369ec528b..3def70e1a3e3 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -827,6 +827,63 @@ __kfifo_uint_must_check_helper( \ }) \ ) +/** + * kfifo_out_linear - gets a tail of/offset to available data + * @fifo: address of the fifo to be used + * @tail: pointer to an unsigned int to store the value of tail + * @n: max. number of elements to point at + * + * This macro obtains the offset (tail) to the available data in the fifo + * buffer and returns the + * numbers of elements available. 
It returns the available count till the end + * of data or till the end of the buffer. So that it can be used for linear + * data processing (like memcpy() of (@fifo->data + @tail) with count + * returned). + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ +#define kfifo_out_linear(fifo, tail, n) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) __tmp = (fifo); \ + unsigned int *__tail = (tail); \ + unsigned long __n = (n); \ + const size_t __recsize = sizeof(*__tmp->rectype); \ + struct __kfifo *__kfifo = &__tmp->kfifo; \ + (__recsize) ? \ + __kfifo_out_linear_r(__kfifo, __tail, __n, __recsize) : \ + __kfifo_out_linear(__kfifo, __tail, __n); \ +}) \ +) + +/** + * kfifo_out_linear_ptr - gets a pointer to the available data + * @fifo: address of the fifo to be used + * @ptr: pointer to data to store the pointer to tail + * @n: max. number of elements to point at + * + * Similarly to kfifo_out_linear(), this macro obtains the pointer to the + * available data in the fifo buffer and returns the numbers of elements + * available. It returns the available count till the end of available data or + * till the end of the buffer. So that it can be used for linear data + * processing (like memcpy() of @ptr with count returned). + * + * Note that with only one concurrent reader and one concurrent + * writer, you don't need extra locking to use these macro. + */ +#define kfifo_out_linear_ptr(fifo, ptr, n) \ +__kfifo_uint_must_check_helper( \ +({ \ + typeof((fifo) + 1) ___tmp = (fifo); \ + unsigned int ___tail; \ + unsigned int ___n = kfifo_out_linear(___tmp, &___tail, (n)); \ + *(ptr) = ___tmp->kfifo.data + ___tail * kfifo_esize(___tmp); \ + ___n; \ +}) \ +) + + extern int __kfifo_alloc(struct __kfifo *fifo, unsigned int size, size_t esize, gfp_t gfp_mask); @@ -856,6 +913,9 @@ extern unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo, extern unsigned int __kfifo_out_peek(struct __kfifo *fifo, void *buf, unsigned int len); +extern unsigned int __kfifo_out_linear(struct __kfifo *fifo, + unsigned int *tail, unsigned int n); + extern unsigned int __kfifo_in_r(struct __kfifo *fifo, const void *buf, unsigned int len, size_t recsize); @@ -885,6 +945,9 @@ extern void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize); extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf, unsigned int len, size_t recsize); +extern unsigned int __kfifo_out_linear_r(struct __kfifo *fifo, + unsigned int *tail, unsigned int n, size_t recsize); + extern unsigned int __kfifo_max_r(unsigned int len, size_t recsize); #endif diff --git a/lib/kfifo.c b/lib/kfifo.c index 958099cc4914..a36bfdbdb17d 100644 --- a/lib/kfifo.c +++ b/lib/kfifo.c @@ -163,6 +163,19 @@ unsigned int __kfifo_out_peek(struct __kfifo *fifo, } EXPORT_SYMBOL(__kfifo_out_peek); +unsigned int __kfifo_out_linear(struct __kfifo *fifo, + unsigned int *tail, unsigned int n) +{ + unsigned int size = fifo->mask + 1; + unsigned int off = fifo->out & fifo->mask; + + if (tail) + *tail = off; + + return min3(n, fifo->in - fifo->out, size - off); +} +EXPORT_SYMBOL(__kfifo_out_linear); + unsigned int __kfifo_out(struct __kfifo *fifo, void *buf, unsigned int len) { @@ -473,6 +486,19 @@ unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, void *buf, } EXPORT_SYMBOL(__kfifo_out_peek_r); +unsigned int __kfifo_out_linear_r(struct __kfifo *fifo, + unsigned int *tail, unsigned int n, size_t recsize) +{ + if (fifo->in == fifo->out) + return 0; + + if (tail) + *tail 
= fifo->out + recsize; + + return min(n, __kfifo_peek_n(fifo, recsize)); +} +EXPORT_SYMBOL(__kfifo_out_linear_r); + unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf, unsigned int len, size_t recsize) { -- cgit From d52b761e4b1acc897e65bcc8eff42b0537ac0134 Mon Sep 17 00:00:00 2001 From: "Jiri Slaby (SUSE)" Date: Fri, 5 Apr 2024 08:08:18 +0200 Subject: kfifo: add kfifo_dma_out_prepare_mapped() When the kfifo buffer is already dma-mapped, one cannot use the kfifo API to fill in an SG list. Add kfifo_dma_in_prepare_mapped() which allows exactly this. A mapped dma_addr_t is passed and it is filled into provided sgl too. Including the dma_len. Signed-off-by: Jiri Slaby (SUSE) Cc: Stefani Seibold Cc: Andrew Morton Link: https://lore.kernel.org/r/20240405060826.2521-8-jirislaby@kernel.org Signed-off-by: Greg Kroah-Hartman --- include/linux/kfifo.h | 37 +++++++++++++++++++++++++------------ lib/kfifo.c | 34 ++++++++++++++++++++++------------ 2 files changed, 47 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 3def70e1a3e3..c7cc25b2808b 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -36,6 +36,7 @@ * to lock the reader. */ +#include #include #include #include @@ -709,11 +710,12 @@ __kfifo_int_must_check_helper( \ ) /** - * kfifo_dma_in_prepare - setup a scatterlist for DMA input + * kfifo_dma_in_prepare_mapped - setup a scatterlist for DMA input * @fifo: address of the fifo to be used * @sgl: pointer to the scatterlist array * @nents: number of entries in the scatterlist array * @len: number of elements to transfer + * @dma: mapped dma address to fill into @sgl * * This macro fills a scatterlist for DMA input. * It returns the number entries in the scatterlist array. @@ -721,7 +723,7 @@ __kfifo_int_must_check_helper( \ * Note that with only one concurrent reader and one concurrent * writer, you don't need extra locking to use these macros. */ -#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ +#define kfifo_dma_in_prepare_mapped(fifo, sgl, nents, len, dma) \ ({ \ typeof((fifo) + 1) __tmp = (fifo); \ struct scatterlist *__sgl = (sgl); \ @@ -730,10 +732,14 @@ __kfifo_int_must_check_helper( \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ (__recsize) ? \ - __kfifo_dma_in_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \ - __kfifo_dma_in_prepare(__kfifo, __sgl, __nents, __len); \ + __kfifo_dma_in_prepare_r(__kfifo, __sgl, __nents, __len, __recsize, \ + dma) : \ + __kfifo_dma_in_prepare(__kfifo, __sgl, __nents, __len, dma); \ }) +#define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ + kfifo_dma_in_prepare_mapped(fifo, sgl, nents, len, DMA_MAPPING_ERROR) + /** * kfifo_dma_in_finish - finish a DMA IN operation * @fifo: address of the fifo to be used @@ -758,11 +764,12 @@ __kfifo_int_must_check_helper( \ }) /** - * kfifo_dma_out_prepare - setup a scatterlist for DMA output + * kfifo_dma_out_prepare_mapped - setup a scatterlist for DMA output * @fifo: address of the fifo to be used * @sgl: pointer to the scatterlist array * @nents: number of entries in the scatterlist array * @len: number of elements to transfer + * @dma: mapped dma address to fill into @sgl * * This macro fills a scatterlist for DMA output which at most @len bytes * to transfer. @@ -772,7 +779,7 @@ __kfifo_int_must_check_helper( \ * Note that with only one concurrent reader and one concurrent * writer, you don't need extra locking to use these macros. 
*/ -#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \ +#define kfifo_dma_out_prepare_mapped(fifo, sgl, nents, len, dma) \ ({ \ typeof((fifo) + 1) __tmp = (fifo); \ struct scatterlist *__sgl = (sgl); \ @@ -781,10 +788,14 @@ __kfifo_int_must_check_helper( \ const size_t __recsize = sizeof(*__tmp->rectype); \ struct __kfifo *__kfifo = &__tmp->kfifo; \ (__recsize) ? \ - __kfifo_dma_out_prepare_r(__kfifo, __sgl, __nents, __len, __recsize) : \ - __kfifo_dma_out_prepare(__kfifo, __sgl, __nents, __len); \ + __kfifo_dma_out_prepare_r(__kfifo, __sgl, __nents, __len, __recsize, \ + dma) : \ + __kfifo_dma_out_prepare(__kfifo, __sgl, __nents, __len, dma); \ }) +#define kfifo_dma_out_prepare(fifo, sgl, nents, len) \ + kfifo_dma_out_prepare_mapped(fifo, sgl, nents, len, DMA_MAPPING_ERROR) + /** * kfifo_dma_out_finish - finish a DMA OUT operation * @fifo: address of the fifo to be used @@ -905,10 +916,10 @@ extern int __kfifo_to_user(struct __kfifo *fifo, void __user *to, unsigned long len, unsigned int *copied); extern unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo, - struct scatterlist *sgl, int nents, unsigned int len); + struct scatterlist *sgl, int nents, unsigned int len, dma_addr_t dma); extern unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo, - struct scatterlist *sgl, int nents, unsigned int len); + struct scatterlist *sgl, int nents, unsigned int len, dma_addr_t dma); extern unsigned int __kfifo_out_peek(struct __kfifo *fifo, void *buf, unsigned int len); @@ -930,13 +941,15 @@ extern int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to, unsigned long len, unsigned int *copied, size_t recsize); extern unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, - struct scatterlist *sgl, int nents, unsigned int len, size_t recsize); + struct scatterlist *sgl, int nents, unsigned int len, size_t recsize, + dma_addr_t dma); extern void __kfifo_dma_in_finish_r(struct __kfifo *fifo, unsigned int len, size_t recsize); extern unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, - struct scatterlist *sgl, int nents, unsigned int len, size_t recsize); + struct scatterlist *sgl, int nents, unsigned int len, size_t recsize, + dma_addr_t dma); extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize); diff --git a/lib/kfifo.c b/lib/kfifo.c index 3a249ce4f281..75ce9225548a 100644 --- a/lib/kfifo.c +++ b/lib/kfifo.c @@ -6,6 +6,7 @@ */ #include +#include #include #include #include @@ -307,7 +308,7 @@ EXPORT_SYMBOL(__kfifo_to_user); static unsigned int setup_sgl_buf(struct __kfifo *fifo, struct scatterlist *sgl, unsigned int data_offset, int nents, - unsigned int len) + unsigned int len, dma_addr_t dma) { const void *buf = fifo->data + data_offset; @@ -316,11 +317,16 @@ static unsigned int setup_sgl_buf(struct __kfifo *fifo, struct scatterlist *sgl, sg_set_buf(sgl, buf, len); + if (dma != DMA_MAPPING_ERROR) { + sg_dma_address(sgl) = dma + data_offset; + sg_dma_len(sgl) = len; + } + return 1; } static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl, - int nents, unsigned int len, unsigned int off) + int nents, unsigned int len, unsigned int off, dma_addr_t dma) { unsigned int size = fifo->mask + 1; unsigned int esize = fifo->esize; @@ -335,14 +341,15 @@ static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl, } len_to_end = min(len, size - off); - n = setup_sgl_buf(fifo, sgl, off, nents, len_to_end); - n += setup_sgl_buf(fifo, sgl + n, 0, nents - n, len - len_to_end); + n = setup_sgl_buf(fifo, sgl, off, nents, 
len_to_end, dma); + n += setup_sgl_buf(fifo, sgl + n, 0, nents - n, len - len_to_end, dma); return n; } unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo, - struct scatterlist *sgl, int nents, unsigned int len) + struct scatterlist *sgl, int nents, unsigned int len, + dma_addr_t dma) { unsigned int l; @@ -350,12 +357,13 @@ unsigned int __kfifo_dma_in_prepare(struct __kfifo *fifo, if (len > l) len = l; - return setup_sgl(fifo, sgl, nents, len, fifo->in); + return setup_sgl(fifo, sgl, nents, len, fifo->in, dma); } EXPORT_SYMBOL(__kfifo_dma_in_prepare); unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo, - struct scatterlist *sgl, int nents, unsigned int len) + struct scatterlist *sgl, int nents, unsigned int len, + dma_addr_t dma) { unsigned int l; @@ -363,7 +371,7 @@ unsigned int __kfifo_dma_out_prepare(struct __kfifo *fifo, if (len > l) len = l; - return setup_sgl(fifo, sgl, nents, len, fifo->out); + return setup_sgl(fifo, sgl, nents, len, fifo->out, dma); } EXPORT_SYMBOL(__kfifo_dma_out_prepare); @@ -547,7 +555,8 @@ int __kfifo_to_user_r(struct __kfifo *fifo, void __user *to, EXPORT_SYMBOL(__kfifo_to_user_r); unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, - struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) + struct scatterlist *sgl, int nents, unsigned int len, size_t recsize, + dma_addr_t dma) { BUG_ON(!nents); @@ -556,7 +565,7 @@ unsigned int __kfifo_dma_in_prepare_r(struct __kfifo *fifo, if (len + recsize > kfifo_unused(fifo)) return 0; - return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize); + return setup_sgl(fifo, sgl, nents, len, fifo->in + recsize, dma); } EXPORT_SYMBOL(__kfifo_dma_in_prepare_r); @@ -570,7 +579,8 @@ void __kfifo_dma_in_finish_r(struct __kfifo *fifo, EXPORT_SYMBOL(__kfifo_dma_in_finish_r); unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, - struct scatterlist *sgl, int nents, unsigned int len, size_t recsize) + struct scatterlist *sgl, int nents, unsigned int len, size_t recsize, + dma_addr_t dma) { BUG_ON(!nents); @@ -579,7 +589,7 @@ unsigned int __kfifo_dma_out_prepare_r(struct __kfifo *fifo, if (len + recsize > fifo->in - fifo->out) return 0; - return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize); + return setup_sgl(fifo, sgl, nents, len, fifo->out + recsize, dma); } EXPORT_SYMBOL(__kfifo_dma_out_prepare_r); -- cgit From 2ab682d221551d5eb9606dad1efa01e184de7b00 Mon Sep 17 00:00:00 2001 From: "Jiri Slaby (SUSE)" Date: Fri, 5 Apr 2024 08:08:19 +0200 Subject: kfifo: fix typos in kernel-doc Obviously: "This macro finish" -> "This macro finishes" and similar. Signed-off-by: Jiri Slaby (SUSE) Cc: Stefani Seibold Cc: Andrew Morton Link: https://lore.kernel.org/r/20240405060826.2521-9-jirislaby@kernel.org Signed-off-by: Greg Kroah-Hartman --- include/linux/kfifo.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index c7cc25b2808b..d613748de7ff 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -585,7 +585,7 @@ __kfifo_uint_must_check_helper( \ * @buf: pointer to the storage buffer * @n: max. number of elements to get * - * This macro get some data from the fifo and return the numbers of elements + * This macro gets some data from the fifo and returns the numbers of elements * copied. * * Note that with only one concurrent reader and one concurrent @@ -612,7 +612,7 @@ __kfifo_uint_must_check_helper( \ * @n: max. 
number of elements to get * @lock: pointer to the spinlock to use for locking * - * This macro get the data from the fifo and return the numbers of elements + * This macro gets the data from the fifo and returns the numbers of elements * copied. */ #define kfifo_out_spinlocked(fifo, buf, n, lock) \ @@ -745,7 +745,7 @@ __kfifo_int_must_check_helper( \ * @fifo: address of the fifo to be used * @len: number of bytes to received * - * This macro finish a DMA IN operation. The in counter will be updated by + * This macro finishes a DMA IN operation. The in counter will be updated by * the len parameter. No error checking will be done. * * Note that with only one concurrent reader and one concurrent @@ -801,7 +801,7 @@ __kfifo_int_must_check_helper( \ * @fifo: address of the fifo to be used * @len: number of bytes transferred * - * This macro finish a DMA OUT operation. The out counter will be updated by + * This macro finishes a DMA OUT operation. The out counter will be updated by * the len parameter. No error checking will be done. * * Note that with only one concurrent reader and one concurrent @@ -818,7 +818,7 @@ __kfifo_int_must_check_helper( \ * @buf: pointer to the storage buffer * @n: max. number of elements to get * - * This macro get the data from the fifo and return the numbers of elements + * This macro gets the data from the fifo and returns the numbers of elements * copied. The data is not removed from the fifo. * * Note that with only one concurrent reader and one concurrent -- cgit From 1788cf6a91d9fa9aa61fc2917afe192c23d67f6a Mon Sep 17 00:00:00 2001 From: "Jiri Slaby (SUSE)" Date: Fri, 5 Apr 2024 08:08:23 +0200 Subject: tty: serial: switch from circ_buf to kfifo MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Switch from struct circ_buf to proper kfifo. kfifo provides much better API, esp. when wrap-around of the buffer needs to be taken into account. Look at pl011_dma_tx_refill() or cpm_uart_tx_pump() changes for example. Kfifo API can also fill in scatter-gather DMA structures, so it easier for that use case too. Look at lpuart_dma_tx() for example. Note that not all drivers can be converted to that (like atmel_serial), they handle DMA specially. Note that usb-serial uses kfifo for TX for ages. omap needed a bit more care as it needs to put a char into FIFO to start the DMA transfer when OMAP_DMA_TX_KICK is set. In that case, we have to do kfifo_dma_out_prepare twice: once to find out the tx_size (to find out if it is worths to do DMA at all -- size >= 4), the second time for the actual transfer. All traces of circ_buf are removed from serial_core.h (and its struct uart_state). Signed-off-by: Jiri Slaby (SUSE) Cc: Al Cooper Cc: Matthias Brugger Cc: AngeloGioacchino Del Regno Cc: Kumaravel Thiagarajan Cc: Tharun Kumar P Cc: Russell King Cc: Vineet Gupta Cc: Richard Genoud Cc: Nicolas Ferre Cc: Alexandre Belloni Cc: Claudiu Beznea Cc: Alexander Shiyan Cc: Baruch Siach Cc: Maciej W. Rozycki Cc: Shawn Guo Cc: Sascha Hauer Cc: Fabio Estevam Cc: Neil Armstrong Cc: Kevin Hilman Cc: Jerome Brunet Cc: Martin Blumenstingl Cc: Taichi Sugaya Cc: Takao Orito Cc: Bjorn Andersson Cc: Konrad Dybcio Cc: Pali Rohár Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Christophe Leroy Cc: Aneesh Kumar K.V Cc: Naveen N. 
Rao Cc: Manivannan Sadhasivam Cc: Krzysztof Kozlowski Cc: Alim Akhtar Cc: Laxman Dewangan Cc: Thierry Reding Cc: Jonathan Hunter Cc: Orson Zhai Cc: Baolin Wang Cc: Chunyan Zhang Cc: Patrice Chotard Cc: Maxime Coquelin Cc: Alexandre Torgue Cc: David S. Miller Cc: Hammer Hsieh Cc: Peter Korsgaard Cc: Timur Tabi Cc: Michal Simek Cc: Sumit Semwal Cc: Christian König Link: https://lore.kernel.org/r/20240405060826.2521-13-jirislaby@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_bcm7271.c | 14 ++++---- drivers/tty/serial/8250/8250_core.c | 3 +- drivers/tty/serial/8250/8250_dma.c | 23 +++++++------ drivers/tty/serial/8250/8250_exar.c | 5 +-- drivers/tty/serial/8250/8250_mtk.c | 2 +- drivers/tty/serial/8250/8250_omap.c | 47 ++++++++++++++++++--------- drivers/tty/serial/8250/8250_pci1xxxx.c | 50 +++++++++++++---------------- drivers/tty/serial/8250/8250_port.c | 22 +++++++------ drivers/tty/serial/amba-pl011.c | 46 +++++++++++--------------- drivers/tty/serial/ar933x_uart.c | 15 ++++----- drivers/tty/serial/arc_uart.c | 8 ++--- drivers/tty/serial/atmel_serial.c | 57 +++++++++++++++++---------------- drivers/tty/serial/clps711x.c | 12 +++---- drivers/tty/serial/cpm_uart.c | 20 ++++-------- drivers/tty/serial/digicolor-usart.c | 12 +++---- drivers/tty/serial/dz.c | 13 ++++---- drivers/tty/serial/fsl_linflexuart.c | 17 +++++----- drivers/tty/serial/fsl_lpuart.c | 39 ++++++++++------------ drivers/tty/serial/icom.c | 25 ++++----------- drivers/tty/serial/imx.c | 54 +++++++++++++------------------ drivers/tty/serial/ip22zilog.c | 26 +++++++-------- drivers/tty/serial/jsm/jsm_cls.c | 29 +++++------------ drivers/tty/serial/jsm/jsm_neo.c | 38 ++++++++-------------- drivers/tty/serial/max3100.c | 14 ++++---- drivers/tty/serial/max310x.c | 35 +++++++++----------- drivers/tty/serial/men_z135_uart.c | 26 ++++++--------- drivers/tty/serial/meson_uart.c | 11 +++---- drivers/tty/serial/milbeaut_usio.c | 15 +++++---- drivers/tty/serial/msm_serial.c | 30 ++++++++--------- drivers/tty/serial/mvebu-uart.c | 8 ++--- drivers/tty/serial/mxs-auart.c | 23 ++++--------- drivers/tty/serial/pch_uart.c | 21 ++++++------ drivers/tty/serial/pic32_uart.c | 15 ++++----- drivers/tty/serial/pmac_zilog.c | 24 +++++++------- drivers/tty/serial/qcom_geni_serial.c | 36 ++++++++++----------- drivers/tty/serial/rda-uart.c | 17 ++++------ drivers/tty/serial/samsung_tty.c | 54 ++++++++++++++++--------------- drivers/tty/serial/sb1250-duart.c | 13 ++++---- drivers/tty/serial/sc16is7xx.c | 40 +++++++++-------------- drivers/tty/serial/sccnxp.c | 16 +++++---- drivers/tty/serial/serial-tegra.c | 43 ++++++++++++++----------- drivers/tty/serial/serial_core.c | 56 ++++++++++++-------------------- drivers/tty/serial/serial_port.c | 2 +- drivers/tty/serial/sh-sci.c | 51 +++++++++++++++-------------- drivers/tty/serial/sprd_serial.c | 20 ++++++------ drivers/tty/serial/st-asc.c | 4 +-- drivers/tty/serial/stm32-usart.c | 52 ++++++++++++------------------ drivers/tty/serial/sunhv.c | 35 +++++++++++--------- drivers/tty/serial/sunplus-uart.c | 16 +++++---- drivers/tty/serial/sunsab.c | 30 +++++++++-------- drivers/tty/serial/sunsu.c | 15 +++++---- drivers/tty/serial/sunzilog.c | 27 ++++++++-------- drivers/tty/serial/tegra-tcu.c | 10 +++--- drivers/tty/serial/timbuart.c | 17 ++++------ drivers/tty/serial/uartlite.c | 13 +++++--- drivers/tty/serial/ucc_uart.c | 20 ++++-------- drivers/tty/serial/xilinx_uartps.c | 20 ++++++------ drivers/tty/serial/zs.c | 13 ++++---- include/linux/serial_core.h | 49 
+++++++++++++++++----------- 59 files changed, 688 insertions(+), 780 deletions(-) (limited to 'include/linux') diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c index 5daa38d9c64e..de270863eb5e 100644 --- a/drivers/tty/serial/8250/8250_bcm7271.c +++ b/drivers/tty/serial/8250/8250_bcm7271.c @@ -413,20 +413,18 @@ static int stop_tx_dma(struct uart_8250_port *p) static int brcmuart_tx_dma(struct uart_8250_port *p) { struct brcmuart_priv *priv = p->port.private_data; - struct circ_buf *xmit = &p->port.state->xmit; + struct tty_port *tport = &p->port.state->port; u32 tx_size; if (uart_tx_stopped(&p->port) || priv->tx_running || - uart_circ_empty(xmit)) { + kfifo_is_empty(&tport->xmit_fifo)) { return 0; } - tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); priv->dma.tx_err = 0; - memcpy(priv->tx_buf, &xmit->buf[xmit->tail], tx_size); - uart_xmit_advance(&p->port, tx_size); + tx_size = uart_fifo_out(&p->port, priv->tx_buf, UART_XMIT_SIZE); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&p->port); udma_writel(priv, REGS_DMA_TX, UDMA_TX_TRANSFER_LEN, tx_size); @@ -540,7 +538,7 @@ static void brcmuart_tx_isr(struct uart_port *up, u32 isr) struct brcmuart_priv *priv = up->private_data; struct device *dev = up->dev; struct uart_8250_port *port_8250 = up_to_u8250p(up); - struct circ_buf *xmit = &port_8250->port.state->xmit; + struct tty_port *tport = &port_8250->port.state->port; if (isr & UDMA_INTR_TX_ABORT) { if (priv->tx_running) @@ -548,7 +546,7 @@ static void brcmuart_tx_isr(struct uart_port *up, u32 isr) return; } priv->tx_running = false; - if (!uart_circ_empty(xmit) && !uart_tx_stopped(up)) + if (!kfifo_is_empty(&tport->xmit_fifo) && !uart_tx_stopped(up)) brcmuart_tx_dma(port_8250); } diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index b62ad9006780..3a1936b7f05f 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -280,7 +280,8 @@ static void serial8250_backup_timeout(struct timer_list *t) */ lsr = serial_lsr_in(up); if ((iir & UART_IIR_NO_INT) && (up->ier & UART_IER_THRI) && - (!uart_circ_empty(&up->port.state->xmit) || up->port.x_char) && + (!kfifo_is_empty(&up->port.state->port.xmit_fifo) || + up->port.x_char) && (lsr & UART_LSR_THRE)) { iir &= ~(UART_IIR_ID | UART_IIR_NO_INT); iir |= UART_IIR_THRI; diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c index 8b2c3f478b17..8a353e3cc3dd 100644 --- a/drivers/tty/serial/8250/8250_dma.c +++ b/drivers/tty/serial/8250/8250_dma.c @@ -15,7 +15,7 @@ static void __dma_tx_complete(void *param) { struct uart_8250_port *p = param; struct uart_8250_dma *dma = p->dma; - struct circ_buf *xmit = &p->port.state->xmit; + struct tty_port *tport = &p->port.state->port; unsigned long flags; int ret; @@ -28,7 +28,7 @@ static void __dma_tx_complete(void *param) uart_xmit_advance(&p->port, dma->tx_size); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&p->port); ret = serial8250_tx_dma(p); @@ -86,7 +86,7 @@ static void dma_rx_complete(void *param) int serial8250_tx_dma(struct uart_8250_port *p) { struct uart_8250_dma *dma = p->dma; - struct circ_buf *xmit = &p->port.state->xmit; + struct tty_port *tport = &p->port.state->port; struct dma_async_tx_descriptor *desc; struct uart_port *up = &p->port; struct scatterlist sg; @@ -103,18 +103,23 @@ int 
serial8250_tx_dma(struct uart_8250_port *p) uart_xchar_out(up, UART_TX); } - if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) { + if (uart_tx_stopped(&p->port) || kfifo_is_empty(&tport->xmit_fifo)) { /* We have been called from __dma_tx_complete() */ return 0; } - dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); - serial8250_do_prepare_tx_dma(p); sg_init_table(&sg, 1); - sg_dma_address(&sg) = dma->tx_addr + xmit->tail; - sg_dma_len(&sg) = dma->tx_size; + /* kfifo can do more than one sg, we don't (quite yet) */ + ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, &sg, 1, + UART_XMIT_SIZE, dma->tx_addr); + + /* we already checked empty fifo above, so there should be something */ + if (WARN_ON_ONCE(ret != 1)) + return 0; + + dma->tx_size = sg_dma_len(&sg); desc = dmaengine_prep_slave_sg(dma->txchan, &sg, 1, DMA_MEM_TO_DEV, @@ -257,7 +262,7 @@ int serial8250_request_dma(struct uart_8250_port *p) /* TX buffer */ dma->tx_addr = dma_map_single(dma->txchan->device->dev, - p->port.state->xmit.buf, + p->port.state->port.xmit_buf, UART_XMIT_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) { diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c index 0440df7de1ed..f89dabfc0f97 100644 --- a/drivers/tty/serial/8250/8250_exar.c +++ b/drivers/tty/serial/8250/8250_exar.c @@ -214,7 +214,7 @@ static void exar_shutdown(struct uart_port *port) { bool tx_complete = false; struct uart_8250_port *up = up_to_u8250p(port); - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; int i = 0; u16 lsr; @@ -225,7 +225,8 @@ static void exar_shutdown(struct uart_port *port) else tx_complete = false; usleep_range(1000, 1100); - } while (!uart_circ_empty(xmit) && !tx_complete && i++ < 1000); + } while (!kfifo_is_empty(&tport->xmit_fifo) && + !tx_complete && i++ < 1000); serial8250_do_shutdown(port); } diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index 9ff6bbe9c086..c365a349421a 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -199,7 +199,7 @@ static int mtk8250_startup(struct uart_port *port) if (up->dma) { data->rx_status = DMA_RX_START; - uart_circ_clear(&port->state->xmit); + kfifo_reset(&port->state->port.xmit_fifo); } #endif memset(&port->icount, 0, sizeof(port->icount)); diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 879e77b30701..c07d924d5add 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -1094,7 +1094,7 @@ static void omap_8250_dma_tx_complete(void *param) { struct uart_8250_port *p = param; struct uart_8250_dma *dma = p->dma; - struct circ_buf *xmit = &p->port.state->xmit; + struct tty_port *tport = &p->port.state->port; unsigned long flags; bool en_thri = false; struct omap8250_priv *priv = p->port.private_data; @@ -1113,10 +1113,10 @@ static void omap_8250_dma_tx_complete(void *param) omap8250_restore_regs(p); } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&p->port); - if (!uart_circ_empty(xmit) && !uart_tx_stopped(&p->port)) { + if (!kfifo_is_empty(&tport->xmit_fifo) && !uart_tx_stopped(&p->port)) { int ret; ret = omap_8250_tx_dma(p); @@ -1138,15 +1138,15 @@ static int omap_8250_tx_dma(struct uart_8250_port *p) { struct uart_8250_dma *dma = p->dma; struct omap8250_priv *priv = p->port.private_data; - struct circ_buf *xmit = 
&p->port.state->xmit; + struct tty_port *tport = &p->port.state->port; struct dma_async_tx_descriptor *desc; struct scatterlist sg; - unsigned int skip_byte = 0; + int skip_byte = -1; int ret; if (dma->tx_running) return 0; - if (uart_tx_stopped(&p->port) || uart_circ_empty(xmit)) { + if (uart_tx_stopped(&p->port) || kfifo_is_empty(&tport->xmit_fifo)) { /* * Even if no data, we need to return an error for the two cases @@ -1161,8 +1161,18 @@ static int omap_8250_tx_dma(struct uart_8250_port *p) return 0; } - dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + sg_init_table(&sg, 1); + ret = kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, &sg, 1, + UART_XMIT_SIZE, dma->tx_addr); + if (ret != 1) { + serial8250_clear_THRI(p); + return 0; + } + + dma->tx_size = sg_dma_len(&sg); + if (priv->habit & OMAP_DMA_TX_KICK) { + unsigned char c; u8 tx_lvl; /* @@ -1189,13 +1199,16 @@ static int omap_8250_tx_dma(struct uart_8250_port *p) ret = -EINVAL; goto err; } - skip_byte = 1; + if (!kfifo_get(&tport->xmit_fifo, &c)) { + ret = -EINVAL; + goto err; + } + skip_byte = c; + /* now we need to recompute due to kfifo_get */ + kfifo_dma_out_prepare_mapped(&tport->xmit_fifo, &sg, 1, + UART_XMIT_SIZE, dma->tx_addr); } - sg_init_table(&sg, 1); - sg_dma_address(&sg) = dma->tx_addr + xmit->tail + skip_byte; - sg_dma_len(&sg) = dma->tx_size - skip_byte; - desc = dmaengine_prep_slave_sg(dma->txchan, &sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) { @@ -1218,11 +1231,13 @@ static int omap_8250_tx_dma(struct uart_8250_port *p) dma->tx_err = 0; serial8250_clear_THRI(p); - if (skip_byte) - serial_out(p, UART_TX, xmit->buf[xmit->tail]); - return 0; + ret = 0; + goto out_skip; err: dma->tx_err = 1; +out_skip: + if (skip_byte >= 0) + serial_out(p, UART_TX, skip_byte); return ret; } @@ -1311,7 +1326,7 @@ static int omap_8250_dma_handle_irq(struct uart_port *port) serial8250_modem_status(up); if (status & UART_LSR_THRE && up->dma->tx_err) { if (uart_tx_stopped(&up->port) || - uart_circ_empty(&up->port.state->xmit)) { + kfifo_is_empty(&up->port.state->port.xmit_fifo)) { up->dma->tx_err = 0; serial8250_tx_chars(up); } else { diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c index 2fbb5851f788..d3930bf32fe4 100644 --- a/drivers/tty/serial/8250/8250_pci1xxxx.c +++ b/drivers/tty/serial/8250/8250_pci1xxxx.c @@ -382,10 +382,10 @@ static void pci1xxxx_rx_burst(struct uart_port *port, u32 uart_status) } static void pci1xxxx_process_write_data(struct uart_port *port, - struct circ_buf *xmit, int *data_empty_count, u32 *valid_byte_count) { + struct tty_port *tport = &port->state->port; u32 valid_burst_count = *valid_byte_count / UART_BURST_SIZE; /* @@ -395,41 +395,36 @@ static void pci1xxxx_process_write_data(struct uart_port *port, * one byte at a time. 
*/ while (valid_burst_count) { + u32 c; + if (*data_empty_count - UART_BURST_SIZE < 0) break; - if (xmit->tail > (UART_XMIT_SIZE - UART_BURST_SIZE)) + if (kfifo_len(&tport->xmit_fifo) < UART_BURST_SIZE) + break; + if (WARN_ON(kfifo_out(&tport->xmit_fifo, (u8 *)&c, sizeof(c)) != + sizeof(c))) break; - writel(*(unsigned int *)&xmit->buf[xmit->tail], - port->membase + UART_TX_BURST_FIFO); + writel(c, port->membase + UART_TX_BURST_FIFO); *valid_byte_count -= UART_BURST_SIZE; *data_empty_count -= UART_BURST_SIZE; valid_burst_count -= UART_BYTE_SIZE; - - xmit->tail = (xmit->tail + UART_BURST_SIZE) & - (UART_XMIT_SIZE - 1); } while (*valid_byte_count) { - if (*data_empty_count - UART_BYTE_SIZE < 0) + u8 c; + + if (!kfifo_get(&tport->xmit_fifo, &c)) break; - writeb(xmit->buf[xmit->tail], port->membase + - UART_TX_BYTE_FIFO); + writeb(c, port->membase + UART_TX_BYTE_FIFO); *data_empty_count -= UART_BYTE_SIZE; *valid_byte_count -= UART_BYTE_SIZE; - /* - * When the tail of the circular buffer is reached, the next - * byte is transferred to the beginning of the buffer. - */ - xmit->tail = (xmit->tail + UART_BYTE_SIZE) & - (UART_XMIT_SIZE - 1); - /* * If there are any pending burst count, data is handled by * transmitting DWORDs at a time. */ - if (valid_burst_count && (xmit->tail < - (UART_XMIT_SIZE - UART_BURST_SIZE))) + if (valid_burst_count && + kfifo_len(&tport->xmit_fifo) >= UART_BURST_SIZE) break; } } @@ -437,11 +432,9 @@ static void pci1xxxx_process_write_data(struct uart_port *port, static void pci1xxxx_tx_burst(struct uart_port *port, u32 uart_status) { struct uart_8250_port *up = up_to_u8250p(port); + struct tty_port *tport = &port->state->port; u32 valid_byte_count; int data_empty_count; - struct circ_buf *xmit; - - xmit = &port->state->xmit; if (port->x_char) { writeb(port->x_char, port->membase + UART_TX); @@ -450,25 +443,25 @@ static void pci1xxxx_tx_burst(struct uart_port *port, u32 uart_status) return; } - if ((uart_tx_stopped(port)) || (uart_circ_empty(xmit))) { + if ((uart_tx_stopped(port)) || kfifo_is_empty(&tport->xmit_fifo)) { port->ops->stop_tx(port); } else { data_empty_count = (pci1xxxx_read_burst_status(port) & UART_BST_STAT_TX_COUNT_MASK) >> 8; do { - valid_byte_count = uart_circ_chars_pending(xmit); + valid_byte_count = kfifo_len(&tport->xmit_fifo); - pci1xxxx_process_write_data(port, xmit, + pci1xxxx_process_write_data(port, &data_empty_count, &valid_byte_count); port->icount.tx++; - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) break; } while (data_empty_count && valid_byte_count); } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); /* @@ -476,7 +469,8 @@ static void pci1xxxx_tx_burst(struct uart_port *port, u32 uart_status) * the HW can go idle. So we get here once again with empty FIFO and * disable the interrupt and RPM in __stop_tx() */ - if (uart_circ_empty(xmit) && !(up->capabilities & UART_CAP_RPM)) + if (kfifo_is_empty(&tport->xmit_fifo) && + !(up->capabilities & UART_CAP_RPM)) port->ops->stop_tx(port); } diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index fc9dd5d45295..0c19451d0332 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1630,7 +1630,7 @@ static void serial8250_start_tx(struct uart_port *port) /* Port locked to synchronize UART_IER access against the console. 
*/ lockdep_assert_held_once(&port->lock); - if (!port->x_char && uart_circ_empty(&port->state->xmit)) + if (!port->x_char && kfifo_is_empty(&port->state->port.xmit_fifo)) return; serial8250_rpm_get_tx(up); @@ -1778,7 +1778,7 @@ EXPORT_SYMBOL_GPL(serial8250_rx_chars); void serial8250_tx_chars(struct uart_8250_port *up) { struct uart_port *port = &up->port; - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; int count; if (port->x_char) { @@ -1789,14 +1789,19 @@ void serial8250_tx_chars(struct uart_8250_port *up) serial8250_stop_tx(port); return; } - if (uart_circ_empty(xmit)) { + if (kfifo_is_empty(&tport->xmit_fifo)) { __stop_tx(up); return; } count = up->tx_loadsz; do { - serial_out(up, UART_TX, xmit->buf[xmit->tail]); + unsigned char c; + + if (!uart_fifo_get(port, &c)) + break; + + serial_out(up, UART_TX, c); if (up->bugs & UART_BUG_TXRACE) { /* * The Aspeed BMC virtual UARTs have a bug where data @@ -1809,9 +1814,7 @@ void serial8250_tx_chars(struct uart_8250_port *up) */ serial_in(up, UART_SCR); } - uart_xmit_advance(port, 1); - if (uart_circ_empty(xmit)) - break; + if ((up->capabilities & UART_CAP_HFIFO) && !uart_lsr_tx_empty(serial_in(up, UART_LSR))) break; @@ -1821,7 +1824,7 @@ void serial8250_tx_chars(struct uart_8250_port *up) break; } while (--count > 0); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); /* @@ -1829,7 +1832,8 @@ void serial8250_tx_chars(struct uart_8250_port *up) * HW can go idle. So we get here once again with empty FIFO and disable * the interrupt and RPM in __stop_tx() */ - if (uart_circ_empty(xmit) && !(up->capabilities & UART_CAP_RPM)) + if (kfifo_is_empty(&tport->xmit_fifo) && + !(up->capabilities & UART_CAP_RPM)) __stop_tx(up); } EXPORT_SYMBOL_GPL(serial8250_tx_chars); diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 2fa3fb30dc6c..51b202eb8296 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -535,6 +535,7 @@ static void pl011_start_tx_pio(struct uart_amba_port *uap); static void pl011_dma_tx_callback(void *data) { struct uart_amba_port *uap = data; + struct tty_port *tport = &uap->port.state->port; struct pl011_dmatx_data *dmatx = &uap->dmatx; unsigned long flags; u16 dmacr; @@ -558,7 +559,7 @@ static void pl011_dma_tx_callback(void *data) * get further refills (hence we check dmacr). */ if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) || - uart_circ_empty(&uap->port.state->xmit)) { + kfifo_is_empty(&tport->xmit_fifo)) { uap->dmatx.queued = false; uart_port_unlock_irqrestore(&uap->port, flags); return; @@ -588,7 +589,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap) struct dma_chan *chan = dmatx->chan; struct dma_device *dma_dev = chan->device; struct dma_async_tx_descriptor *desc; - struct circ_buf *xmit = &uap->port.state->xmit; + struct tty_port *tport = &uap->port.state->port; unsigned int count; /* @@ -597,7 +598,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap) * the standard interrupt handling. This ensures that we * issue a uart_write_wakeup() at the appropriate time. 
*/ - count = uart_circ_chars_pending(xmit); + count = kfifo_len(&tport->xmit_fifo); if (count < (uap->fifosize >> 1)) { uap->dmatx.queued = false; return 0; @@ -613,21 +614,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap) if (count > PL011_DMA_BUFFER_SIZE) count = PL011_DMA_BUFFER_SIZE; - if (xmit->tail < xmit->head) { - memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count); - } else { - size_t first = UART_XMIT_SIZE - xmit->tail; - size_t second; - - if (first > count) - first = count; - second = count - first; - - memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first); - if (second) - memcpy(&dmatx->buf[first], &xmit->buf[0], second); - } - + count = kfifo_out_peek(&tport->xmit_fifo, dmatx->buf, count); dmatx->len = count; dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count, DMA_TO_DEVICE); @@ -670,7 +657,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap) */ uart_xmit_advance(&uap->port, count); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&uap->port); return 1; @@ -1454,7 +1441,7 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c, /* Returns true if tx interrupts have to be (kept) enabled */ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq) { - struct circ_buf *xmit = &uap->port.state->xmit; + struct tty_port *tport = &uap->port.state->port; int count = uap->fifosize >> 1; if (uap->port.x_char) { @@ -1463,7 +1450,7 @@ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq) uap->port.x_char = 0; --count; } - if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) { + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(&uap->port)) { pl011_stop_tx(&uap->port); return false; } @@ -1472,20 +1459,25 @@ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq) if (pl011_dma_tx_irq(uap)) return true; - do { + while (1) { + unsigned char c; + if (likely(from_irq) && count-- == 0) break; - if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq)) + if (!kfifo_peek(&tport->xmit_fifo, &c)) + break; + + if (!pl011_tx_char(uap, c, from_irq)) break; - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - } while (!uart_circ_empty(xmit)); + kfifo_skip(&tport->xmit_fifo); + } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&uap->port); - if (uart_circ_empty(xmit)) { + if (kfifo_is_empty(&tport->xmit_fifo)) { pl011_stop_tx(&uap->port); return false; } diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c index 7790cbc57391..f2836ecc5c19 100644 --- a/drivers/tty/serial/ar933x_uart.c +++ b/drivers/tty/serial/ar933x_uart.c @@ -390,7 +390,7 @@ static void ar933x_uart_rx_chars(struct ar933x_uart_port *up) static void ar933x_uart_tx_chars(struct ar933x_uart_port *up) { - struct circ_buf *xmit = &up->port.state->xmit; + struct tty_port *tport = &up->port.state->port; struct serial_rs485 *rs485conf = &up->port.rs485; int count; bool half_duplex_send = false; @@ -399,7 +399,7 @@ static void ar933x_uart_tx_chars(struct ar933x_uart_port *up) return; if ((rs485conf->flags & SER_RS485_ENABLED) && - (up->port.x_char || !uart_circ_empty(xmit))) { + (up->port.x_char || !kfifo_is_empty(&tport->xmit_fifo))) { ar933x_uart_stop_rx_interrupt(up); gpiod_set_value(up->rts_gpiod, !!(rs485conf->flags & SER_RS485_RTS_ON_SEND)); half_duplex_send = true; @@ -408,6 +408,7 @@ static void ar933x_uart_tx_chars(struct ar933x_uart_port *up) count = 
up->port.fifosize; do { unsigned int rdata; + unsigned char c; rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG); if ((rdata & AR933X_UART_DATA_TX_CSR) == 0) @@ -420,18 +421,16 @@ static void ar933x_uart_tx_chars(struct ar933x_uart_port *up) continue; } - if (uart_circ_empty(xmit)) + if (!uart_fifo_get(&up->port, &c)) break; - ar933x_uart_putc(up, xmit->buf[xmit->tail]); - - uart_xmit_advance(&up->port, 1); + ar933x_uart_putc(up, c); } while (--count > 0); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&up->port); - if (!uart_circ_empty(xmit)) { + if (!kfifo_is_empty(&tport->xmit_fifo)) { ar933x_uart_start_tx_interrupt(up); } else if (half_duplex_send) { ar933x_uart_wait_tx_complete(up); diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c index 1aa5b2b49c26..5c4895d154c0 100644 --- a/drivers/tty/serial/arc_uart.c +++ b/drivers/tty/serial/arc_uart.c @@ -155,7 +155,7 @@ static unsigned int arc_serial_tx_empty(struct uart_port *port) */ static void arc_serial_tx_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; int sent = 0; unsigned char ch; @@ -164,9 +164,7 @@ static void arc_serial_tx_chars(struct uart_port *port) port->icount.tx++; port->x_char = 0; sent = 1; - } else if (!uart_circ_empty(xmit)) { - ch = xmit->buf[xmit->tail]; - uart_xmit_advance(port, 1); + } else if (uart_fifo_get(port, &ch)) { while (!(UART_GET_STATUS(port) & TXEMPTY)) cpu_relax(); UART_SET_DATA(port, ch); @@ -177,7 +175,7 @@ static void arc_serial_tx_chars(struct uart_port *port) * If num chars in xmit buffer are too few, ask tty layer for more. * By Hard ISR to schedule processing in software interrupt part */ - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); if (sent) diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 85667f709515..5bb5e4303754 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -857,7 +857,7 @@ static void atmel_complete_tx_dma(void *arg) { struct atmel_uart_port *atmel_port = arg; struct uart_port *port = &atmel_port->uart; - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; struct dma_chan *chan = atmel_port->chan_tx; unsigned long flags; @@ -873,15 +873,15 @@ static void atmel_complete_tx_dma(void *arg) atmel_port->desc_tx = NULL; spin_unlock(&atmel_port->lock_tx); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); /* - * xmit is a circular buffer so, if we have just send data from - * xmit->tail to the end of xmit->buf, now we have to transmit the - * remaining data from the beginning of xmit->buf to xmit->head. + * xmit is a circular buffer so, if we have just send data from the + * tail to the end, now we have to transmit the remaining data from the + * beginning to the head. 
*/ - if (!uart_circ_empty(xmit)) + if (!kfifo_is_empty(&tport->xmit_fifo)) atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); else if (atmel_uart_is_half_duplex(port)) { /* @@ -919,18 +919,18 @@ static void atmel_release_tx_dma(struct uart_port *port) static void atmel_tx_dma(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; struct dma_chan *chan = atmel_port->chan_tx; struct dma_async_tx_descriptor *desc; struct scatterlist sgl[2], *sg, *sg_tx = &atmel_port->sg_tx; - unsigned int tx_len, part1_len, part2_len, sg_len; + unsigned int tx_len, tail, part1_len, part2_len, sg_len; dma_addr_t phys_addr; /* Make sure we have an idle channel */ if (atmel_port->desc_tx != NULL) return; - if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) { + if (!kfifo_is_empty(&tport->xmit_fifo) && !uart_tx_stopped(port)) { /* * DMA is idle now. * Port xmit buffer is already mapped, @@ -940,9 +940,8 @@ static void atmel_tx_dma(struct uart_port *port) * Take the port lock to get a * consistent xmit buffer state. */ - tx_len = CIRC_CNT_TO_END(xmit->head, - xmit->tail, - UART_XMIT_SIZE); + tx_len = kfifo_out_linear(&tport->xmit_fifo, &tail, + UART_XMIT_SIZE); if (atmel_port->fifo_size) { /* multi data mode */ @@ -956,7 +955,7 @@ static void atmel_tx_dma(struct uart_port *port) sg_init_table(sgl, 2); sg_len = 0; - phys_addr = sg_dma_address(sg_tx) + xmit->tail; + phys_addr = sg_dma_address(sg_tx) + tail; if (part1_len) { sg = &sgl[sg_len++]; sg_dma_address(sg) = phys_addr; @@ -973,7 +972,7 @@ static void atmel_tx_dma(struct uart_port *port) /* * save tx_len so atmel_complete_tx_dma() will increase - * xmit->tail correctly + * tail correctly */ atmel_port->tx_len = tx_len; @@ -1003,13 +1002,14 @@ static void atmel_tx_dma(struct uart_port *port) dma_async_issue_pending(chan); } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); } static int atmel_prepare_tx_dma(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); + struct tty_port *tport = &port->state->port; struct device *mfd_dev = port->dev->parent; dma_cap_mask_t mask; struct dma_slave_config config; @@ -1031,11 +1031,11 @@ static int atmel_prepare_tx_dma(struct uart_port *port) spin_lock_init(&atmel_port->lock_tx); sg_init_table(&atmel_port->sg_tx, 1); /* UART circular tx buffer is an aligned page. */ - BUG_ON(!PAGE_ALIGNED(port->state->xmit.buf)); + BUG_ON(!PAGE_ALIGNED(tport->xmit_buf)); sg_set_page(&atmel_port->sg_tx, - virt_to_page(port->state->xmit.buf), + virt_to_page(tport->xmit_buf), UART_XMIT_SIZE, - offset_in_page(port->state->xmit.buf)); + offset_in_page(tport->xmit_buf)); nent = dma_map_sg(port->dev, &atmel_port->sg_tx, 1, @@ -1047,7 +1047,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port) } else { dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n", __func__, sg_dma_len(&atmel_port->sg_tx), - port->state->xmit.buf, + tport->xmit_buf, &sg_dma_address(&atmel_port->sg_tx)); } @@ -1459,9 +1459,8 @@ static void atmel_release_tx_pdc(struct uart_port *port) static void atmel_tx_pdc(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; - int count; /* nothing left to transmit? 
*/ if (atmel_uart_readl(port, ATMEL_PDC_TCR)) @@ -1474,17 +1473,19 @@ static void atmel_tx_pdc(struct uart_port *port) /* disable PDC transmit */ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS); - if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) { + if (!kfifo_is_empty(&tport->xmit_fifo) && !uart_tx_stopped(port)) { + unsigned int count, tail; + dma_sync_single_for_device(port->dev, pdc->dma_addr, pdc->dma_size, DMA_TO_DEVICE); - count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + count = kfifo_out_linear(&tport->xmit_fifo, &tail, + UART_XMIT_SIZE); pdc->ofs = count; - atmel_uart_writel(port, ATMEL_PDC_TPR, - pdc->dma_addr + xmit->tail); + atmel_uart_writel(port, ATMEL_PDC_TPR, pdc->dma_addr + tail); atmel_uart_writel(port, ATMEL_PDC_TCR, count); /* re-enable PDC transmit */ atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); @@ -1498,7 +1499,7 @@ static void atmel_tx_pdc(struct uart_port *port) } } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); } @@ -1506,9 +1507,9 @@ static int atmel_prepare_tx_pdc(struct uart_port *port) { struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; - pdc->buf = xmit->buf; + pdc->buf = tport->xmit_buf; pdc->dma_addr = dma_map_single(port->dev, pdc->buf, UART_XMIT_SIZE, diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c index 7927725b8957..30425a3d19fb 100644 --- a/drivers/tty/serial/clps711x.c +++ b/drivers/tty/serial/clps711x.c @@ -146,7 +146,8 @@ static irqreturn_t uart_clps711x_int_tx(int irq, void *dev_id) { struct uart_port *port = dev_id; struct clps711x_port *s = dev_get_drvdata(port->dev); - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; + unsigned char c; if (port->x_char) { writew(port->x_char, port->membase + UARTDR_OFFSET); @@ -155,7 +156,7 @@ static irqreturn_t uart_clps711x_int_tx(int irq, void *dev_id) return IRQ_HANDLED; } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) { if (s->tx_enabled) { disable_irq_nosync(port->irq); s->tx_enabled = 0; @@ -163,18 +164,17 @@ static irqreturn_t uart_clps711x_int_tx(int irq, void *dev_id) return IRQ_HANDLED; } - while (!uart_circ_empty(xmit)) { + while (uart_fifo_get(port, &c)) { u32 sysflg = 0; - writew(xmit->buf[xmit->tail], port->membase + UARTDR_OFFSET); - uart_xmit_advance(port, 1); + writew(c, port->membase + UARTDR_OFFSET); regmap_read(s->syscon, SYSFLG_OFFSET, &sysflg); if (sysflg & SYSFLG_UTXFF) break; } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); return IRQ_HANDLED; diff --git a/drivers/tty/serial/cpm_uart.c b/drivers/tty/serial/cpm_uart.c index df56c6c5afd0..a927478f581d 100644 --- a/drivers/tty/serial/cpm_uart.c +++ b/drivers/tty/serial/cpm_uart.c @@ -648,7 +648,7 @@ static int cpm_uart_tx_pump(struct uart_port *port) int count; struct uart_cpm_port *pinfo = container_of(port, struct uart_cpm_port, port); - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; /* Handle xon/xoff */ if (port->x_char) { @@ -673,7 +673,7 @@ static int cpm_uart_tx_pump(struct uart_port *port) return 1; } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + if 
(kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) { cpm_uart_stop_tx(port); return 0; } @@ -681,16 +681,10 @@ static int cpm_uart_tx_pump(struct uart_port *port) /* Pick next descriptor and fill from buffer */ bdp = pinfo->tx_cur; - while (!(in_be16(&bdp->cbd_sc) & BD_SC_READY) && !uart_circ_empty(xmit)) { - count = 0; + while (!(in_be16(&bdp->cbd_sc) & BD_SC_READY) && + !kfifo_is_empty(&tport->xmit_fifo)) { p = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo); - while (count < pinfo->tx_fifosize) { - *p++ = xmit->buf[xmit->tail]; - uart_xmit_advance(port, 1); - count++; - if (uart_circ_empty(xmit)) - break; - } + count = uart_fifo_out(port, p, pinfo->tx_fifosize); out_be16(&bdp->cbd_datlen, count); setbits16(&bdp->cbd_sc, BD_SC_READY); /* Get next BD. */ @@ -701,10 +695,10 @@ static int cpm_uart_tx_pump(struct uart_port *port) } pinfo->tx_cur = bdp; - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); - if (uart_circ_empty(xmit)) { + if (kfifo_is_empty(&tport->xmit_fifo)) { cpm_uart_stop_tx(port); return 0; } diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c index e419c4bde8b7..2ccd13cc0a89 100644 --- a/drivers/tty/serial/digicolor-usart.c +++ b/drivers/tty/serial/digicolor-usart.c @@ -179,8 +179,9 @@ static void digicolor_uart_rx(struct uart_port *port) static void digicolor_uart_tx(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; unsigned long flags; + unsigned char c; if (digicolor_uart_tx_full(port)) return; @@ -194,20 +195,19 @@ static void digicolor_uart_tx(struct uart_port *port) goto out; } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) { digicolor_uart_stop_tx(port); goto out; } - while (!uart_circ_empty(xmit)) { - writeb(xmit->buf[xmit->tail], port->membase + UA_EMI_REC); - uart_xmit_advance(port, 1); + while (uart_fifo_get(port, &c)) { + writeb(c, port->membase + UA_EMI_REC); if (digicolor_uart_tx_full(port)) break; } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); out: diff --git a/drivers/tty/serial/dz.c b/drivers/tty/serial/dz.c index 6df7af9edc1c..eba91daedef8 100644 --- a/drivers/tty/serial/dz.c +++ b/drivers/tty/serial/dz.c @@ -252,13 +252,13 @@ static inline void dz_receive_chars(struct dz_mux *mux) static inline void dz_transmit_chars(struct dz_mux *mux) { struct dz_port *dport = &mux->dport[0]; - struct circ_buf *xmit; + struct tty_port *tport; unsigned char tmp; u16 status; status = dz_in(dport, DZ_CSR); dport = &mux->dport[LINE(status)]; - xmit = &dport->port.state->xmit; + tport = &dport->port.state->port; if (dport->port.x_char) { /* XON/XOFF chars */ dz_out(dport, DZ_TDR, dport->port.x_char); @@ -267,7 +267,8 @@ static inline void dz_transmit_chars(struct dz_mux *mux) return; } /* If nothing to do or stopped or hardware stopped. */ - if (uart_circ_empty(xmit) || uart_tx_stopped(&dport->port)) { + if (uart_tx_stopped(&dport->port) || + !uart_fifo_get(&dport->port, &tmp)) { uart_port_lock(&dport->port); dz_stop_tx(&dport->port); uart_port_unlock(&dport->port); @@ -278,15 +279,13 @@ static inline void dz_transmit_chars(struct dz_mux *mux) * If something to do... 
(remember the dz has no output fifo, * so we go one char at a time) :-< */ - tmp = xmit->buf[xmit->tail]; dz_out(dport, DZ_TDR, tmp); - uart_xmit_advance(&dport->port, 1); - if (uart_circ_chars_pending(xmit) < DZ_WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < DZ_WAKEUP_CHARS) uart_write_wakeup(&dport->port); /* Are we are done. */ - if (uart_circ_empty(xmit)) { + if (kfifo_is_empty(&tport->xmit_fifo)) { uart_port_lock(&dport->port); dz_stop_tx(&dport->port); uart_port_unlock(&dport->port); diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c index 5426322b5f0c..e972df4b188d 100644 --- a/drivers/tty/serial/fsl_linflexuart.c +++ b/drivers/tty/serial/fsl_linflexuart.c @@ -174,17 +174,18 @@ static void linflex_put_char(struct uart_port *sport, unsigned char c) static inline void linflex_transmit_buffer(struct uart_port *sport) { - struct circ_buf *xmit = &sport->state->xmit; + struct tty_port *tport = &sport->state->port; + unsigned char c; - while (!uart_circ_empty(xmit)) { - linflex_put_char(sport, xmit->buf[xmit->tail]); - uart_xmit_advance(sport, 1); + while (uart_fifo_get(sport, &c)) { + linflex_put_char(sport, c); + sport->icount.tx++; } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(sport); - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) linflex_stop_tx(sport); } @@ -200,7 +201,7 @@ static void linflex_start_tx(struct uart_port *port) static irqreturn_t linflex_txint(int irq, void *dev_id) { struct uart_port *sport = dev_id; - struct circ_buf *xmit = &sport->state->xmit; + struct tty_port *tport = &sport->state->port; unsigned long flags; uart_port_lock_irqsave(sport, &flags); @@ -210,7 +211,7 @@ static irqreturn_t linflex_txint(int irq, void *dev_id) goto out; } - if (uart_circ_empty(xmit) || uart_tx_stopped(sport)) { + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(sport)) { linflex_stop_tx(sport); goto out; } diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index bbcbc91482af..ae5e1ecc48fc 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -473,7 +474,7 @@ static void lpuart32_stop_rx(struct uart_port *port) static void lpuart_dma_tx(struct lpuart_port *sport) { - struct circ_buf *xmit = &sport->port.state->xmit; + struct tty_port *tport = &sport->port.state->port; struct scatterlist *sgl = sport->tx_sgl; struct device *dev = sport->port.dev; struct dma_chan *chan = sport->dma_tx_chan; @@ -482,18 +483,10 @@ static void lpuart_dma_tx(struct lpuart_port *sport) if (sport->dma_tx_in_progress) return; - sport->dma_tx_bytes = uart_circ_chars_pending(xmit); - - if (xmit->tail < xmit->head || xmit->head == 0) { - sport->dma_tx_nents = 1; - sg_init_one(sgl, xmit->buf + xmit->tail, sport->dma_tx_bytes); - } else { - sport->dma_tx_nents = 2; - sg_init_table(sgl, 2); - sg_set_buf(sgl, xmit->buf + xmit->tail, - UART_XMIT_SIZE - xmit->tail); - sg_set_buf(sgl + 1, xmit->buf, xmit->head); - } + sg_init_table(sgl, ARRAY_SIZE(sport->tx_sgl)); + sport->dma_tx_bytes = kfifo_len(&tport->xmit_fifo); + sport->dma_tx_nents = kfifo_dma_out_prepare(&tport->xmit_fifo, sgl, + ARRAY_SIZE(sport->tx_sgl), sport->dma_tx_bytes); ret = dma_map_sg(chan->device->dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE); @@ -521,14 +514,15 @@ static void lpuart_dma_tx(struct lpuart_port *sport) static bool lpuart_stopped_or_empty(struct uart_port *port) 
{ - return uart_circ_empty(&port->state->xmit) || uart_tx_stopped(port); + return kfifo_is_empty(&port->state->port.xmit_fifo) || + uart_tx_stopped(port); } static void lpuart_dma_tx_complete(void *arg) { struct lpuart_port *sport = arg; struct scatterlist *sgl = &sport->tx_sgl[0]; - struct circ_buf *xmit = &sport->port.state->xmit; + struct tty_port *tport = &sport->port.state->port; struct dma_chan *chan = sport->dma_tx_chan; unsigned long flags; @@ -545,7 +539,7 @@ static void lpuart_dma_tx_complete(void *arg) sport->dma_tx_in_progress = false; uart_port_unlock_irqrestore(&sport->port, flags); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&sport->port); if (waitqueue_active(&sport->dma_wait)) { @@ -756,8 +750,9 @@ static inline void lpuart_transmit_buffer(struct lpuart_port *sport) static inline void lpuart32_transmit_buffer(struct lpuart_port *sport) { - struct circ_buf *xmit = &sport->port.state->xmit; + struct tty_port *tport = &sport->port.state->port; unsigned long txcnt; + unsigned char c; if (sport->port.x_char) { lpuart32_write(&sport->port, sport->port.x_char, UARTDATA); @@ -774,18 +769,18 @@ static inline void lpuart32_transmit_buffer(struct lpuart_port *sport) txcnt = lpuart32_read(&sport->port, UARTWATER); txcnt = txcnt >> UARTWATER_TXCNT_OFF; txcnt &= UARTWATER_COUNT_MASK; - while (!uart_circ_empty(xmit) && (txcnt < sport->txfifo_size)) { - lpuart32_write(&sport->port, xmit->buf[xmit->tail], UARTDATA); - uart_xmit_advance(&sport->port, 1); + while (txcnt < sport->txfifo_size && + uart_fifo_get(&sport->port, &c)) { + lpuart32_write(&sport->port, c, UARTDATA); txcnt = lpuart32_read(&sport->port, UARTWATER); txcnt = txcnt >> UARTWATER_TXCNT_OFF; txcnt &= UARTWATER_COUNT_MASK; } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&sport->port); - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) lpuart32_stop_tx(&sport->port); } diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c index a75eafbcbea3..29e42831df39 100644 --- a/drivers/tty/serial/icom.c +++ b/drivers/tty/serial/icom.c @@ -877,10 +877,10 @@ unlock: static int icom_write(struct uart_port *port) { struct icom_port *icom_port = to_icom_port(port); + struct tty_port *tport = &port->state->port; unsigned long data_count; unsigned char cmdReg; unsigned long offset; - int temp_tail = port->state->xmit.tail; trace(icom_port, "WRITE", 0); @@ -890,16 +890,8 @@ static int icom_write(struct uart_port *port) return 0; } - data_count = 0; - while ((port->state->xmit.head != temp_tail) && - (data_count <= XMIT_BUFF_SZ)) { - - icom_port->xmit_buf[data_count++] = - port->state->xmit.buf[temp_tail]; - - temp_tail++; - temp_tail &= (UART_XMIT_SIZE - 1); - } + data_count = kfifo_out_peek(&tport->xmit_fifo, icom_port->xmit_buf, + XMIT_BUFF_SZ); if (data_count) { icom_port->statStg->xmit[0].flags = @@ -956,7 +948,8 @@ static inline void check_modem_status(struct icom_port *icom_port) static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port) { - u16 count, i; + struct tty_port *tport = &icom_port->uart_port.state->port; + u16 count; if (port_int_reg & (INT_XMIT_COMPLETED)) { trace(icom_port, "XMIT_COMPLETE", 0); @@ -968,13 +961,7 @@ static void xmit_interrupt(u16 port_int_reg, struct icom_port *icom_port) count = le16_to_cpu(icom_port->statStg->xmit[0].leLength); icom_port->uart_port.icount.tx += count; - for (i=0; iuart_port.state->xmit); i++) { - 
- icom_port->uart_port.state->xmit.tail++; - icom_port->uart_port.state->xmit.tail &= - (UART_XMIT_SIZE - 1); - } + kfifo_skip_count(&tport->xmit_fifo, count); if (!icom_write(&icom_port->uart_port)) /* activate write queue */ diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index e14813250616..56b76a221082 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -8,6 +8,7 @@ * Copyright (C) 2004 Pengutronix */ +#include #include #include #include @@ -521,7 +522,8 @@ static void imx_uart_dma_tx(struct imx_port *sport); /* called with port.lock taken and irqs off */ static inline void imx_uart_transmit_buffer(struct imx_port *sport) { - struct circ_buf *xmit = &sport->port.state->xmit; + struct tty_port *tport = &sport->port.state->port; + unsigned char c; if (sport->port.x_char) { /* Send next char */ @@ -531,7 +533,8 @@ static inline void imx_uart_transmit_buffer(struct imx_port *sport) return; } - if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) { + if (kfifo_is_empty(&tport->xmit_fifo) || + uart_tx_stopped(&sport->port)) { imx_uart_stop_tx(&sport->port); return; } @@ -555,26 +558,22 @@ static inline void imx_uart_transmit_buffer(struct imx_port *sport) return; } - while (!uart_circ_empty(xmit) && - !(imx_uart_readl(sport, imx_uart_uts_reg(sport)) & UTS_TXFULL)) { - /* send xmit->buf[xmit->tail] - * out the port here */ - imx_uart_writel(sport, xmit->buf[xmit->tail], URTX0); - uart_xmit_advance(&sport->port, 1); - } + while (!(imx_uart_readl(sport, imx_uart_uts_reg(sport)) & UTS_TXFULL) && + uart_fifo_get(&sport->port, &c)) + imx_uart_writel(sport, c, URTX0); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&sport->port); - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) imx_uart_stop_tx(&sport->port); } static void imx_uart_dma_tx_callback(void *data) { struct imx_port *sport = data; + struct tty_port *tport = &sport->port.state->port; struct scatterlist *sgl = &sport->tx_sgl[0]; - struct circ_buf *xmit = &sport->port.state->xmit; unsigned long flags; u32 ucr1; @@ -592,10 +591,11 @@ static void imx_uart_dma_tx_callback(void *data) sport->dma_is_txing = 0; - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&sport->port); - if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port)) + if (!kfifo_is_empty(&tport->xmit_fifo) && + !uart_tx_stopped(&sport->port)) imx_uart_dma_tx(sport); else if (sport->port.rs485.flags & SER_RS485_ENABLED) { u32 ucr4 = imx_uart_readl(sport, UCR4); @@ -609,7 +609,7 @@ static void imx_uart_dma_tx_callback(void *data) /* called with port.lock taken and irqs off */ static void imx_uart_dma_tx(struct imx_port *sport) { - struct circ_buf *xmit = &sport->port.state->xmit; + struct tty_port *tport = &sport->port.state->port; struct scatterlist *sgl = sport->tx_sgl; struct dma_async_tx_descriptor *desc; struct dma_chan *chan = sport->dma_chan_tx; @@ -624,18 +624,10 @@ static void imx_uart_dma_tx(struct imx_port *sport) ucr4 &= ~UCR4_TCEN; imx_uart_writel(sport, ucr4, UCR4); - sport->tx_bytes = uart_circ_chars_pending(xmit); - - if (xmit->tail < xmit->head || xmit->head == 0) { - sport->dma_tx_nents = 1; - sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes); - } else { - sport->dma_tx_nents = 2; - sg_init_table(sgl, 2); - sg_set_buf(sgl, xmit->buf + xmit->tail, - UART_XMIT_SIZE - xmit->tail); - sg_set_buf(sgl + 1, xmit->buf, xmit->head); - } + 
sg_init_table(sgl, ARRAY_SIZE(sport->tx_sgl)); + sport->tx_bytes = kfifo_len(&tport->xmit_fifo); + sport->dma_tx_nents = kfifo_dma_out_prepare(&tport->xmit_fifo, sgl, + ARRAY_SIZE(sport->tx_sgl), sport->tx_bytes); ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE); if (ret == 0) { @@ -653,8 +645,7 @@ static void imx_uart_dma_tx(struct imx_port *sport) desc->callback = imx_uart_dma_tx_callback; desc->callback_param = sport; - dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n", - uart_circ_chars_pending(xmit)); + dev_dbg(dev, "TX: prepare to send %u bytes by DMA.\n", sport->tx_bytes); ucr1 = imx_uart_readl(sport, UCR1); ucr1 |= UCR1_TXDMAEN; @@ -671,9 +662,10 @@ static void imx_uart_dma_tx(struct imx_port *sport) static void imx_uart_start_tx(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; + struct tty_port *tport = &sport->port.state->port; u32 ucr1; - if (!sport->port.x_char && uart_circ_empty(&port->state->xmit)) + if (!sport->port.x_char && kfifo_is_empty(&tport->xmit_fifo)) return; /* @@ -749,7 +741,7 @@ static void imx_uart_start_tx(struct uart_port *port) return; } - if (!uart_circ_empty(&port->state->xmit) && + if (!kfifo_is_empty(&tport->xmit_fifo) && !uart_tx_stopped(port)) imx_uart_dma_tx(sport); return; diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c index 320b29cd4683..c2cae50f06f3 100644 --- a/drivers/tty/serial/ip22zilog.c +++ b/drivers/tty/serial/ip22zilog.c @@ -355,7 +355,8 @@ static void ip22zilog_status_handle(struct uart_ip22zilog_port *up, static void ip22zilog_transmit_chars(struct uart_ip22zilog_port *up, struct zilog_channel *channel) { - struct circ_buf *xmit; + struct tty_port *tport; + unsigned char c; if (ZS_IS_CONS(up)) { unsigned char status = readb(&channel->control); @@ -398,20 +399,18 @@ static void ip22zilog_transmit_chars(struct uart_ip22zilog_port *up, if (up->port.state == NULL) goto ack_tx_int; - xmit = &up->port.state->xmit; - if (uart_circ_empty(xmit)) - goto ack_tx_int; + tport = &up->port.state->port; if (uart_tx_stopped(&up->port)) goto ack_tx_int; + if (!uart_fifo_get(&up->port, &c)) + goto ack_tx_int; up->flags |= IP22ZILOG_FLAG_TX_ACTIVE; - writeb(xmit->buf[xmit->tail], &channel->data); + writeb(c, &channel->data); ZSDELAY(); ZS_WSYNC(channel); - uart_xmit_advance(&up->port, 1); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&up->port); return; @@ -600,17 +599,16 @@ static void ip22zilog_start_tx(struct uart_port *port) port->icount.tx++; port->x_char = 0; } else { - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; + unsigned char c; - if (uart_circ_empty(xmit)) + if (!uart_fifo_get(port, &c)) return; - writeb(xmit->buf[xmit->tail], &channel->data); + writeb(c, &channel->data); ZSDELAY(); ZS_WSYNC(channel); - uart_xmit_advance(port, 1); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&up->port); } } diff --git a/drivers/tty/serial/jsm/jsm_cls.c b/drivers/tty/serial/jsm/jsm_cls.c index ddbd42c09637..6e40792f92cf 100644 --- a/drivers/tty/serial/jsm/jsm_cls.c +++ b/drivers/tty/serial/jsm/jsm_cls.c @@ -443,20 +443,14 @@ static void cls_copy_data_from_uart_to_queue(struct jsm_channel *ch) static void cls_copy_data_from_queue_to_uart(struct jsm_channel *ch) { - u16 tail; + struct tty_port *tport; int n; - int qlen; u32 len_written = 0; - struct circ_buf *circ; if (!ch) return; - circ = 
&ch->uart_port.state->xmit; - - /* No data to write to the UART */ - if (uart_circ_empty(circ)) - return; + tport = &ch->uart_port.state->port; /* If port is "stopped", don't send any data to the UART */ if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_BREAK_SENDING)) @@ -467,29 +461,22 @@ static void cls_copy_data_from_queue_to_uart(struct jsm_channel *ch) return; n = 32; + while (n > 0) { + unsigned char c; - /* cache tail of queue */ - tail = circ->tail & (UART_XMIT_SIZE - 1); - qlen = uart_circ_chars_pending(circ); - - /* Find minimum of the FIFO space, versus queue length */ - n = min(n, qlen); + if (!kfifo_get(&tport->xmit_fifo, &c)) + break; - while (n > 0) { - writeb(circ->buf[tail], &ch->ch_cls_uart->txrx); - tail = (tail + 1) & (UART_XMIT_SIZE - 1); + writeb(c, &ch->ch_cls_uart->txrx); n--; ch->ch_txcount++; len_written++; } - /* Update the final tail */ - circ->tail = tail & (UART_XMIT_SIZE - 1); - if (len_written > ch->ch_t_tlevel) ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); - if (uart_circ_empty(circ)) + if (kfifo_is_empty(&tport->xmit_fifo)) uart_write_wakeup(&ch->uart_port); } diff --git a/drivers/tty/serial/jsm/jsm_neo.c b/drivers/tty/serial/jsm/jsm_neo.c index 1fa10f19368f..e8e13bf056e2 100644 --- a/drivers/tty/serial/jsm/jsm_neo.c +++ b/drivers/tty/serial/jsm/jsm_neo.c @@ -474,21 +474,21 @@ static void neo_copy_data_from_uart_to_queue(struct jsm_channel *ch) static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch) { - u16 head; - u16 tail; + struct tty_port *tport; + unsigned char *tail; + unsigned char c; int n; int s; int qlen; u32 len_written = 0; - struct circ_buf *circ; if (!ch) return; - circ = &ch->uart_port.state->xmit; + tport = &ch->uart_port.state->port; /* No data to write to the UART */ - if (uart_circ_empty(circ)) + if (kfifo_is_empty(&tport->xmit_fifo)) return; /* If port is "stopped", don't send any data to the UART */ @@ -504,10 +504,9 @@ static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch) if (ch->ch_cached_lsr & UART_LSR_THRE) { ch->ch_cached_lsr &= ~(UART_LSR_THRE); - writeb(circ->buf[circ->tail], &ch->ch_neo_uart->txrx); - jsm_dbg(WRITE, &ch->ch_bd->pci_dev, - "Tx data: %x\n", circ->buf[circ->tail]); - circ->tail = (circ->tail + 1) & (UART_XMIT_SIZE - 1); + WARN_ON_ONCE(!kfifo_get(&tport->xmit_fifo, &c)); + writeb(c, &ch->ch_neo_uart->txrx); + jsm_dbg(WRITE, &ch->ch_bd->pci_dev, "Tx data: %x\n", c); ch->ch_txcount++; } return; @@ -520,38 +519,27 @@ static void neo_copy_data_from_queue_to_uart(struct jsm_channel *ch) return; n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel; - - /* cache head and tail of queue */ - head = circ->head & (UART_XMIT_SIZE - 1); - tail = circ->tail & (UART_XMIT_SIZE - 1); - qlen = uart_circ_chars_pending(circ); + qlen = kfifo_len(&tport->xmit_fifo); /* Find minimum of the FIFO space, versus queue length */ n = min(n, qlen); while (n > 0) { - - s = ((head >= tail) ? 
head : UART_XMIT_SIZE) - tail; - s = min(s, n); - + s = kfifo_out_linear_ptr(&tport->xmit_fifo, &tail, n); if (s <= 0) break; - memcpy_toio(&ch->ch_neo_uart->txrxburst, circ->buf + tail, s); - /* Add and flip queue if needed */ - tail = (tail + s) & (UART_XMIT_SIZE - 1); + memcpy_toio(&ch->ch_neo_uart->txrxburst, tail, s); + kfifo_skip_count(&tport->xmit_fifo, s); n -= s; ch->ch_txcount += s; len_written += s; } - /* Update the final tail */ - circ->tail = tail & (UART_XMIT_SIZE - 1); - if (len_written >= ch->ch_t_tlevel) ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM); - if (uart_circ_empty(circ)) + if (kfifo_is_empty(&tport->xmit_fifo)) uart_write_wakeup(&ch->uart_port); } diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c index 5efb2b593be3..b83eee37c17d 100644 --- a/drivers/tty/serial/max3100.c +++ b/drivers/tty/serial/max3100.c @@ -257,10 +257,11 @@ static int max3100_handlerx(struct max3100_port *s, u16 rx) static void max3100_work(struct work_struct *w) { struct max3100_port *s = container_of(w, struct max3100_port, work); + struct tty_port *tport = &s->port.state->port; + unsigned char ch; int rxchars; u16 tx, rx; int conf, cconf, crts; - struct circ_buf *xmit = &s->port.state->xmit; dev_dbg(&s->spi->dev, "%s\n", __func__); @@ -290,10 +291,9 @@ static void max3100_work(struct work_struct *w) tx = s->port.x_char; s->port.icount.tx++; s->port.x_char = 0; - } else if (!uart_circ_empty(xmit) && - !uart_tx_stopped(&s->port)) { - tx = xmit->buf[xmit->tail]; - uart_xmit_advance(&s->port, 1); + } else if (!uart_tx_stopped(&s->port) && + uart_fifo_get(&s->port, &ch)) { + tx = ch; } if (tx != 0xffff) { max3100_calc_parity(s, &tx); @@ -307,13 +307,13 @@ static void max3100_work(struct work_struct *w) tty_flip_buffer_push(&s->port.state->port); rxchars = 0; } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&s->port); } while (!s->force_end_work && !freezing(current) && ((rx & MAX3100_R) || - (!uart_circ_empty(xmit) && + (!kfifo_is_empty(&tport->xmit_fifo) && !uart_tx_stopped(&s->port)))); if (rxchars > 0) diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c index 14dd9cfaa9f7..f0eb96429dae 100644 --- a/drivers/tty/serial/max310x.c +++ b/drivers/tty/serial/max310x.c @@ -747,8 +747,9 @@ static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen) static void max310x_handle_tx(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; - unsigned int txlen, to_send, until_end; + struct tty_port *tport = &port->state->port; + unsigned int txlen, to_send; + unsigned char *tail; if (unlikely(port->x_char)) { max310x_port_write(port, MAX310X_THR_REG, port->x_char); @@ -757,32 +758,26 @@ static void max310x_handle_tx(struct uart_port *port) return; } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) return; - /* Get length of data pending in circular buffer */ - to_send = uart_circ_chars_pending(xmit); - until_end = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); - if (likely(to_send)) { + /* + * It's a circ buffer -- wrap around. + * We could do that in one SPI transaction, but meh. + */ + while (!kfifo_is_empty(&tport->xmit_fifo)) { /* Limit to space available in TX FIFO */ txlen = max310x_port_read(port, MAX310X_TXFIFOLVL_REG); txlen = port->fifosize - txlen; - to_send = (to_send > txlen) ? 
txlen : to_send; - - if (until_end < to_send) { - /* - * It's a circ buffer -- wrap around. - * We could do that in one SPI transaction, but meh. - */ - max310x_batch_write(port, xmit->buf + xmit->tail, until_end); - max310x_batch_write(port, xmit->buf, to_send - until_end); - } else { - max310x_batch_write(port, xmit->buf + xmit->tail, to_send); - } + if (!txlen) + break; + + to_send = kfifo_out_linear_ptr(&tport->xmit_fifo, &tail, txlen); + max310x_batch_write(port, tail, to_send); uart_xmit_advance(port, to_send); } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); } diff --git a/drivers/tty/serial/men_z135_uart.c b/drivers/tty/serial/men_z135_uart.c index 8048fa542fc4..4bff422bb1bc 100644 --- a/drivers/tty/serial/men_z135_uart.c +++ b/drivers/tty/serial/men_z135_uart.c @@ -293,17 +293,14 @@ static void men_z135_handle_rx(struct men_z135_port *uart) static void men_z135_handle_tx(struct men_z135_port *uart) { struct uart_port *port = &uart->port; - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; + unsigned char *tail; + unsigned int n, txfree; u32 txc; u32 wptr; int qlen; - int n; - int txfree; - int head; - int tail; - int s; - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) goto out; if (uart_tx_stopped(port)) @@ -313,7 +310,7 @@ static void men_z135_handle_tx(struct men_z135_port *uart) goto out; /* calculate bytes to copy */ - qlen = uart_circ_chars_pending(xmit); + qlen = kfifo_len(&tport->xmit_fifo); if (qlen <= 0) goto out; @@ -345,21 +342,18 @@ static void men_z135_handle_tx(struct men_z135_port *uart) if (n <= 0) goto irq_en; - head = xmit->head & (UART_XMIT_SIZE - 1); - tail = xmit->tail & (UART_XMIT_SIZE - 1); - - s = ((head >= tail) ? 
head : UART_XMIT_SIZE) - tail; - n = min(n, s); + n = kfifo_out_linear_ptr(&tport->xmit_fifo, &tail, + min_t(unsigned int, UART_XMIT_SIZE, n)); + memcpy_toio(port->membase + MEN_Z135_TX_RAM, tail, n); - memcpy_toio(port->membase + MEN_Z135_TX_RAM, &xmit->buf[xmit->tail], n); iowrite32(n & 0x3ff, port->membase + MEN_Z135_TX_CTRL); uart_xmit_advance(port, n); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); irq_en: - if (!uart_circ_empty(xmit)) + if (!kfifo_is_empty(&tport->xmit_fifo)) men_z135_reg_set(uart, MEN_Z135_CONF_REG, MEN_Z135_IER_TXCIEN); else men_z135_reg_clr(uart, MEN_Z135_CONF_REG, MEN_Z135_IER_TXCIEN); diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c index 6feac459c0cf..4587ed4d4d5d 100644 --- a/drivers/tty/serial/meson_uart.c +++ b/drivers/tty/serial/meson_uart.c @@ -141,8 +141,8 @@ static void meson_uart_shutdown(struct uart_port *port) static void meson_uart_start_tx(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; - unsigned int ch; + struct tty_port *tport = &port->state->port; + unsigned char ch; u32 val; if (uart_tx_stopped(port)) { @@ -158,21 +158,20 @@ static void meson_uart_start_tx(struct uart_port *port) continue; } - if (uart_circ_empty(xmit)) + if (!uart_fifo_get(port, &ch)) break; - ch = xmit->buf[xmit->tail]; writel(ch, port->membase + AML_UART_WFIFO); uart_xmit_advance(port, 1); } - if (!uart_circ_empty(xmit)) { + if (!kfifo_is_empty(&tport->xmit_fifo)) { val = readl(port->membase + AML_UART_CONTROL); val |= AML_UART_TX_INT_EN; writel(val, port->membase + AML_UART_CONTROL); } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); } diff --git a/drivers/tty/serial/milbeaut_usio.c b/drivers/tty/serial/milbeaut_usio.c index da4c6f7e2a30..fb082ee73d5b 100644 --- a/drivers/tty/serial/milbeaut_usio.c +++ b/drivers/tty/serial/milbeaut_usio.c @@ -72,7 +72,7 @@ static void mlb_usio_stop_tx(struct uart_port *port) static void mlb_usio_tx_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; int count; writew(readw(port->membase + MLB_USIO_REG_FCR) & ~MLB_USIO_FCR_FTIE, @@ -87,7 +87,7 @@ static void mlb_usio_tx_chars(struct uart_port *port) port->x_char = 0; return; } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) { mlb_usio_stop_tx(port); return; } @@ -96,12 +96,13 @@ static void mlb_usio_tx_chars(struct uart_port *port) (readw(port->membase + MLB_USIO_REG_FBYTE) & 0xff); do { - writew(xmit->buf[xmit->tail], port->membase + MLB_USIO_REG_DR); + unsigned char ch; - uart_xmit_advance(port, 1); - if (uart_circ_empty(xmit)) + if (!uart_fifo_get(port, &ch)) break; + writew(ch, port->membase + MLB_USIO_REG_DR); + port->icount.tx++; } while (--count > 0); writew(readw(port->membase + MLB_USIO_REG_FCR) & ~MLB_USIO_FCR_FDRQ, @@ -110,10 +111,10 @@ static void mlb_usio_tx_chars(struct uart_port *port) writeb(readb(port->membase + MLB_USIO_REG_SCR) | MLB_USIO_SCR_TBIE, port->membase + MLB_USIO_REG_SCR); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) mlb_usio_stop_tx(port); } diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c index 7bf30e632313..ae7a8e3cf467 100644 
--- a/drivers/tty/serial/msm_serial.c +++ b/drivers/tty/serial/msm_serial.c @@ -452,7 +452,7 @@ static void msm_complete_tx_dma(void *args) { struct msm_port *msm_port = args; struct uart_port *port = &msm_port->uart; - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; struct msm_dma *dma = &msm_port->tx_dma; struct dma_tx_state state; unsigned long flags; @@ -486,7 +486,7 @@ static void msm_complete_tx_dma(void *args) msm_port->imr |= MSM_UART_IMR_TXLEV; msm_write(port, msm_port->imr, MSM_UART_IMR); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); msm_handle_tx(port); @@ -496,14 +496,14 @@ done: static int msm_handle_tx_dma(struct msm_port *msm_port, unsigned int count) { - struct circ_buf *xmit = &msm_port->uart.state->xmit; struct uart_port *port = &msm_port->uart; + struct tty_port *tport = &port->state->port; struct msm_dma *dma = &msm_port->tx_dma; int ret; u32 val; sg_init_table(&dma->tx_sg, 1); - sg_set_buf(&dma->tx_sg, &xmit->buf[xmit->tail], count); + kfifo_dma_out_prepare(&tport->xmit_fifo, &dma->tx_sg, 1, count); ret = dma_map_sg(port->dev, &dma->tx_sg, 1, dma->dir); if (ret) @@ -843,8 +843,8 @@ static void msm_handle_rx(struct uart_port *port) static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count) { - struct circ_buf *xmit = &port->state->xmit; struct msm_port *msm_port = to_msm_port(port); + struct tty_port *tport = &port->state->port; unsigned int num_chars; unsigned int tf_pointer = 0; void __iomem *tf; @@ -858,8 +858,7 @@ static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count) msm_reset_dm_count(port, tx_count); while (tf_pointer < tx_count) { - int i; - char buf[4] = { 0 }; + unsigned char buf[4] = { 0 }; if (!(msm_read(port, MSM_UART_SR) & MSM_UART_SR_TX_READY)) break; @@ -870,26 +869,23 @@ static void msm_handle_tx_pio(struct uart_port *port, unsigned int tx_count) else num_chars = 1; - for (i = 0; i < num_chars; i++) - buf[i] = xmit->buf[xmit->tail + i]; - + num_chars = uart_fifo_out(port, buf, num_chars); iowrite32_rep(tf, buf, 1); - uart_xmit_advance(port, num_chars); tf_pointer += num_chars; } /* disable tx interrupts if nothing more to send */ - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) msm_stop_tx(port); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); } static void msm_handle_tx(struct uart_port *port) { struct msm_port *msm_port = to_msm_port(port); - struct circ_buf *xmit = &msm_port->uart.state->xmit; + struct tty_port *tport = &port->state->port; struct msm_dma *dma = &msm_port->tx_dma; unsigned int pio_count, dma_count, dma_min; char buf[4] = { 0 }; @@ -913,13 +909,13 @@ static void msm_handle_tx(struct uart_port *port) return; } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) { msm_stop_tx(port); return; } - pio_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); - dma_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + dma_count = pio_count = kfifo_out_linear(&tport->xmit_fifo, NULL, + UART_XMIT_SIZE); dma_min = 1; /* Always DMA */ if (msm_port->is_uartdm > UARTDM_1P3) { diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index 0255646bc175..5de57b77abdb 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c @@ -219,12 +219,10 @@ static 
void mvebu_uart_stop_tx(struct uart_port *port) static void mvebu_uart_start_tx(struct uart_port *port) { unsigned int ctl; - struct circ_buf *xmit = &port->state->xmit; + unsigned char c; - if (IS_EXTENDED(port) && !uart_circ_empty(xmit)) { - writel(xmit->buf[xmit->tail], port->membase + UART_TSH(port)); - uart_xmit_advance(port, 1); - } + if (IS_EXTENDED(port) && uart_fifo_get(port, &c)) + writel(c, port->membase + UART_TSH(port)); ctl = readl(port->membase + UART_INTR(port)); ctl |= CTRL_TX_RDY_INT(port); diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index 4749331fe618..144b35d31497 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -517,7 +517,7 @@ static void mxs_auart_tx_chars(struct mxs_auart_port *s); static void dma_tx_callback(void *param) { struct mxs_auart_port *s = param; - struct circ_buf *xmit = &s->port.state->xmit; + struct tty_port *tport = &s->port.state->port; dma_unmap_sg(s->dev, &s->tx_sgl, 1, DMA_TO_DEVICE); @@ -526,7 +526,7 @@ static void dma_tx_callback(void *param) smp_mb__after_atomic(); /* wake up the possible processes. */ - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&s->port); mxs_auart_tx_chars(s); @@ -568,33 +568,22 @@ static int mxs_auart_dma_tx(struct mxs_auart_port *s, int size) static void mxs_auart_tx_chars(struct mxs_auart_port *s) { - struct circ_buf *xmit = &s->port.state->xmit; + struct tty_port *tport = &s->port.state->port; bool pending; u8 ch; if (auart_dma_enabled(s)) { u32 i = 0; - int size; void *buffer = s->tx_dma_buf; if (test_and_set_bit(MXS_AUART_DMA_TX_SYNC, &s->flags)) return; - while (!uart_circ_empty(xmit) && !uart_tx_stopped(&s->port)) { - size = min_t(u32, UART_XMIT_SIZE - i, - CIRC_CNT_TO_END(xmit->head, - xmit->tail, - UART_XMIT_SIZE)); - memcpy(buffer + i, xmit->buf + xmit->tail, size); - xmit->tail = (xmit->tail + size) & (UART_XMIT_SIZE - 1); - - i += size; - if (i >= UART_XMIT_SIZE) - break; - } - if (uart_tx_stopped(&s->port)) mxs_auart_stop_tx(&s->port); + else + i = kfifo_out(&tport->xmit_fifo, buffer, + UART_XMIT_SIZE); if (i) { mxs_auart_dma_tx(s, i); diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 89257cddf540..c7cee5fee603 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -808,7 +808,7 @@ static int dma_handle_rx(struct eg20t_port *priv) static unsigned int handle_tx(struct eg20t_port *priv) { struct uart_port *port = &priv->port; - struct circ_buf *xmit = &port->state->xmit; + unsigned char ch; int fifo_size; int tx_empty; @@ -830,9 +830,9 @@ static unsigned int handle_tx(struct eg20t_port *priv) fifo_size--; } - while (!uart_tx_stopped(port) && !uart_circ_empty(xmit) && fifo_size) { - iowrite8(xmit->buf[xmit->tail], priv->membase + PCH_UART_THR); - uart_xmit_advance(port, 1); + while (!uart_tx_stopped(port) && fifo_size && + uart_fifo_get(port, &ch)) { + iowrite8(ch, priv->membase + PCH_UART_THR); fifo_size--; tx_empty = 0; } @@ -850,14 +850,14 @@ static unsigned int handle_tx(struct eg20t_port *priv) static unsigned int dma_handle_tx(struct eg20t_port *priv) { struct uart_port *port = &priv->port; - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; struct scatterlist *sg; int nent; int fifo_size; struct dma_async_tx_descriptor *desc; + unsigned int bytes, tail; int num; int i; - int bytes; int size; int rem; @@ -886,7 +886,7 @@ static unsigned int dma_handle_tx(struct 
eg20t_port *priv) fifo_size--; } - bytes = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + bytes = kfifo_out_linear(&tport->xmit_fifo, &tail, UART_XMIT_SIZE); if (!bytes) { dev_dbg(priv->port.dev, "%s 0 bytes return\n", __func__); pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_TX_INT); @@ -920,10 +920,10 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv) for (i = 0; i < num; i++, sg++) { if (i == (num - 1)) - sg_set_page(sg, virt_to_page(xmit->buf), + sg_set_page(sg, virt_to_page(tport->xmit_buf), rem, fifo_size * i); else - sg_set_page(sg, virt_to_page(xmit->buf), + sg_set_page(sg, virt_to_page(tport->xmit_buf), size, fifo_size * i); } @@ -937,8 +937,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv) priv->nent = nent; for (i = 0; i < nent; i++, sg++) { - sg->offset = (xmit->tail & (UART_XMIT_SIZE - 1)) + - fifo_size * i; + sg->offset = tail + fifo_size * i; sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) + sg->offset; if (i == (nent - 1)) diff --git a/drivers/tty/serial/pic32_uart.c b/drivers/tty/serial/pic32_uart.c index bbb46e6e98a2..f5af336a869b 100644 --- a/drivers/tty/serial/pic32_uart.c +++ b/drivers/tty/serial/pic32_uart.c @@ -342,7 +342,7 @@ static void pic32_uart_do_rx(struct uart_port *port) static void pic32_uart_do_tx(struct uart_port *port) { struct pic32_sport *sport = to_pic32_sport(port); - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; unsigned int max_count = PIC32_UART_TX_FIFO_DEPTH; if (port->x_char) { @@ -357,7 +357,7 @@ static void pic32_uart_do_tx(struct uart_port *port) return; } - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) goto txq_empty; /* keep stuffing chars into uart tx buffer @@ -371,21 +371,20 @@ static void pic32_uart_do_tx(struct uart_port *port) */ while (!(PIC32_UART_STA_UTXBF & pic32_uart_readl(sport, PIC32_UART_STA))) { - unsigned int c = xmit->buf[xmit->tail]; + unsigned char c; + if (!uart_fifo_get(port, &c)) + break; pic32_uart_writel(sport, PIC32_UART_TX, c); - uart_xmit_advance(port, 1); - if (uart_circ_empty(xmit)) - break; if (--max_count == 0) break; } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) goto txq_empty; return; diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c index 05d97e89511e..63bc726273fd 100644 --- a/drivers/tty/serial/pmac_zilog.c +++ b/drivers/tty/serial/pmac_zilog.c @@ -347,7 +347,8 @@ static void pmz_status_handle(struct uart_pmac_port *uap) static void pmz_transmit_chars(struct uart_pmac_port *uap) { - struct circ_buf *xmit; + struct tty_port *tport; + unsigned char ch; if (ZS_IS_CONS(uap)) { unsigned char status = read_zsreg(uap, R0); @@ -398,8 +399,8 @@ static void pmz_transmit_chars(struct uart_pmac_port *uap) if (uap->port.state == NULL) goto ack_tx_int; - xmit = &uap->port.state->xmit; - if (uart_circ_empty(xmit)) { + tport = &uap->port.state->port; + if (kfifo_is_empty(&tport->xmit_fifo)) { uart_write_wakeup(&uap->port); goto ack_tx_int; } @@ -407,12 +408,11 @@ static void pmz_transmit_chars(struct uart_pmac_port *uap) goto ack_tx_int; uap->flags |= PMACZILOG_FLAG_TX_ACTIVE; - write_zsdata(uap, xmit->buf[xmit->tail]); + WARN_ON(!uart_fifo_get(&uap->port, &ch)); + write_zsdata(uap, ch); zssync(uap); - uart_xmit_advance(&uap->port, 1); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if 
(kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&uap->port); return; @@ -620,15 +620,15 @@ static void pmz_start_tx(struct uart_port *port) port->icount.tx++; port->x_char = 0; } else { - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; + unsigned char ch; - if (uart_circ_empty(xmit)) + if (!uart_fifo_get(&uap->port, &ch)) return; - write_zsdata(uap, xmit->buf[xmit->tail]); + write_zsdata(uap, ch); zssync(uap); - uart_xmit_advance(port, 1); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&uap->port); } } diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index f9f7ac1a10df..7814982f1921 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -505,7 +505,7 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s, */ qcom_geni_serial_poll_tx_done(uport); - if (!uart_circ_empty(&uport->state->xmit)) { + if (!kfifo_is_empty(&uport->state->port.xmit_fifo)) { irq_en = readl(uport->membase + SE_GENI_M_IRQ_EN); writel(irq_en | M_TX_FIFO_WATERMARK_EN, uport->membase + SE_GENI_M_IRQ_EN); @@ -620,22 +620,24 @@ static void qcom_geni_serial_stop_tx_dma(struct uart_port *uport) static void qcom_geni_serial_start_tx_dma(struct uart_port *uport) { struct qcom_geni_serial_port *port = to_dev_port(uport); - struct circ_buf *xmit = &uport->state->xmit; + struct tty_port *tport = &uport->state->port; unsigned int xmit_size; + u8 *tail; int ret; if (port->tx_dma_addr) return; - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) return; - xmit_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + xmit_size = kfifo_out_linear_ptr(&tport->xmit_fifo, &tail, + UART_XMIT_SIZE); qcom_geni_serial_setup_tx(uport, xmit_size); - ret = geni_se_tx_dma_prep(&port->se, &xmit->buf[xmit->tail], - xmit_size, &port->tx_dma_addr); + ret = geni_se_tx_dma_prep(&port->se, tail, xmit_size, + &port->tx_dma_addr); if (ret) { dev_err(uport->dev, "unable to start TX SE DMA: %d\n", ret); qcom_geni_serial_stop_tx_dma(uport); @@ -853,18 +855,16 @@ static void qcom_geni_serial_send_chunk_fifo(struct uart_port *uport, unsigned int chunk) { struct qcom_geni_serial_port *port = to_dev_port(uport); - struct circ_buf *xmit = &uport->state->xmit; - unsigned int tx_bytes, c, remaining = chunk; + struct tty_port *tport = &uport->state->port; + unsigned int tx_bytes, remaining = chunk; u8 buf[BYTES_PER_FIFO_WORD]; while (remaining) { memset(buf, 0, sizeof(buf)); tx_bytes = min(remaining, BYTES_PER_FIFO_WORD); - for (c = 0; c < tx_bytes ; c++) { - buf[c] = xmit->buf[xmit->tail]; - uart_xmit_advance(uport, 1); - } + tx_bytes = kfifo_out(&tport->xmit_fifo, buf, tx_bytes); + uart_xmit_advance(uport, tx_bytes); iowrite32_rep(uport->membase + SE_GENI_TX_FIFOn, buf, 1); @@ -877,7 +877,7 @@ static void qcom_geni_serial_handle_tx_fifo(struct uart_port *uport, bool done, bool active) { struct qcom_geni_serial_port *port = to_dev_port(uport); - struct circ_buf *xmit = &uport->state->xmit; + struct tty_port *tport = &uport->state->port; size_t avail; size_t pending; u32 status; @@ -890,7 +890,7 @@ static void qcom_geni_serial_handle_tx_fifo(struct uart_port *uport, if (active) pending = port->tx_remaining; else - pending = uart_circ_chars_pending(xmit); + pending = kfifo_len(&tport->xmit_fifo); /* All data has been transmitted and acknowledged as received */ if (!pending && !status && done) { @@ 
-933,24 +933,24 @@ out_write_wakeup: uport->membase + SE_GENI_M_IRQ_EN); } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(uport); } static void qcom_geni_serial_handle_tx_dma(struct uart_port *uport) { struct qcom_geni_serial_port *port = to_dev_port(uport); - struct circ_buf *xmit = &uport->state->xmit; + struct tty_port *tport = &uport->state->port; uart_xmit_advance(uport, port->tx_remaining); geni_se_tx_dma_unprep(&port->se, port->tx_dma_addr, port->tx_remaining); port->tx_dma_addr = 0; port->tx_remaining = 0; - if (!uart_circ_empty(xmit)) + if (!kfifo_is_empty(&tport->xmit_fifo)) qcom_geni_serial_start_tx_dma(uport); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(uport); } diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c index 82def9b8632a..663e35e424bd 100644 --- a/drivers/tty/serial/rda-uart.c +++ b/drivers/tty/serial/rda-uart.c @@ -330,8 +330,8 @@ static void rda_uart_set_termios(struct uart_port *port, static void rda_uart_send_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; - unsigned int ch; + struct tty_port *tport = &port->state->port; + unsigned char ch; u32 val; if (uart_tx_stopped(port)) @@ -347,19 +347,14 @@ static void rda_uart_send_chars(struct uart_port *port) port->x_char = 0; } - while (rda_uart_read(port, RDA_UART_STATUS) & RDA_UART_TX_FIFO_MASK) { - if (uart_circ_empty(xmit)) - break; - - ch = xmit->buf[xmit->tail]; + while ((rda_uart_read(port, RDA_UART_STATUS) & RDA_UART_TX_FIFO_MASK) && + uart_fifo_get(port, &ch)) rda_uart_write(port, ch, RDA_UART_RXTX_BUFFER); - uart_xmit_advance(port, 1); - } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); - if (!uart_circ_empty(xmit)) { + if (!kfifo_is_empty(&tport->xmit_fifo)) { /* Re-enable Tx FIFO interrupt */ val = rda_uart_read(port, RDA_UART_IRQ_MASK); val |= RDA_UART_TX_DATA_NEEDED; diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c index a2d07e05c502..dc35eb77d2ef 100644 --- a/drivers/tty/serial/samsung_tty.c +++ b/drivers/tty/serial/samsung_tty.c @@ -329,7 +329,7 @@ static void s3c24xx_serial_tx_dma_complete(void *args) { struct s3c24xx_uart_port *ourport = args; struct uart_port *port = &ourport->port; - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; struct s3c24xx_uart_dma *dma = ourport->dma; struct dma_tx_state state; unsigned long flags; @@ -348,7 +348,7 @@ static void s3c24xx_serial_tx_dma_complete(void *args) uart_xmit_advance(port, count); ourport->tx_in_progress = 0; - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); s3c24xx_serial_start_next_tx(ourport); @@ -431,17 +431,15 @@ static void s3c24xx_serial_start_tx_pio(struct s3c24xx_uart_port *ourport) } static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport, - unsigned int count) + unsigned int count, unsigned int tail) { - struct uart_port *port = &ourport->port; - struct circ_buf *xmit = &port->state->xmit; struct s3c24xx_uart_dma *dma = ourport->dma; if (ourport->tx_mode != S3C24XX_TX_DMA) enable_tx_dma(ourport); dma->tx_size = count & ~(dma_get_cache_alignment() - 1); - dma->tx_transfer_addr = dma->tx_addr + xmit->tail; + dma->tx_transfer_addr = dma->tx_addr + tail; 
dma_sync_single_for_device(dma->tx_chan->device->dev, dma->tx_transfer_addr, dma->tx_size, @@ -468,11 +466,11 @@ static int s3c24xx_serial_start_tx_dma(struct s3c24xx_uart_port *ourport, static void s3c24xx_serial_start_next_tx(struct s3c24xx_uart_port *ourport) { struct uart_port *port = &ourport->port; - struct circ_buf *xmit = &port->state->xmit; - unsigned long count; + struct tty_port *tport = &port->state->port; + unsigned int count, tail; /* Get data size up to the end of buffer */ - count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + count = kfifo_out_linear(&tport->xmit_fifo, &tail, UART_XMIT_SIZE); if (!count) { s3c24xx_serial_stop_tx(port); @@ -481,16 +479,16 @@ static void s3c24xx_serial_start_next_tx(struct s3c24xx_uart_port *ourport) if (!ourport->dma || !ourport->dma->tx_chan || count < ourport->min_dma_size || - xmit->tail & (dma_get_cache_alignment() - 1)) + tail & (dma_get_cache_alignment() - 1)) s3c24xx_serial_start_tx_pio(ourport); else - s3c24xx_serial_start_tx_dma(ourport, count); + s3c24xx_serial_start_tx_dma(ourport, count, tail); } static void s3c24xx_serial_start_tx(struct uart_port *port) { struct s3c24xx_uart_port *ourport = to_ourport(port); - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; if (!ourport->tx_enabled) { if (port->flags & UPF_CONS_FLOW) @@ -502,7 +500,8 @@ static void s3c24xx_serial_start_tx(struct uart_port *port) } if (ourport->dma && ourport->dma->tx_chan) { - if (!uart_circ_empty(xmit) && !ourport->tx_in_progress) + if (!kfifo_is_empty(&tport->xmit_fifo) && + !ourport->tx_in_progress) s3c24xx_serial_start_next_tx(ourport); } } @@ -868,18 +867,19 @@ static irqreturn_t s3c24xx_serial_rx_irq(int irq, void *dev_id) static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport) { struct uart_port *port = &ourport->port; - struct circ_buf *xmit = &port->state->xmit; - int count, dma_count = 0; + struct tty_port *tport = &port->state->port; + unsigned int count, dma_count = 0, tail; - count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + count = kfifo_out_linear(&tport->xmit_fifo, &tail, UART_XMIT_SIZE); if (ourport->dma && ourport->dma->tx_chan && count >= ourport->min_dma_size) { int align = dma_get_cache_alignment() - - (xmit->tail & (dma_get_cache_alignment() - 1)); + (tail & (dma_get_cache_alignment() - 1)); if (count - align >= ourport->min_dma_size) { dma_count = count - align; count = align; + tail += align; } } @@ -894,7 +894,7 @@ static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport) * stopped, disable the uart and exit */ - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) { s3c24xx_serial_stop_tx(port); return; } @@ -906,24 +906,25 @@ static void s3c24xx_serial_tx_chars(struct s3c24xx_uart_port *ourport) dma_count = 0; } - while (!uart_circ_empty(xmit) && count > 0) { - if (rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull) + while (!(rd_regl(port, S3C2410_UFSTAT) & ourport->info->tx_fifofull)) { + unsigned char ch; + + if (!uart_fifo_get(port, &ch)) break; - wr_reg(port, S3C2410_UTXH, xmit->buf[xmit->tail]); - uart_xmit_advance(port, 1); + wr_reg(port, S3C2410_UTXH, ch); count--; } if (!count && dma_count) { - s3c24xx_serial_start_tx_dma(ourport, dma_count); + s3c24xx_serial_start_tx_dma(ourport, dma_count, tail); return; } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); - if 
(uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) s3c24xx_serial_stop_tx(port); } @@ -1118,7 +1119,8 @@ static int s3c24xx_serial_request_dma(struct s3c24xx_uart_port *p) /* TX buffer */ dma->tx_addr = dma_map_single(dma->tx_chan->device->dev, - p->port.state->xmit.buf, UART_XMIT_SIZE, + p->port.state->port.xmit_buf, + UART_XMIT_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(dma->tx_chan->device->dev, dma->tx_addr)) { reason = "DMA mapping error for TX buffer"; diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c index dbec29d9a6c3..b4e1b90e5960 100644 --- a/drivers/tty/serial/sb1250-duart.c +++ b/drivers/tty/serial/sb1250-duart.c @@ -382,7 +382,8 @@ static void sbd_receive_chars(struct sbd_port *sport) static void sbd_transmit_chars(struct sbd_port *sport) { struct uart_port *uport = &sport->port; - struct circ_buf *xmit = &sport->port.state->xmit; + struct tty_port *tport = &sport->port.state->port; + unsigned char ch; unsigned int mask; int stop_tx; @@ -395,19 +396,19 @@ static void sbd_transmit_chars(struct sbd_port *sport) } /* If nothing to do or stopped or hardware stopped. */ - stop_tx = (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)); + stop_tx = uart_tx_stopped(&sport->port) || + !uart_fifo_get(&sport->port, &ch); /* Send char. */ if (!stop_tx) { - write_sbdchn(sport, R_DUART_TX_HOLD, xmit->buf[xmit->tail]); - uart_xmit_advance(&sport->port, 1); + write_sbdchn(sport, R_DUART_TX_HOLD, ch); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&sport->port); } /* Are we are done? */ - if (stop_tx || uart_circ_empty(xmit)) { + if (stop_tx || kfifo_is_empty(&tport->xmit_fifo)) { /* Disable tx interrupts. */ mask = read_sbdshr(sport, R_DUART_IMRREG((uport->line) % 2)); mask &= ~M_DUART_IMR_TX; diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index 929206a9a6e1..c6983b7bd78c 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c @@ -676,9 +676,9 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen, static void sc16is7xx_handle_tx(struct uart_port *port) { struct sc16is7xx_port *s = dev_get_drvdata(port->dev); - struct circ_buf *xmit = &port->state->xmit; - unsigned int txlen, to_send, i; + struct tty_port *tport = &port->state->port; unsigned long flags; + unsigned int txlen; if (unlikely(port->x_char)) { sc16is7xx_port_write(port, SC16IS7XX_THR_REG, port->x_char); @@ -687,40 +687,30 @@ static void sc16is7xx_handle_tx(struct uart_port *port) return; } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) { uart_port_lock_irqsave(port, &flags); sc16is7xx_stop_tx(port); uart_port_unlock_irqrestore(port, flags); return; } - /* Get length of data pending in circular buffer */ - to_send = uart_circ_chars_pending(xmit); - if (likely(to_send)) { - /* Limit to space available in TX FIFO */ - txlen = sc16is7xx_port_read(port, SC16IS7XX_TXLVL_REG); - if (txlen > SC16IS7XX_FIFO_SIZE) { - dev_err_ratelimited(port->dev, - "chip reports %d free bytes in TX fifo, but it only has %d", - txlen, SC16IS7XX_FIFO_SIZE); - txlen = 0; - } - to_send = (to_send > txlen) ? 
txlen : to_send; - - /* Convert to linear buffer */ - for (i = 0; i < to_send; ++i) { - s->buf[i] = xmit->buf[xmit->tail]; - uart_xmit_advance(port, 1); - } - - sc16is7xx_fifo_write(port, s->buf, to_send); + /* Limit to space available in TX FIFO */ + txlen = sc16is7xx_port_read(port, SC16IS7XX_TXLVL_REG); + if (txlen > SC16IS7XX_FIFO_SIZE) { + dev_err_ratelimited(port->dev, + "chip reports %d free bytes in TX fifo, but it only has %d", + txlen, SC16IS7XX_FIFO_SIZE); + txlen = 0; } + txlen = uart_fifo_out(port, s->buf, txlen); + sc16is7xx_fifo_write(port, s->buf, txlen); + uart_port_lock_irqsave(port, &flags); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) sc16is7xx_stop_tx(port); else sc16is7xx_ier_set(port, SC16IS7XX_IER_THRI_BIT); diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c index f24217a560d7..6d1d142fd216 100644 --- a/drivers/tty/serial/sccnxp.c +++ b/drivers/tty/serial/sccnxp.c @@ -439,7 +439,7 @@ static void sccnxp_handle_rx(struct uart_port *port) static void sccnxp_handle_tx(struct uart_port *port) { u8 sr; - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; struct sccnxp_port *s = dev_get_drvdata(port->dev); if (unlikely(port->x_char)) { @@ -449,7 +449,7 @@ static void sccnxp_handle_tx(struct uart_port *port) return; } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) { /* Disable TX if FIFO is empty */ if (sccnxp_port_read(port, SCCNXP_SR_REG) & SR_TXEMT) { sccnxp_disable_irq(port, IMR_TXRDY); @@ -461,16 +461,20 @@ static void sccnxp_handle_tx(struct uart_port *port) return; } - while (!uart_circ_empty(xmit)) { + while (1) { + unsigned char ch; + sr = sccnxp_port_read(port, SCCNXP_SR_REG); if (!(sr & SR_TXRDY)) break; - sccnxp_port_write(port, SCCNXP_THR_REG, xmit->buf[xmit->tail]); - uart_xmit_advance(port, 1); + if (!uart_fifo_get(port, &ch)) + break; + + sccnxp_port_write(port, SCCNXP_THR_REG, ch); } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); } diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c index 525f3a2f7bd4..1183ca54ab92 100644 --- a/drivers/tty/serial/serial-tegra.c +++ b/drivers/tty/serial/serial-tegra.c @@ -484,18 +484,18 @@ static void tegra_uart_release_port(struct uart_port *u) static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes) { - struct circ_buf *xmit = &tup->uport.state->xmit; + unsigned char ch; int i; for (i = 0; i < max_bytes; i++) { - BUG_ON(uart_circ_empty(xmit)); if (tup->cdata->tx_fifo_full_status) { unsigned long lsr = tegra_uart_read(tup, UART_LSR); if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL)) break; } - tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX); - uart_xmit_advance(&tup->uport, 1); + if (WARN_ON_ONCE(!uart_fifo_get(&tup->uport, &ch))) + break; + tegra_uart_write(tup, ch, UART_TX); } } @@ -514,7 +514,7 @@ static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup, static void tegra_uart_tx_dma_complete(void *args) { struct tegra_uart_port *tup = args; - struct circ_buf *xmit = &tup->uport.state->xmit; + struct tty_port *tport = &tup->uport.state->port; struct dma_tx_state state; unsigned long flags; unsigned int count; @@ -525,7 +525,7 @@ static void tegra_uart_tx_dma_complete(void *args) 
uart_port_lock_irqsave(&tup->uport, &flags); uart_xmit_advance(&tup->uport, count); tup->tx_in_progress = 0; - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&tup->uport); tegra_uart_start_next_tx(tup); uart_port_unlock_irqrestore(&tup->uport, flags); @@ -534,11 +534,14 @@ static void tegra_uart_tx_dma_complete(void *args) static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup, unsigned long count) { - struct circ_buf *xmit = &tup->uport.state->xmit; + struct tty_port *tport = &tup->uport.state->port; dma_addr_t tx_phys_addr; + unsigned int tail; tup->tx_bytes = count & ~(0xF); - tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail; + WARN_ON_ONCE(kfifo_out_linear(&tport->xmit_fifo, &tail, + UART_XMIT_SIZE) < count); + tx_phys_addr = tup->tx_dma_buf_phys + tail; dma_sync_single_for_device(tup->uport.dev, tx_phys_addr, tup->tx_bytes, DMA_TO_DEVICE); @@ -562,18 +565,21 @@ static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup, static void tegra_uart_start_next_tx(struct tegra_uart_port *tup) { + struct tty_port *tport = &tup->uport.state->port; + unsigned char *tail_ptr; unsigned long tail; - unsigned long count; - struct circ_buf *xmit = &tup->uport.state->xmit; + unsigned int count; if (!tup->current_baud) return; - tail = (unsigned long)&xmit->buf[xmit->tail]; - count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + count = kfifo_out_linear_ptr(&tport->xmit_fifo, &tail_ptr, + UART_XMIT_SIZE); if (!count) return; + tail = (unsigned long)tail_ptr; + if (tup->use_tx_pio || count < TEGRA_UART_MIN_DMA) tegra_uart_start_pio_tx(tup, count); else if (BYTES_TO_ALIGN(tail) > 0) @@ -586,9 +592,9 @@ static void tegra_uart_start_next_tx(struct tegra_uart_port *tup) static void tegra_uart_start_tx(struct uart_port *u) { struct tegra_uart_port *tup = to_tegra_uport(u); - struct circ_buf *xmit = &u->state->xmit; + struct tty_port *tport = &u->state->port; - if (!uart_circ_empty(xmit) && !tup->tx_in_progress) + if (!kfifo_is_empty(&tport->xmit_fifo) && !tup->tx_in_progress) tegra_uart_start_next_tx(tup); } @@ -628,11 +634,11 @@ static void tegra_uart_stop_tx(struct uart_port *u) static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup) { - struct circ_buf *xmit = &tup->uport.state->xmit; + struct tty_port *tport = &tup->uport.state->port; tegra_uart_fill_tx_fifo(tup, tup->tx_bytes); tup->tx_in_progress = 0; - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&tup->uport); tegra_uart_start_next_tx(tup); } @@ -1169,15 +1175,14 @@ static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup, tup->rx_dma_buf_virt = dma_buf; tup->rx_dma_buf_phys = dma_phys; } else { + dma_buf = tup->uport.state->port.xmit_buf; dma_phys = dma_map_single(tup->uport.dev, - tup->uport.state->xmit.buf, UART_XMIT_SIZE, - DMA_TO_DEVICE); + dma_buf, UART_XMIT_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(tup->uport.dev, dma_phys)) { dev_err(tup->uport.dev, "dma_map_single tx failed\n"); dma_release_channel(dma_chan); return -ENOMEM; } - dma_buf = tup->uport.state->xmit.buf; dma_sconfig.dst_addr = tup->uport.mapbase; dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; dma_sconfig.dst_maxburst = 16; diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index ff85ebd3a007..3c0931fba1c6 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -272,9 +272,10 @@ static int uart_port_startup(struct 
tty_struct *tty, struct uart_state *state, return -ENOMEM; uart_port_lock(state, flags); - if (!state->xmit.buf) { - state->xmit.buf = (unsigned char *) page; - uart_circ_clear(&state->xmit); + if (!state->port.xmit_buf) { + state->port.xmit_buf = (unsigned char *)page; + kfifo_init(&state->port.xmit_fifo, state->port.xmit_buf, + PAGE_SIZE); uart_port_unlock(uport, flags); } else { uart_port_unlock(uport, flags); @@ -387,8 +388,9 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state) * can endup in printk() recursion. */ uart_port_lock(state, flags); - xmit_buf = state->xmit.buf; - state->xmit.buf = NULL; + xmit_buf = port->xmit_buf; + port->xmit_buf = NULL; + INIT_KFIFO(port->xmit_fifo); uart_port_unlock(uport, flags); free_page((unsigned long)xmit_buf); @@ -552,22 +554,17 @@ static int uart_put_char(struct tty_struct *tty, u8 c) { struct uart_state *state = tty->driver_data; struct uart_port *port; - struct circ_buf *circ; unsigned long flags; int ret = 0; - circ = &state->xmit; port = uart_port_lock(state, flags); - if (!circ->buf) { + if (WARN_ON_ONCE(!state->port.xmit_buf)) { uart_port_unlock(port, flags); return 0; } - if (port && uart_circ_chars_free(circ) != 0) { - circ->buf[circ->head] = c; - circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); - ret = 1; - } + if (port) + ret = kfifo_put(&state->port.xmit_fifo, c); uart_port_unlock(port, flags); return ret; } @@ -581,9 +578,8 @@ static ssize_t uart_write(struct tty_struct *tty, const u8 *buf, size_t count) { struct uart_state *state = tty->driver_data; struct uart_port *port; - struct circ_buf *circ; unsigned long flags; - int c, ret = 0; + int ret = 0; /* * This means you called this function _after_ the port was @@ -593,24 +589,13 @@ static ssize_t uart_write(struct tty_struct *tty, const u8 *buf, size_t count) return -EL3HLT; port = uart_port_lock(state, flags); - circ = &state->xmit; - if (!circ->buf) { + if (WARN_ON_ONCE(!state->port.xmit_buf)) { uart_port_unlock(port, flags); return 0; } - while (port) { - c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE); - if (count < c) - c = count; - if (c <= 0) - break; - memcpy(circ->buf + circ->head, buf, c); - circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1); - buf += c; - count -= c; - ret += c; - } + if (port) + ret = kfifo_in(&state->port.xmit_fifo, buf, count); __uart_start(state); uart_port_unlock(port, flags); @@ -625,7 +610,7 @@ static unsigned int uart_write_room(struct tty_struct *tty) unsigned int ret; port = uart_port_lock(state, flags); - ret = uart_circ_chars_free(&state->xmit); + ret = kfifo_avail(&state->port.xmit_fifo); uart_port_unlock(port, flags); return ret; } @@ -638,7 +623,7 @@ static unsigned int uart_chars_in_buffer(struct tty_struct *tty) unsigned int ret; port = uart_port_lock(state, flags); - ret = uart_circ_chars_pending(&state->xmit); + ret = kfifo_len(&state->port.xmit_fifo); uart_port_unlock(port, flags); return ret; } @@ -661,7 +646,7 @@ static void uart_flush_buffer(struct tty_struct *tty) port = uart_port_lock(state, flags); if (!port) return; - uart_circ_clear(&state->xmit); + kfifo_reset(&state->port.xmit_fifo); if (port->ops->flush_buffer) port->ops->flush_buffer(port); uart_port_unlock(port, flags); @@ -1064,7 +1049,7 @@ static int uart_get_lsr_info(struct tty_struct *tty, * interrupt happens). 
*/ if (uport->x_char || - ((uart_circ_chars_pending(&state->xmit) > 0) && + (!kfifo_is_empty(&state->port.xmit_fifo) && !uart_tx_stopped(uport))) result &= ~TIOCSER_TEMT; @@ -1788,8 +1773,9 @@ static void uart_tty_port_shutdown(struct tty_port *port) * Free the transmit buffer. */ uart_port_lock_irq(uport); - buf = state->xmit.buf; - state->xmit.buf = NULL; + buf = port->xmit_buf; + port->xmit_buf = NULL; + INIT_KFIFO(port->xmit_fifo); uart_port_unlock_irq(uport); free_page((unsigned long)buf); diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c index 22b9eeb23e68..3408c8827561 100644 --- a/drivers/tty/serial/serial_port.c +++ b/drivers/tty/serial/serial_port.c @@ -23,7 +23,7 @@ static int __serial_port_busy(struct uart_port *port) { return !uart_tx_stopped(port) && - uart_circ_chars_pending(&port->state->xmit); + !kfifo_is_empty(&port->state->port.xmit_fifo); } static int serial_port_runtime_resume(struct device *dev) diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index e512eaa57ed5..97031db26ae4 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -585,7 +585,7 @@ static void sci_start_tx(struct uart_port *port) sci_serial_out(port, SCSCR, new); } - if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) && + if (s->chan_tx && !kfifo_is_empty(&port->state->port.xmit_fifo) && dma_submit_error(s->cookie_tx)) { if (s->cfg->regtype == SCIx_RZ_SCIFA_REGTYPE) /* Switch irq from SCIF to DMA */ @@ -817,7 +817,7 @@ static int sci_rxfill(struct uart_port *port) static void sci_transmit_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; unsigned int stopped = uart_tx_stopped(port); unsigned short status; unsigned short ctrl; @@ -826,7 +826,7 @@ static void sci_transmit_chars(struct uart_port *port) status = sci_serial_in(port, SCxSR); if (!(status & SCxSR_TDxE(port))) { ctrl = sci_serial_in(port, SCSCR); - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) ctrl &= ~SCSCR_TIE; else ctrl |= SCSCR_TIE; @@ -842,15 +842,14 @@ static void sci_transmit_chars(struct uart_port *port) if (port->x_char) { c = port->x_char; port->x_char = 0; - } else if (!uart_circ_empty(xmit) && !stopped) { - c = xmit->buf[xmit->tail]; - xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); - } else if (port->type == PORT_SCI && uart_circ_empty(xmit)) { - ctrl = sci_serial_in(port, SCSCR); - ctrl &= ~SCSCR_TE; - sci_serial_out(port, SCSCR, ctrl); - return; - } else { + } else if (stopped || !kfifo_get(&tport->xmit_fifo, &c)) { + if (port->type == PORT_SCI && + kfifo_is_empty(&tport->xmit_fifo)) { + ctrl = sci_serial_in(port, SCSCR); + ctrl &= ~SCSCR_TE; + sci_serial_out(port, SCSCR, ctrl); + return; + } break; } @@ -861,9 +860,9 @@ static void sci_transmit_chars(struct uart_port *port) sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port)); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); - if (uart_circ_empty(xmit)) { + if (kfifo_is_empty(&tport->xmit_fifo)) { if (port->type == PORT_SCI) { ctrl = sci_serial_in(port, SCSCR); ctrl &= ~SCSCR_TIE; @@ -1199,7 +1198,7 @@ static void sci_dma_tx_complete(void *arg) { struct sci_port *s = arg; struct uart_port *port = &s->port; - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; unsigned long flags; dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); @@ -1208,10 +1207,10 @@ static void 
sci_dma_tx_complete(void *arg) uart_xmit_advance(port, s->tx_dma_len); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); - if (!uart_circ_empty(xmit)) { + if (!kfifo_is_empty(&tport->xmit_fifo)) { s->cookie_tx = 0; schedule_work(&s->work_tx); } else { @@ -1424,10 +1423,10 @@ static void sci_dma_tx_work_fn(struct work_struct *work) struct dma_async_tx_descriptor *desc; struct dma_chan *chan = s->chan_tx; struct uart_port *port = &s->port; - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; unsigned long flags; + unsigned int tail; dma_addr_t buf; - int head, tail; /* * DMA is idle now. @@ -1437,10 +1436,9 @@ static void sci_dma_tx_work_fn(struct work_struct *work) * consistent xmit buffer state. */ uart_port_lock_irq(port); - head = xmit->head; - tail = xmit->tail; + s->tx_dma_len = kfifo_out_linear(&tport->xmit_fifo, &tail, + UART_XMIT_SIZE); buf = s->tx_dma_addr + tail; - s->tx_dma_len = CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE); if (!s->tx_dma_len) { /* Transmit buffer has been flushed */ uart_port_unlock_irq(port); @@ -1469,8 +1467,8 @@ static void sci_dma_tx_work_fn(struct work_struct *work) } uart_port_unlock_irq(port); - dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", - __func__, xmit->buf, tail, head, s->cookie_tx); + dev_dbg(port->dev, "%s: %p: %u, cookie %d\n", + __func__, tport->xmit_buf, tail, s->cookie_tx); dma_async_issue_pending(chan); return; @@ -1585,6 +1583,7 @@ static struct dma_chan *sci_request_dma_chan(struct uart_port *port, static void sci_request_dma(struct uart_port *port) { struct sci_port *s = to_sci_port(port); + struct tty_port *tport = &port->state->port; struct dma_chan *chan; dev_dbg(port->dev, "%s: port %d\n", __func__, port->line); @@ -1613,7 +1612,7 @@ static void sci_request_dma(struct uart_port *port) if (chan) { /* UART circular tx buffer is an aligned page. 
*/ s->tx_dma_addr = dma_map_single(chan->device->dev, - port->state->xmit.buf, + tport->xmit_buf, UART_XMIT_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(chan->device->dev, s->tx_dma_addr)) { @@ -1622,7 +1621,7 @@ static void sci_request_dma(struct uart_port *port) } else { dev_dbg(port->dev, "%s: mapped %lu@%p to %pad\n", __func__, UART_XMIT_SIZE, - port->state->xmit.buf, &s->tx_dma_addr); + tport->xmit_buf, &s->tx_dma_addr); INIT_WORK(&s->work_tx, sci_dma_tx_work_fn); s->chan_tx_saved = s->chan_tx = chan; diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c index 15f14fa593da..3fc54cc02a1f 100644 --- a/drivers/tty/serial/sprd_serial.c +++ b/drivers/tty/serial/sprd_serial.c @@ -227,13 +227,13 @@ static int sprd_tx_buf_remap(struct uart_port *port) { struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port, port); - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; + unsigned char *tail; - sp->tx_dma.trans_len = - CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); + sp->tx_dma.trans_len = kfifo_out_linear_ptr(&tport->xmit_fifo, &tail, + UART_XMIT_SIZE); - sp->tx_dma.phys_addr = dma_map_single(port->dev, - (void *)&(xmit->buf[xmit->tail]), + sp->tx_dma.phys_addr = dma_map_single(port->dev, tail, sp->tx_dma.trans_len, DMA_TO_DEVICE); return dma_mapping_error(port->dev, sp->tx_dma.phys_addr); @@ -244,7 +244,7 @@ static void sprd_complete_tx_dma(void *data) struct uart_port *port = (struct uart_port *)data; struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port, port); - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; unsigned long flags; uart_port_lock_irqsave(port, &flags); @@ -253,10 +253,10 @@ static void sprd_complete_tx_dma(void *data) uart_xmit_advance(port, sp->tx_dma.trans_len); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); - if (uart_circ_empty(xmit) || sprd_tx_buf_remap(port) || + if (kfifo_is_empty(&tport->xmit_fifo) || sprd_tx_buf_remap(port) || sprd_tx_dma_config(port)) sp->tx_dma.trans_len = 0; @@ -319,7 +319,7 @@ static void sprd_start_tx_dma(struct uart_port *port) { struct sprd_uart_port *sp = container_of(port, struct sprd_uart_port, port); - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; if (port->x_char) { serial_out(port, SPRD_TXD, port->x_char); @@ -328,7 +328,7 @@ static void sprd_start_tx_dma(struct uart_port *port) return; } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) { sprd_stop_tx_dma(port); return; } diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c index a23e59551848..f91753a40a69 100644 --- a/drivers/tty/serial/st-asc.c +++ b/drivers/tty/serial/st-asc.c @@ -387,9 +387,9 @@ static unsigned int asc_get_mctrl(struct uart_port *port) /* There are probably characters waiting to be transmitted. 
*/ static void asc_start_tx(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; - if (!uart_circ_empty(xmit)) + if (!kfifo_is_empty(&tport->xmit_fifo)) asc_enable_tx_interrupts(port); } diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c index 58d169e5c1db..8c66abcfe6ca 100644 --- a/drivers/tty/serial/stm32-usart.c +++ b/drivers/tty/serial/stm32-usart.c @@ -696,18 +696,23 @@ static void stm32_usart_transmit_chars_pio(struct uart_port *port) { struct stm32_port *stm32_port = to_stm32_port(port); const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; + + while (1) { + unsigned char ch; - while (!uart_circ_empty(xmit)) { /* Check that TDR is empty before filling FIFO */ if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE)) break; - writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr); - uart_xmit_advance(port, 1); + + if (!uart_fifo_get(port, &ch)) + break; + + writel_relaxed(ch, port->membase + ofs->tdr); } /* rely on TXE irq (mask or unmask) for sending remaining data */ - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) stm32_usart_tx_interrupt_disable(port); else stm32_usart_tx_interrupt_enable(port); @@ -716,7 +721,7 @@ static void stm32_usart_transmit_chars_pio(struct uart_port *port) static void stm32_usart_transmit_chars_dma(struct uart_port *port) { struct stm32_port *stm32port = to_stm32_port(port); - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; struct dma_async_tx_descriptor *desc = NULL; unsigned int count; int ret; @@ -728,25 +733,8 @@ static void stm32_usart_transmit_chars_dma(struct uart_port *port) return; } - count = uart_circ_chars_pending(xmit); - - if (count > TX_BUF_L) - count = TX_BUF_L; - - if (xmit->tail < xmit->head) { - memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count); - } else { - size_t one = UART_XMIT_SIZE - xmit->tail; - size_t two; - - if (one > count) - one = count; - two = count - one; - - memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one); - if (two) - memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two); - } + count = kfifo_out_peek(&tport->xmit_fifo, &stm32port->tx_buf[0], + TX_BUF_L); desc = dmaengine_prep_slave_single(stm32port->tx_ch, stm32port->tx_dma_buf, @@ -792,14 +780,14 @@ static void stm32_usart_transmit_chars(struct uart_port *port) { struct stm32_port *stm32_port = to_stm32_port(port); const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; u32 isr; int ret; if (!stm32_port->hw_flow_control && port->rs485.flags & SER_RS485_ENABLED && (port->x_char || - !(uart_circ_empty(xmit) || uart_tx_stopped(port)))) { + !(kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)))) { stm32_usart_tc_interrupt_disable(port); stm32_usart_rs485_rts_enable(port); } @@ -826,7 +814,7 @@ static void stm32_usart_transmit_chars(struct uart_port *port) return; } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) { stm32_usart_tx_interrupt_disable(port); return; } @@ -841,10 +829,10 @@ static void stm32_usart_transmit_chars(struct uart_port *port) else stm32_usart_transmit_chars_pio(port); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) 
uart_write_wakeup(port); - if (uart_circ_empty(xmit)) { + if (kfifo_is_empty(&tport->xmit_fifo)) { stm32_usart_tx_interrupt_disable(port); if (!stm32_port->hw_flow_control && port->rs485.flags & SER_RS485_ENABLED) { @@ -967,9 +955,9 @@ static void stm32_usart_stop_tx(struct uart_port *port) /* There are probably characters waiting to be transmitted. */ static void stm32_usart_start_tx(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; - if (uart_circ_empty(xmit) && !port->x_char) { + if (kfifo_is_empty(&tport->xmit_fifo) && !port->x_char) { stm32_usart_rs485_rts_disable(port); return; } diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c index 8d612ab80680..7f60679fdde1 100644 --- a/drivers/tty/serial/sunhv.c +++ b/drivers/tty/serial/sunhv.c @@ -39,10 +39,13 @@ static char *con_read_page; static int hung_up = 0; -static void transmit_chars_putchar(struct uart_port *port, struct circ_buf *xmit) +static void transmit_chars_putchar(struct uart_port *port, + struct tty_port *tport) { - while (!uart_circ_empty(xmit)) { - long status = sun4v_con_putchar(xmit->buf[xmit->tail]); + unsigned char ch; + + while (kfifo_peek(&tport->xmit_fifo, &ch)) { + long status = sun4v_con_putchar(ch); if (status != HV_EOK) break; @@ -51,14 +54,16 @@ static void transmit_chars_putchar(struct uart_port *port, struct circ_buf *xmit } } -static void transmit_chars_write(struct uart_port *port, struct circ_buf *xmit) +static void transmit_chars_write(struct uart_port *port, struct tty_port *tport) { - while (!uart_circ_empty(xmit)) { - unsigned long ra = __pa(xmit->buf + xmit->tail); - unsigned long len, status, sent; + while (!kfifo_is_empty(&tport->xmit_fifo)) { + unsigned long len, ra, status, sent; + unsigned char *tail; + + len = kfifo_out_linear_ptr(&tport->xmit_fifo, &tail, + UART_XMIT_SIZE); + ra = __pa(tail); - len = CIRC_CNT_TO_END(xmit->head, xmit->tail, - UART_XMIT_SIZE); status = sun4v_con_write(ra, len, &sent); if (status != HV_EOK) break; @@ -165,7 +170,7 @@ static int receive_chars_read(struct uart_port *port) } struct sunhv_ops { - void (*transmit_chars)(struct uart_port *port, struct circ_buf *xmit); + void (*transmit_chars)(struct uart_port *port, struct tty_port *tport); int (*receive_chars)(struct uart_port *port); }; @@ -196,18 +201,18 @@ static struct tty_port *receive_chars(struct uart_port *port) static void transmit_chars(struct uart_port *port) { - struct circ_buf *xmit; + struct tty_port *tport; if (!port->state) return; - xmit = &port->state->xmit; - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) + tport = &port->state->port; + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) return; - sunhv_ops->transmit_chars(port, xmit); + sunhv_ops->transmit_chars(port, tport); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); } diff --git a/drivers/tty/serial/sunplus-uart.c b/drivers/tty/serial/sunplus-uart.c index f5e29eb4a4ce..abf7c449308d 100644 --- a/drivers/tty/serial/sunplus-uart.c +++ b/drivers/tty/serial/sunplus-uart.c @@ -200,7 +200,7 @@ static void sunplus_break_ctl(struct uart_port *port, int ctl) static void transmit_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; if (port->x_char) { sp_uart_put_char(port, port->x_char); @@ -209,22 +209,24 @@ static void transmit_chars(struct uart_port *port) return; } - if (uart_circ_empty(xmit) || 
uart_tx_stopped(port)) { + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) { sunplus_stop_tx(port); return; } do { - sp_uart_put_char(port, xmit->buf[xmit->tail]); - uart_xmit_advance(port, 1); - if (uart_circ_empty(xmit)) + unsigned char ch; + + if (!uart_fifo_get(port, &ch)) break; + + sp_uart_put_char(port, ch); } while (sunplus_tx_buf_not_full(port)); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) sunplus_stop_tx(port); } diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c index 1ea2f33a07a7..1acbe2fba746 100644 --- a/drivers/tty/serial/sunsab.c +++ b/drivers/tty/serial/sunsab.c @@ -232,7 +232,7 @@ static void sunsab_tx_idle(struct uart_sunsab_port *); static void transmit_chars(struct uart_sunsab_port *up, union sab82532_irq_status *stat) { - struct circ_buf *xmit = &up->port.state->xmit; + struct tty_port *tport = &up->port.state->port; int i; if (stat->sreg.isr1 & SAB82532_ISR1_ALLS) { @@ -252,7 +252,7 @@ static void transmit_chars(struct uart_sunsab_port *up, set_bit(SAB82532_XPR, &up->irqflags); sunsab_tx_idle(up); - if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(&up->port)) { up->interrupt_mask1 |= SAB82532_IMR1_XPR; writeb(up->interrupt_mask1, &up->regs->w.imr1); return; @@ -265,21 +265,22 @@ static void transmit_chars(struct uart_sunsab_port *up, /* Stuff 32 bytes into Transmit FIFO. */ clear_bit(SAB82532_XPR, &up->irqflags); for (i = 0; i < up->port.fifosize; i++) { - writeb(xmit->buf[xmit->tail], - &up->regs->w.xfifo[i]); - uart_xmit_advance(&up->port, 1); - if (uart_circ_empty(xmit)) + unsigned char ch; + + if (!uart_fifo_get(&up->port, &ch)) break; + + writeb(ch, &up->regs->w.xfifo[i]); } /* Issue a Transmit Frame command. */ sunsab_cec_wait(up); writeb(SAB82532_CMDR_XF, &up->regs->w.cmdr); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&up->port); - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) sunsab_stop_tx(&up->port); } @@ -435,10 +436,10 @@ static void sunsab_start_tx(struct uart_port *port) { struct uart_sunsab_port *up = container_of(port, struct uart_sunsab_port, port); - struct circ_buf *xmit = &up->port.state->xmit; + struct tty_port *tport = &up->port.state->port; int i; - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) return; up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR); @@ -451,11 +452,12 @@ static void sunsab_start_tx(struct uart_port *port) clear_bit(SAB82532_XPR, &up->irqflags); for (i = 0; i < up->port.fifosize; i++) { - writeb(xmit->buf[xmit->tail], - &up->regs->w.xfifo[i]); - uart_xmit_advance(&up->port, 1); - if (uart_circ_empty(xmit)) + unsigned char ch; + + if (!uart_fifo_get(&up->port, &ch)) break; + + writeb(ch, &up->regs->w.xfifo[i]); } /* Issue a Transmit Frame command. 
*/ diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c index c8b65f4b2710..67a5fc70bb4b 100644 --- a/drivers/tty/serial/sunsu.c +++ b/drivers/tty/serial/sunsu.c @@ -396,7 +396,8 @@ receive_chars(struct uart_sunsu_port *up, unsigned char *status) static void transmit_chars(struct uart_sunsu_port *up) { - struct circ_buf *xmit = &up->port.state->xmit; + struct tty_port *tport = &up->port.state->port; + unsigned char ch; int count; if (up->port.x_char) { @@ -409,23 +410,23 @@ static void transmit_chars(struct uart_sunsu_port *up) sunsu_stop_tx(&up->port); return; } - if (uart_circ_empty(xmit)) { + if (kfifo_is_empty(&tport->xmit_fifo)) { __stop_tx(up); return; } count = up->port.fifosize; do { - serial_out(up, UART_TX, xmit->buf[xmit->tail]); - uart_xmit_advance(&up->port, 1); - if (uart_circ_empty(xmit)) + if (!uart_fifo_get(&up->port, &ch)) break; + + serial_out(up, UART_TX, ch); } while (--count > 0); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&up->port); - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) __stop_tx(up); } diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c index c99289c6c8f8..71758ad4241c 100644 --- a/drivers/tty/serial/sunzilog.c +++ b/drivers/tty/serial/sunzilog.c @@ -453,7 +453,8 @@ static void sunzilog_status_handle(struct uart_sunzilog_port *up, static void sunzilog_transmit_chars(struct uart_sunzilog_port *up, struct zilog_channel __iomem *channel) { - struct circ_buf *xmit; + struct tty_port *tport; + unsigned char ch; if (ZS_IS_CONS(up)) { unsigned char status = readb(&channel->control); @@ -496,21 +497,20 @@ static void sunzilog_transmit_chars(struct uart_sunzilog_port *up, if (up->port.state == NULL) goto ack_tx_int; - xmit = &up->port.state->xmit; - if (uart_circ_empty(xmit)) - goto ack_tx_int; + tport = &up->port.state->port; if (uart_tx_stopped(&up->port)) goto ack_tx_int; + if (!uart_fifo_get(&up->port, &ch)) + goto ack_tx_int; + up->flags |= SUNZILOG_FLAG_TX_ACTIVE; - writeb(xmit->buf[xmit->tail], &channel->data); + writeb(ch, &channel->data); ZSDELAY(); ZS_WSYNC(channel); - uart_xmit_advance(&up->port, 1); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&up->port); return; @@ -700,17 +700,16 @@ static void sunzilog_start_tx(struct uart_port *port) port->icount.tx++; port->x_char = 0; } else { - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; + unsigned char ch; - if (uart_circ_empty(xmit)) + if (!uart_fifo_get(&up->port, &ch)) return; - writeb(xmit->buf[xmit->tail], &channel->data); + writeb(ch, &channel->data); ZSDELAY(); ZS_WSYNC(channel); - uart_xmit_advance(port, 1); - - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&up->port); } } diff --git a/drivers/tty/serial/tegra-tcu.c b/drivers/tty/serial/tegra-tcu.c index d9c78320eb02..21ca5fcadf49 100644 --- a/drivers/tty/serial/tegra-tcu.c +++ b/drivers/tty/serial/tegra-tcu.c @@ -91,15 +91,17 @@ static void tegra_tcu_write(struct tegra_tcu *tcu, const char *s, static void tegra_tcu_uart_start_tx(struct uart_port *port) { struct tegra_tcu *tcu = port->private_data; - struct circ_buf *xmit = &port->state->xmit; - unsigned long count; + struct tty_port *tport = &port->state->port; + unsigned char *tail; + unsigned int count; for (;;) { - count = CIRC_CNT_TO_END(xmit->head, xmit->tail, 
UART_XMIT_SIZE); + count = kfifo_out_linear_ptr(&tport->xmit_fifo, &tail, + UART_XMIT_SIZE); if (!count) break; - tegra_tcu_write(tcu, &xmit->buf[xmit->tail], count); + tegra_tcu_write(tcu, tail, count); uart_xmit_advance(port, count); } diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c index 4bc89a9b380a..43fa0938b5e3 100644 --- a/drivers/tty/serial/timbuart.c +++ b/drivers/tty/serial/timbuart.c @@ -95,14 +95,11 @@ static void timbuart_rx_chars(struct uart_port *port) static void timbuart_tx_chars(struct uart_port *port) { - struct circ_buf *xmit = &port->state->xmit; + unsigned char ch; while (!(ioread32(port->membase + TIMBUART_ISR) & TXBF) && - !uart_circ_empty(xmit)) { - iowrite8(xmit->buf[xmit->tail], - port->membase + TIMBUART_TXFIFO); - uart_xmit_advance(port, 1); - } + uart_fifo_get(port, &ch)) + iowrite8(ch, port->membase + TIMBUART_TXFIFO); dev_dbg(port->dev, "%s - total written %d bytes, CTL: %x, RTS: %x, baud: %x\n", @@ -117,9 +114,9 @@ static void timbuart_handle_tx_port(struct uart_port *port, u32 isr, u32 *ier) { struct timbuart_port *uart = container_of(port, struct timbuart_port, port); - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) return; if (port->x_char) @@ -130,7 +127,7 @@ static void timbuart_handle_tx_port(struct uart_port *port, u32 isr, u32 *ier) /* clear all TX interrupts */ iowrite32(TXFLAGS, port->membase + TIMBUART_ISR); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); } else /* Re-enable any tx interrupt */ @@ -141,7 +138,7 @@ static void timbuart_handle_tx_port(struct uart_port *port, u32 isr, u32 *ier) * we wake up the upper layer later when we got the interrupt * to give it some time to go out... 
*/ - if (!uart_circ_empty(xmit)) + if (!kfifo_is_empty(&tport->xmit_fifo)) *ier |= TXBAE; dev_dbg(port->dev, "%s - leaving\n", __func__); diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c index 10ba41b7be99..68357ac8ffe3 100644 --- a/drivers/tty/serial/uartlite.c +++ b/drivers/tty/serial/uartlite.c @@ -189,7 +189,8 @@ static int ulite_receive(struct uart_port *port, int stat) static int ulite_transmit(struct uart_port *port, int stat) { - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; + unsigned char ch; if (stat & ULITE_STATUS_TXFULL) return 0; @@ -201,14 +202,16 @@ static int ulite_transmit(struct uart_port *port, int stat) return 1; } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) + if (uart_tx_stopped(port)) + return 0; + + if (!uart_fifo_get(port, &ch)) return 0; - uart_out32(xmit->buf[xmit->tail], ULITE_TX, port); - uart_xmit_advance(port, 1); + uart_out32(ch, ULITE_TX, port); /* wake up */ - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); return 1; diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c index 397b95dff7ed..53bb8c5ef499 100644 --- a/drivers/tty/serial/ucc_uart.c +++ b/drivers/tty/serial/ucc_uart.c @@ -334,7 +334,7 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port) unsigned char *p; unsigned int count; struct uart_port *port = &qe_port->port; - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; /* Handle xon/xoff */ if (port->x_char) { @@ -358,7 +358,7 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port) return 1; } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) { qe_uart_stop_tx(port); return 0; } @@ -366,16 +366,10 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port) /* Pick next descriptor and fill from buffer */ bdp = qe_port->tx_cur; - while (!(ioread16be(&bdp->status) & BD_SC_READY) && !uart_circ_empty(xmit)) { - count = 0; + while (!(ioread16be(&bdp->status) & BD_SC_READY) && + !kfifo_is_empty(&tport->xmit_fifo)) { p = qe2cpu_addr(ioread32be(&bdp->buf), qe_port); - while (count < qe_port->tx_fifosize) { - *p++ = xmit->buf[xmit->tail]; - uart_xmit_advance(port, 1); - count++; - if (uart_circ_empty(xmit)) - break; - } + count = uart_fifo_out(port, p, qe_port->tx_fifosize); iowrite16be(count, &bdp->length); qe_setbits_be16(&bdp->status, BD_SC_READY); @@ -388,10 +382,10 @@ static int qe_uart_tx_pump(struct uart_qe_port *qe_port) } qe_port->tx_cur = bdp; - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); - if (uart_circ_empty(xmit)) { + if (kfifo_is_empty(&tport->xmit_fifo)) { /* The kernel buffer is empty, so turn off TX interrupts. We don't need to be told when the QE is finished transmitting the data. 
*/ diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index 5f48ec37cb25..de3487206bcb 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c @@ -425,32 +425,32 @@ static void cdns_uart_handle_tx(void *dev_id) { struct uart_port *port = (struct uart_port *)dev_id; struct cdns_uart *cdns_uart = port->private_data; - struct circ_buf *xmit = &port->state->xmit; + struct tty_port *tport = &port->state->port; unsigned int numbytes; + unsigned char ch; - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { + if (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port)) { /* Disable the TX Empty interrupt */ writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IDR); return; } numbytes = port->fifosize; - while (numbytes && !uart_circ_empty(xmit) && - !(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)) { - - writel(xmit->buf[xmit->tail], port->membase + CDNS_UART_FIFO); - uart_xmit_advance(port, 1); + while (numbytes && + !(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL) && + uart_fifo_get(port, &ch)) { + writel(ch, port->membase + CDNS_UART_FIFO); numbytes--; } - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(port); /* Enable the TX Empty interrupt */ writel(CDNS_UART_IXR_TXEMPTY, cdns_uart->port->membase + CDNS_UART_IER); if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED && - (uart_circ_empty(xmit) || uart_tx_stopped(port))) { + (kfifo_is_empty(&tport->xmit_fifo) || uart_tx_stopped(port))) { cdns_uart->tx_timer.function = &cdns_rs485_rx_callback; hrtimer_start(&cdns_uart->tx_timer, ns_to_ktime(cdns_calc_after_tx_delay(cdns_uart)), HRTIMER_MODE_REL); @@ -723,7 +723,7 @@ static void cdns_uart_start_tx(struct uart_port *port) status |= CDNS_UART_CR_TX_EN; writel(status, port->membase + CDNS_UART_CR); - if (uart_circ_empty(&port->state->xmit)) + if (kfifo_is_empty(&port->state->port.xmit_fifo)) return; /* Clear the TX Empty interrupt */ diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c index 65ca4da6e368..79ea7108a0f3 100644 --- a/drivers/tty/serial/zs.c +++ b/drivers/tty/serial/zs.c @@ -606,7 +606,8 @@ static void zs_receive_chars(struct zs_port *zport) static void zs_raw_transmit_chars(struct zs_port *zport) { - struct circ_buf *xmit = &zport->port.state->xmit; + struct tty_port *tport = &zport->port.state->port; + unsigned char ch; /* XON/XOFF chars. */ if (zport->port.x_char) { @@ -617,20 +618,20 @@ static void zs_raw_transmit_chars(struct zs_port *zport) } /* If nothing to do or stopped or hardware stopped. */ - if (uart_circ_empty(xmit) || uart_tx_stopped(&zport->port)) { + if (uart_tx_stopped(&zport->port) || + !uart_fifo_get(&zport->port, &ch)) { zs_raw_stop_tx(zport); return; } /* Send char. */ - write_zsdata(zport, xmit->buf[xmit->tail]); - uart_xmit_advance(&zport->port, 1); + write_zsdata(zport, ch); - if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS) uart_write_wakeup(&zport->port); /* Are we are done? 
*/ - if (uart_circ_empty(xmit)) + if (kfifo_is_empty(&tport->xmit_fifo)) zs_raw_stop_tx(zport); } diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 0a0f6e21d40e..8cb65f50e830 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -699,7 +698,6 @@ struct uart_state { struct tty_port port; enum uart_pm_state pm_state; - struct circ_buf xmit; atomic_t refcount; wait_queue_head_t remove_wait; @@ -723,12 +721,35 @@ struct uart_state { */ static inline void uart_xmit_advance(struct uart_port *up, unsigned int chars) { - struct circ_buf *xmit = &up->state->xmit; + struct tty_port *tport = &up->state->port; - xmit->tail = (xmit->tail + chars) & (UART_XMIT_SIZE - 1); + kfifo_skip_count(&tport->xmit_fifo, chars); up->icount.tx += chars; } +static inline unsigned int uart_fifo_out(struct uart_port *up, + unsigned char *buf, unsigned int chars) +{ + struct tty_port *tport = &up->state->port; + + chars = kfifo_out(&tport->xmit_fifo, buf, chars); + up->icount.tx += chars; + + return chars; +} + +static inline unsigned int uart_fifo_get(struct uart_port *up, + unsigned char *ch) +{ + struct tty_port *tport = &up->state->port; + unsigned int chars; + + chars = kfifo_get(&tport->xmit_fifo, ch); + up->icount.tx += chars; + + return chars; +} + struct module; struct tty_driver; @@ -764,7 +785,7 @@ enum UART_TX_FLAGS { for_test, for_post) \ ({ \ struct uart_port *__port = (uport); \ - struct circ_buf *xmit = &__port->state->xmit; \ + struct tty_port *__tport = &__port->state->port; \ unsigned int pending; \ \ for (; (for_test) && (tx_ready); (for_post), __port->icount.tx++) { \ @@ -775,17 +796,18 @@ enum UART_TX_FLAGS { continue; \ } \ \ - if (uart_circ_empty(xmit) || uart_tx_stopped(__port)) \ + if (uart_tx_stopped(__port)) \ + break; \ + \ + if (!kfifo_get(&__tport->xmit_fifo, &(ch))) \ break; \ \ - (ch) = xmit->buf[xmit->tail]; \ (put_char); \ - xmit->tail = (xmit->tail + 1) % UART_XMIT_SIZE; \ } \ \ (tx_done); \ \ - pending = uart_circ_chars_pending(xmit); \ + pending = kfifo_len(&__tport->xmit_fifo); \ if (pending < WAKEUP_CHARS) { \ uart_write_wakeup(__port); \ \ @@ -974,15 +996,6 @@ bool uart_match_port(const struct uart_port *port1, int uart_suspend_port(struct uart_driver *reg, struct uart_port *port); int uart_resume_port(struct uart_driver *reg, struct uart_port *port); -#define uart_circ_empty(circ) ((circ)->head == (circ)->tail) -#define uart_circ_clear(circ) ((circ)->head = (circ)->tail = 0) - -#define uart_circ_chars_pending(circ) \ - (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE)) - -#define uart_circ_chars_free(circ) \ - (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE)) - static inline int uart_tx_stopped(struct uart_port *port) { struct tty_struct *tty = port->state->port.tty; -- cgit From f03e8c1060f86c23eb49bafee99d9fcbd1c1bd77 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Wed, 27 Mar 2024 12:59:35 +0200 Subject: printk: Save console options for add_preferred_console_match() Driver subsystems may need to translate the preferred console name to the character device name used. We already do some of this in console_setup() with a few hardcoded names, but that does not scale well. The console options are parsed early in console_setup(), and the consoles are added with __add_preferred_console(). At this point we don't know much about the character device names and device drivers getting probed. 
To allow driver subsystems to set up a preferred console, let's save the kernel command line console options. To add a preferred console from a driver subsystem with optional character device name translation, let's add a new function add_preferred_console_match(). This allows the serial core layer to support console=DEVNAME:0.0 style hardware based addressing in addition to the current console=ttyS0 style naming. And we can start moving console_setup() character device parsing to the driver subsystem specific code. We use a separate array from the console_cmdline array as the character device name and index may be unknown at the console_setup() time. And eventually there's no need to call __add_preferred_console() until the subsystem is ready to handle the console. Adding the console name in addition to the character device name, and a flag for an added console, could be added to the struct console_cmdline. And the console_cmdline array handling could be modified accordingly. But that complicates things compared saving the console options, and then adding the consoles when the subsystems handling the consoles are ready. Co-developed-by: Andy Shevchenko Signed-off-by: Tony Lindgren Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20240327110021.59793-2-tony@atomide.com Signed-off-by: Greg Kroah-Hartman --- include/linux/printk.h | 3 + kernel/printk/Makefile | 2 +- kernel/printk/conopt.c | 146 ++++++++++++++++++++++++++++++++++++++++ kernel/printk/console_cmdline.h | 6 ++ kernel/printk/printk.c | 14 +++- 5 files changed, 167 insertions(+), 4 deletions(-) create mode 100644 kernel/printk/conopt.c (limited to 'include/linux') diff --git a/include/linux/printk.h b/include/linux/printk.h index 955e31860095..5e038e782256 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -60,6 +60,9 @@ static inline const char *printk_skip_headers(const char *buffer) #define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT #define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET +int add_preferred_console_match(const char *match, const char *name, + const short idx); + extern int console_printk[]; #define console_loglevel (console_printk[0]) diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile index 39a2b61c7232..040fe7d1eda2 100644 --- a/kernel/printk/Makefile +++ b/kernel/printk/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-y = printk.o +obj-y = printk.o conopt.o obj-$(CONFIG_PRINTK) += printk_safe.o nbcon.o obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o obj-$(CONFIG_PRINTK_INDEX) += index.o diff --git a/kernel/printk/conopt.c b/kernel/printk/conopt.c new file mode 100644 index 000000000000..9d507bac3657 --- /dev/null +++ b/kernel/printk/conopt.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Kernel command line console options for hardware based addressing + * + * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/ + * Author: Tony Lindgren + */ + +#include +#include +#include +#include + +#include + +#include "console_cmdline.h" + +/* + * Allow longer DEVNAME:0.0 style console naming such as abcd0000.serial:0.0 + * in addition to the legacy ttyS0 style naming. 
+ */ +#define CONSOLE_NAME_MAX 32 + +#define CONSOLE_OPT_MAX 16 +#define CONSOLE_BRL_OPT_MAX 16 + +struct console_option { + char name[CONSOLE_NAME_MAX]; + char opt[CONSOLE_OPT_MAX]; + char brl_opt[CONSOLE_BRL_OPT_MAX]; + u8 has_brl_opt:1; +}; + +/* Updated only at console_setup() time, no locking needed */ +static struct console_option conopt[MAX_CMDLINECONSOLES]; + +/** + * console_opt_save - Saves kernel command line console option for driver use + * @str: Kernel command line console name and option + * @brl_opt: Braille console options + * + * Saves a kernel command line console option for driver subsystems to use for + * adding a preferred console during init. Called from console_setup() only. + * + * Return: 0 on success, negative error code on failure. + */ +int __init console_opt_save(const char *str, const char *brl_opt) +{ + struct console_option *con; + size_t namelen, optlen; + const char *opt; + int i; + + namelen = strcspn(str, ","); + if (namelen == 0 || namelen >= CONSOLE_NAME_MAX) + return -EINVAL; + + opt = str + namelen; + if (*opt == ',') + opt++; + + optlen = strlen(opt); + if (optlen >= CONSOLE_OPT_MAX) + return -EINVAL; + + for (i = 0; i < MAX_CMDLINECONSOLES; i++) { + con = &conopt[i]; + + if (con->name[0]) { + if (!strncmp(str, con->name, namelen)) + return 0; + continue; + } + + /* + * The name isn't terminated, only opt is. Empty opt is fine, + * but brl_opt can be either empty or NULL. For more info, see + * _braille_console_setup(). + */ + strscpy(con->name, str, namelen + 1); + strscpy(con->opt, opt, CONSOLE_OPT_MAX); + if (brl_opt) { + strscpy(con->brl_opt, brl_opt, CONSOLE_BRL_OPT_MAX); + con->has_brl_opt = 1; + } + + return 0; + } + + return -ENOMEM; +} + +static struct console_option *console_opt_find(const char *name) +{ + struct console_option *con; + int i; + + for (i = 0; i < MAX_CMDLINECONSOLES; i++) { + con = &conopt[i]; + if (!strcmp(name, con->name)) + return con; + } + + return NULL; +} + +/** + * add_preferred_console_match - Adds a preferred console if a match is found + * @match: Expected console on kernel command line, such as console=DEVNAME:0.0 + * @name: Name of the console character device to add such as ttyS + * @idx: Index for the console + * + * Allows driver subsystems to add a console after translating the command + * line name to the character device name used for the console. Options are + * added automatically based on the kernel command line. Duplicate preferred + * consoles are ignored by __add_preferred_console(). + * + * Return: 0 on success, negative error code on failure. + */ +int add_preferred_console_match(const char *match, const char *name, + const short idx) +{ + struct console_option *con; + char *brl_opt = NULL; + + if (!match || !strlen(match) || !name || !strlen(name) || + idx < 0) + return -EINVAL; + + con = console_opt_find(match); + if (!con) + return -ENOENT; + + /* + * See __add_preferred_console(). It checks for NULL brl_options to set + * the preferred_console flag. Empty brl_opt instead of NULL leads into + * the preferred_console flag not set, and CON_CONSDEV not being set, + * and the boot console won't get disabled at the end of console_setup(). 
+ */ + if (con->has_brl_opt) + brl_opt = con->brl_opt; + + console_opt_add_preferred_console(name, idx, con->opt, brl_opt); + + return 0; +} diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h index 3ca74ad391d6..a125e0235589 100644 --- a/kernel/printk/console_cmdline.h +++ b/kernel/printk/console_cmdline.h @@ -2,6 +2,12 @@ #ifndef _CONSOLE_CMDLINE_H #define _CONSOLE_CMDLINE_H +#define MAX_CMDLINECONSOLES 8 + +int console_opt_save(const char *str, const char *brl_opt); +int console_opt_add_preferred_console(const char *name, const short idx, + char *options, char *brl_options); + struct console_cmdline { char name[16]; /* Name of the driver */ diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index adf99c05adca..05919491c5a2 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -383,9 +383,6 @@ static int console_locked; /* * Array of consoles built from command line options (console=) */ - -#define MAX_CMDLINECONSOLES 8 - static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES]; static int preferred_console = -1; @@ -2503,6 +2500,10 @@ static int __init console_setup(char *str) if (_braille_console_setup(&str, &brl_options)) return 1; + /* Save the console for driver subsystem use */ + if (console_opt_save(str, brl_options)) + return 1; + /* * Decode str into name, index, options. */ @@ -2533,6 +2534,13 @@ static int __init console_setup(char *str) } __setup("console=", console_setup); +/* Only called from add_preferred_console_match() */ +int console_opt_add_preferred_console(const char *name, const short idx, + char *options, char *brl_options) +{ + return __add_preferred_console(name, idx, options, brl_options, true); +} + /** * add_preferred_console - add a device to the list of preferred consoles. * @name: device name -- cgit From 0487724912abc14eb0e95c352a29e6691a733631 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 2 Apr 2024 22:50:31 +0300 Subject: serial: max3100: Make struct plat_max3100 local There is no user of the struct plat_max3100 outside the driver. Inline its contents into the driver. While at it, drop outdated example in the comment. Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20240402195306.269276-5-andriy.shevchenko@linux.intel.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/max3100.c | 38 ++++++++++++++++----------------- include/linux/serial_max3100.h | 48 ------------------------------------------ 2 files changed, 18 insertions(+), 68 deletions(-) delete mode 100644 include/linux/serial_max3100.h (limited to 'include/linux') diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c index 8c6eaf908bce..0e30201e9689 100644 --- a/drivers/tty/serial/max3100.c +++ b/drivers/tty/serial/max3100.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * * Copyright (C) 2008 Christian Pellegrin * * Notes: the MAX3100 doesn't provide an interrupt on CTS so we have @@ -8,24 +7,6 @@ * writing conf clears FIFO buffer and we cannot have this interrupt * always asking us for attention. 
* - * Example platform data: - - static struct plat_max3100 max3100_plat_data = { - .loopback = 0, - .crystal = 0, - .poll_time = 100, - }; - - static struct spi_board_info spi_board_info[] = { - { - .modalias = "max3100", - .platform_data = &max3100_plat_data, - .irq = IRQ_EINT12, - .max_speed_hz = 5*1000*1000, - .chip_select = 0, - }, - }; - * The initial minor number is 209 in the low-density serial port: * mknod /dev/ttyMAX0 c 204 209 */ @@ -49,7 +30,24 @@ #include -#include +/** + * struct plat_max3100 - MAX3100 SPI UART platform data + * @loopback: force MAX3100 in loopback + * @crystal: 1 for 3.6864 Mhz, 0 for 1.8432 + * @max3100_hw_suspend: MAX3100 has a shutdown pin. This is a hook + * called on suspend and resume to activate it. + * @poll_time: poll time for CTS signal in ms, 0 disables (so no hw + * flow ctrl is possible but you have less CPU usage) + * + * You should use this structure in your machine description to specify + * how the MAX3100 is connected. + */ +struct plat_max3100 { + int loopback; + int crystal; + void (*max3100_hw_suspend) (int suspend); + int poll_time; +}; #define MAX3100_C (1<<14) #define MAX3100_D (0<<14) diff --git a/include/linux/serial_max3100.h b/include/linux/serial_max3100.h deleted file mode 100644 index befd55c08a7c..000000000000 --- a/include/linux/serial_max3100.h +++ /dev/null @@ -1,48 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * - * Copyright (C) 2007 Christian Pellegrin - */ - - -#ifndef _LINUX_SERIAL_MAX3100_H -#define _LINUX_SERIAL_MAX3100_H 1 - - -/** - * struct plat_max3100 - MAX3100 SPI UART platform data - * @loopback: force MAX3100 in loopback - * @crystal: 1 for 3.6864 Mhz, 0 for 1.8432 - * @max3100_hw_suspend: MAX3100 has a shutdown pin. This is a hook - * called on suspend and resume to activate it. - * @poll_time: poll time for CTS signal in ms, 0 disables (so no hw - * flow ctrl is possible but you have less CPU usage) - * - * You should use this structure in your machine description to specify - * how the MAX3100 is connected. Example: - * - * static struct plat_max3100 max3100_plat_data = { - * .loopback = 0, - * .crystal = 0, - * .poll_time = 100, - * }; - * - * static struct spi_board_info spi_board_info[] = { - * { - * .modalias = "max3100", - * .platform_data = &max3100_plat_data, - * .irq = IRQ_EINT12, - * .max_speed_hz = 5*1000*1000, - * .chip_select = 0, - * }, - * }; - * - **/ -struct plat_max3100 { - int loopback; - int crystal; - void (*max3100_hw_suspend) (int suspend); - int poll_time; -}; - -#endif -- cgit From 897fa2c38c076c801bd1f1238af0af927e339c8f Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 3 Apr 2024 23:59:41 +0800 Subject: cpumask: add cpumask_any_and_but() In some cases, it's useful to be able to select a random cpu from the intersection of two masks, excluding a particular CPU. For example, in some systems an uncore PMU is shared by a subset of CPUs, and management of this PMU is assigned to some arbitrary CPU in this set. Whenever the management CPU is hotplugged out, we wish to migrate responsibility to another arbitrary CPU which is both in this set and online. Today we can use cpumask_any_and() to select an arbitrary CPU in the intersection of two masks. We can also use cpumask_any_but() to select any arbitrary cpu in a mask excluding, a particular CPU. To do both, we either need to use a temporary cpumask, which is wasteful, or use some lower-level cpumask helpers, which can be unclear. This patch adds a new cpumask_any_and_but() to cater for these cases. 
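As an illustration of the uncore PMU case described above, a driver's CPU hotplug offline callback could use the new helper roughly as follows. This is only a sketch, not code from this patch: struct my_pmu, its fields and my_pmu_offline_cpu() are hypothetical names chosen for the example.

  struct my_pmu {
          struct pmu pmu;
          struct hlist_node cpuhp_node;
          cpumask_t supported_cpus;       /* CPUs sharing this uncore PMU */
          unsigned int mgmt_cpu;          /* CPU currently managing it */
  };

  static int my_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
  {
          struct my_pmu *pmu = hlist_entry_safe(node, struct my_pmu, cpuhp_node);
          unsigned int target;

          if (!pmu || cpu != pmu->mgmt_cpu)
                  return 0;

          /* Pick any other online CPU that shares this uncore PMU. */
          target = cpumask_any_and_but(&pmu->supported_cpus, cpu_online_mask, cpu);
          if (target >= nr_cpu_ids)
                  return 0;       /* no eligible CPU left online */

          perf_pmu_migrate_context(&pmu->pmu, cpu, target);
          pmu->mgmt_cpu = target;
          return 0;
  }

No temporary cpumask and no open-coded cpumask_first_and()/cpumask_next_and() sequence is needed, which is the point of the helper.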
Signed-off-by: Mark Rutland Cc: Thomas Gleixner Cc: Andrew Morton Cc: Peter Zijlstra Cc: Rusty Russell Cc: linux-kernel@vger.kernel.org Signed-off-by: Dawei Li Acked-by: Yury Norov Link: https://lore.kernel.org/r/20240403155950.2068109-2-dawei.li@shingroup.cn Signed-off-by: Will Deacon --- include/linux/cpumask.h | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) (limited to 'include/linux') diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 1c29947db848..121f3ac757ff 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -388,6 +388,29 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) return i; } +/** + * cpumask_any_and_but - pick a "random" cpu from *mask1 & *mask2, but not this one. + * @mask1: the first input cpumask + * @mask2: the second input cpumask + * @cpu: the cpu to ignore + * + * Returns >= nr_cpu_ids if no cpus set. + */ +static inline +unsigned int cpumask_any_and_but(const struct cpumask *mask1, + const struct cpumask *mask2, + unsigned int cpu) +{ + unsigned int i; + + cpumask_check(cpu); + i = cpumask_first_and(mask1, mask2); + if (i != cpu) + return i; + + return cpumask_next_and(cpu, mask1, mask2); +} + /** * cpumask_nth - get the Nth cpu in a cpumask * @srcp: the cpumask pointer -- cgit From 904fb8f843a99ae7473d184412b6c7bd46a51593 Mon Sep 17 00:00:00 2001 From: Bastien Curutchet Date: Tue, 2 Apr 2024 09:12:03 +0200 Subject: ASoC: ti: davinci-i2s: Remove the unused clk_input_pin attribute The clk_input_pin attribute of davinci_mcbsp_dev struct is not set since commit 257ade78b601 ("ASoC: davinci-i2s: Convert to use edma-pcm"). Remove the attribute. Keep the behaviour of the MCBSP_CLKR case as MCBSP_CLKR == 0. I can't test the BC_FP format so I added back the initial comment that was removed by commit ec6375533748 ("ASoC: DaVinci: Added selection of clk input pin for McBSP"). This was the last dependency to linux/platform_data/davinci_asp.h so it is not included anymore. Remove the enum mcbsp_clk_input_pin from davinci_asp.h as it is not used anywhere else. Signed-off-by: Bastien Curutchet Acked-by: Peter Ujfalusi Link: https://msgid.link/r/20240402071213.11671-4-bastien.curutchet@bootlin.com Signed-off-by: Mark Brown --- include/linux/platform_data/davinci_asp.h | 15 --------------- sound/soc/ti/davinci-i2s.c | 24 ++++-------------------- 2 files changed, 4 insertions(+), 35 deletions(-) (limited to 'include/linux') diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h index c8645b2ed3c0..b9c8520b4bd3 100644 --- a/include/linux/platform_data/davinci_asp.h +++ b/include/linux/platform_data/davinci_asp.h @@ -25,16 +25,6 @@ struct davinci_mcasp_pdata { unsigned sram_size_capture; struct gen_pool *sram_pool; - /* - * If McBSP peripheral gets the clock from an external pin, - * there are three chooses, that are MCBSP_CLKX, MCBSP_CLKR - * and MCBSP_CLKS. - * Depending on different hardware connections it is possible - * to use this setting to change the behaviour of McBSP - * driver. 
- */ - int clk_input_pin; - /* * This flag works when both clock and FS are outputs for the cpu * and makes clock more accurate (FS is not symmetrical and the @@ -91,11 +81,6 @@ enum { MCASP_VERSION_OMAP, /* OMAP4/5 */ }; -enum mcbsp_clk_input_pin { - MCBSP_CLKR = 0, /* as in DM365 */ - MCBSP_CLKS, -}; - #define INACTIVE_MODE 0 #define TX_MODE 1 #define RX_MODE 2 diff --git a/sound/soc/ti/davinci-i2s.c b/sound/soc/ti/davinci-i2s.c index 07c8b2259208..5c906641640e 100644 --- a/sound/soc/ti/davinci-i2s.c +++ b/sound/soc/ti/davinci-i2s.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include @@ -159,7 +158,6 @@ struct davinci_mcbsp_dev { unsigned int fmt; int clk_div; - int clk_input_pin; bool i2s_accurate_sck; }; @@ -239,26 +237,12 @@ static int davinci_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai, DAVINCI_MCBSP_PCR_CLKRM; break; case SND_SOC_DAIFMT_BC_FP: - pcr = DAVINCI_MCBSP_PCR_FSRM | DAVINCI_MCBSP_PCR_FSXM; /* - * Selection of the clock input pin that is the - * input for the Sample Rate Generator. - * McBSP FSR and FSX are driven by the Sample Rate - * Generator. + * McBSP CLKR pin is the input for the Sample Rate Generator. + * McBSP FSR and FSX are driven by the Sample Rate Generator. */ - switch (dev->clk_input_pin) { - case MCBSP_CLKS: - pcr |= DAVINCI_MCBSP_PCR_CLKXM | - DAVINCI_MCBSP_PCR_CLKRM; - break; - case MCBSP_CLKR: - pcr |= DAVINCI_MCBSP_PCR_SCLKME; - break; - default: - dev_err(dev->dev, "bad clk_input_pin\n"); - return -EINVAL; - } - + pcr = DAVINCI_MCBSP_PCR_FSRM | DAVINCI_MCBSP_PCR_FSXM; + pcr |= DAVINCI_MCBSP_PCR_SCLKME; break; case SND_SOC_DAIFMT_BC_FC: /* codec is master */ -- cgit From d503a04f8bc0c75dc9db9452d8cc79d748afb752 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Fri, 5 Apr 2024 16:11:33 -0700 Subject: bpf: Add support for certain atomics in bpf_arena to x86 JIT Support atomics in bpf_arena that can be JITed as a single x86 instruction. Instructions that are JITed as loops are not supported at the moment, since they require more complex extable and loop logic. JITs can choose to do smarter things with bpf_jit_supports_insn(). Like arm64 may decide to support all bpf atomics instructions when emit_lse_atomic is available and none in ll_sc mode. bpf_jit_supports_percpu_insn(), bpf_jit_supports_ptr_xchg() and other such callbacks can be replaced with bpf_jit_supports_insn() in the future. 
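To make the arm64 example above concrete, such a JIT could implement the new callback along these lines. This is a sketch only, not part of this patch; the cpus_have_cap(ARM64_HAS_LSE_ATOMICS) check is illustrative and the eventual arm64 implementation may differ:

  bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
  {
          if (!in_arena)
                  return true;

          switch (insn->code) {
          case BPF_STX | BPF_ATOMIC | BPF_W:
          case BPF_STX | BPF_ATOMIC | BPF_DW:
                  /*
                   * Single-instruction LSE atomics can be covered by the
                   * arena exception table; ll/sc loops cannot, so reject
                   * arena atomics when LSE is unavailable.
                   */
                  return cpus_have_cap(ARM64_HAS_LSE_ATOMICS);
          }
          return true;
  }

The weak default added to kernel/bpf/core.c below returns false, so architectures that do not provide an override simply get no arena atomics support.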
Signed-off-by: Alexei Starovoitov Acked-by: Eduard Zingerman Link: https://lore.kernel.org/r/20240405231134.17274-1-alexei.starovoitov@gmail.com Signed-off-by: Martin KaFai Lau --- arch/x86/net/bpf_jit_comp.c | 72 +++++++++++++++++++++++++++++++++++++++++++++ include/linux/filter.h | 4 +++ kernel/bpf/core.c | 5 ++++ kernel/bpf/verifier.c | 19 +++++++++++- 4 files changed, 99 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 6cf9a5697c09..2b5a475c4dd0 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1172,6 +1172,54 @@ static int emit_atomic(u8 **pprog, u8 atomic_op, return 0; } +static int emit_atomic_index(u8 **pprog, u8 atomic_op, u32 size, + u32 dst_reg, u32 src_reg, u32 index_reg, int off) +{ + u8 *prog = *pprog; + + EMIT1(0xF0); /* lock prefix */ + switch (size) { + case BPF_W: + EMIT1(add_3mod(0x40, dst_reg, src_reg, index_reg)); + break; + case BPF_DW: + EMIT1(add_3mod(0x48, dst_reg, src_reg, index_reg)); + break; + default: + pr_err("bpf_jit: 1 and 2 byte atomics are not supported\n"); + return -EFAULT; + } + + /* emit opcode */ + switch (atomic_op) { + case BPF_ADD: + case BPF_AND: + case BPF_OR: + case BPF_XOR: + /* lock *(u32/u64*)(dst_reg + idx_reg + off) = src_reg */ + EMIT1(simple_alu_opcodes[atomic_op]); + break; + case BPF_ADD | BPF_FETCH: + /* src_reg = atomic_fetch_add(dst_reg + idx_reg + off, src_reg); */ + EMIT2(0x0F, 0xC1); + break; + case BPF_XCHG: + /* src_reg = atomic_xchg(dst_reg + idx_reg + off, src_reg); */ + EMIT1(0x87); + break; + case BPF_CMPXCHG: + /* r0 = atomic_cmpxchg(dst_reg + idx_reg + off, r0, src_reg); */ + EMIT2(0x0F, 0xB1); + break; + default: + pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op); + return -EFAULT; + } + emit_insn_suffix_SIB(&prog, dst_reg, src_reg, index_reg, off); + *pprog = prog; + return 0; +} + #define DONT_CLEAR 1 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) @@ -1982,6 +2030,15 @@ populate_extable: return err; break; + case BPF_STX | BPF_PROBE_ATOMIC | BPF_W: + case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW: + start_of_ldx = prog; + err = emit_atomic_index(&prog, insn->imm, BPF_SIZE(insn->code), + dst_reg, src_reg, X86_REG_R12, insn->off); + if (err) + return err; + goto populate_extable; + /* call */ case BPF_JMP | BPF_CALL: { int offs; @@ -3486,6 +3543,21 @@ bool bpf_jit_supports_arena(void) return true; } +bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena) +{ + if (!in_arena) + return true; + switch (insn->code) { + case BPF_STX | BPF_ATOMIC | BPF_W: + case BPF_STX | BPF_ATOMIC | BPF_DW: + if (insn->imm == (BPF_AND | BPF_FETCH) || + insn->imm == (BPF_OR | BPF_FETCH) || + insn->imm == (BPF_XOR | BPF_FETCH)) + return false; + } + return true; +} + bool bpf_jit_supports_ptr_xchg(void) { return true; diff --git a/include/linux/filter.h b/include/linux/filter.h index 161d5f7b64ed..7a27f19bf44d 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -75,6 +75,9 @@ struct ctl_table_header; /* unused opcode to mark special load instruction. 
Same as BPF_MSH */ #define BPF_PROBE_MEM32 0xa0 +/* unused opcode to mark special atomic instruction */ +#define BPF_PROBE_ATOMIC 0xe0 + /* unused opcode to mark call to interpreter with arguments */ #define BPF_CALL_ARGS 0xe0 @@ -997,6 +1000,7 @@ bool bpf_jit_supports_far_kfunc_call(void); bool bpf_jit_supports_exceptions(void); bool bpf_jit_supports_ptr_xchg(void); bool bpf_jit_supports_arena(void); +bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena); void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie); bool bpf_helper_changes_pkt_data(void *func); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 7a33a3a7e63c..a41718eaeefe 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2965,6 +2965,11 @@ bool __weak bpf_jit_supports_arena(void) return false; } +bool __weak bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena) +{ + return false; +} + /* Return TRUE if the JIT backend satisfies the following two conditions: * 1) JIT backend supports atomic_xchg() on pointer-sized words. * 2) Under the specific arch, the implementation of xchg() is the same diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 590db4e4c071..2aad6d90550f 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -6970,6 +6970,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn return err; } +static int save_aux_ptr_type(struct bpf_verifier_env *env, enum bpf_reg_type type, + bool allow_trust_missmatch); + static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) { int load_reg; @@ -7030,7 +7033,7 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i is_pkt_reg(env, insn->dst_reg) || is_flow_key_reg(env, insn->dst_reg) || is_sk_reg(env, insn->dst_reg) || - is_arena_reg(env, insn->dst_reg)) { + (is_arena_reg(env, insn->dst_reg) && !bpf_jit_supports_insn(insn, true))) { verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n", insn->dst_reg, reg_type_str(env, reg_state(env, insn->dst_reg)->type)); @@ -7066,6 +7069,11 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i if (err) return err; + if (is_arena_reg(env, insn->dst_reg)) { + err = save_aux_ptr_type(env, PTR_TO_ARENA, false); + if (err) + return err; + } /* Check whether we can write into the same memory. 
*/ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, BPF_SIZE(insn->code), BPF_WRITE, -1, true, false); @@ -18955,6 +18963,12 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) insn->code == (BPF_ST | BPF_MEM | BPF_W) || insn->code == (BPF_ST | BPF_MEM | BPF_DW)) { type = BPF_WRITE; + } else if ((insn->code == (BPF_STX | BPF_ATOMIC | BPF_W) || + insn->code == (BPF_STX | BPF_ATOMIC | BPF_DW)) && + env->insn_aux_data[i + delta].ptr_type == PTR_TO_ARENA) { + insn->code = BPF_STX | BPF_PROBE_ATOMIC | BPF_SIZE(insn->code); + env->prog->aux->num_exentries++; + continue; } else { continue; } @@ -19226,6 +19240,9 @@ static int jit_subprogs(struct bpf_verifier_env *env) BPF_CLASS(insn->code) == BPF_ST) && BPF_MODE(insn->code) == BPF_PROBE_MEM32) num_exentries++; + if (BPF_CLASS(insn->code) == BPF_STX && + BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) + num_exentries++; } func[i]->aux->num_exentries = num_exentries; func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable; -- cgit From 3253804773c0613a1bad5bfea2edf172b760d8b6 Mon Sep 17 00:00:00 2001 From: Stefan Berger Date: Fri, 23 Feb 2024 12:25:05 -0500 Subject: security: allow finer granularity in permitting copy-up of security xattrs Copying up xattrs is solely based on the security xattr name. For finer granularity add a dentry parameter to the security_inode_copy_up_xattr hook definition, allowing decisions to be based on the xattr content as well. Co-developed-by: Mimi Zohar Signed-off-by: Stefan Berger Acked-by: Amir Goldstein Acked-by: Paul Moore (LSM,SELinux) Signed-off-by: Mimi Zohar --- fs/overlayfs/copy_up.c | 2 +- include/linux/lsm_hook_defs.h | 3 ++- include/linux/security.h | 4 ++-- security/integrity/evm/evm_main.c | 2 +- security/security.c | 5 +++-- security/selinux/hooks.c | 2 +- security/smack/smack_lsm.c | 2 +- 7 files changed, 11 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 0762575a1e70..a5ef2005a2cc 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -114,7 +114,7 @@ int ovl_copy_xattr(struct super_block *sb, const struct path *oldpath, struct de if (ovl_is_private_xattr(sb, name)) continue; - error = security_inode_copy_up_xattr(name); + error = security_inode_copy_up_xattr(old, name); if (error < 0 && error != -EOPNOTSUPP) break; if (error == 1) { diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 334e00efbde4..f804b76cde44 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -176,7 +176,8 @@ LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer, size_t buffer_size) LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid) LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new) -LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, const char *name) +LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, struct dentry *src, + const char *name) LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir, struct kernfs_node *kn) LSM_HOOK(int, 0, file_permission, struct file *file, int mask) diff --git a/include/linux/security.h b/include/linux/security.h index 41a8f667bdfa..21cf70346b33 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -398,7 +398,7 @@ int security_inode_setsecurity(struct inode *inode, const char *name, const void int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size); void security_inode_getsecid(struct inode 
*inode, u32 *secid); int security_inode_copy_up(struct dentry *src, struct cred **new); -int security_inode_copy_up_xattr(const char *name); +int security_inode_copy_up_xattr(struct dentry *src, const char *name); int security_kernfs_init_security(struct kernfs_node *kn_dir, struct kernfs_node *kn); int security_file_permission(struct file *file, int mask); @@ -1016,7 +1016,7 @@ static inline int security_kernfs_init_security(struct kernfs_node *kn_dir, return 0; } -static inline int security_inode_copy_up_xattr(const char *name) +static inline int security_inode_copy_up_xattr(struct dentry *src, const char *name) { return -EOPNOTSUPP; } diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index 81dbade5b9b3..b0a862bfd74a 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -946,7 +946,7 @@ static void evm_inode_post_setattr(struct mnt_idmap *idmap, evm_update_evmxattr(dentry, NULL, NULL, 0); } -static int evm_inode_copy_up_xattr(const char *name) +static int evm_inode_copy_up_xattr(struct dentry *src, const char *name) { if (strcmp(name, XATTR_NAME_EVM) == 0) return 1; /* Discard */ diff --git a/security/security.c b/security/security.c index 0a9a0ac3f266..e5da848c50b9 100644 --- a/security/security.c +++ b/security/security.c @@ -2628,6 +2628,7 @@ EXPORT_SYMBOL(security_inode_copy_up); /** * security_inode_copy_up_xattr() - Filter xattrs in an overlayfs copy-up op + * @src: union dentry of copy-up file * @name: xattr name * * Filter the xattrs being copied up when a unioned file is copied up from a @@ -2638,7 +2639,7 @@ EXPORT_SYMBOL(security_inode_copy_up); * if the security module does not know about attribute, or a negative * error code to abort the copy up. */ -int security_inode_copy_up_xattr(const char *name) +int security_inode_copy_up_xattr(struct dentry *src, const char *name) { int rc; @@ -2647,7 +2648,7 @@ int security_inode_copy_up_xattr(const char *name) * xattr), -EOPNOTSUPP if it does not know anything about the xattr or * any other error code in case of an error. */ - rc = call_int_hook(inode_copy_up_xattr, name); + rc = call_int_hook(inode_copy_up_xattr, src, name); if (rc != LSM_RET_DEFAULT(inode_copy_up_xattr)) return rc; diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 3448454c82d0..9f018f51e47a 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -3530,7 +3530,7 @@ static int selinux_inode_copy_up(struct dentry *src, struct cred **new) return 0; } -static int selinux_inode_copy_up_xattr(const char *name) +static int selinux_inode_copy_up_xattr(struct dentry *dentry, const char *name) { /* The copy_up hook above sets the initial context on an inode, but we * don't then want to overwrite it by blindly copying all the lower diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 146667937811..79d8dc633b9a 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -4885,7 +4885,7 @@ static int smack_inode_copy_up(struct dentry *dentry, struct cred **new) return 0; } -static int smack_inode_copy_up_xattr(const char *name) +static int smack_inode_copy_up_xattr(struct dentry *src, const char *name) { /* * Return 1 if this is the smack access Smack attribute. 
-- cgit From 309e2b775da8b2c28fccc4ac2621801f06920ce0 Mon Sep 17 00:00:00 2001 From: Stefan Berger Date: Fri, 23 Feb 2024 12:25:08 -0500 Subject: ima: Move file-change detection variables into new structure Move all the variables used for file change detection into a structure that can be used by IMA and EVM. Implement an inline function for storing the identification of an inode and one for detecting changes to an inode based on this new structure. Co-developed-by: Mimi Zohar Signed-off-by: Stefan Berger Signed-off-by: Mimi Zohar --- include/linux/integrity.h | 34 ++++++++++++++++++++++++++++++++++ security/integrity/ima/ima.h | 4 +--- security/integrity/ima/ima_api.c | 10 +++++----- security/integrity/ima/ima_iint.c | 2 +- security/integrity/ima/ima_main.c | 7 +++---- 5 files changed, 44 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/integrity.h b/include/linux/integrity.h index 459b79683783..f5842372359b 100644 --- a/include/linux/integrity.h +++ b/include/linux/integrity.h @@ -8,6 +8,7 @@ #define _LINUX_INTEGRITY_H #include +#include enum integrity_status { INTEGRITY_PASS = 0, @@ -28,4 +29,37 @@ static inline void integrity_load_keys(void) } #endif /* CONFIG_INTEGRITY */ +/* An inode's attributes for detection of changes */ +struct integrity_inode_attributes { + u64 version; /* track inode changes */ + unsigned long ino; + dev_t dev; +}; + +/* + * On stacked filesystems the i_version alone is not enough to detect file data + * or metadata change. Additional metadata is required. + */ +static inline void +integrity_inode_attrs_store(struct integrity_inode_attributes *attrs, + u64 i_version, const struct inode *inode) +{ + attrs->version = i_version; + attrs->dev = inode->i_sb->s_dev; + attrs->ino = inode->i_ino; +} + +/* + * On stacked filesystems detect whether the inode or its content has changed. 
+ */ +static inline bool +integrity_inode_attrs_changed(const struct integrity_inode_attributes *attrs, + const struct inode *inode) +{ + return (inode->i_sb->s_dev != attrs->dev || + inode->i_ino != attrs->ino || + !inode_eq_iversion(inode, attrs->version)); +} + + #endif /* _LINUX_INTEGRITY_H */ diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index 11d7c0332207..9151b5369cdc 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -175,12 +175,10 @@ struct ima_kexec_hdr { /* IMA integrity metadata associated with an inode */ struct ima_iint_cache { struct mutex mutex; /* protects: version, flags, digest */ - u64 version; /* track inode changes */ + struct integrity_inode_attributes real_inode; unsigned long flags; unsigned long measured_pcrs; unsigned long atomic_flags; - unsigned long real_ino; - dev_t real_dev; enum integrity_status ima_file_status:4; enum integrity_status ima_mmap_status:4; enum integrity_status ima_bprm_status:4; diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c index 3d286de231e1..984e861f6e33 100644 --- a/security/integrity/ima/ima_api.c +++ b/security/integrity/ima/ima_api.c @@ -305,11 +305,11 @@ int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file, iint->ima_hash = tmpbuf; memcpy(iint->ima_hash, &hash, length); - iint->version = i_version; - if (real_inode != inode) { - iint->real_ino = real_inode->i_ino; - iint->real_dev = real_inode->i_sb->s_dev; - } + if (real_inode == inode) + iint->real_inode.version = i_version; + else + integrity_inode_attrs_store(&iint->real_inode, i_version, + real_inode); /* Possibly temporary failure due to type of read (eg. O_DIRECT) */ if (!result) diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c index e7c9c216c1c6..e23412a2c56b 100644 --- a/security/integrity/ima/ima_iint.c +++ b/security/integrity/ima/ima_iint.c @@ -59,7 +59,7 @@ static void ima_iint_init_always(struct ima_iint_cache *iint, struct inode *inode) { iint->ima_hash = NULL; - iint->version = 0; + iint->real_inode.version = 0; iint->flags = 0UL; iint->atomic_flags = 0UL; iint->ima_file_status = INTEGRITY_UNKNOWN; diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index eebf629f192e..4b215d85c14b 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -173,7 +173,7 @@ static void ima_check_last_writer(struct ima_iint_cache *iint, STATX_CHANGE_COOKIE, AT_STATX_SYNC_AS_STAT) || !(stat.result_mask & STATX_CHANGE_COOKIE) || - stat.change_cookie != iint->version) { + stat.change_cookie != iint->real_inode.version) { iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE); iint->measured_pcrs = 0; if (update) @@ -292,9 +292,8 @@ static int process_measurement(struct file *file, const struct cred *cred, if (real_inode != inode && (action & IMA_DO_MASK) && (iint->flags & IMA_DONE_MASK)) { if (!IS_I_VERSION(real_inode) || - real_inode->i_sb->s_dev != iint->real_dev || - real_inode->i_ino != iint->real_ino || - !inode_eq_iversion(real_inode, iint->version)) { + integrity_inode_attrs_changed(&iint->real_inode, + real_inode)) { iint->flags &= ~IMA_DONE_MASK; iint->measured_pcrs = 0; } -- cgit From a652aa59068bd78d96a1ab6ea7c0c4d3c79fb5e8 Mon Sep 17 00:00:00 2001 From: Stefan Berger Date: Fri, 23 Feb 2024 12:25:09 -0500 Subject: evm: Store and detect metadata inode attributes changes On stacked filesystem the metadata inode may be different than the one file data inode and therefore changes to it 
need to be detected independently. Therefore, store the i_version, device number, and inode number associated with the file metadata inode. Implement a function to detect changes to the inode and if a change is detected reset the evm_status. This function will be called by IMA when IMA detects that the metadata inode is different from the file's inode. Co-developed-by: Mimi Zohar Signed-off-by: Stefan Berger Signed-off-by: Mimi Zohar --- include/linux/evm.h | 8 ++++++++ security/integrity/evm/evm.h | 6 ++++-- security/integrity/evm/evm_crypto.c | 23 +++++++++++++++++------ security/integrity/evm/evm_main.c | 30 ++++++++++++++++++++++++++++-- 4 files changed, 57 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/evm.h b/include/linux/evm.h index d48d6da32315..ddece4a6b25d 100644 --- a/include/linux/evm.h +++ b/include/linux/evm.h @@ -26,6 +26,8 @@ extern int evm_protected_xattr_if_enabled(const char *req_xattr_name); extern int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer, int buffer_size, char type, bool canonical_fmt); +extern bool evm_metadata_changed(struct inode *inode, + struct inode *metadata_inode); #ifdef CONFIG_FS_POSIX_ACL extern int posix_xattr_acl(const char *xattrname); #else @@ -76,5 +78,11 @@ static inline int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer, return -EOPNOTSUPP; } +static inline bool evm_metadata_changed(struct inode *inode, + struct inode *metadata_inode) +{ + return false; +} + #endif /* CONFIG_EVM */ #endif /* LINUX_EVM_H */ diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h index 72e3341ae6f7..51aba5a54275 100644 --- a/security/integrity/evm/evm.h +++ b/security/integrity/evm/evm.h @@ -39,6 +39,7 @@ struct xattr_list { struct evm_iint_cache { unsigned long flags; enum integrity_status evm_status:4; + struct integrity_inode_attributes metadata_inode; }; extern struct lsm_blob_sizes evm_blob_sizes; @@ -74,11 +75,12 @@ int evm_update_evmxattr(struct dentry *dentry, size_t req_xattr_value_len); int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name, const char *req_xattr_value, - size_t req_xattr_value_len, struct evm_digest *data); + size_t req_xattr_value_len, struct evm_digest *data, + struct evm_iint_cache *iint); int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name, const char *req_xattr_value, size_t req_xattr_value_len, char type, - struct evm_digest *data); + struct evm_digest *data, struct evm_iint_cache *iint); int evm_init_hmac(struct inode *inode, const struct xattr *xattrs, char *hmac_val); int evm_init_secfs(void); diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c index 35416f55391c..7c06ffd633d2 100644 --- a/security/integrity/evm/evm_crypto.c +++ b/security/integrity/evm/evm_crypto.c @@ -221,7 +221,8 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry, const char *req_xattr_name, const char *req_xattr_value, size_t req_xattr_value_len, - uint8_t type, struct evm_digest *data) + uint8_t type, struct evm_digest *data, + struct evm_iint_cache *iint) { struct inode *inode = d_inode(d_real(dentry, D_REAL_METADATA)); struct xattr_list *xattr; @@ -231,6 +232,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry, int error; int size, user_space_size; bool ima_present = false; + u64 i_version = 0; if (!(inode->i_opflags & IOP_XATTR) || inode->i_sb->s_user_ns != &init_user_ns) @@ -294,6 +296,13 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry, } hmac_add_misc(desc, inode, type, 
data->digest); + if (inode != d_backing_inode(dentry) && iint) { + if (IS_I_VERSION(inode)) + i_version = inode_query_iversion(inode); + integrity_inode_attrs_store(&iint->metadata_inode, i_version, + inode); + } + /* Portable EVM signatures must include an IMA hash */ if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present) error = -EPERM; @@ -305,18 +314,19 @@ out: int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name, const char *req_xattr_value, size_t req_xattr_value_len, - struct evm_digest *data) + struct evm_digest *data, struct evm_iint_cache *iint) { return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value, - req_xattr_value_len, EVM_XATTR_HMAC, data); + req_xattr_value_len, EVM_XATTR_HMAC, data, + iint); } int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name, const char *req_xattr_value, size_t req_xattr_value_len, - char type, struct evm_digest *data) + char type, struct evm_digest *data, struct evm_iint_cache *iint) { return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value, - req_xattr_value_len, type, data); + req_xattr_value_len, type, data, iint); } static int evm_is_immutable(struct dentry *dentry, struct inode *inode) @@ -357,6 +367,7 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name, const char *xattr_value, size_t xattr_value_len) { struct inode *inode = d_backing_inode(dentry); + struct evm_iint_cache *iint = evm_iint_inode(inode); struct evm_digest data; int rc = 0; @@ -372,7 +383,7 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name, data.hdr.algo = HASH_ALGO_SHA1; rc = evm_calc_hmac(dentry, xattr_name, xattr_value, - xattr_value_len, &data); + xattr_value_len, &data, iint); if (rc == 0) { data.hdr.xattr.sha1.type = EVM_XATTR_HMAC; rc = __vfs_setxattr_noperm(&nop_mnt_idmap, dentry, diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index c658d2f1494b..c1ca0894cd8a 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -226,7 +226,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry, digest.hdr.algo = HASH_ALGO_SHA1; rc = evm_calc_hmac(dentry, xattr_name, xattr_value, - xattr_value_len, &digest); + xattr_value_len, &digest, iint); if (rc) break; rc = crypto_memneq(xattr_data->data, digest.digest, @@ -247,7 +247,8 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry, hdr = (struct signature_v2_hdr *)xattr_data; digest.hdr.algo = hdr->hash_algo; rc = evm_calc_hash(dentry, xattr_name, xattr_value, - xattr_value_len, xattr_data->type, &digest); + xattr_value_len, xattr_data->type, &digest, + iint); if (rc) break; rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM, @@ -733,6 +734,31 @@ static void evm_reset_status(struct inode *inode) iint->evm_status = INTEGRITY_UNKNOWN; } +/** + * evm_metadata_changed: Detect changes to the metadata + * @inode: a file's inode + * @metadata_inode: metadata inode + * + * On a stacked filesystem detect whether the metadata has changed. If this is + * the case reset the evm_status associated with the inode that represents the + * file. 
+ */ +bool evm_metadata_changed(struct inode *inode, struct inode *metadata_inode) +{ + struct evm_iint_cache *iint = evm_iint_inode(inode); + bool ret = false; + + if (iint) { + ret = (!IS_I_VERSION(metadata_inode) || + integrity_inode_attrs_changed(&iint->metadata_inode, + metadata_inode)); + if (ret) + iint->evm_status = INTEGRITY_UNKNOWN; + } + + return ret; +} + /** * evm_revalidate_status - report whether EVM status re-validation is necessary * @xattr_name: pointer to the affected extended attribute name -- cgit From 1f65e57dc5417b166843438bef31c70b9a5208fe Mon Sep 17 00:00:00 2001 From: Stefan Berger Date: Fri, 23 Feb 2024 12:25:12 -0500 Subject: fs: Rename SB_I_EVM_UNSUPPORTED to SB_I_EVM_HMAC_UNSUPPORTED Now that EVM supports RSA signatures for previously completely unsupported filesystems rename the flag SB_I_EVM_UNSUPPORTED to SB_I_EVM_HMAC_UNSUPPORTED to reflect that only HMAC is not supported. Suggested-by: Amir Goldstein Suggested-by: Mimi Zohar Signed-off-by: Stefan Berger Acked-by: Amir Goldstein Signed-off-by: Mimi Zohar --- fs/overlayfs/super.c | 2 +- include/linux/fs.h | 2 +- security/integrity/evm/evm_main.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index a40fc7e05525..06a231970cb5 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -1460,7 +1460,7 @@ int ovl_fill_super(struct super_block *sb, struct fs_context *fc) * lead to unexpected results. */ sb->s_iflags |= SB_I_NOUMASK; - sb->s_iflags |= SB_I_EVM_UNSUPPORTED; + sb->s_iflags |= SB_I_EVM_HMAC_UNSUPPORTED; err = -ENOMEM; root_dentry = ovl_get_root(sb, ctx->upper.dentry, oe); diff --git a/include/linux/fs.h b/include/linux/fs.h index 8dfd53b52744..7bbd5e2d2a20 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1177,7 +1177,7 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_I_USERNS_VISIBLE 0x00000010 /* fstype already mounted */ #define SB_I_IMA_UNVERIFIABLE_SIGNATURE 0x00000020 #define SB_I_UNTRUSTED_MOUNTER 0x00000040 -#define SB_I_EVM_UNSUPPORTED 0x00000080 +#define SB_I_EVM_HMAC_UNSUPPORTED 0x00000080 #define SB_I_SKIP_SYNC 0x00000100 /* Skip superblock at global sync */ #define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */ diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c index cfb4f9809369..c4a6081ce596 100644 --- a/security/integrity/evm/evm_main.c +++ b/security/integrity/evm/evm_main.c @@ -155,7 +155,7 @@ static int is_unsupported_fs(struct dentry *dentry) { struct inode *inode = d_backing_inode(dentry); - if (inode->i_sb->s_iflags & SB_I_EVM_UNSUPPORTED) { + if (inode->i_sb->s_iflags & SB_I_EVM_HMAC_UNSUPPORTED) { pr_info_once("%s not supported\n", inode->i_sb->s_type->name); return 1; } -- cgit From 959fa5c188bf095558c417554e4772ac1fda3531 Mon Sep 17 00:00:00 2001 From: Mina Almasry Date: Mon, 8 Apr 2024 08:29:56 -0700 Subject: net: make napi_frag_unref reuse skb_page_unref The implementations of these 2 functions are almost identical. Remove the implementation of napi_frag_unref, and make it a call into skb_page_unref so we don't duplicate the implementation. 
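After this change skb_page_unref() takes the page and a recycle flag instead of the skb, so a caller walking an skb's pages passes skb->pp_recycle explicitly. A sketch of the resulting calling convention (example_unref_sg() is a hypothetical helper, not from this patch):

  static void example_unref_sg(struct sk_buff *skb, struct scatterlist *sg)
  {
          for (; sg; sg = sg_next(sg))
                  skb_page_unref(sg_page(sg), skb->pp_recycle);
  }

This mirrors the esp4/esp6 call sites converted below and keeps the page_pool recycling decision in one place.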
Signed-off-by: Mina Almasry Reviewed-by: Eric Dumazet Reviewed-by: Jacob Keller Link: https://lore.kernel.org/r/20240408153000.2152844-2-almasrymina@google.com Signed-off-by: Jakub Kicinski --- include/linux/skbuff.h | 12 +++--------- net/ipv4/esp4.c | 2 +- net/ipv6/esp6.c | 2 +- 3 files changed, 5 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7dfb906d92f7..c0ff85bb087a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3522,10 +3522,10 @@ int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb, bool napi_pp_put_page(struct page *page); static inline void -skb_page_unref(const struct sk_buff *skb, struct page *page) +skb_page_unref(struct page *page, bool recycle) { #ifdef CONFIG_PAGE_POOL - if (skb->pp_recycle && napi_pp_put_page(page)) + if (recycle && napi_pp_put_page(page)) return; #endif put_page(page); @@ -3534,13 +3534,7 @@ skb_page_unref(const struct sk_buff *skb, struct page *page) static inline void napi_frag_unref(skb_frag_t *frag, bool recycle) { - struct page *page = skb_frag_page(frag); - -#ifdef CONFIG_PAGE_POOL - if (recycle && napi_pp_put_page(page)) - return; -#endif - put_page(page); + skb_page_unref(skb_frag_page(frag), recycle); } /** diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 3d647c9a7a21..40330253f076 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -114,7 +114,7 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) */ if (req->src != req->dst) for (sg = sg_next(req->src); sg; sg = sg_next(sg)) - skb_page_unref(skb, sg_page(sg)); + skb_page_unref(sg_page(sg), skb->pp_recycle); } #ifdef CONFIG_INET_ESPINTCP diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index fe8d53f5a5ee..fb431d0a3475 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -131,7 +131,7 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) */ if (req->src != req->dst) for (sg = sg_next(req->src); sg; sg = sg_next(sg)) - skb_page_unref(skb, sg_page(sg)); + skb_page_unref(sg_page(sg), skb->pp_recycle); } #ifdef CONFIG_INET6_ESPINTCP -- cgit From f58f3c9563409e618e591d0d540316286cb0665f Mon Sep 17 00:00:00 2001 From: Mina Almasry Date: Mon, 8 Apr 2024 08:29:58 -0700 Subject: net: remove napi_frag_unref With the changes in the last patches, napi_frag_unref() is now reduandant. Remove it and use skb_page_unref directly. Signed-off-by: Mina Almasry Reviewed-by: Dragos Tatulea Reviewed-by: Eric Dumazet Reviewed-by: Jacob Keller Link: https://lore.kernel.org/r/20240408153000.2152844-4-almasrymina@google.com Signed-off-by: Jakub Kicinski --- include/linux/skbuff.h | 8 +------- net/core/skbuff.c | 2 +- 2 files changed, 2 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index c0ff85bb087a..7135a3e94afd 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3531,12 +3531,6 @@ skb_page_unref(struct page *page, bool recycle) put_page(page); } -static inline void -napi_frag_unref(skb_frag_t *frag, bool recycle) -{ - skb_page_unref(skb_frag_page(frag), recycle); -} - /** * __skb_frag_unref - release a reference on a paged fragment. 
* @frag: the paged fragment @@ -3547,7 +3541,7 @@ napi_frag_unref(skb_frag_t *frag, bool recycle) */ static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) { - napi_frag_unref(frag, recycle); + skb_page_unref(skb_frag_page(frag), recycle); } /** diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 21cd01641f4c..888874ef8566 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1101,7 +1101,7 @@ static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason) } for (i = 0; i < shinfo->nr_frags; i++) - napi_frag_unref(&shinfo->frags[i], skb->pp_recycle); + __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle); free_head: if (shinfo->frag_list) -- cgit From a88d970c8bb55dc20f319ce3e33c7ce0e9647480 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 17 Mar 2024 14:44:38 -0700 Subject: lib: Add one-byte emulation function Architectures are required to provide four-byte cmpxchg() and 64-bit architectures are additionally required to provide eight-byte cmpxchg(). However, there are cases where one-byte cmpxchg() would be extremely useful. Therefore, provide cmpxchg_emu_u8() that emulates one-byte cmpxchg() in terms of four-byte cmpxchg(). Note that this emulations is fully ordered, and can (for example) cause one-byte cmpxchg_relaxed() to incur the overhead of full ordering. If this causes problems for a given architecture, that architecture is free to provide its own lighter-weight primitives. [ paulmck: Apply Marco Elver feedback. ] [ paulmck: Apply kernel test robot feedback. ] [ paulmck: Drop two-byte support per Arnd Bergmann feedback. ] Link: https://lore.kernel.org/all/0733eb10-5e7a-4450-9b8a-527b97c842ff@paulmck-laptop/ Signed-off-by: Paul E. McKenney Acked-by: Marco Elver Cc: Andrew Morton Cc: Thomas Gleixner Cc: "Peter Zijlstra (Intel)" Cc: Douglas Anderson Cc: Petr Mladek Cc: Arnd Bergmann Cc: --- arch/Kconfig | 3 +++ include/linux/cmpxchg-emu.h | 15 +++++++++++++++ lib/Makefile | 1 + lib/cmpxchg-emu.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 64 insertions(+) create mode 100644 include/linux/cmpxchg-emu.h create mode 100644 lib/cmpxchg-emu.c (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 9f066785bb71..284663392eef 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -1609,4 +1609,7 @@ config CC_HAS_SANE_FUNCTION_ALIGNMENT # strict alignment always, even with -falign-functions. def_bool CC_HAS_MIN_FUNCTION_ALIGNMENT || CC_IS_CLANG +config ARCH_NEED_CMPXCHG_1_EMU + bool + endmenu diff --git a/include/linux/cmpxchg-emu.h b/include/linux/cmpxchg-emu.h new file mode 100644 index 000000000000..998deec67740 --- /dev/null +++ b/include/linux/cmpxchg-emu.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Emulated 1-byte and 2-byte cmpxchg operations for architectures + * lacking direct support for these sizes. These are implemented in terms + * of 4-byte cmpxchg operations. + * + * Copyright (C) 2024 Paul E. McKenney. 
+ */ + +#ifndef __LINUX_CMPXCHG_EMU_H +#define __LINUX_CMPXCHG_EMU_H + +uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new); + +#endif /* __LINUX_CMPXCHG_EMU_H */ diff --git a/lib/Makefile b/lib/Makefile index ffc6b2341b45..cc3d52fdb477 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -236,6 +236,7 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o lib-$(CONFIG_GENERIC_BUG) += bug.o obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o +obj-$(CONFIG_ARCH_NEED_CMPXCHG_1_EMU) += cmpxchg-emu.o obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o #ensure exported functions have prototypes diff --git a/lib/cmpxchg-emu.c b/lib/cmpxchg-emu.c new file mode 100644 index 000000000000..27f6f97cb60d --- /dev/null +++ b/lib/cmpxchg-emu.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Emulated 1-byte cmpxchg operation for architectures lacking direct + * support for this size. This is implemented in terms of 4-byte cmpxchg + * operations. + * + * Copyright (C) 2024 Paul E. McKenney. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +union u8_32 { + u8 b[4]; + u32 w; +}; + +/* Emulate one-byte cmpxchg() in terms of 4-byte cmpxchg. */ +uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new) +{ + u32 *p32 = (u32 *)(((uintptr_t)p) & ~0x3); + int i = ((uintptr_t)p) & 0x3; + union u8_32 old32; + union u8_32 new32; + u32 ret; + + ret = READ_ONCE(*p32); + do { + old32.w = ret; + if (old32.b[i] != old) + return old32.b[i]; + new32.w = old32.w; + new32.b[i] = new; + instrument_atomic_read_write(p, 1); + ret = data_race(cmpxchg(p32, old32.w, new32.w)); // Overridden above. + } while (ret != old32.w); + return old; +} +EXPORT_SYMBOL_GPL(cmpxchg_emu_u8); -- cgit From 5599f80196612efde96dbe6ef18f6ecc0cb4ba19 Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Fri, 15 Mar 2024 15:44:39 +0100 Subject: HID: bpf: export hid_hw_output_report as a BPF kfunc We currently only export hid_hw_raw_request() as a BPF kfunc. However, some devices require an explicit write on the Output Report instead of the use of the control channel. So also export hid_hw_output_report to BPF Link: https://lore.kernel.org/r/20240315-b4-hid-bpf-new-funcs-v4-2-079c282469d3@kernel.org Signed-off-by: Benjamin Tissoires --- Documentation/hid/hid-bpf.rst | 2 +- drivers/hid/bpf/hid_bpf_dispatch.c | 112 +++++++++++++++++++++++++++---------- drivers/hid/hid-core.c | 1 + include/linux/hid_bpf.h | 1 + 4 files changed, 86 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/Documentation/hid/hid-bpf.rst b/Documentation/hid/hid-bpf.rst index 4fad83a6ebc3..a575004d9025 100644 --- a/Documentation/hid/hid-bpf.rst +++ b/Documentation/hid/hid-bpf.rst @@ -179,7 +179,7 @@ Available API that can be used in syscall HID-BPF programs: ----------------------------------------------------------- .. 
kernel-doc:: drivers/hid/bpf/hid_bpf_dispatch.c - :functions: hid_bpf_attach_prog hid_bpf_hw_request hid_bpf_allocate_context hid_bpf_release_context + :functions: hid_bpf_attach_prog hid_bpf_hw_request hid_bpf_hw_output_report hid_bpf_allocate_context hid_bpf_release_context General overview of a HID-BPF program ===================================== diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c index 52abb27426f4..0ccd7cdd29d8 100644 --- a/drivers/hid/bpf/hid_bpf_dispatch.c +++ b/drivers/hid/bpf/hid_bpf_dispatch.c @@ -376,6 +376,46 @@ hid_bpf_release_context(struct hid_bpf_ctx *ctx) put_device(&hid->dev); } +static int +__hid_bpf_hw_check_params(struct hid_bpf_ctx *ctx, __u8 *buf, size_t *buf__sz, + enum hid_report_type rtype) +{ + struct hid_report_enum *report_enum; + struct hid_report *report; + struct hid_device *hdev; + u32 report_len; + + /* check arguments */ + if (!ctx || !hid_bpf_ops || !buf) + return -EINVAL; + + switch (rtype) { + case HID_INPUT_REPORT: + case HID_OUTPUT_REPORT: + case HID_FEATURE_REPORT: + break; + default: + return -EINVAL; + } + + if (*buf__sz < 1) + return -EINVAL; + + hdev = (struct hid_device *)ctx->hid; /* discard const */ + + report_enum = hdev->report_enum + rtype; + report = hid_bpf_ops->hid_get_report(report_enum, buf); + if (!report) + return -EINVAL; + + report_len = hid_report_len(report); + + if (*buf__sz > report_len) + *buf__sz = report_len; + + return 0; +} + /** * hid_bpf_hw_request - Communicate with a HID device * @@ -392,24 +432,14 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz, enum hid_report_type rtype, enum hid_class_request reqtype) { struct hid_device *hdev; - struct hid_report *report; - struct hid_report_enum *report_enum; + size_t size = buf__sz; u8 *dma_data; - u32 report_len; int ret; /* check arguments */ - if (!ctx || !hid_bpf_ops || !buf) - return -EINVAL; - - switch (rtype) { - case HID_INPUT_REPORT: - case HID_OUTPUT_REPORT: - case HID_FEATURE_REPORT: - break; - default: - return -EINVAL; - } + ret = __hid_bpf_hw_check_params(ctx, buf, &size, rtype); + if (ret) + return ret; switch (reqtype) { case HID_REQ_GET_REPORT: @@ -423,29 +453,16 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz, return -EINVAL; } - if (buf__sz < 1) - return -EINVAL; - hdev = (struct hid_device *)ctx->hid; /* discard const */ - report_enum = hdev->report_enum + rtype; - report = hid_bpf_ops->hid_get_report(report_enum, buf); - if (!report) - return -EINVAL; - - report_len = hid_report_len(report); - - if (buf__sz > report_len) - buf__sz = report_len; - - dma_data = kmemdup(buf, buf__sz, GFP_KERNEL); + dma_data = kmemdup(buf, size, GFP_KERNEL); if (!dma_data) return -ENOMEM; ret = hid_bpf_ops->hid_hw_raw_request(hdev, dma_data[0], dma_data, - buf__sz, + size, rtype, reqtype); @@ -455,6 +472,42 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz, kfree(dma_data); return ret; } + +/** + * hid_bpf_hw_output_report - Send an output report to a HID device + * + * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context() + * @buf: a %PTR_TO_MEM buffer + * @buf__sz: the size of the data to transfer + * + * Returns the number of bytes transferred on success, a negative error code otherwise. 
+ */ +__bpf_kfunc int +hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz) +{ + struct hid_device *hdev; + size_t size = buf__sz; + u8 *dma_data; + int ret; + + /* check arguments */ + ret = __hid_bpf_hw_check_params(ctx, buf, &size, HID_OUTPUT_REPORT); + if (ret) + return ret; + + hdev = (struct hid_device *)ctx->hid; /* discard const */ + + dma_data = kmemdup(buf, size, GFP_KERNEL); + if (!dma_data) + return -ENOMEM; + + ret = hid_bpf_ops->hid_hw_output_report(hdev, + dma_data, + size); + + kfree(dma_data); + return ret; +} __bpf_kfunc_end_defs(); /* @@ -488,6 +541,7 @@ BTF_ID_FLAGS(func, hid_bpf_attach_prog) BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE) BTF_ID_FLAGS(func, hid_bpf_hw_request) +BTF_ID_FLAGS(func, hid_bpf_hw_output_report) BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids) static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = { diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index de7a477d6665..1243595890ba 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2974,6 +2974,7 @@ EXPORT_SYMBOL_GPL(hid_check_keys_pressed); static struct hid_bpf_ops hid_ops = { .hid_get_report = hid_get_report, .hid_hw_raw_request = hid_hw_raw_request, + .hid_hw_output_report = hid_hw_output_report, .owner = THIS_MODULE, .bus_type = &hid_bus_type, }; diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h index 7118ac28d468..5c7ff93dc73e 100644 --- a/include/linux/hid_bpf.h +++ b/include/linux/hid_bpf.h @@ -103,6 +103,7 @@ struct hid_bpf_ops { unsigned char reportnum, __u8 *buf, size_t len, enum hid_report_type rtype, enum hid_class_request reqtype); + int (*hid_hw_output_report)(struct hid_device *hdev, __u8 *buf, size_t len); struct module *owner; const struct bus_type *bus_type; }; -- cgit From 9be50ac30a83896a753ab9f64e941763bb7900be Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Fri, 15 Mar 2024 15:44:42 +0100 Subject: HID: bpf: allow to inject HID event from BPF It can be interesting to inject events from BPF as if the event were to come from the device. For example, some multitouch devices do not all the time send a proximity out event, and we might want to send it for the physical device. Compared to uhid, we can now inject events on any physical device, not just uhid virtual ones. Link: https://lore.kernel.org/r/20240315-b4-hid-bpf-new-funcs-v4-5-079c282469d3@kernel.org Signed-off-by: Benjamin Tissoires --- Documentation/hid/hid-bpf.rst | 2 +- drivers/hid/bpf/hid_bpf_dispatch.c | 29 +++++++++++++++++++++++++++++ drivers/hid/hid-core.c | 1 + include/linux/hid_bpf.h | 2 ++ 4 files changed, 33 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/Documentation/hid/hid-bpf.rst b/Documentation/hid/hid-bpf.rst index a575004d9025..0765b3298ecf 100644 --- a/Documentation/hid/hid-bpf.rst +++ b/Documentation/hid/hid-bpf.rst @@ -179,7 +179,7 @@ Available API that can be used in syscall HID-BPF programs: ----------------------------------------------------------- .. 
kernel-doc:: drivers/hid/bpf/hid_bpf_dispatch.c - :functions: hid_bpf_attach_prog hid_bpf_hw_request hid_bpf_hw_output_report hid_bpf_allocate_context hid_bpf_release_context + :functions: hid_bpf_attach_prog hid_bpf_hw_request hid_bpf_hw_output_report hid_bpf_input_report hid_bpf_allocate_context hid_bpf_release_context General overview of a HID-BPF program ===================================== diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c index 0ccd7cdd29d8..83e5c73ce995 100644 --- a/drivers/hid/bpf/hid_bpf_dispatch.c +++ b/drivers/hid/bpf/hid_bpf_dispatch.c @@ -508,6 +508,34 @@ hid_bpf_hw_output_report(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz) kfree(dma_data); return ret; } + +/** + * hid_bpf_input_report - Inject a HID report in the kernel from a HID device + * + * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context() + * @type: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT) + * @buf: a %PTR_TO_MEM buffer + * @buf__sz: the size of the data to transfer + * + * Returns %0 on success, a negative error code otherwise. + */ +__bpf_kfunc int +hid_bpf_input_report(struct hid_bpf_ctx *ctx, enum hid_report_type type, u8 *buf, + const size_t buf__sz) +{ + struct hid_device *hdev; + size_t size = buf__sz; + int ret; + + /* check arguments */ + ret = __hid_bpf_hw_check_params(ctx, buf, &size, type); + if (ret) + return ret; + + hdev = (struct hid_device *)ctx->hid; /* discard const */ + + return hid_input_report(hdev, type, buf, size, 0); +} __bpf_kfunc_end_defs(); /* @@ -542,6 +570,7 @@ BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL) BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE) BTF_ID_FLAGS(func, hid_bpf_hw_request) BTF_ID_FLAGS(func, hid_bpf_hw_output_report) +BTF_ID_FLAGS(func, hid_bpf_input_report) BTF_KFUNCS_END(hid_bpf_syscall_kfunc_ids) static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = { diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 1243595890ba..b1fa0378e8f4 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -2975,6 +2975,7 @@ static struct hid_bpf_ops hid_ops = { .hid_get_report = hid_get_report, .hid_hw_raw_request = hid_hw_raw_request, .hid_hw_output_report = hid_hw_output_report, + .hid_input_report = hid_input_report, .owner = THIS_MODULE, .bus_type = &hid_bus_type, }; diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h index 5c7ff93dc73e..17b08f500098 100644 --- a/include/linux/hid_bpf.h +++ b/include/linux/hid_bpf.h @@ -104,6 +104,8 @@ struct hid_bpf_ops { size_t len, enum hid_report_type rtype, enum hid_class_request reqtype); int (*hid_hw_output_report)(struct hid_device *hdev, __u8 *buf, size_t len); + int (*hid_input_report)(struct hid_device *hid, enum hid_report_type type, + u8 *data, u32 size, int interrupt); struct module *owner; const struct bus_type *bus_type; }; -- cgit From a01e7214bef904723d26d293eb17586078610379 Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Wed, 27 Mar 2024 14:43:37 +0530 Subject: PCI: endpoint: Remove "core_init_notifier" flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit "core_init_notifier" flag is set by the glue drivers requiring refclk from the host to complete the DWC core initialization. Also, those drivers will send a notification to the EPF drivers once the initialization is fully completed using the pci_epc_init_notify() API. Only then, the EPF drivers will start functioning. 
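As a rough sketch of that refclk-dependent flow (the handler below is illustrative only; real glue drivers such as the Qualcomm or Tegra ones differ in detail, and the foo_ names are hypothetical), the endpoint init sequence looks roughly like this:

/*
 * Sketch: a DWC glue driver that only gets refclk once the host is up.
 * Core registers are programmed and the EPF drivers are notified at that
 * point, not at probe time.
 */
static irqreturn_t foo_pcie_ep_perst_deassert(int irq, void *data)
{
	struct dw_pcie_ep *ep = data;
	int ret;

	/* refclk from the host is stable now, so the DBI space is accessible */
	ret = dw_pcie_ep_init_registers(ep);
	if (ret)
		return IRQ_HANDLED;

	/* let the EPF drivers (e.g. pci-epf-test) know the EPC is ready */
	dw_pcie_ep_init_notify(ep);

	return IRQ_HANDLED;
}
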
For the rest of the drivers generating refclk locally, EPF drivers will start functioning post binding with them. EPF drivers rely on the 'core_init_notifier' flag to differentiate between the drivers. Unfortunately, this creates two different flows for the EPF drivers. So to avoid that, let's get rid of the "core_init_notifier" flag and follow a single initialization flow for the EPF drivers. This is done by calling the dw_pcie_ep_init_notify() from all glue drivers after the completion of dw_pcie_ep_init_registers() API. This will allow all the glue drivers to send the notification to the EPF drivers once the initialization is fully completed. Only difference here is that, the drivers requiring refclk from host will send the notification once refclk is received, while others will send it during probe time itself. But this also requires the EPC core driver to deliver the notification after EPF driver bind. Because, the glue driver can send the notification before the EPF drivers bind() and in those cases the EPF drivers will miss the event. To accommodate this, EPC core is now caching the state of the EPC initialization in 'init_complete' flag and pci-ep-cfs driver sends the notification to EPF drivers based on that after each EPF driver bind. Link: https://lore.kernel.org/linux-pci/20240327-pci-dbi-rework-v12-8-082625472414@linaro.org Tested-by: Niklas Cassel Signed-off-by: Manivannan Sadhasivam Signed-off-by: Krzysztof Wilczyński Reviewed-by: Frank Li Reviewed-by: Niklas Cassel --- drivers/pci/controller/cadence/pcie-cadence-ep.c | 2 ++ drivers/pci/controller/dwc/pci-dra7xx.c | 2 ++ drivers/pci/controller/dwc/pci-imx6.c | 2 ++ drivers/pci/controller/dwc/pci-keystone.c | 2 ++ drivers/pci/controller/dwc/pci-layerscape-ep.c | 2 ++ drivers/pci/controller/dwc/pcie-artpec6.c | 2 ++ drivers/pci/controller/dwc/pcie-designware-ep.c | 1 + drivers/pci/controller/dwc/pcie-designware-plat.c | 2 ++ drivers/pci/controller/dwc/pcie-keembay.c | 2 ++ drivers/pci/controller/dwc/pcie-qcom-ep.c | 1 - drivers/pci/controller/dwc/pcie-rcar-gen4.c | 2 ++ drivers/pci/controller/dwc/pcie-tegra194.c | 1 - drivers/pci/controller/dwc/pcie-uniphier-ep.c | 2 ++ drivers/pci/controller/pcie-rcar-ep.c | 2 ++ drivers/pci/controller/pcie-rockchip-ep.c | 2 ++ drivers/pci/endpoint/functions/pci-epf-test.c | 18 +++++------------- drivers/pci/endpoint/pci-ep-cfs.c | 9 +++++++++ drivers/pci/endpoint/pci-epc-core.c | 22 ++++++++++++++++++++++ include/linux/pci-epc.h | 7 ++++--- 19 files changed, 65 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index 81c50dc64da9..55c42ca2b777 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -746,6 +746,8 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) spin_lock_init(&ep->lock); + pci_epc_init_notify(epc); + return 0; free_epc_mem: diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index 395042b29ffc..d2d17d37d3e0 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -474,6 +474,8 @@ static int dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx, return ret; } + dw_pcie_ep_init_notify(ep); + return 0; } diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 8d28ecc381bc..917c69edee1d 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ 
b/drivers/pci/controller/dwc/pci-imx6.c @@ -1131,6 +1131,8 @@ static int imx6_add_pcie_ep(struct imx6_pcie *imx6_pcie, return ret; } + dw_pcie_ep_init_notify(ep); + /* Start LTSSM. */ imx6_pcie_ltssm_enable(dev); diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 81ebac520650..d3a7d14ee685 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -1293,6 +1293,8 @@ static int ks_pcie_probe(struct platform_device *pdev) goto err_ep_init; } + dw_pcie_ep_init_notify(&pci->ep); + break; default: dev_err(dev, "INVALID device type %d\n", mode); diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c index 9eb2233e3d7f..7dde6d5fa4d8 100644 --- a/drivers/pci/controller/dwc/pci-layerscape-ep.c +++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c @@ -286,6 +286,8 @@ static int __init ls_pcie_ep_probe(struct platform_device *pdev) return ret; } + dw_pcie_ep_init_notify(&pci->ep); + return ls_pcie_ep_interrupt_init(pcie, pdev); } diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c index a6095561db4a..a4630b92489b 100644 --- a/drivers/pci/controller/dwc/pcie-artpec6.c +++ b/drivers/pci/controller/dwc/pcie-artpec6.c @@ -452,6 +452,8 @@ static int artpec6_pcie_probe(struct platform_device *pdev) return ret; } + dw_pcie_ep_init_notify(&pci->ep); + break; default: dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode); diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 2063cf2049e5..47391d7d3a73 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -632,6 +632,7 @@ void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep) struct dw_pcie *pci = to_dw_pcie_from_ep(ep); dw_pcie_edma_remove(pci); + ep->epc->init_complete = false; } EXPORT_SYMBOL_GPL(dw_pcie_ep_cleanup); diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c index ca9b22e654cd..8490c5d6ff9f 100644 --- a/drivers/pci/controller/dwc/pcie-designware-plat.c +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c @@ -154,6 +154,8 @@ static int dw_plat_pcie_probe(struct platform_device *pdev) dw_pcie_ep_deinit(&pci->ep); } + dw_pcie_ep_init_notify(&pci->ep); + break; default: dev_err(dev, "INVALID device type %d\n", dw_plat_pcie->mode); diff --git a/drivers/pci/controller/dwc/pcie-keembay.c b/drivers/pci/controller/dwc/pcie-keembay.c index b2556dbcffb5..98bbc83182b4 100644 --- a/drivers/pci/controller/dwc/pcie-keembay.c +++ b/drivers/pci/controller/dwc/pcie-keembay.c @@ -442,6 +442,8 @@ static int keembay_pcie_probe(struct platform_device *pdev) return ret; } + dw_pcie_ep_init_notify(&pci->ep); + break; default: dev_err(dev, "Invalid device type %d\n", pcie->mode); diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c index 3697b4a944cc..2fb8c15e7a91 100644 --- a/drivers/pci/controller/dwc/pcie-qcom-ep.c +++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c @@ -775,7 +775,6 @@ static void qcom_pcie_ep_init_debugfs(struct qcom_pcie_ep *pcie_ep) static const struct pci_epc_features qcom_pcie_epc_features = { .linkup_notifier = true, - .core_init_notifier = true, .msi_capable = true, .msix_capable = false, .align = SZ_4K, diff --git a/drivers/pci/controller/dwc/pcie-rcar-gen4.c b/drivers/pci/controller/dwc/pcie-rcar-gen4.c index 
e155a905fb4f..cfeccc2f9ee1 100644 --- a/drivers/pci/controller/dwc/pcie-rcar-gen4.c +++ b/drivers/pci/controller/dwc/pcie-rcar-gen4.c @@ -437,6 +437,8 @@ static int rcar_gen4_add_dw_pcie_ep(struct rcar_gen4_pcie *rcar) rcar_gen4_pcie_ep_deinit(rcar); } + dw_pcie_ep_init_notify(ep); + return ret; } diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index db043f579fbe..ddc23602eca7 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -2006,7 +2006,6 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, static const struct pci_epc_features tegra_pcie_epc_features = { .linkup_notifier = true, - .core_init_notifier = true, .msi_capable = false, .msix_capable = false, .bar[BAR_0] = { .type = BAR_FIXED, .fixed_size = SZ_1M, diff --git a/drivers/pci/controller/dwc/pcie-uniphier-ep.c b/drivers/pci/controller/dwc/pcie-uniphier-ep.c index 0e5e7344de48..a2b844268e28 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier-ep.c +++ b/drivers/pci/controller/dwc/pcie-uniphier-ep.c @@ -410,6 +410,8 @@ static int uniphier_pcie_ep_probe(struct platform_device *pdev) return ret; } + dw_pcie_ep_init_notify(&priv->pci.ep); + return 0; } diff --git a/drivers/pci/controller/pcie-rcar-ep.c b/drivers/pci/controller/pcie-rcar-ep.c index 05967c6c0b42..047e2cef5afc 100644 --- a/drivers/pci/controller/pcie-rcar-ep.c +++ b/drivers/pci/controller/pcie-rcar-ep.c @@ -542,6 +542,8 @@ static int rcar_pcie_ep_probe(struct platform_device *pdev) goto err_pm_put; } + pci_epc_init_notify(epc); + return 0; err_pm_put: diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index c9046e97a1d2..8613df8184df 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -609,6 +609,8 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev) rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE, PCIE_CLIENT_CONFIG); + pci_epc_init_notify(epc); + return 0; err_epc_mem_exit: pci_epc_mem_exit(epc); diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index cd4ffb39dcdc..212fc303fb63 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -753,6 +753,7 @@ static int pci_epf_test_core_init(struct pci_epf *epf) const struct pci_epc_features *epc_features; struct pci_epc *epc = epf->epc; struct device *dev = &epf->dev; + bool linkup_notifier = false; bool msix_capable = false; bool msi_capable = true; int ret; @@ -795,6 +796,10 @@ static int pci_epf_test_core_init(struct pci_epf *epf) } } + linkup_notifier = epc_features->linkup_notifier; + if (!linkup_notifier) + queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work); + return 0; } @@ -890,8 +895,6 @@ static int pci_epf_test_bind(struct pci_epf *epf) const struct pci_epc_features *epc_features; enum pci_barno test_reg_bar = BAR_0; struct pci_epc *epc = epf->epc; - bool linkup_notifier = false; - bool core_init_notifier = false; if (WARN_ON_ONCE(!epc)) return -EINVAL; @@ -902,8 +905,6 @@ static int pci_epf_test_bind(struct pci_epf *epf) return -EOPNOTSUPP; } - linkup_notifier = epc_features->linkup_notifier; - core_init_notifier = epc_features->core_init_notifier; test_reg_bar = pci_epc_get_first_free_bar(epc_features); if (test_reg_bar < 0) return -EINVAL; @@ -916,21 +917,12 @@ static int pci_epf_test_bind(struct pci_epf *epf) if (ret) return ret; - if 
(!core_init_notifier) { - ret = pci_epf_test_core_init(epf); - if (ret) - return ret; - } - epf_test->dma_supported = true; ret = pci_epf_test_init_dma_chan(epf_test); if (ret) epf_test->dma_supported = false; - if (!linkup_notifier && !core_init_notifier) - queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work); - return 0; } diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c index 0ea64e24ed61..3b21e28f9b59 100644 --- a/drivers/pci/endpoint/pci-ep-cfs.c +++ b/drivers/pci/endpoint/pci-ep-cfs.c @@ -64,6 +64,9 @@ static int pci_secondary_epc_epf_link(struct config_item *epf_item, return ret; } + /* Send any pending EPC initialization complete to the EPF driver */ + pci_epc_notify_pending_init(epc, epf); + return 0; } @@ -125,6 +128,9 @@ static int pci_primary_epc_epf_link(struct config_item *epf_item, return ret; } + /* Send any pending EPC initialization complete to the EPF driver */ + pci_epc_notify_pending_init(epc, epf); + return 0; } @@ -230,6 +236,9 @@ static int pci_epc_epf_link(struct config_item *epc_item, return ret; } + /* Send any pending EPC initialization complete to the EPF driver */ + pci_epc_notify_pending_init(epc, epf); + return 0; } diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index da3fc0795b0b..47d27ec7439d 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -748,10 +748,32 @@ void pci_epc_init_notify(struct pci_epc *epc) epf->event_ops->core_init(epf); mutex_unlock(&epf->lock); } + epc->init_complete = true; mutex_unlock(&epc->list_lock); } EXPORT_SYMBOL_GPL(pci_epc_init_notify); +/** + * pci_epc_notify_pending_init() - Notify the pending EPC device initialization + * complete to the EPF device + * @epc: the EPC device whose core initialization is pending to be notified + * @epf: the EPF device to be notified + * + * Invoke to notify the pending EPC device initialization complete to the EPF + * device. This is used to deliver the notification if the EPC initialization + * got completed before the EPF driver bind. 
+ */ +void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf) +{ + if (epc->init_complete) { + mutex_lock(&epf->lock); + if (epf->event_ops && epf->event_ops->core_init) + epf->event_ops->core_init(epf); + mutex_unlock(&epf->lock); + } +} +EXPORT_SYMBOL_GPL(pci_epc_notify_pending_init); + /** * pci_epc_bme_notify() - Notify the EPF device that the EPC device has received * the BME event from the Root complex diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index cc2f70d061c8..acc5f96161fe 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -128,6 +128,8 @@ struct pci_epc_mem { * @group: configfs group representing the PCI EPC device * @lock: mutex to protect pci_epc ops * @function_num_map: bitmap to manage physical function number + * @init_complete: flag to indicate whether the EPC initialization is complete + * or not */ struct pci_epc { struct device dev; @@ -143,6 +145,7 @@ struct pci_epc { /* mutex to protect against concurrent access of EP controller */ struct mutex lock; unsigned long function_num_map; + bool init_complete; }; /** @@ -179,8 +182,6 @@ struct pci_epc_bar_desc { /** * struct pci_epc_features - features supported by a EPC device per function * @linkup_notifier: indicate if the EPC device can notify EPF driver on link up - * @core_init_notifier: indicate cores that can notify about their availability - * for initialization * @msi_capable: indicate if the endpoint function has MSI capability * @msix_capable: indicate if the endpoint function has MSI-X capability * @bar: array specifying the hardware description for each BAR @@ -188,7 +189,6 @@ struct pci_epc_bar_desc { */ struct pci_epc_features { unsigned int linkup_notifier : 1; - unsigned int core_init_notifier : 1; unsigned int msi_capable : 1; unsigned int msix_capable : 1; struct pci_epc_bar_desc bar[PCI_STD_NUM_BARS]; @@ -225,6 +225,7 @@ int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf, void pci_epc_linkup(struct pci_epc *epc); void pci_epc_linkdown(struct pci_epc *epc); void pci_epc_init_notify(struct pci_epc *epc); +void pci_epc_notify_pending_init(struct pci_epc *epc, struct pci_epf *epf); void pci_epc_bme_notify(struct pci_epc *epc); void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf, enum pci_epc_interface_type type); -- cgit From 699c23f02c65cbfc3e638f14ce0d70c23a2e1f02 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 9 Apr 2024 21:35:27 -0700 Subject: bpf: Add bpf_link support for sk_msg and sk_skb progs Add bpf_link support for sk_msg and sk_skb programs. We have an internal request to support bpf_link for sk_msg programs so user space can have a uniform handling with bpf_link based libbpf APIs. Using bpf_link based libbpf API also has a benefit which makes system robust by decoupling prog life cycle and attachment life cycle. 
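To make the new attachment mode concrete, here is a minimal user-space sketch using the generic bpf_link_create() wrapper from libbpf (the object, program and map names are hypothetical, and error handling is trimmed):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Sketch: attach an SK_MSG verdict program to a sockmap through a bpf_link. */
static int attach_msg_verdict(struct bpf_object *obj)
{
	struct bpf_program *prog;
	struct bpf_map *map;
	int link_fd;

	prog = bpf_object__find_program_by_name(obj, "prog_msg_verdict"); /* hypothetical */
	map  = bpf_object__find_map_by_name(obj, "sock_map");             /* hypothetical */
	if (!prog || !map)
		return -1;

	/* target_fd is the sockmap fd; the kernel takes a uref on the map */
	link_fd = bpf_link_create(bpf_program__fd(prog), bpf_map__fd(map),
				  BPF_SK_MSG_VERDICT, NULL);
	if (link_fd < 0)
		return -1;

	/* the program stays attached until the last reference to the link is gone */
	return link_fd;
}

Closing the returned fd (or unpinning the link) detaches the program, which is the life-cycle decoupling referred to above.
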
Reviewed-by: John Fastabend Signed-off-by: Yonghong Song Link: https://lore.kernel.org/r/20240410043527.3737160-1-yonghong.song@linux.dev Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 6 + include/linux/skmsg.h | 4 + include/uapi/linux/bpf.h | 5 + kernel/bpf/syscall.c | 4 + net/core/sock_map.c | 263 ++++++++++++++++++++++++++++++++++++++--- tools/include/uapi/linux/bpf.h | 5 + 6 files changed, 271 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 62762390c93d..5034c1b4ded7 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2996,6 +2996,7 @@ int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); int sock_map_bpf_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); +int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog); void sock_map_unhash(struct sock *sk); void sock_map_destroy(struct sock *sk); @@ -3094,6 +3095,11 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr, { return -EINVAL; } + +static inline int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_BPF_SYSCALL */ #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index e65ec3fd2799..9c8dd4c01412 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -58,6 +58,10 @@ struct sk_psock_progs { struct bpf_prog *stream_parser; struct bpf_prog *stream_verdict; struct bpf_prog *skb_verdict; + struct bpf_link *msg_parser_link; + struct bpf_link *stream_parser_link; + struct bpf_link *stream_verdict_link; + struct bpf_link *skb_verdict_link; }; enum sk_psock_state_bits { diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 6fe9f11c8abe..cee0a7915c08 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1135,6 +1135,7 @@ enum bpf_link_type { BPF_LINK_TYPE_TCX = 11, BPF_LINK_TYPE_UPROBE_MULTI = 12, BPF_LINK_TYPE_NETKIT = 13, + BPF_LINK_TYPE_SOCKMAP = 14, __MAX_BPF_LINK_TYPE, }; @@ -6724,6 +6725,10 @@ struct bpf_link_info { __u32 ifindex; __u32 attach_type; } netkit; + struct { + __u32 map_id; + __u32 attach_type; + } sockmap; }; } __attribute__((aligned(8))); diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index e44c276e8617..7d392ec83655 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -5213,6 +5213,10 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) case BPF_PROG_TYPE_SK_LOOKUP: ret = netns_bpf_link_create(attr, prog); break; + case BPF_PROG_TYPE_SK_MSG: + case BPF_PROG_TYPE_SK_SKB: + ret = sock_map_link_create(attr, prog); + break; #ifdef CONFIG_NET case BPF_PROG_TYPE_XDP: ret = bpf_xdp_link_attach(attr, prog); diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 27d733c0f65e..63c016b4c169 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -24,8 +24,16 @@ struct bpf_stab { #define SOCK_CREATE_FLAG_MASK \ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) +/* This mutex is used to + * - protect race between prog/link attach/detach and link prog update, and + * - protect race between releasing and accessing map in bpf_link. + * A single global mutex lock is used since it is expected contention is low. 
+ */ +static DEFINE_MUTEX(sockmap_mutex); + static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, - struct bpf_prog *old, u32 which); + struct bpf_prog *old, struct bpf_link *link, + u32 which); static struct sk_psock_progs *sock_map_progs(struct bpf_map *map); static struct bpf_map *sock_map_alloc(union bpf_attr *attr) @@ -71,7 +79,9 @@ int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog) map = __bpf_map_get(f); if (IS_ERR(map)) return PTR_ERR(map); - ret = sock_map_prog_update(map, prog, NULL, attr->attach_type); + mutex_lock(&sockmap_mutex); + ret = sock_map_prog_update(map, prog, NULL, NULL, attr->attach_type); + mutex_unlock(&sockmap_mutex); fdput(f); return ret; } @@ -103,7 +113,9 @@ int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) goto put_prog; } - ret = sock_map_prog_update(map, NULL, prog, attr->attach_type); + mutex_lock(&sockmap_mutex); + ret = sock_map_prog_update(map, NULL, prog, NULL, attr->attach_type); + mutex_unlock(&sockmap_mutex); put_prog: bpf_prog_put(prog); put_map: @@ -1454,55 +1466,84 @@ static struct sk_psock_progs *sock_map_progs(struct bpf_map *map) return NULL; } -static int sock_map_prog_lookup(struct bpf_map *map, struct bpf_prog ***pprog, - u32 which) +static int sock_map_prog_link_lookup(struct bpf_map *map, struct bpf_prog ***pprog, + struct bpf_link ***plink, u32 which) { struct sk_psock_progs *progs = sock_map_progs(map); + struct bpf_prog **cur_pprog; + struct bpf_link **cur_plink; if (!progs) return -EOPNOTSUPP; switch (which) { case BPF_SK_MSG_VERDICT: - *pprog = &progs->msg_parser; + cur_pprog = &progs->msg_parser; + cur_plink = &progs->msg_parser_link; break; #if IS_ENABLED(CONFIG_BPF_STREAM_PARSER) case BPF_SK_SKB_STREAM_PARSER: - *pprog = &progs->stream_parser; + cur_pprog = &progs->stream_parser; + cur_plink = &progs->stream_parser_link; break; #endif case BPF_SK_SKB_STREAM_VERDICT: if (progs->skb_verdict) return -EBUSY; - *pprog = &progs->stream_verdict; + cur_pprog = &progs->stream_verdict; + cur_plink = &progs->stream_verdict_link; break; case BPF_SK_SKB_VERDICT: if (progs->stream_verdict) return -EBUSY; - *pprog = &progs->skb_verdict; + cur_pprog = &progs->skb_verdict; + cur_plink = &progs->skb_verdict_link; break; default: return -EOPNOTSUPP; } + *pprog = cur_pprog; + if (plink) + *plink = cur_plink; return 0; } +/* Handle the following four cases: + * prog_attach: prog != NULL, old == NULL, link == NULL + * prog_detach: prog == NULL, old != NULL, link == NULL + * link_attach: prog != NULL, old == NULL, link != NULL + * link_detach: prog == NULL, old != NULL, link != NULL + */ static int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, - struct bpf_prog *old, u32 which) + struct bpf_prog *old, struct bpf_link *link, + u32 which) { struct bpf_prog **pprog; + struct bpf_link **plink; int ret; - ret = sock_map_prog_lookup(map, &pprog, which); + ret = sock_map_prog_link_lookup(map, &pprog, &plink, which); if (ret) return ret; - if (old) - return psock_replace_prog(pprog, prog, old); + /* for prog_attach/prog_detach/link_attach, return error if a bpf_link + * exists for that prog. 
+ */ + if ((!link || prog) && *plink) + return -EBUSY; - psock_set_prog(pprog, prog); - return 0; + if (old) { + ret = psock_replace_prog(pprog, prog, old); + if (!ret) + *plink = NULL; + } else { + psock_set_prog(pprog, prog); + if (link) + *plink = link; + } + + return ret; } int sock_map_bpf_prog_query(const union bpf_attr *attr, @@ -1527,7 +1568,7 @@ int sock_map_bpf_prog_query(const union bpf_attr *attr, rcu_read_lock(); - ret = sock_map_prog_lookup(map, &pprog, attr->query.attach_type); + ret = sock_map_prog_link_lookup(map, &pprog, NULL, attr->query.attach_type); if (ret) goto end; @@ -1657,6 +1698,196 @@ void sock_map_close(struct sock *sk, long timeout) } EXPORT_SYMBOL_GPL(sock_map_close); +struct sockmap_link { + struct bpf_link link; + struct bpf_map *map; + enum bpf_attach_type attach_type; +}; + +static void sock_map_link_release(struct bpf_link *link) +{ + struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link); + + mutex_lock(&sockmap_mutex); + if (!sockmap_link->map) + goto out; + + WARN_ON_ONCE(sock_map_prog_update(sockmap_link->map, NULL, link->prog, link, + sockmap_link->attach_type)); + + bpf_map_put_with_uref(sockmap_link->map); + sockmap_link->map = NULL; +out: + mutex_unlock(&sockmap_mutex); +} + +static int sock_map_link_detach(struct bpf_link *link) +{ + sock_map_link_release(link); + return 0; +} + +static void sock_map_link_dealloc(struct bpf_link *link) +{ + kfree(link); +} + +/* Handle the following two cases: + * case 1: link != NULL, prog != NULL, old != NULL + * case 2: link != NULL, prog != NULL, old == NULL + */ +static int sock_map_link_update_prog(struct bpf_link *link, + struct bpf_prog *prog, + struct bpf_prog *old) +{ + const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link); + struct bpf_prog **pprog, *old_link_prog; + struct bpf_link **plink; + int ret = 0; + + mutex_lock(&sockmap_mutex); + + /* If old prog is not NULL, ensure old prog is the same as link->prog. */ + if (old && link->prog != old) { + ret = -EPERM; + goto out; + } + /* Ensure link->prog has the same type/attach_type as the new prog. */ + if (link->prog->type != prog->type || + link->prog->expected_attach_type != prog->expected_attach_type) { + ret = -EINVAL; + goto out; + } + + ret = sock_map_prog_link_lookup(sockmap_link->map, &pprog, &plink, + sockmap_link->attach_type); + if (ret) + goto out; + + /* return error if the stored bpf_link does not match the incoming bpf_link. 
*/ + if (link != *plink) { + ret = -EBUSY; + goto out; + } + + if (old) { + ret = psock_replace_prog(pprog, prog, old); + if (ret) + goto out; + } else { + psock_set_prog(pprog, prog); + } + + bpf_prog_inc(prog); + old_link_prog = xchg(&link->prog, prog); + bpf_prog_put(old_link_prog); + +out: + mutex_unlock(&sockmap_mutex); + return ret; +} + +static u32 sock_map_link_get_map_id(const struct sockmap_link *sockmap_link) +{ + u32 map_id = 0; + + mutex_lock(&sockmap_mutex); + if (sockmap_link->map) + map_id = sockmap_link->map->id; + mutex_unlock(&sockmap_mutex); + return map_id; +} + +static int sock_map_link_fill_info(const struct bpf_link *link, + struct bpf_link_info *info) +{ + const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link); + u32 map_id = sock_map_link_get_map_id(sockmap_link); + + info->sockmap.map_id = map_id; + info->sockmap.attach_type = sockmap_link->attach_type; + return 0; +} + +static void sock_map_link_show_fdinfo(const struct bpf_link *link, + struct seq_file *seq) +{ + const struct sockmap_link *sockmap_link = container_of(link, struct sockmap_link, link); + u32 map_id = sock_map_link_get_map_id(sockmap_link); + + seq_printf(seq, "map_id:\t%u\n", map_id); + seq_printf(seq, "attach_type:\t%u\n", sockmap_link->attach_type); +} + +static const struct bpf_link_ops sock_map_link_ops = { + .release = sock_map_link_release, + .dealloc = sock_map_link_dealloc, + .detach = sock_map_link_detach, + .update_prog = sock_map_link_update_prog, + .fill_link_info = sock_map_link_fill_info, + .show_fdinfo = sock_map_link_show_fdinfo, +}; + +int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_link_primer link_primer; + struct sockmap_link *sockmap_link; + enum bpf_attach_type attach_type; + struct bpf_map *map; + int ret; + + if (attr->link_create.flags) + return -EINVAL; + + map = bpf_map_get_with_uref(attr->link_create.target_fd); + if (IS_ERR(map)) + return PTR_ERR(map); + if (map->map_type != BPF_MAP_TYPE_SOCKMAP && map->map_type != BPF_MAP_TYPE_SOCKHASH) { + ret = -EINVAL; + goto out; + } + + sockmap_link = kzalloc(sizeof(*sockmap_link), GFP_USER); + if (!sockmap_link) { + ret = -ENOMEM; + goto out; + } + + attach_type = attr->link_create.attach_type; + bpf_link_init(&sockmap_link->link, BPF_LINK_TYPE_SOCKMAP, &sock_map_link_ops, prog); + sockmap_link->map = map; + sockmap_link->attach_type = attach_type; + + ret = bpf_link_prime(&sockmap_link->link, &link_primer); + if (ret) { + kfree(sockmap_link); + goto out; + } + + mutex_lock(&sockmap_mutex); + ret = sock_map_prog_update(map, prog, NULL, &sockmap_link->link, attach_type); + mutex_unlock(&sockmap_mutex); + if (ret) { + bpf_link_cleanup(&link_primer); + goto out; + } + + /* Increase refcnt for the prog since when old prog is replaced with + * psock_replace_prog() and psock_set_prog() its refcnt will be decreased. + * + * Actually, we do not need to increase refcnt for the prog since bpf_link + * will hold a reference. But in order to have less complexity w.r.t. + * replacing/setting prog, let us increase the refcnt to make things simpler. 
+ */ + bpf_prog_inc(prog); + + return bpf_link_settle(&link_primer); + +out: + bpf_map_put_with_uref(map); + return ret; +} + static int sock_map_iter_attach_target(struct bpf_prog *prog, union bpf_iter_link_info *linfo, struct bpf_iter_aux_info *aux) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 6fe9f11c8abe..cee0a7915c08 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1135,6 +1135,7 @@ enum bpf_link_type { BPF_LINK_TYPE_TCX = 11, BPF_LINK_TYPE_UPROBE_MULTI = 12, BPF_LINK_TYPE_NETKIT = 13, + BPF_LINK_TYPE_SOCKMAP = 14, __MAX_BPF_LINK_TYPE, }; @@ -6724,6 +6725,10 @@ struct bpf_link_info { __u32 ifindex; __u32 attach_type; } netkit; + struct { + __u32 map_id; + __u32 attach_type; + } sockmap; }; } __attribute__((aligned(8))); -- cgit From 8dfd00f7069c54db78463f2a8a8cb677844fdf1f Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Mon, 8 Apr 2024 06:38:22 +0000 Subject: soundwire: reconcile dp0_prop and dpn_prop The definitions for DP0 are missing a set of fields that are required to reuse the same configuration code as DPn. Signed-off-by: Pierre-Louis Bossart Reviewed-by: Rander Wang Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20240408063822.421963-1-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul --- include/linux/soundwire/sdw.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index e5d0aa58e7f6..7bb9119f3069 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -235,6 +235,7 @@ enum sdw_clk_stop_mode { * @BRA_flow_controlled: Slave implementation results in an OK_NotReady * response * @simple_ch_prep_sm: If channel prepare sequence is required + * @ch_prep_timeout: Port-specific timeout value, in milliseconds * @imp_def_interrupts: If set, each bit corresponds to support for * implementation-defined interrupts * @@ -249,6 +250,7 @@ struct sdw_dp0_prop { u32 *words; bool BRA_flow_controlled; bool simple_ch_prep_sm; + u32 ch_prep_timeout; bool imp_def_interrupts; }; -- cgit From 63752ad191f93144c99e848b7dbda050bda16fa6 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 5 Apr 2024 21:56:46 +0300 Subject: genirq: Fix trivial typo in the comment CPY ==> COPY IRQ_SET_MASK_NOCOPY is defined with 'O' letter. Fix the comment. Signed-off-by: Andy Shevchenko Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240405185726.3931703-3-andriy.shevchenko@linux.intel.com --- include/linux/irq.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/irq.h b/include/linux/irq.h index 97baa937ab5b..a217e1029c1d 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -115,7 +115,7 @@ enum { * Return value for chip->irq_set_affinity() * * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity - * IRQ_SET_MASK_NOCPY - OK, chip did update irq_common_data.affinity + * IRQ_SET_MASK_NOCOPY - OK, chip did update irq_common_data.affinity * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to * support stacked irqchips, which indicates skipping * all descendant irqchips. -- cgit From 693f75b91a9171e99f84fc193e39f48e21ba4a4f Mon Sep 17 00:00:00 2001 From: Sreenath Vijayan Date: Wed, 13 Mar 2024 15:50:52 +0530 Subject: printk: Add function to replay kernel log on consoles Add a generic function console_replay_all() for replaying the kernel log on consoles, in any context. 
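As an example of the intended use, a replay could be triggered from a hypothetical sysrq handler; console_replay_all() only trylocks the console subsystem and does nothing on contention, which makes it safe from that kind of atomic context (the key binding and handler below are purely illustrative, not part of this patch):

#include <linux/printk.h>
#include <linux/sysrq.h>

/* Sketch: replay the kernel log on all consoles from a sysrq handler. */
static void sysrq_handle_replay_logs(u8 key)
{
	console_replay_all();
}

static const struct sysrq_key_op sysrq_replay_logs_op = {
	.handler	= sysrq_handle_replay_logs,
	.help_msg	= "replay-kernel-logs(R)",	/* key choice is illustrative */
	.action_msg	= "Replay kernel logs on consoles",
	.enable_mask	= SYSRQ_ENABLE_DUMP,
};
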
It would allow viewing the logs on an unresponsive terminal via sysrq. Reuse the existing code from console_flush_on_panic() for resetting the sequence numbers, by introducing a new helper function __console_rewind_all(). It is safe to be called under console_lock(). Try to acquire lock on the console subsystem without waiting. If successful, reset the sequence number to oldest available record on all consoles and call console_unlock() which will automatically flush the messages to the consoles. Suggested-by: John Ogness Suggested-by: Petr Mladek Signed-off-by: Shimoyashiki Taichi Reviewed-by: John Ogness Signed-off-by: Sreenath Vijayan Link: https://lore.kernel.org/r/90ee131c643a5033d117b556c0792de65129d4c3.1710220326.git.sreenath.vijayan@sony.com Signed-off-by: Greg Kroah-Hartman --- include/linux/printk.h | 4 +++ kernel/printk/printk.c | 77 ++++++++++++++++++++++++++++++++++---------------- 2 files changed, 57 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/printk.h b/include/linux/printk.h index 5e038e782256..1c5936ab991f 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -195,6 +195,7 @@ void show_regs_print_info(const char *log_lvl); extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold; extern asmlinkage void dump_stack(void) __cold; void printk_trigger_flush(void); +void console_replay_all(void); #else static inline __printf(1, 0) int vprintk(const char *s, va_list args) @@ -274,6 +275,9 @@ static inline void dump_stack(void) static inline void printk_trigger_flush(void) { } +static inline void console_replay_all(void) +{ +} #endif bool this_cpu_in_panic(void); diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 1e3b21682547..6206804d275b 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -3161,6 +3161,40 @@ void console_unblank(void) pr_flush(1000, true); } +/* + * Rewind all consoles to the oldest available record. + * + * IMPORTANT: The function is safe only when called under + * console_lock(). It is not enforced because + * it is used as a best effort in panic(). + */ +static void __console_rewind_all(void) +{ + struct console *c; + short flags; + int cookie; + u64 seq; + + seq = prb_first_valid_seq(prb); + + cookie = console_srcu_read_lock(); + for_each_console_srcu(c) { + flags = console_srcu_read_flags(c); + + if (flags & CON_NBCON) { + nbcon_seq_force(c, seq); + } else { + /* + * This assignment is safe only when called under + * console_lock(). On panic, legacy consoles are + * only best effort. + */ + c->seq = seq; + } + } + console_srcu_read_unlock(cookie); +} + /** * console_flush_on_panic - flush console content on panic * @mode: flush all messages in buffer or just the pending ones @@ -3189,30 +3223,8 @@ void console_flush_on_panic(enum con_flush_mode mode) */ console_may_schedule = 0; - if (mode == CONSOLE_REPLAY_ALL) { - struct console *c; - short flags; - int cookie; - u64 seq; - - seq = prb_first_valid_seq(prb); - - cookie = console_srcu_read_lock(); - for_each_console_srcu(c) { - flags = console_srcu_read_flags(c); - - if (flags & CON_NBCON) { - nbcon_seq_force(c, seq); - } else { - /* - * This is an unsynchronized assignment. On - * panic legacy consoles are only best effort. 
- */ - c->seq = seq; - } - } - console_srcu_read_unlock(cookie); - } + if (mode == CONSOLE_REPLAY_ALL) + __console_rewind_all(); console_flush_all(false, &next_seq, &handover); } @@ -4301,6 +4313,23 @@ void kmsg_dump_rewind(struct kmsg_dump_iter *iter) } EXPORT_SYMBOL_GPL(kmsg_dump_rewind); +/** + * console_replay_all - replay kernel log on consoles + * + * Try to obtain lock on console subsystem and replay all + * available records in printk buffer on the consoles. + * Does nothing if lock is not obtained. + * + * Context: Any context. + */ +void console_replay_all(void) +{ + if (console_trylock()) { + __console_rewind_all(); + /* Consoles are flushed as part of console_unlock(). */ + console_unlock(); + } +} #endif #ifdef CONFIG_SMP -- cgit From 1019fa4839c97c4efff9c26af4d74a184921d8da Mon Sep 17 00:00:00 2001 From: Andrew Davis Date: Wed, 10 Apr 2024 09:48:03 -0500 Subject: uio: pruss: Remove this driver This UIO driver was used to control the PRU processors found on various TI SoCs. It was created before the Remoteproc framework, but now with that we have a standard way to program and manage the PRU processors. The proper PRU Remoteproc driver should be used instead of this driver. This driver only supported the original class of PRUSS (OMAP-L1xx / AM17xx / AM18xx / TMS320C674x / DA8xx) but when these platforms were switched to use Device Tree the support for DT was not added to this driver and so it is now unused/unusable. Support for these platforms can be added to the proper PRU Remoteproc driver if ever needed. Remove this driver. Signed-off-by: Andrew Davis Link: https://lore.kernel.org/r/20240410144803.126831-1-afd@ti.com Signed-off-by: Greg Kroah-Hartman --- drivers/uio/Kconfig | 18 --- drivers/uio/Makefile | 1 - drivers/uio/uio_pruss.c | 255 -------------------------------- include/linux/platform_data/uio_pruss.h | 18 --- 4 files changed, 292 deletions(-) delete mode 100644 drivers/uio/uio_pruss.c delete mode 100644 include/linux/platform_data/uio_pruss.h (limited to 'include/linux') diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig index 2e16c5338e5b..b060dcd7c635 100644 --- a/drivers/uio/Kconfig +++ b/drivers/uio/Kconfig @@ -125,24 +125,6 @@ config UIO_FSL_ELBC_GPCM_NETX5152 Information about this hardware can be found at: http://www.hilscher.com/netx -config UIO_PRUSS - tristate "Texas Instruments PRUSS driver" - select GENERIC_ALLOCATOR - depends on HAS_IOMEM && HAS_DMA - help - PRUSS driver for OMAPL138/DA850/AM18XX devices - PRUSS driver requires user space components, examples and user space - driver is available from below SVN repo - you may use anonymous login - - https://gforge.ti.com/gf/project/pru_sw/ - - More info on API is available at below wiki - - http://processors.wiki.ti.com/index.php/PRU_Linux_Application_Loader - - To compile this driver as a module, choose M here: the module - will be called uio_pruss. 
- config UIO_MF624 tristate "Humusoft MF624 DAQ PCI card driver" depends on PCI diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile index f2f416a14228..1c5f3b5a95cf 100644 --- a/drivers/uio/Makefile +++ b/drivers/uio/Makefile @@ -7,7 +7,6 @@ obj-$(CONFIG_UIO_AEC) += uio_aec.o obj-$(CONFIG_UIO_SERCOS3) += uio_sercos3.o obj-$(CONFIG_UIO_PCI_GENERIC) += uio_pci_generic.o obj-$(CONFIG_UIO_NETX) += uio_netx.o -obj-$(CONFIG_UIO_PRUSS) += uio_pruss.o obj-$(CONFIG_UIO_MF624) += uio_mf624.o obj-$(CONFIG_UIO_FSL_ELBC_GPCM) += uio_fsl_elbc_gpcm.o obj-$(CONFIG_UIO_HV_GENERIC) += uio_hv_generic.o diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c deleted file mode 100644 index f67881cba645..000000000000 --- a/drivers/uio/uio_pruss.c +++ /dev/null @@ -1,255 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Programmable Real-Time Unit Sub System (PRUSS) UIO driver (uio_pruss) - * - * This driver exports PRUSS host event out interrupts and PRUSS, L3 RAM, - * and DDR RAM to user space for applications interacting with PRUSS firmware - * - * Copyright (C) 2010-11 Texas Instruments Incorporated - http://www.ti.com/ - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DRV_NAME "pruss_uio" -#define DRV_VERSION "1.0" - -static int sram_pool_sz = SZ_16K; -module_param(sram_pool_sz, int, 0); -MODULE_PARM_DESC(sram_pool_sz, "sram pool size to allocate "); - -static int extram_pool_sz = SZ_256K; -module_param(extram_pool_sz, int, 0); -MODULE_PARM_DESC(extram_pool_sz, "external ram pool size to allocate"); - -/* - * Host event IRQ numbers from PRUSS - PRUSS can generate up to 8 interrupt - * events to AINTC of ARM host processor - which can be used for IPC b/w PRUSS - * firmware and user space application, async notification from PRU firmware - * to user space application - * 3 PRU_EVTOUT0 - * 4 PRU_EVTOUT1 - * 5 PRU_EVTOUT2 - * 6 PRU_EVTOUT3 - * 7 PRU_EVTOUT4 - * 8 PRU_EVTOUT5 - * 9 PRU_EVTOUT6 - * 10 PRU_EVTOUT7 -*/ -#define MAX_PRUSS_EVT 8 - -#define PINTC_HIDISR 0x0038 -#define PINTC_HIPIR 0x0900 -#define HIPIR_NOPEND 0x80000000 -#define PINTC_HIER 0x1500 - -struct uio_pruss_dev { - struct uio_info *info; - struct clk *pruss_clk; - dma_addr_t sram_paddr; - dma_addr_t ddr_paddr; - void __iomem *prussio_vaddr; - unsigned long sram_vaddr; - void *ddr_vaddr; - unsigned int hostirq_start; - unsigned int pintc_base; - struct gen_pool *sram_pool; -}; - -static irqreturn_t pruss_handler(int irq, struct uio_info *info) -{ - struct uio_pruss_dev *gdev = info->priv; - int intr_bit = (irq - gdev->hostirq_start + 2); - int val, intr_mask = (1 << intr_bit); - void __iomem *base = gdev->prussio_vaddr + gdev->pintc_base; - void __iomem *intren_reg = base + PINTC_HIER; - void __iomem *intrdis_reg = base + PINTC_HIDISR; - void __iomem *intrstat_reg = base + PINTC_HIPIR + (intr_bit << 2); - - val = ioread32(intren_reg); - /* Is interrupt enabled and active ? 
*/ - if (!(val & intr_mask) && (ioread32(intrstat_reg) & HIPIR_NOPEND)) - return IRQ_NONE; - /* Disable interrupt */ - iowrite32(intr_bit, intrdis_reg); - return IRQ_HANDLED; -} - -static void pruss_cleanup(struct device *dev, struct uio_pruss_dev *gdev) -{ - int cnt; - struct uio_info *p = gdev->info; - - for (cnt = 0; cnt < MAX_PRUSS_EVT; cnt++, p++) { - uio_unregister_device(p); - } - iounmap(gdev->prussio_vaddr); - if (gdev->ddr_vaddr) { - dma_free_coherent(dev, extram_pool_sz, gdev->ddr_vaddr, - gdev->ddr_paddr); - } - if (gdev->sram_vaddr) - gen_pool_free(gdev->sram_pool, - gdev->sram_vaddr, - sram_pool_sz); - clk_disable(gdev->pruss_clk); -} - -static int pruss_probe(struct platform_device *pdev) -{ - struct uio_info *p; - struct uio_pruss_dev *gdev; - struct resource *regs_prussio; - struct device *dev = &pdev->dev; - int ret, cnt, i, len; - struct uio_pruss_pdata *pdata = dev_get_platdata(dev); - - gdev = devm_kzalloc(dev, sizeof(struct uio_pruss_dev), GFP_KERNEL); - if (!gdev) - return -ENOMEM; - - gdev->info = devm_kcalloc(dev, MAX_PRUSS_EVT, sizeof(*p), GFP_KERNEL); - if (!gdev->info) - return -ENOMEM; - - /* Power on PRU in case its not done as part of boot-loader */ - gdev->pruss_clk = devm_clk_get(dev, "pruss"); - if (IS_ERR(gdev->pruss_clk)) { - dev_err(dev, "Failed to get clock\n"); - return PTR_ERR(gdev->pruss_clk); - } - - ret = clk_enable(gdev->pruss_clk); - if (ret) { - dev_err(dev, "Failed to enable clock\n"); - return ret; - } - - regs_prussio = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs_prussio) { - dev_err(dev, "No PRUSS I/O resource specified\n"); - ret = -EIO; - goto err_clk_disable; - } - - if (!regs_prussio->start) { - dev_err(dev, "Invalid memory resource\n"); - ret = -EIO; - goto err_clk_disable; - } - - if (pdata->sram_pool) { - gdev->sram_pool = pdata->sram_pool; - gdev->sram_vaddr = - (unsigned long)gen_pool_dma_alloc(gdev->sram_pool, - sram_pool_sz, &gdev->sram_paddr); - if (!gdev->sram_vaddr) { - dev_err(dev, "Could not allocate SRAM pool\n"); - ret = -ENOMEM; - goto err_clk_disable; - } - } - - gdev->ddr_vaddr = dma_alloc_coherent(dev, extram_pool_sz, - &(gdev->ddr_paddr), GFP_KERNEL | GFP_DMA); - if (!gdev->ddr_vaddr) { - dev_err(dev, "Could not allocate external memory\n"); - ret = -ENOMEM; - goto err_free_sram; - } - - len = resource_size(regs_prussio); - gdev->prussio_vaddr = ioremap(regs_prussio->start, len); - if (!gdev->prussio_vaddr) { - dev_err(dev, "Can't remap PRUSS I/O address range\n"); - ret = -ENOMEM; - goto err_free_ddr_vaddr; - } - - ret = platform_get_irq(pdev, 0); - if (ret < 0) - goto err_unmap; - - gdev->hostirq_start = ret; - gdev->pintc_base = pdata->pintc_base; - - for (cnt = 0, p = gdev->info; cnt < MAX_PRUSS_EVT; cnt++, p++) { - p->mem[0].addr = regs_prussio->start; - p->mem[0].size = resource_size(regs_prussio); - p->mem[0].memtype = UIO_MEM_PHYS; - - p->mem[1].addr = gdev->sram_paddr; - p->mem[1].size = sram_pool_sz; - p->mem[1].memtype = UIO_MEM_PHYS; - - p->mem[2].addr = (uintptr_t) gdev->ddr_vaddr; - p->mem[2].dma_addr = gdev->ddr_paddr; - p->mem[2].size = extram_pool_sz; - p->mem[2].memtype = UIO_MEM_DMA_COHERENT; - p->mem[2].dma_device = dev; - - p->name = devm_kasprintf(dev, GFP_KERNEL, "pruss_evt%d", cnt); - p->version = DRV_VERSION; - - /* Register PRUSS IRQ lines */ - p->irq = gdev->hostirq_start + cnt; - p->handler = pruss_handler; - p->priv = gdev; - - ret = uio_register_device(dev, p); - if (ret < 0) - goto err_unloop; - } - - platform_set_drvdata(pdev, gdev); - return 0; - -err_unloop: - for (i = 
0, p = gdev->info; i < cnt; i++, p++) { - uio_unregister_device(p); - } -err_unmap: - iounmap(gdev->prussio_vaddr); -err_free_ddr_vaddr: - dma_free_coherent(dev, extram_pool_sz, gdev->ddr_vaddr, - gdev->ddr_paddr); -err_free_sram: - if (pdata->sram_pool) - gen_pool_free(gdev->sram_pool, gdev->sram_vaddr, sram_pool_sz); -err_clk_disable: - clk_disable(gdev->pruss_clk); - - return ret; -} - -static int pruss_remove(struct platform_device *dev) -{ - struct uio_pruss_dev *gdev = platform_get_drvdata(dev); - - pruss_cleanup(&dev->dev, gdev); - return 0; -} - -static struct platform_driver pruss_driver = { - .probe = pruss_probe, - .remove = pruss_remove, - .driver = { - .name = DRV_NAME, - }, -}; - -module_platform_driver(pruss_driver); - -MODULE_LICENSE("GPL v2"); -MODULE_VERSION(DRV_VERSION); -MODULE_AUTHOR("Amit Chatterjee "); -MODULE_AUTHOR("Pratheesh Gangadhar "); diff --git a/include/linux/platform_data/uio_pruss.h b/include/linux/platform_data/uio_pruss.h deleted file mode 100644 index f76fa393b802..000000000000 --- a/include/linux/platform_data/uio_pruss.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * include/linux/platform_data/uio_pruss.h - * - * Platform data for uio_pruss driver - * - * Copyright (C) 2010-11 Texas Instruments Incorporated - https://www.ti.com/ - */ - -#ifndef _UIO_PRUSS_H_ -#define _UIO_PRUSS_H_ - -/* To configure the PRUSS INTC base offset for UIO driver */ -struct uio_pruss_pdata { - u32 pintc_base; - struct gen_pool *sram_pool; -}; -#endif /* _UIO_PRUSS_H_ */ -- cgit From e8c4bd6c6e6b7e7b416c42806981c2a81370001e Mon Sep 17 00:00:00 2001 From: Saurabh Sengar Date: Sat, 30 Mar 2024 01:51:57 -0700 Subject: Drivers: hv: vmbus: Add utility function for querying ring size Add a function to query for the preferred ring buffer size of VMBus device. This will allow the drivers (eg. UIO) to allocate the most optimized ring buffer size for devices. Signed-off-by: Saurabh Sengar Reviewed-by: Long Li Link: https://lore.kernel.org/r/1711788723-8593-2-git-send-email-ssengar@linux.microsoft.com Signed-off-by: Greg Kroah-Hartman --- drivers/hv/channel_mgmt.c | 15 ++++++++++++--- drivers/hv/hyperv_vmbus.h | 5 +++++ include/linux/hyperv.h | 2 ++ 3 files changed, 19 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 2f4d09ce027a..3c6011a48dab 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -120,7 +120,9 @@ const struct vmbus_device vmbus_devs[] = { }, /* File copy */ - { .dev_type = HV_FCOPY, + /* fcopy always uses 16KB ring buffer size and is working well for last many years */ + { .pref_ring_size = 0x4000, + .dev_type = HV_FCOPY, HV_FCOPY_GUID, .perf_device = false, .allowed_in_isolated = false, @@ -140,12 +142,19 @@ const struct vmbus_device vmbus_devs[] = { .allowed_in_isolated = false, }, - /* Unknown GUID */ - { .dev_type = HV_UNKNOWN, + /* + * Unknown GUID + * 64 KB ring buffer + 4 KB header should be sufficient size for any Hyper-V device apart + * from HV_NIC and HV_SCSI. This case avoid the fallback for unknown devices to allocate + * much bigger (2 MB) of ring size. 
+ */ + { .pref_ring_size = 0x11000, + .dev_type = HV_UNKNOWN, .perf_device = false, .allowed_in_isolated = false, }, }; +EXPORT_SYMBOL_GPL(vmbus_devs); static const struct { guid_t guid; diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index f6b1e710f805..76ac5185a01a 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -417,6 +417,11 @@ static inline bool hv_is_perf_channel(struct vmbus_channel *channel) return vmbus_devs[channel->device_id].perf_device; } +static inline size_t hv_dev_ring_size(struct vmbus_channel *channel) +{ + return vmbus_devs[channel->device_id].pref_ring_size; +} + static inline bool hv_is_allocated_cpu(unsigned int cpu) { struct vmbus_channel *channel, *sc; diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 6ef0557b4bff..7de9f90d3f95 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -820,6 +820,8 @@ struct vmbus_requestor { #define VMBUS_RQST_RESET (U64_MAX - 3) struct vmbus_device { + /* preferred ring buffer size in KB, 0 means no preferred size for this device */ + size_t pref_ring_size; u16 dev_type; guid_t guid; bool perf_device; -- cgit From a28380f119a918135c6b7155fb4eb95eaabb62dc Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Tue, 9 Apr 2024 13:02:05 -0700 Subject: devcoredump: Add dev_coredump_put() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It is useful for modules that do not want to keep coredump available after its unload. Otherwise, the coredump would only be removed after DEVCD_TIMEOUT seconds. v2: - dev_coredump_put() documentation updated (Mukesh) Cc: Rodrigo Vivi Cc: Mukesh Ojha Cc: Johannes Berg Cc: Jonathan Cavitt Reviewed-by: Johannes Berg Acked-by: Jonathan Cavitt Signed-off-by: José Roberto de Souza Acked-by: Greg Kroah-Hartman Link: https://patchwork.freedesktop.org/patch/msgid/20240409200206.108452-1-jose.souza@intel.com Signed-off-by: Rodrigo Vivi --- drivers/base/devcoredump.c | 23 +++++++++++++++++++++++ include/linux/devcoredump.h | 5 +++++ 2 files changed, 28 insertions(+) (limited to 'include/linux') diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c index 7e2d1f0d903a..82aeb09b3d1b 100644 --- a/drivers/base/devcoredump.c +++ b/drivers/base/devcoredump.c @@ -304,6 +304,29 @@ static ssize_t devcd_read_from_sgtable(char *buffer, loff_t offset, offset); } +/** + * dev_coredump_put - remove device coredump + * @dev: the struct device for the crashed device + * + * dev_coredump_put() removes coredump, if exists, for a given device from + * the file system and free its associated data otherwise, does nothing. + * + * It is useful for modules that do not want to keep coredump + * available after its unload. 
+ */ +void dev_coredump_put(struct device *dev) +{ + struct device *existing; + + existing = class_find_device(&devcd_class, NULL, dev, + devcd_match_failing); + if (existing) { + devcd_free(existing, NULL); + put_device(existing); + } +} +EXPORT_SYMBOL_GPL(dev_coredump_put); + /** * dev_coredumpm - create device coredump with read/free methods * @dev: the struct device for the crashed device diff --git a/include/linux/devcoredump.h b/include/linux/devcoredump.h index c008169ed2c6..c8f7eb6cc191 100644 --- a/include/linux/devcoredump.h +++ b/include/linux/devcoredump.h @@ -63,6 +63,8 @@ void dev_coredumpm(struct device *dev, struct module *owner, void dev_coredumpsg(struct device *dev, struct scatterlist *table, size_t datalen, gfp_t gfp); + +void dev_coredump_put(struct device *dev); #else static inline void dev_coredumpv(struct device *dev, void *data, size_t datalen, gfp_t gfp) @@ -85,6 +87,9 @@ static inline void dev_coredumpsg(struct device *dev, struct scatterlist *table, { _devcd_free_sgtable(table); } +static inline void dev_coredump_put(struct device *dev) +{ +} #endif /* CONFIG_DEV_COREDUMP */ #endif /* __DEVCOREDUMP_H */ -- cgit From d48c03198a92edf41e89477dab4f602df15165ee Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sat, 6 Apr 2024 15:52:01 +0200 Subject: sysfs: Add sysfs_bin_attr_simple_read() helper When drivers expose a bin_attribute in sysfs which is backed by a buffer in memory, a common pattern is to set the @private and @size members in struct bin_attribute to the buffer's location and size. The ->read() callback then merely consists of a single memcpy() call. It's not even necessary to perform bounds checks as these are already handled by sysfs_kf_bin_read(). However each driver is so far providing its own ->read() implementation. The pattern is sufficiently frequent to merit a public helper, so add sysfs_bin_attr_simple_read() as well as BIN_ATTR_SIMPLE_RO() and BIN_ATTR_SIMPLE_ADMIN_RO() macros to ease declaration of such bin_attributes and reduce LoC and .text section size. Signed-off-by: Lukas Wunner Reviewed-by: Greg Kroah-Hartman Acked-by: Rafael J. Wysocki Acked-by: Ard Biesheuvel Link: https://lore.kernel.org/r/5ed62b197a442ec6db53d8746d9d806dd0576e2d.1712410202.git.lukas@wunner.de Signed-off-by: Greg Kroah-Hartman --- fs/sysfs/file.c | 27 +++++++++++++++++++++++++++ include/linux/sysfs.h | 15 +++++++++++++++ 2 files changed, 42 insertions(+) (limited to 'include/linux') diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c index 6b7652fb8050..289b57d32c89 100644 --- a/fs/sysfs/file.c +++ b/fs/sysfs/file.c @@ -783,3 +783,30 @@ int sysfs_emit_at(char *buf, int at, const char *fmt, ...) return len; } EXPORT_SYMBOL_GPL(sysfs_emit_at); + +/** + * sysfs_bin_attr_simple_read - read callback to simply copy from memory. + * @file: attribute file which is being read. + * @kobj: object to which the attribute belongs. + * @attr: attribute descriptor. + * @buf: destination buffer. + * @off: offset in bytes from which to read. + * @count: maximum number of bytes to read. + * + * Simple ->read() callback for bin_attributes backed by a buffer in memory. + * The @private and @size members in struct bin_attribute must be set to the + * buffer's location and size before the bin_attribute is created in sysfs. + * + * Bounds check for @off and @count is done in sysfs_kf_bin_read(). + * Negative value check for @off is done in vfs_setpos() and default_llseek(). + * + * Returns number of bytes written to @buf. 
+ */ +ssize_t sysfs_bin_attr_simple_read(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count) +{ + memcpy(buf, attr->private + off, count); + return count; +} +EXPORT_SYMBOL_GPL(sysfs_bin_attr_simple_read); diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 326341c62385..a7d725fbf739 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -371,6 +371,17 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RO(_name, _size) #define BIN_ATTR_ADMIN_RW(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RW(_name, _size) +#define __BIN_ATTR_SIMPLE_RO(_name, _mode) { \ + .attr = { .name = __stringify(_name), .mode = _mode }, \ + .read = sysfs_bin_attr_simple_read, \ +} + +#define BIN_ATTR_SIMPLE_RO(_name) \ +struct bin_attribute bin_attr_##_name = __BIN_ATTR_SIMPLE_RO(_name, 0444) + +#define BIN_ATTR_SIMPLE_ADMIN_RO(_name) \ +struct bin_attribute bin_attr_##_name = __BIN_ATTR_SIMPLE_RO(_name, 0400) + struct sysfs_ops { ssize_t (*show)(struct kobject *, struct attribute *, char *); ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t); @@ -478,6 +489,10 @@ int sysfs_emit(char *buf, const char *fmt, ...); __printf(3, 4) int sysfs_emit_at(char *buf, int at, const char *fmt, ...); +ssize_t sysfs_bin_attr_simple_read(struct file *file, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t off, size_t count); + #else /* CONFIG_SYSFS */ static inline int sysfs_create_dir_ns(struct kobject *kobj, const void *ns) -- cgit From 4cd47222e435dec8e3787614924174f53fcfb5ae Mon Sep 17 00:00:00 2001 From: George Stark Date: Thu, 11 Apr 2024 19:10:25 +0300 Subject: locking/mutex: Introduce devm_mutex_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using the devm API implies a certain order in which resources are released. All dependent resources that are not devm-wrapped therefore have to be released in a way that respects the devm release order. A mutex is one such object: it is often bound to other resources yet has no devm wrapping of its own. Since mutex_destroy() does nothing in non-debug builds, the call is frequently omitted. That is safe for now but formally wrong, and it becomes a problem if mutex_destroy() is ever extended, so introduce devm_mutex_init(). Suggested-by: Christophe Leroy Signed-off-by: George Stark Reviewed-by: Christophe Leroy Reviewed-by: Andy Shevchenko Reviewed-by: Marek Behún Acked-by: Waiman Long Link: https://lore.kernel.org/r/20240411161032.609544-2-gnstark@salutedevices.com Signed-off-by: Lee Jones --- include/linux/mutex.h | 27 +++++++++++++++++++++++++++ kernel/locking/mutex-debug.c | 12 ++++++++++++ 2 files changed, 39 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 67edc4ca2bee..a561c629d89f 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -22,6 +22,8 @@ #include #include +struct device; + #ifdef CONFIG_DEBUG_LOCK_ALLOC # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ , .dep_map = { \ @@ -117,6 +119,31 @@ do { \ } while (0) #endif /* CONFIG_PREEMPT_RT */ +#ifdef CONFIG_DEBUG_MUTEXES + +int __devm_mutex_init(struct device *dev, struct mutex *lock); + +#else + +static inline int __devm_mutex_init(struct device *dev, struct mutex *lock) +{ + /* + * When CONFIG_DEBUG_MUTEXES is off mutex_destroy() is just a nop so + * no really need to register it in the devm subsystem.
+ */ + return 0; +} + +#endif + +#define devm_mutex_init(dev, mutex) \ +({ \ + typeof(mutex) mutex_ = (mutex); \ + \ + mutex_init(mutex_); \ + __devm_mutex_init(dev, mutex_); \ +}) + /* * See kernel/locking/mutex.c for detailed documentation of these APIs. * Also see Documentation/locking/mutex-design.rst. diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c index bc8abb8549d2..6e6f6071cfa2 100644 --- a/kernel/locking/mutex-debug.c +++ b/kernel/locking/mutex-debug.c @@ -12,6 +12,7 @@ */ #include #include +#include #include #include #include @@ -89,6 +90,17 @@ void debug_mutex_init(struct mutex *lock, const char *name, lock->magic = lock; } +static void devm_mutex_release(void *res) +{ + mutex_destroy(res); +} + +int __devm_mutex_init(struct device *dev, struct mutex *lock) +{ + return devm_add_action_or_reset(dev, devm_mutex_release, lock); +} +EXPORT_SYMBOL_GPL(__devm_mutex_init); + /*** * mutex_destroy - mark a mutex unusable * @lock: the mutex to be destroyed -- cgit From f3b65bbaed7c43d10989380d4b95e2a3e9fe5a6b Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 5 Apr 2024 07:58:12 -0400 Subject: KVM: delete .change_pte MMU notifier callback The .change_pte() MMU notifier callback was intended as an optimization. The original point of it was that KSM could tell KVM to flip its secondary PTE to a new location without having to first zap it. At the time there was also an .invalidate_page() callback; both of them were *not* bracketed by calls to mmu_notifier_invalidate_range_{start,end}(), and .invalidate_page() also doubled as a fallback implementation of .change_pte(). Later on, however, both callbacks were changed to occur within an invalidate_range_start/end() block. In the case of .change_pte(), commit 6bdb913f0a70 ("mm: wrap calls to set_pte_at_notify with invalidate_range_start and invalidate_range_end", 2012-10-09) did so to remove the fallback from .invalidate_page() to .change_pte() and allow sleepable .invalidate_page() hooks. This however made KVM's usage of the .change_pte() callback completely moot, because KVM unmaps the sPTEs during .invalidate_range_start() and therefore .change_pte() has no hope of finding a sPTE to change. Drop the generic KVM code that dispatches to kvm_set_spte_gfn(), as well as all the architecture specific implementations. 
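The ordering that makes the callback unreachable can be sketched roughly as follows (a simplified illustration of the sequence described above, not code from this patch; the locals mm, vma, addr, ptep, new_pte and range are assumed to come from the primary MMU's fault path):

/*
 * KSM/CoW PTE replacement as seen by a secondary MMU (sketch):
 *
 *   mmu_notifier_invalidate_range_start(&range);
 *           -> KVM's invalidate_range_start() zaps the sPTEs for the range
 *   ptep_clear_flush(vma, addr, ptep);
 *           -> the primary PTE is torn down
 *   set_pte_at_notify(mm, addr, ptep, new_pte);
 *           -> would invoke .change_pte(), but no sPTE is left to update
 *   mmu_notifier_invalidate_range_end(&range);
 *           -> KVM rebuilds the sPTE lazily on the next guest fault
 */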
Signed-off-by: Paolo Bonzini Acked-by: Anup Patel Acked-by: Michael Ellerman (powerpc) Reviewed-by: Bibo Mao Message-ID: <20240405115815.3226315-2-pbonzini@redhat.com> Signed-off-by: Paolo Bonzini --- arch/arm64/kvm/mmu.c | 34 ------------------ arch/loongarch/include/asm/kvm_host.h | 1 - arch/loongarch/kvm/mmu.c | 32 ----------------- arch/mips/kvm/mmu.c | 30 ---------------- arch/powerpc/include/asm/kvm_ppc.h | 1 - arch/powerpc/kvm/book3s.c | 5 --- arch/powerpc/kvm/book3s.h | 1 - arch/powerpc/kvm/book3s_64_mmu_hv.c | 12 ------- arch/powerpc/kvm/book3s_hv.c | 1 - arch/powerpc/kvm/book3s_pr.c | 7 ---- arch/powerpc/kvm/e500_mmu_host.c | 6 ---- arch/riscv/kvm/mmu.c | 20 ----------- arch/x86/kvm/mmu/mmu.c | 67 ++++------------------------------- arch/x86/kvm/mmu/spte.c | 16 --------- arch/x86/kvm/mmu/spte.h | 2 -- arch/x86/kvm/mmu/tdp_mmu.c | 46 ------------------------ arch/x86/kvm/mmu/tdp_mmu.h | 1 - include/linux/kvm_host.h | 2 -- include/trace/events/kvm.h | 15 -------- virt/kvm/kvm_main.c | 43 ---------------------- 20 files changed, 7 insertions(+), 335 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index dc04bc767865..ff17849be9f4 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1768,40 +1768,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) return false; } -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - kvm_pfn_t pfn = pte_pfn(range->arg.pte); - - if (!kvm->arch.mmu.pgt) - return false; - - WARN_ON(range->end - range->start != 1); - - /* - * If the page isn't tagged, defer to user_mem_abort() for sanitising - * the MTE tags. The S2 pte should have been unmapped by - * mmu_notifier_invalidate_range_end(). - */ - if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn))) - return false; - - /* - * We've moved a page around, probably through CoW, so let's treat - * it just like a translation fault and the map handler will clean - * the cache to the PoC. - * - * The MMU notifiers will have unmapped a huge PMD before calling - * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and - * therefore we never need to clear out a huge PMD through this - * calling path and a memcache is not required. 
- */ - kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT, - PAGE_SIZE, __pfn_to_phys(pfn), - KVM_PGTABLE_PROT_R, NULL, 0); - - return false; -} - bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { u64 size = (range->end - range->start) << PAGE_SHIFT; diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h index 2d62f7b0d377..69305441f40d 100644 --- a/arch/loongarch/include/asm/kvm_host.h +++ b/arch/loongarch/include/asm/kvm_host.h @@ -203,7 +203,6 @@ void kvm_flush_tlb_all(void); void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa); int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write); -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable); int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c index a556cff35740..98883aa23ab8 100644 --- a/arch/loongarch/kvm/mmu.c +++ b/arch/loongarch/kvm/mmu.c @@ -494,38 +494,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) range->end << PAGE_SHIFT, &ctx); } -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - unsigned long prot_bits; - kvm_pte_t *ptep; - kvm_pfn_t pfn = pte_pfn(range->arg.pte); - gpa_t gpa = range->start << PAGE_SHIFT; - - ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); - if (!ptep) - return false; - - /* Replacing an absent or old page doesn't need flushes */ - if (!kvm_pte_present(NULL, ptep) || !kvm_pte_young(*ptep)) { - kvm_set_pte(ptep, 0); - return false; - } - - /* Fill new pte if write protected or page migrated */ - prot_bits = _PAGE_PRESENT | __READABLE; - prot_bits |= _CACHE_MASK & pte_val(range->arg.pte); - - /* - * Set _PAGE_WRITE or _PAGE_DIRTY iff old and new pte both support - * _PAGE_WRITE for map_page_fast if next page write fault - * _PAGE_DIRTY since gpa has already recorded as dirty page - */ - prot_bits |= __WRITEABLE & *ptep & pte_val(range->arg.pte); - kvm_set_pte(ptep, kvm_pfn_pte(pfn, __pgprot(prot_bits))); - - return true; -} - bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { kvm_ptw_ctx ctx; diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c index 467ee6b95ae1..c17157e700c0 100644 --- a/arch/mips/kvm/mmu.c +++ b/arch/mips/kvm/mmu.c @@ -444,36 +444,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) return true; } -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - gpa_t gpa = range->start << PAGE_SHIFT; - pte_t hva_pte = range->arg.pte; - pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa); - pte_t old_pte; - - if (!gpa_pte) - return false; - - /* Mapping may need adjusting depending on memslot flags */ - old_pte = *gpa_pte; - if (range->slot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte)) - hva_pte = pte_mkclean(hva_pte); - else if (range->slot->flags & KVM_MEM_READONLY) - hva_pte = pte_wrprotect(hva_pte); - - set_pte(gpa_pte, hva_pte); - - /* Replacing an absent or old page doesn't need flushes */ - if (!pte_present(old_pte) || !pte_young(old_pte)) - return false; - - /* Pages swapped, aged, moved, or cleaned require flushes */ - return !pte_present(hva_pte) || - !pte_young(hva_pte) || - pte_pfn(old_pte) != pte_pfn(hva_pte) || - (pte_dirty(old_pte) && !pte_dirty(hva_pte)); -} - bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range 
*range) { return kvm_mips_mkold_gpa_pt(kvm, range->start, range->end); diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 3281215097cc..ca3829d47ab7 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -287,7 +287,6 @@ struct kvmppc_ops { bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range); bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); - bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range); void (*free_memslot)(struct kvm_memory_slot *slot); int (*init_vm)(struct kvm *kvm); void (*destroy_vm)(struct kvm *kvm); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 8acec144120e..0d0624088e6b 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -899,11 +899,6 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) return kvm->arch.kvm_ops->test_age_gfn(kvm, range); } -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - return kvm->arch.kvm_ops->set_spte_gfn(kvm, range); -} - int kvmppc_core_init_vm(struct kvm *kvm) { diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h index 58391b4b32ed..4aa2ab89afbc 100644 --- a/arch/powerpc/kvm/book3s.h +++ b/arch/powerpc/kvm/book3s.h @@ -12,7 +12,6 @@ extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm, extern bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range); extern bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range); extern bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range); -extern bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range); extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu); extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 2b1f0cdd8c18..1b51b1c4713b 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -1010,18 +1010,6 @@ bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) return kvm_test_age_rmapp(kvm, range->slot, range->start); } -bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) -{ - WARN_ON(range->start + 1 != range->end); - - if (kvm_is_radix(kvm)) - kvm_unmap_radix(kvm, range->slot, range->start); - else - kvm_unmap_rmapp(kvm, range->slot, range->start); - - return false; -} - static int vcpus_running(struct kvm *kvm) { return atomic_read(&kvm->arch.vcpus_running) != 0; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 8e86eb577eb8..35cb014a0c51 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -6364,7 +6364,6 @@ static struct kvmppc_ops kvm_ops_hv = { .unmap_gfn_range = kvm_unmap_gfn_range_hv, .age_gfn = kvm_age_gfn_hv, .test_age_gfn = kvm_test_age_gfn_hv, - .set_spte_gfn = kvm_set_spte_gfn_hv, .free_memslot = kvmppc_core_free_memslot_hv, .init_vm = kvmppc_core_init_vm_hv, .destroy_vm = kvmppc_core_destroy_vm_hv, diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 5b92619a05fd..a7d7137ea0c8 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -461,12 +461,6 @@ static bool kvm_test_age_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) return false; } -static bool kvm_set_spte_gfn_pr(struct kvm *kvm, struct kvm_gfn_range *range) -{ - /* The page will get remapped properly on its 
next fault */ - return do_kvm_unmap_gfn(kvm, range); -} - /*****************************************/ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr) @@ -2071,7 +2065,6 @@ static struct kvmppc_ops kvm_ops_pr = { .unmap_gfn_range = kvm_unmap_gfn_range_pr, .age_gfn = kvm_age_gfn_pr, .test_age_gfn = kvm_test_age_gfn_pr, - .set_spte_gfn = kvm_set_spte_gfn_pr, .free_memslot = kvmppc_core_free_memslot_pr, .init_vm = kvmppc_core_init_vm_pr, .destroy_vm = kvmppc_core_destroy_vm_pr, diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index ccb8f16ffe41..c664fdec75b1 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -747,12 +747,6 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) return false; } -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - /* The page will get remapped properly on its next fault */ - return kvm_e500_mmu_unmap_gfn(kvm, range); -} - /*****************************************/ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500) diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index a9e2fd7245e1..b63650f9b966 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -550,26 +550,6 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) return false; } -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - int ret; - kvm_pfn_t pfn = pte_pfn(range->arg.pte); - - if (!kvm->arch.pgd) - return false; - - WARN_ON(range->end - range->start != 1); - - ret = gstage_map_page(kvm, NULL, range->start << PAGE_SHIFT, - __pfn_to_phys(pfn), PAGE_SIZE, true, true); - if (ret) { - kvm_debug("Failed to map G-stage page (error %d)\n", ret); - return true; - } - - return false; -} - bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) { pte_t *ptep; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 992e651540e8..08900a0563f9 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -432,8 +432,8 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) * The idea using the light way get the spte on x86_32 guest is from * gup_get_pte (mm/gup.c). * - * An spte tlb flush may be pending, because kvm_set_pte_rmap - * coalesces them and we are running out of the MMU lock. Therefore + * An spte tlb flush may be pending, because they are coalesced and + * we are running out of the MMU lock. Therefore * we need to protect against in-progress updates of the spte. 
* * Reading the spte while an update is in progress may get the old value @@ -1448,49 +1448,11 @@ static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, } static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level, - pte_t unused) + struct kvm_memory_slot *slot, gfn_t gfn, int level) { return __kvm_zap_rmap(kvm, rmap_head, slot); } -static bool kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level, - pte_t pte) -{ - u64 *sptep; - struct rmap_iterator iter; - bool need_flush = false; - u64 new_spte; - kvm_pfn_t new_pfn; - - WARN_ON_ONCE(pte_huge(pte)); - new_pfn = pte_pfn(pte); - -restart: - for_each_rmap_spte(rmap_head, &iter, sptep) { - need_flush = true; - - if (pte_write(pte)) { - kvm_zap_one_rmap_spte(kvm, rmap_head, sptep); - goto restart; - } else { - new_spte = kvm_mmu_changed_pte_notifier_make_spte( - *sptep, new_pfn); - - mmu_spte_clear_track_bits(kvm, sptep); - mmu_spte_set(sptep, new_spte); - } - } - - if (need_flush && kvm_available_flush_remote_tlbs_range()) { - kvm_flush_remote_tlbs_gfn(kvm, gfn, level); - return false; - } - - return need_flush; -} - struct slot_rmap_walk_iterator { /* input fields. */ const struct kvm_memory_slot *slot; @@ -1562,7 +1524,7 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator) typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head, struct kvm_memory_slot *slot, gfn_t gfn, - int level, pte_t pte); + int level); static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, @@ -1574,7 +1536,7 @@ static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm, for_each_slot_rmap_range(range->slot, PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL, range->start, range->end - 1, &iterator) ret |= handler(kvm, iterator.rmap, range->slot, iterator.gfn, - iterator.level, range->arg.pte); + iterator.level); return ret; } @@ -1596,22 +1558,8 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) return flush; } -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - bool flush = false; - - if (kvm_memslots_have_rmaps(kvm)) - flush = kvm_handle_gfn_range(kvm, range, kvm_set_pte_rmap); - - if (tdp_mmu_enabled) - flush |= kvm_tdp_mmu_set_spte_gfn(kvm, range); - - return flush; -} - static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, int level, - pte_t unused) + struct kvm_memory_slot *slot, gfn_t gfn, int level) { u64 *sptep; struct rmap_iterator iter; @@ -1624,8 +1572,7 @@ static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, } static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, - struct kvm_memory_slot *slot, gfn_t gfn, - int level, pte_t unused) + struct kvm_memory_slot *slot, gfn_t gfn, int level) { u64 *sptep; struct rmap_iterator iter; diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c index 4a599130e9c9..6c7ab3aa6aa7 100644 --- a/arch/x86/kvm/mmu/spte.c +++ b/arch/x86/kvm/mmu/spte.c @@ -322,22 +322,6 @@ u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled) return spte; } -u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn) -{ - u64 new_spte; - - new_spte = old_spte & ~SPTE_BASE_ADDR_MASK; - new_spte |= (u64)new_pfn << PAGE_SHIFT; - - new_spte &= ~PT_WRITABLE_MASK; - new_spte &= ~shadow_host_writable_mask; - new_spte &= 
~shadow_mmu_writable_mask; - - new_spte = mark_spte_for_access_track(new_spte); - - return new_spte; -} - u64 mark_spte_for_access_track(u64 spte) { if (spte_ad_enabled(spte)) diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h index a129951c9a88..f5c600c52f83 100644 --- a/arch/x86/kvm/mmu/spte.h +++ b/arch/x86/kvm/mmu/spte.h @@ -496,8 +496,6 @@ static inline u64 restore_acc_track_spte(u64 spte) return spte; } -u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn); - void __init kvm_mmu_spte_module_init(void); void kvm_mmu_reset_all_pte_masks(void); diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index d078157e62aa..c6192a52bd31 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -1258,52 +1258,6 @@ bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn); } -static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter, - struct kvm_gfn_range *range) -{ - u64 new_spte; - - /* Huge pages aren't expected to be modified without first being zapped. */ - WARN_ON_ONCE(pte_huge(range->arg.pte) || range->start + 1 != range->end); - - if (iter->level != PG_LEVEL_4K || - !is_shadow_present_pte(iter->old_spte)) - return false; - - /* - * Note, when changing a read-only SPTE, it's not strictly necessary to - * zero the SPTE before setting the new PFN, but doing so preserves the - * invariant that the PFN of a present * leaf SPTE can never change. - * See handle_changed_spte(). - */ - tdp_mmu_iter_set_spte(kvm, iter, 0); - - if (!pte_write(range->arg.pte)) { - new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte, - pte_pfn(range->arg.pte)); - - tdp_mmu_iter_set_spte(kvm, iter, new_spte); - } - - return true; -} - -/* - * Handle the changed_pte MMU notifier for the TDP MMU. - * data is a pointer to the new pte_t mapping the HVA specified by the MMU - * notifier. - * Returns non-zero if a flush is needed before releasing the MMU lock. - */ -bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - /* - * No need to handle the remote TLB flush under RCU protection, the - * target SPTE _must_ be a leaf SPTE, i.e. cannot result in freeing a - * shadow page. See the WARN on pfn_changed in handle_changed_spte(). - */ - return kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn); -} - /* * Remove write access from all SPTEs at or above min_level that map GFNs * [start, end). 
Returns true if an SPTE has been changed and the TLBs need to diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h index 6e1ea04ca885..58b55e61bd33 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.h +++ b/arch/x86/kvm/mmu/tdp_mmu.h @@ -31,7 +31,6 @@ bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range, bool flush); bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); -bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, const struct kvm_memory_slot *slot, int min_level); diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 48f31dcd318a..afbc99264ffa 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -259,7 +259,6 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu); #ifdef CONFIG_KVM_GENERIC_MMU_NOTIFIER union kvm_mmu_notifier_arg { - pte_t pte; unsigned long attributes; }; @@ -273,7 +272,6 @@ struct kvm_gfn_range { bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range); -bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range); #endif enum { diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 011fba6b5552..74e40d5d4af4 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -456,21 +456,6 @@ TRACE_EVENT(kvm_unmap_hva_range, __entry->start, __entry->end) ); -TRACE_EVENT(kvm_set_spte_hva, - TP_PROTO(unsigned long hva), - TP_ARGS(hva), - - TP_STRUCT__entry( - __field( unsigned long, hva ) - ), - - TP_fast_assign( - __entry->hva = hva; - ), - - TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva) -); - TRACE_EVENT(kvm_age_hva, TP_PROTO(unsigned long start, unsigned long end), TP_ARGS(start, end), diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index fb49c2a60200..fb0f922d3109 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -705,48 +705,6 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn return __kvm_handle_hva_range(kvm, &range).ret; } -static bool kvm_change_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) -{ - /* - * Skipping invalid memslots is correct if and only change_pte() is - * surrounded by invalidate_range_{start,end}(), which is currently - * guaranteed by the primary MMU. If that ever changes, KVM needs to - * unmap the memslot instead of skipping the memslot to ensure that KVM - * doesn't hold references to the old PFN. - */ - WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count)); - - if (range->slot->flags & KVM_MEMSLOT_INVALID) - return false; - - return kvm_set_spte_gfn(kvm, range); -} - -static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long address, - pte_t pte) -{ - struct kvm *kvm = mmu_notifier_to_kvm(mn); - const union kvm_mmu_notifier_arg arg = { .pte = pte }; - - trace_kvm_set_spte_hva(address); - - /* - * .change_pte() must be surrounded by .invalidate_range_{start,end}(). - * If mmu_invalidate_in_progress is zero, then no in-progress - * invalidations, including this one, found a relevant memslot at - * start(); rechecking memslots here is unnecessary. Note, a false - * positive (count elevated by a different invalidation) is sub-optimal - * but functionally ok. 
- */ - WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count)); - if (!READ_ONCE(kvm->mmu_invalidate_in_progress)) - return; - - kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn); -} - void kvm_mmu_invalidate_begin(struct kvm *kvm) { lockdep_assert_held_write(&kvm->mmu_lock); @@ -964,7 +922,6 @@ static const struct mmu_notifier_ops kvm_mmu_notifier_ops = { .clear_flush_young = kvm_mmu_notifier_clear_flush_young, .clear_young = kvm_mmu_notifier_clear_young, .test_young = kvm_mmu_notifier_test_young, - .change_pte = kvm_mmu_notifier_change_pte, .release = kvm_mmu_notifier_release, }; -- cgit From 997308f9ae72783bf370849ad309c9e42b5d0aa8 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 5 Apr 2024 07:58:14 -0400 Subject: mmu_notifier: remove the .change_pte() callback The scope of set_pte_at_notify() has reduced more and more through the years. Initially, it was meant for when the change to the PTE was not bracketed by mmu_notifier_invalidate_range_{start,end}(). However, that has not been so for over ten years. During all this period the only implementation of .change_pte() was KVM and it had no actual functionality, because it was called after mmu_notifier_invalidate_range_start() zapped the secondary PTE. Now that this (nonfunctional) user of the .change_pte() callback is gone, the whole callback can be removed. For now, leave in place set_pte_at_notify() even though it is just a synonym for set_pte_at(). Signed-off-by: Paolo Bonzini Reviewed-by: David Hildenbrand Message-ID: <20240405115815.3226315-4-pbonzini@redhat.com> Acked-by: Andrew Morton Signed-off-by: Paolo Bonzini --- include/linux/mmu_notifier.h | 46 ++------------------------------------------ mm/mmu_notifier.c | 17 ---------------- 2 files changed, 2 insertions(+), 61 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index f349e08a9dfe..8c72bf651606 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -122,15 +122,6 @@ struct mmu_notifier_ops { struct mm_struct *mm, unsigned long address); - /* - * change_pte is called in cases that pte mapping to page is changed: - * for example, when ksm remaps pte to point to a new shared page. - */ - void (*change_pte)(struct mmu_notifier *subscription, - struct mm_struct *mm, - unsigned long address, - pte_t pte); - /* * invalidate_range_start() and invalidate_range_end() must be * paired and are called only when the mmap_lock and/or the @@ -392,8 +383,6 @@ extern int __mmu_notifier_clear_young(struct mm_struct *mm, unsigned long end); extern int __mmu_notifier_test_young(struct mm_struct *mm, unsigned long address); -extern void __mmu_notifier_change_pte(struct mm_struct *mm, - unsigned long address, pte_t pte); extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r); extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r); extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm, @@ -439,13 +428,6 @@ static inline int mmu_notifier_test_young(struct mm_struct *mm, return 0; } -static inline void mmu_notifier_change_pte(struct mm_struct *mm, - unsigned long address, pte_t pte) -{ - if (mm_has_notifiers(mm)) - __mmu_notifier_change_pte(mm, address, pte); -} - static inline void mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) { @@ -581,26 +563,6 @@ static inline void mmu_notifier_range_init_owner( __young; \ }) -/* - * set_pte_at_notify() sets the pte _after_ running the notifier. 
- * This is safe to start by updating the secondary MMUs, because the primary MMU - * pte invalidate must have already happened with a ptep_clear_flush() before - * set_pte_at_notify() has been invoked. Updating the secondary MMUs first is - * required when we change both the protection of the mapping from read-only to - * read-write and the pfn (like during copy on write page faults). Otherwise the - * old page would remain mapped readonly in the secondary MMUs after the new - * page is already writable by some CPU through the primary MMU. - */ -#define set_pte_at_notify(__mm, __address, __ptep, __pte) \ -({ \ - struct mm_struct *___mm = __mm; \ - unsigned long ___address = __address; \ - pte_t ___pte = __pte; \ - \ - mmu_notifier_change_pte(___mm, ___address, ___pte); \ - set_pte_at(___mm, ___address, __ptep, ___pte); \ -}) - #else /* CONFIG_MMU_NOTIFIER */ struct mmu_notifier_range { @@ -650,11 +612,6 @@ static inline int mmu_notifier_test_young(struct mm_struct *mm, return 0; } -static inline void mmu_notifier_change_pte(struct mm_struct *mm, - unsigned long address, pte_t pte) -{ -} - static inline void mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range) { @@ -693,7 +650,6 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm) #define ptep_clear_flush_notify ptep_clear_flush #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush #define pudp_huge_clear_flush_notify pudp_huge_clear_flush -#define set_pte_at_notify set_pte_at static inline void mmu_notifier_synchronize(void) { @@ -701,4 +657,6 @@ static inline void mmu_notifier_synchronize(void) #endif /* CONFIG_MMU_NOTIFIER */ +#define set_pte_at_notify set_pte_at + #endif /* _LINUX_MMU_NOTIFIER_H */ diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index ec3b068cbbe6..8982e6139d07 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -424,23 +424,6 @@ int __mmu_notifier_test_young(struct mm_struct *mm, return young; } -void __mmu_notifier_change_pte(struct mm_struct *mm, unsigned long address, - pte_t pte) -{ - struct mmu_notifier *subscription; - int id; - - id = srcu_read_lock(&srcu); - hlist_for_each_entry_rcu(subscription, - &mm->notifier_subscriptions->list, hlist, - srcu_read_lock_held(&srcu)) { - if (subscription->ops->change_pte) - subscription->ops->change_pte(subscription, mm, address, - pte); - } - srcu_read_unlock(&srcu, id); -} - static int mn_itree_invalidate(struct mmu_notifier_subscriptions *subscriptions, const struct mmu_notifier_range *range) { -- cgit From 293066264fb45df4290897c22e69833bea5fe171 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 9 Apr 2024 16:37:26 +0200 Subject: scsi: block: Add a helper to cancel atomic queue limit updates Drivers might have to perform complex actions to determine queue limits, and those might fail. Add a helper to cancel a queue limit update that can be called in those cases. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20240409143748.980206-2-hch@lst.de Reviewed-by: Kanchan Joshi Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Reviewed-by: John Garry Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. 
Petersen --- include/linux/blkdev.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include/linux') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c3e8f7cf96be..ded7f66dc4b9 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -892,6 +892,19 @@ int queue_limits_commit_update(struct request_queue *q, struct queue_limits *lim); int queue_limits_set(struct request_queue *q, struct queue_limits *lim); +/** + * queue_limits_cancel_update - cancel an atomic update of queue limits + * @q: queue to update + * + * This functions cancels an atomic update of the queue limits started by + * queue_limits_start_update() and should be used when an error occurs after + * starting update. + */ +static inline void queue_limits_cancel_update(struct request_queue *q) +{ + mutex_unlock(&q->limits_lock); +} + /* * Access functions for manipulating queue properties */ -- cgit From 4373d2ecca7fa7ad04aa9c371c80049bafec2610 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 9 Apr 2024 16:37:27 +0200 Subject: scsi: bsg: Pass queue_limits to bsg_setup_queue() This allows bsg_setup_queue() to pass them to blk_mq_alloc_queue() and thus set up the limits at queue allocation time. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20240409143748.980206-3-hch@lst.de Reviewed-by: Bart Van Assche Reviewed-by: John Garry Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- block/bsg-lib.c | 6 ++++-- drivers/scsi/mpi3mr/mpi3mr_app.c | 2 +- drivers/scsi/scsi_transport_fc.c | 6 +++--- drivers/scsi/scsi_transport_iscsi.c | 3 ++- drivers/scsi/scsi_transport_sas.c | 4 ++-- drivers/ufs/core/ufs_bsg.c | 3 ++- include/linux/bsg-lib.h | 3 ++- 7 files changed, 16 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/block/bsg-lib.c b/block/bsg-lib.c index bcc7dee6abce..ee738d129a9f 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -354,12 +354,14 @@ static const struct blk_mq_ops bsg_mq_ops = { * bsg_setup_queue - Create and add the bsg hooks so we can receive requests * @dev: device to attach bsg device to * @name: device to give bsg device + * @lim: queue limits for the bsg queue * @job_fn: bsg job handler * @timeout: timeout handler function pointer * @dd_job_size: size of LLD data needed for each job */ struct request_queue *bsg_setup_queue(struct device *dev, const char *name, - bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size) + struct queue_limits *lim, bsg_job_fn *job_fn, + bsg_timeout_fn *timeout, int dd_job_size) { struct bsg_set *bset; struct blk_mq_tag_set *set; @@ -383,7 +385,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name, if (blk_mq_alloc_tag_set(set)) goto out_tag_set; - q = blk_mq_alloc_queue(set, NULL, NULL); + q = blk_mq_alloc_queue(set, lim, NULL); if (IS_ERR(q)) { ret = PTR_ERR(q); goto out_queue; diff --git a/drivers/scsi/mpi3mr/mpi3mr_app.c b/drivers/scsi/mpi3mr/mpi3mr_app.c index 38f63bc7ef3b..a45406fae262 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_app.c +++ b/drivers/scsi/mpi3mr/mpi3mr_app.c @@ -1860,7 +1860,7 @@ void mpi3mr_bsg_init(struct mpi3mr_ioc *mrioc) return; } - mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), + mrioc->bsg_queue = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), NULL, mpi3mr_bsg_request, NULL, 0); if (IS_ERR(mrioc->bsg_queue)) { ioc_err(mrioc, "%s: bsg registration failed\n", diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 
b04075f19445..87b2235b8ece 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -4287,8 +4287,8 @@ fc_bsg_hostadd(struct Scsi_Host *shost, struct fc_host_attrs *fc_host) snprintf(bsg_name, sizeof(bsg_name), "fc_host%d", shost->host_no); - q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, fc_bsg_job_timeout, - i->f->dd_bsg_size); + q = bsg_setup_queue(dev, bsg_name, NULL, fc_bsg_dispatch, + fc_bsg_job_timeout, i->f->dd_bsg_size); if (IS_ERR(q)) { dev_err(dev, "fc_host%d: bsg interface failed to initialize - setup queue\n", @@ -4318,7 +4318,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) if (!i->f->bsg_request) return -ENOTSUPP; - q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch_prep, + q = bsg_setup_queue(dev, dev_name(dev), NULL, fc_bsg_dispatch_prep, fc_bsg_job_timeout, i->f->dd_bsg_size); if (IS_ERR(q)) { dev_err(dev, "failed to setup bsg queue\n"); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index af3ac6346796..c131746bf207 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1542,7 +1542,8 @@ iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost) return -ENOTSUPP; snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no); - q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, NULL, 0); + q = bsg_setup_queue(dev, bsg_name, NULL, iscsi_bsg_host_dispatch, NULL, + 0); if (IS_ERR(q)) { shost_printk(KERN_ERR, shost, "bsg interface failed to " "initialize - no request queue\n"); diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index d704c484a251..424a89513814 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -197,7 +197,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy) } if (rphy) { - q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev), + q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev), NULL, sas_smp_dispatch, NULL, 0); if (IS_ERR(q)) return PTR_ERR(q); @@ -206,7 +206,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy) char name[20]; snprintf(name, sizeof(name), "sas_host%d", shost->host_no); - q = bsg_setup_queue(&shost->shost_gendev, name, + q = bsg_setup_queue(&shost->shost_gendev, name, NULL, sas_smp_dispatch, NULL, 0); if (IS_ERR(q)) return PTR_ERR(q); diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c index 374e5aae4e7e..433d0480391e 100644 --- a/drivers/ufs/core/ufs_bsg.c +++ b/drivers/ufs/core/ufs_bsg.c @@ -253,7 +253,8 @@ int ufs_bsg_probe(struct ufs_hba *hba) if (ret) goto out; - q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0); + q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), NULL, ufs_bsg_request, + NULL, 0); if (IS_ERR(q)) { ret = PTR_ERR(q); goto out; diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index 9e97ced2896d..14fa93268630 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -65,7 +65,8 @@ struct bsg_job { void bsg_job_done(struct bsg_job *job, int result, unsigned int reply_payload_rcv_len); struct request_queue *bsg_setup_queue(struct device *dev, const char *name, - bsg_job_fn *job_fn, bsg_timeout_fn *timeout, int dd_job_size); + struct queue_limits *lim, bsg_job_fn *job_fn, + bsg_timeout_fn *timeout, int dd_job_size); void bsg_remove_queue(struct request_queue *q); void bsg_job_put(struct bsg_job *job); int __must_check bsg_job_get(struct bsg_job *job); -- cgit From 
a25a9c85d17fd2f19bd5a2bb25b8361d72336bc7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 9 Apr 2024 16:37:45 +0200 Subject: scsi: libata: Switch to using ->device_configure Switch to the ->device_configure method instead of ->slave_configure and update the block limits on the passed in queue_limits instead of using the per-limit accessors. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20240409143748.980206-21-hch@lst.de Reviewed-by: John Garry Reviewed-by: Hannes Reinecke Acked-by: Damien Le Moal Signed-off-by: Martin K. Petersen --- drivers/ata/ahci.h | 2 +- drivers/ata/libata-sata.c | 11 +++++++---- drivers/ata/libata-scsi.c | 19 +++++++++++-------- drivers/ata/libata.h | 3 ++- drivers/ata/pata_macio.c | 2 +- drivers/ata/sata_mv.c | 2 +- drivers/ata/sata_nv.c | 4 ++-- drivers/ata/sata_sil24.c | 2 +- drivers/scsi/hisi_sas/hisi_sas.h | 3 ++- drivers/scsi/hisi_sas/hisi_sas_main.c | 7 ++++--- drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 2 +- drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 2 +- drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 7 ++++--- drivers/scsi/libsas/sas_scsi_host.c | 7 ++++--- include/linux/libata.h | 10 ++++++---- include/scsi/libsas.h | 5 +++-- 16 files changed, 51 insertions(+), 37 deletions(-) (limited to 'include/linux') diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 344c87210d8f..8f40f75ba08c 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -397,7 +397,7 @@ extern const struct attribute_group *ahci_sdev_groups[]; .sdev_groups = ahci_sdev_groups, \ .change_queue_depth = ata_scsi_change_queue_depth, \ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ - .slave_configure = ata_scsi_slave_config + .device_configure = ata_scsi_device_configure extern struct ata_port_operations ahci_ops; extern struct ata_port_operations ahci_platform_ops; diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c index a8d773003d74..9e047bf912b1 100644 --- a/drivers/ata/libata-sata.c +++ b/drivers/ata/libata-sata.c @@ -1254,21 +1254,24 @@ void ata_sas_tport_delete(struct ata_port *ap) EXPORT_SYMBOL_GPL(ata_sas_tport_delete); /** - * ata_sas_slave_configure - Default slave_config routine for libata devices + * ata_sas_device_configure - Default device_configure routine for libata + * devices * @sdev: SCSI device to configure + * @lim: queue limits * @ap: ATA port to which SCSI device is attached * * RETURNS: * Zero. 
*/ -int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap) +int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim, + struct ata_port *ap) { ata_scsi_sdev_config(sdev); - return ata_scsi_dev_config(sdev, ap->link.device); + return ata_scsi_dev_config(sdev, lim, ap->link.device); } -EXPORT_SYMBOL_GPL(ata_sas_slave_configure); +EXPORT_SYMBOL_GPL(ata_sas_device_configure); /** * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 0a0f483124c3..db441815d633 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -1021,7 +1021,8 @@ bool ata_scsi_dma_need_drain(struct request *rq) } EXPORT_SYMBOL_GPL(ata_scsi_dma_need_drain); -int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) +int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim, + struct ata_device *dev) { struct request_queue *q = sdev->request_queue; int depth = 1; @@ -1031,7 +1032,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) /* configure max sectors */ dev->max_sectors = min(dev->max_sectors, sdev->host->max_sectors); - blk_queue_max_hw_sectors(q, dev->max_sectors); + lim->max_hw_sectors = dev->max_sectors; if (dev->class == ATA_DEV_ATAPI) { sdev->sector_size = ATA_SECT_SIZE; @@ -1040,7 +1041,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1); /* make room for appending the drain */ - blk_queue_max_segments(q, queue_max_segments(q) - 1); + lim->max_segments--; sdev->dma_drain_len = ATAPI_MAX_DRAIN; sdev->dma_drain_buf = kmalloc(sdev->dma_drain_len, GFP_NOIO); @@ -1077,7 +1078,7 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev) "sector_size=%u > PAGE_SIZE, PIO may malfunction\n", sdev->sector_size); - blk_queue_update_dma_alignment(q, sdev->sector_size - 1); + lim->dma_alignment = sdev->sector_size - 1; if (dev->flags & ATA_DFLAG_AN) set_bit(SDEV_EVT_MEDIA_CHANGE, sdev->supported_events); @@ -1131,8 +1132,9 @@ int ata_scsi_slave_alloc(struct scsi_device *sdev) EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc); /** - * ata_scsi_slave_config - Set SCSI device attributes + * ata_scsi_device_configure - Set SCSI device attributes * @sdev: SCSI device to examine + * @lim: queue limits * * This is called before we actually start reading * and writing to the device, to configure certain @@ -1142,17 +1144,18 @@ EXPORT_SYMBOL_GPL(ata_scsi_slave_alloc); * Defined by SCSI layer. We don't really care. 
*/ -int ata_scsi_slave_config(struct scsi_device *sdev) +int ata_scsi_device_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct ata_port *ap = ata_shost_to_port(sdev->host); struct ata_device *dev = __ata_scsi_find_dev(ap, sdev); if (dev) - return ata_scsi_dev_config(sdev, dev); + return ata_scsi_dev_config(sdev, lim, dev); return 0; } -EXPORT_SYMBOL_GPL(ata_scsi_slave_config); +EXPORT_SYMBOL_GPL(ata_scsi_device_configure); /** * ata_scsi_slave_destroy - SCSI device is about to be destroyed diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 5c685bb1939e..8aab0df549ea 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -131,7 +131,8 @@ extern void ata_scsi_dev_rescan(struct work_struct *work); extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, unsigned int id, u64 lun); void ata_scsi_sdev_config(struct scsi_device *sdev); -int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev); +int ata_scsi_dev_config(struct scsi_device *sdev, struct queue_limits *lim, + struct ata_device *dev); int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev); /* libata-eh.c */ diff --git a/drivers/ata/pata_macio.c b/drivers/ata/pata_macio.c index f046bfa5c6e7..3dbcd737ebb7 100644 --- a/drivers/ata/pata_macio.c +++ b/drivers/ata/pata_macio.c @@ -806,7 +806,7 @@ static int pata_macio_device_configure(struct scsi_device *sdev, int rc; /* First call original */ - rc = ata_scsi_slave_config(sdev); + rc = ata_scsi_device_configure(sdev, lim); if (rc) return rc; diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index e82786c63fbd..0bb900869ee0 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c @@ -673,7 +673,7 @@ static const struct scsi_host_template mv6_sht = { .sdev_groups = ata_ncq_sdev_groups, .change_queue_depth = ata_scsi_change_queue_depth, .tag_alloc_policy = BLK_TAG_ALLOC_RR, - .slave_configure = ata_scsi_slave_config + .device_configure = ata_scsi_device_configure }; static struct ata_port_operations mv5_ops = { diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c index d0a8eb0e8011..36d99043ef50 100644 --- a/drivers/ata/sata_nv.c +++ b/drivers/ata/sata_nv.c @@ -676,7 +676,7 @@ static int nv_adma_device_configure(struct scsi_device *sdev, int adma_enable; u32 current_reg, new_reg, config_mask; - rc = ata_scsi_slave_config(sdev); + rc = ata_scsi_device_configure(sdev, lim); if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun) /* Not a proper libata device, ignore */ @@ -1882,7 +1882,7 @@ static int nv_swncq_device_configure(struct scsi_device *sdev, u8 check_maxtor = 0; unsigned char model_num[ATA_ID_PROD_LEN + 1]; - rc = ata_scsi_slave_config(sdev); + rc = ata_scsi_device_configure(sdev, lim); if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun) /* Not a proper libata device, ignore */ return rc; diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index 142e70bfc498..72c03cbdaff4 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -381,7 +381,7 @@ static const struct scsi_host_template sil24_sht = { .tag_alloc_policy = BLK_TAG_ALLOC_FIFO, .sdev_groups = ata_ncq_sdev_groups, .change_queue_depth = ata_scsi_change_queue_depth, - .slave_configure = ata_scsi_slave_config + .device_configure = ata_scsi_device_configure }; static struct ata_port_operations sil24_ops = { diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 1e4550156b73..d223f482488f 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ 
b/drivers/scsi/hisi_sas/hisi_sas.h @@ -643,7 +643,8 @@ extern int hisi_sas_probe(struct platform_device *pdev, const struct hisi_sas_hw *ops); extern void hisi_sas_remove(struct platform_device *pdev); -extern int hisi_sas_slave_configure(struct scsi_device *sdev); +int hisi_sas_device_configure(struct scsi_device *sdev, + struct queue_limits *lim); extern int hisi_sas_slave_alloc(struct scsi_device *sdev); extern int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time); extern void hisi_sas_scan_start(struct Scsi_Host *shost); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 097dfe4b620d..11228ba51ded 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -868,10 +868,11 @@ err_out: return rc; } -int hisi_sas_slave_configure(struct scsi_device *sdev) +int hisi_sas_device_configure(struct scsi_device *sdev, + struct queue_limits *lim) { struct domain_device *dev = sdev_to_domain_dev(sdev); - int ret = sas_slave_configure(sdev); + int ret = sas_device_configure(sdev, lim); if (ret) return ret; @@ -880,7 +881,7 @@ int hisi_sas_slave_configure(struct scsi_device *sdev) return 0; } -EXPORT_SYMBOL_GPL(hisi_sas_slave_configure); +EXPORT_SYMBOL_GPL(hisi_sas_device_configure); void hisi_sas_scan_start(struct Scsi_Host *shost) { diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 161feae3acab..71b5008c3552 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -1736,7 +1736,7 @@ ATTRIBUTE_GROUPS(host_v1_hw); static const struct scsi_host_template sht_v1_hw = { LIBSAS_SHT_BASE_NO_SLAVE_INIT - .slave_configure = hisi_sas_slave_configure, + .device_configure = hisi_sas_device_configure, .scan_finished = hisi_sas_scan_finished, .scan_start = hisi_sas_scan_start, .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index d89e97e8f5c2..342d75f12051 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -3568,7 +3568,7 @@ static void map_queues_v2_hw(struct Scsi_Host *shost) static const struct scsi_host_template sht_v2_hw = { LIBSAS_SHT_BASE_NO_SLAVE_INIT - .slave_configure = hisi_sas_slave_configure, + .device_configure = hisi_sas_device_configure, .scan_finished = hisi_sas_scan_finished, .scan_start = hisi_sas_scan_start, .sg_tablesize = HISI_SAS_SGE_PAGE_CNT, diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 756660588a1e..a38165995e86 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -2894,11 +2894,12 @@ static ssize_t iopoll_q_cnt_v3_hw_show(struct device *dev, } static DEVICE_ATTR_RO(iopoll_q_cnt_v3_hw); -static int slave_configure_v3_hw(struct scsi_device *sdev) +static int device_configure_v3_hw(struct scsi_device *sdev, + struct queue_limits *lim) { struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev); struct hisi_hba *hisi_hba = shost_priv(shost); - int ret = hisi_sas_slave_configure(sdev); + int ret = hisi_sas_device_configure(sdev, lim); struct device *dev = hisi_hba->dev; if (ret) @@ -3321,7 +3322,7 @@ static void hisi_sas_map_queues(struct Scsi_Host *shost) static const struct scsi_host_template sht_v3_hw = { LIBSAS_SHT_BASE_NO_SLAVE_INIT - .slave_configure = slave_configure_v3_hw, + .device_configure = device_configure_v3_hw, .scan_finished = hisi_sas_scan_finished, 
.scan_start = hisi_sas_scan_start, .map_queues = hisi_sas_map_queues, diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 9047cfcd1072..da11d32840e2 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -804,14 +804,15 @@ EXPORT_SYMBOL_GPL(sas_target_alloc); #define SAS_DEF_QD 256 -int sas_slave_configure(struct scsi_device *scsi_dev) +int sas_device_configure(struct scsi_device *scsi_dev, + struct queue_limits *lim) { struct domain_device *dev = sdev_to_domain_dev(scsi_dev); BUG_ON(dev->rphy->identify.device_type != SAS_END_DEVICE); if (dev_is_sata(dev)) { - ata_sas_slave_configure(scsi_dev, dev->sata_dev.ap); + ata_sas_device_configure(scsi_dev, lim, dev->sata_dev.ap); return 0; } @@ -829,7 +830,7 @@ int sas_slave_configure(struct scsi_device *scsi_dev) return 0; } -EXPORT_SYMBOL_GPL(sas_slave_configure); +EXPORT_SYMBOL_GPL(sas_device_configure); int sas_change_queue_depth(struct scsi_device *sdev, int depth) { diff --git a/include/linux/libata.h b/include/linux/libata.h index 6dd9a4f9ca7c..f9c892f8940d 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -1151,7 +1151,8 @@ extern int ata_std_bios_param(struct scsi_device *sdev, sector_t capacity, int geom[]); extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev); extern int ata_scsi_slave_alloc(struct scsi_device *sdev); -extern int ata_scsi_slave_config(struct scsi_device *sdev); +int ata_scsi_device_configure(struct scsi_device *sdev, + struct queue_limits *lim); extern void ata_scsi_slave_destroy(struct scsi_device *sdev); extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth); @@ -1249,7 +1250,8 @@ extern struct ata_port *ata_sas_port_alloc(struct ata_host *, extern void ata_port_probe(struct ata_port *ap); extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap); extern void ata_sas_tport_delete(struct ata_port *ap); -extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *); +int ata_sas_device_configure(struct scsi_device *sdev, struct queue_limits *lim, + struct ata_port *ap); extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap); extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis); @@ -1415,13 +1417,13 @@ extern const struct attribute_group *ata_common_sdev_groups[]; __ATA_BASE_SHT(drv_name), \ .can_queue = ATA_DEF_QUEUE, \ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ - .slave_configure = ata_scsi_slave_config + .device_configure = ata_scsi_device_configure #define ATA_SUBBASE_SHT_QD(drv_name, drv_qd) \ __ATA_BASE_SHT(drv_name), \ .can_queue = drv_qd, \ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \ - .slave_configure = ata_scsi_slave_config + .device_configure = ata_scsi_device_configure #define ATA_BASE_SHT(drv_name) \ ATA_SUBBASE_SHT(drv_name), \ diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index d06a0570f4c5..1324068dd950 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -683,7 +683,8 @@ int sas_phy_reset(struct sas_phy *phy, int hard_reset); int sas_phy_enable(struct sas_phy *phy, int enable); extern int sas_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); extern int sas_target_alloc(struct scsi_target *); -extern int sas_slave_configure(struct scsi_device *); +int sas_device_configure(struct scsi_device *dev, + struct queue_limits *lim); extern int sas_change_queue_depth(struct scsi_device *, int new_depth); extern int sas_bios_param(struct scsi_device *, struct block_device *, 
sector_t capacity, int *hsc); @@ -749,7 +750,7 @@ void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event, #endif #define LIBSAS_SHT_BASE _LIBSAS_SHT_BASE \ - .slave_configure = sas_slave_configure, \ + .device_configure = sas_device_configure, \ .slave_alloc = sas_slave_alloc, \ #define LIBSAS_SHT_BASE_NO_SLAVE_INIT _LIBSAS_SHT_BASE -- cgit From f6d827b180bda01f8805bf5e85307419b0d6f890 Mon Sep 17 00:00:00 2001 From: Mina Almasry Date: Wed, 10 Apr 2024 12:05:01 -0700 Subject: net: move skb ref helpers to new header Add a new header, linux/skbuff_ref.h, which contains all the skb_*_ref() helpers. Many of the consumers of skbuff.h do not actually use any of the skb ref helpers, and we can speed up compilation a bit by minimizing this header file. Additionally in the later patch in the series we add page_pool support to skb_frag_ref(), which requires some page_pool dependencies. We can now add these dependencies to skbuff_ref.h instead of a very ubiquitous skbuff.h Signed-off-by: Mina Almasry Link: https://lore.kernel.org/r/20240410190505.1225848-2-almasrymina@google.com Signed-off-by: Jakub Kicinski --- .../chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 1 + drivers/net/ethernet/marvell/sky2.c | 1 + drivers/net/ethernet/mellanox/mlx4/en_rx.c | 1 + drivers/net/ethernet/sun/cassini.c | 1 + drivers/net/veth.c | 1 + drivers/net/xen-netback/netback.c | 1 + include/linux/skbuff.h | 63 ------------------ include/linux/skbuff_ref.h | 75 ++++++++++++++++++++++ net/core/gro.c | 1 + net/core/skbuff.c | 1 + net/ipv4/esp4.c | 1 + net/ipv4/tcp_output.c | 1 + net/ipv6/esp6.c | 1 + net/tls/tls_device.c | 1 + net/tls/tls_device_fallback.c | 1 + net/tls/tls_strp.c | 1 + 16 files changed, 89 insertions(+), 63 deletions(-) create mode 100644 include/linux/skbuff_ref.h (limited to 'include/linux') diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index 6482728794dd..e8e460a92e0e 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -10,6 +10,7 @@ #include #include #include +#include #include "chcr_ktls.h" static LIST_HEAD(uld_ctx_list); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 07720841a8d7..f3f7f4cc27b3 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -34,6 +34,7 @@ #include #include #include +#include #include diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index eac49657bd07..8328df8645d5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #if IS_ENABLED(CONFIG_IPV6) diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index bfb903506367..8f1f43dbb76d 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -73,6 +73,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/net/veth.c b/drivers/net/veth.c index bcdfbf61eb66..426e68a95067 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #define DRV_NAME "veth" diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index ef76850d9bcd..48254fc07d64 100644 --- a/drivers/net/xen-netback/netback.c +++ 
b/drivers/net/xen-netback/netback.c @@ -38,6 +38,7 @@ #include #include #include +#include #include diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 7135a3e94afd..4072a7ee3859 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3492,73 +3492,10 @@ static inline struct page *skb_frag_page(const skb_frag_t *frag) return netmem_to_page(frag->netmem); } -/** - * __skb_frag_ref - take an addition reference on a paged fragment. - * @frag: the paged fragment - * - * Takes an additional reference on the paged fragment @frag. - */ -static inline void __skb_frag_ref(skb_frag_t *frag) -{ - get_page(skb_frag_page(frag)); -} - -/** - * skb_frag_ref - take an addition reference on a paged fragment of an skb. - * @skb: the buffer - * @f: the fragment offset. - * - * Takes an additional reference on the @f'th paged fragment of @skb. - */ -static inline void skb_frag_ref(struct sk_buff *skb, int f) -{ - __skb_frag_ref(&skb_shinfo(skb)->frags[f]); -} - int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb, unsigned int headroom); int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb, struct bpf_prog *prog); -bool napi_pp_put_page(struct page *page); - -static inline void -skb_page_unref(struct page *page, bool recycle) -{ -#ifdef CONFIG_PAGE_POOL - if (recycle && napi_pp_put_page(page)) - return; -#endif - put_page(page); -} - -/** - * __skb_frag_unref - release a reference on a paged fragment. - * @frag: the paged fragment - * @recycle: recycle the page if allocated via page_pool - * - * Releases a reference on the paged fragment @frag - * or recycles the page via the page_pool API. - */ -static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) -{ - skb_page_unref(skb_frag_page(frag), recycle); -} - -/** - * skb_frag_unref - release a reference on a paged fragment of an skb. - * @skb: the buffer - * @f: the fragment offset - * - * Releases a reference on the @f'th paged fragment of @skb. - */ -static inline void skb_frag_unref(struct sk_buff *skb, int f) -{ - struct skb_shared_info *shinfo = skb_shinfo(skb); - - if (!skb_zcopy_managed(skb)) - __skb_frag_unref(&shinfo->frags[f], skb->pp_recycle); -} - /** * skb_frag_address - gets the address of the data contained in a paged fragment * @frag: the paged fragment buffer diff --git a/include/linux/skbuff_ref.h b/include/linux/skbuff_ref.h new file mode 100644 index 000000000000..11f0a4063403 --- /dev/null +++ b/include/linux/skbuff_ref.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Skb ref helpers. + * + */ + +#ifndef _LINUX_SKBUFF_REF_H +#define _LINUX_SKBUFF_REF_H + +#include + +/** + * __skb_frag_ref - take an addition reference on a paged fragment. + * @frag: the paged fragment + * + * Takes an additional reference on the paged fragment @frag. + */ +static inline void __skb_frag_ref(skb_frag_t *frag) +{ + get_page(skb_frag_page(frag)); +} + +/** + * skb_frag_ref - take an addition reference on a paged fragment of an skb. + * @skb: the buffer + * @f: the fragment offset. + * + * Takes an additional reference on the @f'th paged fragment of @skb. + */ +static inline void skb_frag_ref(struct sk_buff *skb, int f) +{ + __skb_frag_ref(&skb_shinfo(skb)->frags[f]); +} + +bool napi_pp_put_page(struct page *page); + +static inline void +skb_page_unref(struct page *page, bool recycle) +{ +#ifdef CONFIG_PAGE_POOL + if (recycle && napi_pp_put_page(page)) + return; +#endif + put_page(page); +} + +/** + * __skb_frag_unref - release a reference on a paged fragment. 
+ * @frag: the paged fragment + * @recycle: recycle the page if allocated via page_pool + * + * Releases a reference on the paged fragment @frag + * or recycles the page via the page_pool API. + */ +static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) +{ + skb_page_unref(skb_frag_page(frag), recycle); +} + +/** + * skb_frag_unref - release a reference on a paged fragment of an skb. + * @skb: the buffer + * @f: the fragment offset + * + * Releases a reference on the @f'th paged fragment of @skb. + */ +static inline void skb_frag_unref(struct sk_buff *skb, int f) +{ + struct skb_shared_info *shinfo = skb_shinfo(skb); + + if (!skb_zcopy_managed(skb)) + __skb_frag_unref(&shinfo->frags[f], skb->pp_recycle); +} + +#endif /* _LINUX_SKBUFF_REF_H */ diff --git a/net/core/gro.c b/net/core/gro.c index 83f35d99a682..2459ab697f7f 100644 --- a/net/core/gro.c +++ b/net/core/gro.c @@ -3,6 +3,7 @@ #include #include #include +#include #define MAX_GRO_SKBS 8 diff --git a/net/core/skbuff.c b/net/core/skbuff.c index ab970ded8a7b..2554a6f5f386 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -51,6 +51,7 @@ #endif #include #include +#include #include #include #include diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 40330253f076..dff04580318f 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -20,6 +20,7 @@ #include #include #include +#include #include diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 9282fafc0e61..61119d42b0fd 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -44,6 +44,7 @@ #include #include #include +#include #include diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index fb431d0a3475..6bc0a84c8d05 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -36,6 +36,7 @@ #include #include #include +#include #include diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index bf8ed36b1ad6..ab6e694f7bc2 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -37,6 +37,7 @@ #include #include #include +#include #include "tls.h" #include "trace.h" diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index 4e7228f275fa..f9e3d3d90dcf 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c @@ -33,6 +33,7 @@ #include #include #include +#include #include "tls.h" diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c index ca1e0e198ceb..58c4b06f4f0c 100644 --- a/net/tls/tls_strp.c +++ b/net/tls/tls_strp.c @@ -2,6 +2,7 @@ /* Copyright (c) 2016 Tom Herbert */ #include +#include #include #include #include -- cgit From a580ea994fd37f4105028f5a85c38ff6508a2b25 Mon Sep 17 00:00:00 2001 From: Mina Almasry Date: Wed, 10 Apr 2024 12:05:02 -0700 Subject: net: mirror skb frag ref/unref helpers Refactor some of the skb frag ref/unref helpers for improved clarity. Implement napi_pp_get_page() to be the mirror counterpart of napi_pp_put_page(). Implement skb_page_ref() to be the mirror of skb_page_unref(). Improve __skb_frag_ref() to become a mirror counterpart of __skb_frag_unref(). Previously unref could handle pp & non-pp pages, while the ref could only handle non-pp pages. Now both the ref & unref helpers can correctly handle both pp & non-pp pages. Now that __skb_frag_ref() can handle both pp & non-pp pages, remove skb_pp_frag_ref(), and use __skb_frag_ref() instead. This lets us remove pp specific handling from skb_try_coalesce. Additionally, since __skb_frag_ref() can now handle both pp & non-pp pages, a latent issue in skb_shift() should now be fixed. 
Previously this function would do a non-pp ref & pp unref on potential pp frags (fragfrom). After this patch, skb_shift() should correctly do a pp ref/unref on pp frags. Signed-off-by: Mina Almasry Reviewed-by: Dragos Tatulea Reviewed-by: Jacob Keller Link: https://lore.kernel.org/r/20240410190505.1225848-3-almasrymina@google.com Signed-off-by: Jakub Kicinski --- .../chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 2 +- drivers/net/ethernet/sun/cassini.c | 4 +- drivers/net/veth.c | 2 +- include/linux/skbuff_ref.h | 39 ++++++++++++++++-- net/core/skbuff.c | 46 ++-------------------- net/tls/tls_device_fallback.c | 2 +- 6 files changed, 44 insertions(+), 51 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index e8e460a92e0e..3832c2e8ea5a 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -1659,7 +1659,7 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb, for (i = 0; i < record->num_frags; i++) { skb_shinfo(nskb)->frags[i] = record->frags[i]; /* increase the frag ref count */ - __skb_frag_ref(&skb_shinfo(nskb)->frags[i]); + __skb_frag_ref(&skb_shinfo(nskb)->frags[i], nskb->pp_recycle); } skb_shinfo(nskb)->nr_frags = record->num_frags; diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 8f1f43dbb76d..f058e154a3bc 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -2000,7 +2000,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, skb->len += hlen - swivel; skb_frag_fill_page_desc(frag, page->buffer, off, hlen - swivel); - __skb_frag_ref(frag); + __skb_frag_ref(frag, skb->pp_recycle); /* any more data? */ if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { @@ -2024,7 +2024,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc, frag++; skb_frag_fill_page_desc(frag, page->buffer, 0, hlen); - __skb_frag_ref(frag); + __skb_frag_ref(frag, skb->pp_recycle); RX_USED_ADD(page, hlen + cp->crc_size); } diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 426e68a95067..0b0293629329 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -717,7 +717,7 @@ static void veth_xdp_get(struct xdp_buff *xdp) return; for (i = 0; i < sinfo->nr_frags; i++) - __skb_frag_ref(&sinfo->frags[i]); + __skb_frag_ref(&sinfo->frags[i], false); } static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, diff --git a/include/linux/skbuff_ref.h b/include/linux/skbuff_ref.h index 11f0a4063403..4dcdbe9fbc5f 100644 --- a/include/linux/skbuff_ref.h +++ b/include/linux/skbuff_ref.h @@ -8,16 +8,47 @@ #define _LINUX_SKBUFF_REF_H #include +#include + +#ifdef CONFIG_PAGE_POOL +static inline bool is_pp_page(struct page *page) +{ + return (page->pp_magic & ~0x3UL) == PP_SIGNATURE; +} + +static inline bool napi_pp_get_page(struct page *page) +{ + page = compound_head(page); + + if (!is_pp_page(page)) + return false; + + page_pool_ref_page(page); + return true; +} +#endif + +static inline void skb_page_ref(struct page *page, bool recycle) +{ +#ifdef CONFIG_PAGE_POOL + if (recycle && napi_pp_get_page(page)) + return; +#endif + get_page(page); +} /** * __skb_frag_ref - take an addition reference on a paged fragment. * @frag: the paged fragment + * @recycle: skb->pp_recycle param of the parent skb. False if no parent skb. 
* - * Takes an additional reference on the paged fragment @frag. + * Takes an additional reference on the paged fragment @frag. Obtains the + * correct reference count depending on whether skb->pp_recycle is set and + * whether the frag is a page pool frag. */ -static inline void __skb_frag_ref(skb_frag_t *frag) +static inline void __skb_frag_ref(skb_frag_t *frag, bool recycle) { - get_page(skb_frag_page(frag)); + skb_page_ref(skb_frag_page(frag), recycle); } /** @@ -29,7 +60,7 @@ static inline void __skb_frag_ref(skb_frag_t *frag) */ static inline void skb_frag_ref(struct sk_buff *skb, int f) { - __skb_frag_ref(&skb_shinfo(skb)->frags[f]); + __skb_frag_ref(&skb_shinfo(skb)->frags[f], skb->pp_recycle); } bool napi_pp_put_page(struct page *page); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 2554a6f5f386..ea052fa710d8 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -907,11 +907,6 @@ static void skb_clone_fraglist(struct sk_buff *skb) skb_get(list); } -static bool is_pp_page(struct page *page) -{ - return (page->pp_magic & ~0x3UL) == PP_SIGNATURE; -} - int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb, unsigned int headroom) { @@ -1033,37 +1028,6 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data) return napi_pp_put_page(virt_to_page(data)); } -/** - * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb - * @skb: page pool aware skb - * - * Increase the fragment reference count (pp_ref_count) of a skb. This is - * intended to gain fragment references only for page pool aware skbs, - * i.e. when skb->pp_recycle is true, and not for fragments in a - * non-pp-recycling skb. It has a fallback to increase references on normal - * pages, as page pool aware skbs may also have normal page fragments. - */ -static int skb_pp_frag_ref(struct sk_buff *skb) -{ - struct skb_shared_info *shinfo; - struct page *head_page; - int i; - - if (!skb->pp_recycle) - return -EINVAL; - - shinfo = skb_shinfo(skb); - - for (i = 0; i < shinfo->nr_frags; i++) { - head_page = compound_head(skb_frag_page(&shinfo->frags[i])); - if (likely(is_pp_page(head_page))) - page_pool_ref_page(head_page); - else - page_ref_inc(head_page); - } - return 0; -} - static void skb_kfree_head(void *head, unsigned int end_offset) { if (end_offset == SKB_SMALL_HEAD_HEADROOM) @@ -4176,7 +4140,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) to++; } else { - __skb_frag_ref(fragfrom); + __skb_frag_ref(fragfrom, skb->pp_recycle); skb_frag_page_copy(fragto, fragfrom); skb_frag_off_copy(fragto, fragfrom); skb_frag_size_set(fragto, todo); @@ -4826,7 +4790,7 @@ normal: } *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag; - __skb_frag_ref(nskb_frag); + __skb_frag_ref(nskb_frag, nskb->pp_recycle); size = skb_frag_size(nskb_frag); if (pos < offset) { @@ -5957,10 +5921,8 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, /* if the skb is not cloned this does nothing * since we set nr_frags to 0. 
*/ - if (skb_pp_frag_ref(from)) { - for (i = 0; i < from_shinfo->nr_frags; i++) - __skb_frag_ref(&from_shinfo->frags[i]); - } + for (i = 0; i < from_shinfo->nr_frags; i++) + __skb_frag_ref(&from_shinfo->frags[i], from->pp_recycle); to->truesize += delta; to->len += len; diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c index f9e3d3d90dcf..9237dded4467 100644 --- a/net/tls/tls_device_fallback.c +++ b/net/tls/tls_device_fallback.c @@ -278,7 +278,7 @@ static int fill_sg_in(struct scatterlist *sg_in, for (i = 0; remaining > 0; i++) { skb_frag_t *frag = &record->frags[i]; - __skb_frag_ref(frag); + __skb_frag_ref(frag, false); sg_set_page(sg_in + i, skb_frag_page(frag), skb_frag_size(frag), skb_frag_off(frag)); -- cgit From 3ba2ae361402f28754adf873954a22bf97b856a9 Mon Sep 17 00:00:00 2001 From: Stefan Berger Date: Thu, 4 Apr 2024 10:18:56 -0400 Subject: crypto: x509 - Add OID for NIST P521 and extend parser for it Enable the x509 parser to accept NIST P521 certificates and add the OID for ansip521r1, which is the identifier for NIST P521. Cc: David Howells Tested-by: Lukas Wunner Reviewed-by: Jarkko Sakkinen Signed-off-by: Stefan Berger Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/x509_cert_parser.c | 3 +++ include/linux/oid_registry.h | 1 + 2 files changed, 4 insertions(+) (limited to 'include/linux') diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index bb0bffa271b5..964208d1a35f 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -546,6 +546,9 @@ int x509_extract_key_data(void *context, size_t hdrlen, case OID_id_ansip384r1: ctx->cert->pub->pkey_algo = "ecdsa-nist-p384"; break; + case OID_id_ansip521r1: + ctx->cert->pub->pkey_algo = "ecdsa-nist-p521"; + break; default: return -ENOPKG; } diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index 51421fdbb0ba..6f9242259edc 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -69,6 +69,7 @@ enum OID { OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ OID_sha1, /* 1.3.14.3.2.26 */ OID_id_ansip384r1, /* 1.3.132.0.34 */ + OID_id_ansip521r1, /* 1.3.132.0.35 */ OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ OID_sha384, /* 2.16.840.1.101.3.4.2.2 */ OID_sha512, /* 2.16.840.1.101.3.4.2.3 */ -- cgit From 5c6ca9d936654a135b459c846885e08966e5e5bf Mon Sep 17 00:00:00 2001 From: Lukas Wunner Date: Sun, 7 Apr 2024 19:57:40 +0200 Subject: X.509: Introduce scope-based x509_certificate allocation Add a DEFINE_FREE() clause for x509_certificate structs and use it in x509_cert_parse() and x509_key_preparse(). These are the only functions where scope-based x509_certificate allocation currently makes sense. A third user will be introduced with the forthcoming SPDM library (Security Protocol and Data Model) for PCI device authentication. Unlike most other DEFINE_FREE() clauses, this one checks for IS_ERR() instead of NULL before calling x509_free_certificate() at end of scope. That's because the "constructor" of x509_certificate structs, x509_cert_parse(), returns a valid pointer or an ERR_PTR(), but never NULL. Comparing the Assembler output before/after has shown they are identical, save for the fact that gcc-12 always generates two return paths when __cleanup() is used, one for the success case and one for the error case. In x509_cert_parse(), add a hint for the compiler that kzalloc() never returns an ERR_PTR(). Otherwise the compiler adds a gratuitous IS_ERR() check on return. 
Introduce an assume() macro for this which can be re-used elsewhere in the kernel to provide hints for the compiler. Suggested-by: Jonathan Cameron Link: https://lore.kernel.org/all/20231003153937.000034ca@Huawei.com/ Link: https://lwn.net/Articles/934679/ Signed-off-by: Lukas Wunner Signed-off-by: Herbert Xu --- crypto/asymmetric_keys/x509_cert_parser.c | 43 ++++++++++++------------------- crypto/asymmetric_keys/x509_parser.h | 3 +++ crypto/asymmetric_keys/x509_public_key.c | 31 +++++++--------------- include/linux/compiler.h | 2 ++ 4 files changed, 30 insertions(+), 49 deletions(-) (limited to 'include/linux') diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index 964208d1a35f..a814e5f136e2 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c @@ -60,24 +60,24 @@ EXPORT_SYMBOL_GPL(x509_free_certificate); */ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen) { - struct x509_certificate *cert; - struct x509_parse_context *ctx; + struct x509_certificate *cert __free(x509_free_certificate); + struct x509_parse_context *ctx __free(kfree) = NULL; struct asymmetric_key_id *kid; long ret; - ret = -ENOMEM; cert = kzalloc(sizeof(struct x509_certificate), GFP_KERNEL); + assume(!IS_ERR(cert)); /* Avoid gratuitous IS_ERR() check on return */ if (!cert) - goto error_no_cert; + return ERR_PTR(-ENOMEM); cert->pub = kzalloc(sizeof(struct public_key), GFP_KERNEL); if (!cert->pub) - goto error_no_ctx; + return ERR_PTR(-ENOMEM); cert->sig = kzalloc(sizeof(struct public_key_signature), GFP_KERNEL); if (!cert->sig) - goto error_no_ctx; + return ERR_PTR(-ENOMEM); ctx = kzalloc(sizeof(struct x509_parse_context), GFP_KERNEL); if (!ctx) - goto error_no_ctx; + return ERR_PTR(-ENOMEM); ctx->cert = cert; ctx->data = (unsigned long)data; @@ -85,7 +85,7 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen) /* Attempt to decode the certificate */ ret = asn1_ber_decoder(&x509_decoder, ctx, data, datalen); if (ret < 0) - goto error_decode; + return ERR_PTR(ret); /* Decode the AuthorityKeyIdentifier */ if (ctx->raw_akid) { @@ -95,20 +95,19 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen) ctx->raw_akid, ctx->raw_akid_size); if (ret < 0) { pr_warn("Couldn't decode AuthKeyIdentifier\n"); - goto error_decode; + return ERR_PTR(ret); } } - ret = -ENOMEM; cert->pub->key = kmemdup(ctx->key, ctx->key_size, GFP_KERNEL); if (!cert->pub->key) - goto error_decode; + return ERR_PTR(-ENOMEM); cert->pub->keylen = ctx->key_size; cert->pub->params = kmemdup(ctx->params, ctx->params_size, GFP_KERNEL); if (!cert->pub->params) - goto error_decode; + return ERR_PTR(-ENOMEM); cert->pub->paramlen = ctx->params_size; cert->pub->algo = ctx->key_algo; @@ -116,33 +115,23 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen) /* Grab the signature bits */ ret = x509_get_sig_params(cert); if (ret < 0) - goto error_decode; + return ERR_PTR(ret); /* Generate cert issuer + serial number key ID */ kid = asymmetric_key_generate_id(cert->raw_serial, cert->raw_serial_size, cert->raw_issuer, cert->raw_issuer_size); - if (IS_ERR(kid)) { - ret = PTR_ERR(kid); - goto error_decode; - } + if (IS_ERR(kid)) + return ERR_CAST(kid); cert->id = kid; /* Detect self-signed certificates */ ret = x509_check_for_self_signed(cert); if (ret < 0) - goto error_decode; - - kfree(ctx); - return cert; + return ERR_PTR(ret); -error_decode: - kfree(ctx); -error_no_ctx: - 
x509_free_certificate(cert); -error_no_cert: - return ERR_PTR(ret); + return_ptr(cert); } EXPORT_SYMBOL_GPL(x509_cert_parse); diff --git a/crypto/asymmetric_keys/x509_parser.h b/crypto/asymmetric_keys/x509_parser.h index 97a886cbe01c..0688c222806b 100644 --- a/crypto/asymmetric_keys/x509_parser.h +++ b/crypto/asymmetric_keys/x509_parser.h @@ -5,6 +5,7 @@ * Written by David Howells (dhowells@redhat.com) */ +#include #include #include #include @@ -44,6 +45,8 @@ struct x509_certificate { * x509_cert_parser.c */ extern void x509_free_certificate(struct x509_certificate *cert); +DEFINE_FREE(x509_free_certificate, struct x509_certificate *, + if (!IS_ERR(_T)) x509_free_certificate(_T)) extern struct x509_certificate *x509_cert_parse(const void *data, size_t datalen); extern int x509_decode_time(time64_t *_t, size_t hdrlen, unsigned char tag, diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index 6a4f00be22fc..00ac7159fba2 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -161,12 +161,11 @@ not_self_signed: */ static int x509_key_preparse(struct key_preparsed_payload *prep) { - struct asymmetric_key_ids *kids; - struct x509_certificate *cert; + struct x509_certificate *cert __free(x509_free_certificate); + struct asymmetric_key_ids *kids __free(kfree) = NULL; + char *p, *desc __free(kfree) = NULL; const char *q; size_t srlen, sulen; - char *desc = NULL, *p; - int ret; cert = x509_cert_parse(prep->data, prep->datalen); if (IS_ERR(cert)) @@ -188,9 +187,8 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) } /* Don't permit addition of blacklisted keys */ - ret = -EKEYREJECTED; if (cert->blacklisted) - goto error_free_cert; + return -EKEYREJECTED; /* Propose a description */ sulen = strlen(cert->subject); @@ -202,10 +200,9 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) q = cert->raw_serial; } - ret = -ENOMEM; desc = kmalloc(sulen + 2 + srlen * 2 + 1, GFP_KERNEL); if (!desc) - goto error_free_cert; + return -ENOMEM; p = memcpy(desc, cert->subject, sulen); p += sulen; *p++ = ':'; @@ -215,16 +212,14 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) kids = kmalloc(sizeof(struct asymmetric_key_ids), GFP_KERNEL); if (!kids) - goto error_free_desc; + return -ENOMEM; kids->id[0] = cert->id; kids->id[1] = cert->skid; kids->id[2] = asymmetric_key_generate_id(cert->raw_subject, cert->raw_subject_size, "", 0); - if (IS_ERR(kids->id[2])) { - ret = PTR_ERR(kids->id[2]); - goto error_free_kids; - } + if (IS_ERR(kids->id[2])) + return PTR_ERR(kids->id[2]); /* We're pinning the module by being linked against it */ __module_get(public_key_subtype.owner); @@ -242,15 +237,7 @@ static int x509_key_preparse(struct key_preparsed_payload *prep) cert->sig = NULL; desc = NULL; kids = NULL; - ret = 0; - -error_free_kids: - kfree(kids); -error_free_desc: - kfree(desc); -error_free_cert: - x509_free_certificate(cert); - return ret; + return 0; } static struct asymmetric_key_parser x509_key_parser = { diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c00cc6c0878a..53666eb19909 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -148,6 +148,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, } while (0) #endif +#define assume(cond) do { if (!(cond)) __builtin_unreachable(); } while (0) + /* * KENTRY - kernel entry point * This can be used to annotate symbols (functions or data) that are used -- cgit From 
f7842747d13d9f4eedba9edec5b834c9d9237717 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 5 Apr 2024 07:58:15 -0400 Subject: mm: replace set_pte_at_notify() with just set_pte_at() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With the demise of the .change_pte() MMU notifier callback, there is no notification happening in set_pte_at_notify(). It is a synonym of set_pte_at() and can be replaced with it. Signed-off-by: Paolo Bonzini Reviewed-by: David Hildenbrand Reviewed-by: Philippe Mathieu-Daudé Message-ID: <20240405115815.3226315-5-pbonzini@redhat.com> Acked-by: Andrew Morton Signed-off-by: Paolo Bonzini --- include/linux/mmu_notifier.h | 2 -- kernel/events/uprobes.c | 6 +++--- mm/ksm.c | 4 ++-- mm/memory.c | 7 +------ mm/migrate_device.c | 8 ++------ 5 files changed, 8 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 8c72bf651606..d39ebb10caeb 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -657,6 +657,4 @@ static inline void mmu_notifier_synchronize(void) #endif /* CONFIG_MMU_NOTIFIER */ -#define set_pte_at_notify set_pte_at - #endif /* _LINUX_MMU_NOTIFIER_H */ diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index e4834d23e1d1..1215bc299390 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -18,7 +18,7 @@ #include #include #include /* anon_vma_prepare */ -#include /* set_pte_at_notify */ +#include #include /* folio_free_swap */ #include /* user_enable_single_step */ #include /* notifier mechanism */ @@ -195,8 +195,8 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte))); ptep_clear_flush(vma, addr, pvmw.pte); if (new_page) - set_pte_at_notify(mm, addr, pvmw.pte, - mk_pte(new_page, vma->vm_page_prot)); + set_pte_at(mm, addr, pvmw.pte, + mk_pte(new_page, vma->vm_page_prot)); folio_remove_rmap_pte(old_folio, old_page, vma); if (!folio_mapped(old_folio)) diff --git a/mm/ksm.c b/mm/ksm.c index 8c001819cf10..108a4d167824 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1345,7 +1345,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, if (pte_write(entry)) entry = pte_wrprotect(entry); - set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry); + set_pte_at(mm, pvmw.address, pvmw.pte, entry); } *orig_pte = entry; err = 0; @@ -1447,7 +1447,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, * See Documentation/mm/mmu_notifier.rst */ ptep_clear_flush(vma, addr, ptep); - set_pte_at_notify(mm, addr, ptep, newpte); + set_pte_at(mm, addr, ptep, newpte); folio = page_folio(page); folio_remove_rmap_pte(folio, page, vma); diff --git a/mm/memory.c b/mm/memory.c index f2bc6dd15eb8..9a6f4d8aa379 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3327,13 +3327,8 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) ptep_clear_flush(vma, vmf->address, vmf->pte); folio_add_new_anon_rmap(new_folio, vma, vmf->address); folio_add_lru_vma(new_folio, vma); - /* - * We call the notify macro here because, when using secondary - * mmu page tables (such as kvm shadow page tables), we want the - * new page to be mapped directly into the secondary page table. 
- */ BUG_ON(unshare && pte_write(entry)); - set_pte_at_notify(mm, vmf->address, vmf->pte, entry); + set_pte_at(mm, vmf->address, vmf->pte, entry); update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1); if (old_folio) { /* diff --git a/mm/migrate_device.c b/mm/migrate_device.c index b6c27c76e1a0..66206734b1b9 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -664,13 +664,9 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, if (flush) { flush_cache_page(vma, addr, pte_pfn(orig_pte)); ptep_clear_flush(vma, addr, ptep); - set_pte_at_notify(mm, addr, ptep, entry); - update_mmu_cache(vma, addr, ptep); - } else { - /* No need to invalidate - it was non-present before */ - set_pte_at(mm, addr, ptep, entry); - update_mmu_cache(vma, addr, ptep); } + set_pte_at(mm, addr, ptep, entry); + update_mmu_cache(vma, addr, ptep); pte_unmap_unlock(ptep, ptl); *src = MIGRATE_PFN_MIGRATE; -- cgit From d2f85a263883b679f87ed8f911746105658e9c47 Mon Sep 17 00:00:00 2001 From: Yi Liu Date: Thu, 28 Mar 2024 05:29:58 -0700 Subject: iommu: Pass domain to remove_dev_pasid() op Existing remove_dev_pasid() callbacks of the underlying iommu drivers get the attached domain from the group->pasid_array. However, the domain stored in group->pasid_array is not always correct in all scenarios. A wrong domain may result in failure in remove_dev_pasid() callback. To avoid such problems, it is more reliable to pass the domain to the remove_dev_pasid() op. Suggested-by: Jason Gunthorpe Signed-off-by: Yi Liu Reviewed-by: Kevin Tian Link: https://lore.kernel.org/r/20240328122958.83332-3-yi.l.liu@intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 9 ++------- drivers/iommu/intel/iommu.c | 11 +++-------- drivers/iommu/iommu.c | 9 +++++---- include/linux/iommu.h | 3 ++- 4 files changed, 12 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 41f93c3ab160..2e1988c861f1 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -3053,14 +3053,9 @@ static int arm_smmu_def_domain_type(struct device *dev) return 0; } -static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid) +static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid, + struct iommu_domain *domain) { - struct iommu_domain *domain; - - domain = iommu_get_domain_for_dev_pasid(dev, pasid, IOMMU_DOMAIN_SVA); - if (WARN_ON(IS_ERR(domain)) || !domain) - return; - arm_smmu_sva_remove_dev_pasid(domain, dev, pasid); } diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 50eb9aed47cc..45c75a8a0ef5 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4587,19 +4587,15 @@ static int intel_iommu_iotlb_sync_map(struct iommu_domain *domain, return 0; } -static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid) +static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid, + struct iommu_domain *domain) { struct device_domain_info *info = dev_iommu_priv_get(dev); + struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct dev_pasid_info *curr, *dev_pasid = NULL; struct intel_iommu *iommu = info->iommu; - struct dmar_domain *dmar_domain; - struct iommu_domain *domain; unsigned long flags; - domain = iommu_get_domain_for_dev_pasid(dev, pasid, 0); - if (WARN_ON_ONCE(!domain)) - goto out_tear_down; - /* * The SVA implementation needs to handle 
its own stuffs like the mm * notification. Before consolidating that code into iommu core, let @@ -4610,7 +4606,6 @@ static void intel_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid) goto out_tear_down; } - dmar_domain = to_dmar_domain(domain); spin_lock_irqsave(&dmar_domain->lock, flags); list_for_each_entry(curr, &dmar_domain->dev_pasids, link_domain) { if (curr->dev == dev && curr->pasid == pasid) { diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 659a77f7bb83..3183b0ed4cdb 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -3335,20 +3335,21 @@ err_revert: if (device == last_gdev) break; - ops->remove_dev_pasid(device->dev, pasid); + ops->remove_dev_pasid(device->dev, pasid, domain); } return ret; } static void __iommu_remove_group_pasid(struct iommu_group *group, - ioasid_t pasid) + ioasid_t pasid, + struct iommu_domain *domain) { struct group_device *device; const struct iommu_ops *ops; for_each_group_device(group, device) { ops = dev_iommu_ops(device->dev); - ops->remove_dev_pasid(device->dev, pasid); + ops->remove_dev_pasid(device->dev, pasid, domain); } } @@ -3418,7 +3419,7 @@ void iommu_detach_device_pasid(struct iommu_domain *domain, struct device *dev, struct iommu_group *group = dev->iommu_group; mutex_lock(&group->mutex); - __iommu_remove_group_pasid(group, pasid); + __iommu_remove_group_pasid(group, pasid, domain); WARN_ON(xa_erase(&group->pasid_array, pasid) != domain); mutex_unlock(&group->mutex); } diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 2e925b5eba53..40dd439307e8 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -578,7 +578,8 @@ struct iommu_ops { struct iommu_page_response *msg); int (*def_domain_type)(struct device *dev); - void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid); + void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid, + struct iommu_domain *domain); const struct iommu_domain_ops *default_domain_ops; unsigned long pgsize_bitmap; -- cgit From ec84ca4025c0b90c6b7173a230f78ec00cad44f5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 9 Apr 2024 16:37:48 +0200 Subject: scsi: block: Remove now unused queue limits helpers Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20240409143748.980206-24-hch@lst.de Reviewed-by: Bart Van Assche Reviewed-by: John Garry Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Signed-off-by: Martin K. Petersen --- block/blk-settings.c | 245 ----------------------------------------- drivers/s390/block/dasd_eckd.c | 6 +- include/linux/blkdev.h | 13 --- include/linux/mmc/host.h | 4 +- 4 files changed, 5 insertions(+), 263 deletions(-) (limited to 'include/linux') diff --git a/block/blk-settings.c b/block/blk-settings.c index 3c7d8d638ab5..292aadf8d807 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -284,72 +284,6 @@ int queue_limits_set(struct request_queue *q, struct queue_limits *lim) } EXPORT_SYMBOL_GPL(queue_limits_set); -/** - * blk_queue_bounce_limit - set bounce buffer limit for queue - * @q: the request queue for the device - * @bounce: bounce limit to enforce - * - * Description: - * Force bouncing for ISA DMA ranges or highmem. - * - * DEPRECATED, don't use in new code. 
- **/ -void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce) -{ - q->limits.bounce = bounce; -} -EXPORT_SYMBOL(blk_queue_bounce_limit); - -/** - * blk_queue_max_hw_sectors - set max sectors for a request for this queue - * @q: the request queue for the device - * @max_hw_sectors: max hardware sectors in the usual 512b unit - * - * Description: - * Enables a low level driver to set a hard upper limit, - * max_hw_sectors, on the size of requests. max_hw_sectors is set by - * the device driver based upon the capabilities of the I/O - * controller. - * - * max_dev_sectors is a hard limit imposed by the storage device for - * READ/WRITE requests. It is set by the disk driver. - * - * max_sectors is a soft limit imposed by the block layer for - * filesystem type requests. This value can be overridden on a - * per-device basis in /sys/block//queue/max_sectors_kb. - * The soft limit can not exceed max_hw_sectors. - **/ -void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors) -{ - struct queue_limits *limits = &q->limits; - unsigned int max_sectors; - - if ((max_hw_sectors << 9) < PAGE_SIZE) { - max_hw_sectors = 1 << (PAGE_SHIFT - 9); - pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors); - } - - max_hw_sectors = round_down(max_hw_sectors, - limits->logical_block_size >> SECTOR_SHIFT); - limits->max_hw_sectors = max_hw_sectors; - - max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors); - - if (limits->max_user_sectors) - max_sectors = min(max_sectors, limits->max_user_sectors); - else - max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS_CAP); - - max_sectors = round_down(max_sectors, - limits->logical_block_size >> SECTOR_SHIFT); - limits->max_sectors = max_sectors; - - if (!q->disk) - return; - q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9); -} -EXPORT_SYMBOL(blk_queue_max_hw_sectors); - /** * blk_queue_chunk_sectors - set size of the chunk for this queue * @q: the request queue for the device @@ -436,65 +370,6 @@ void blk_queue_max_zone_append_sectors(struct request_queue *q, } EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors); -/** - * blk_queue_max_segments - set max hw segments for a request for this queue - * @q: the request queue for the device - * @max_segments: max number of segments - * - * Description: - * Enables a low level driver to set an upper limit on the number of - * hw data segments in a request. - **/ -void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments) -{ - if (!max_segments) { - max_segments = 1; - pr_info("%s: set to minimum %u\n", __func__, max_segments); - } - - q->limits.max_segments = max_segments; -} -EXPORT_SYMBOL(blk_queue_max_segments); - -/** - * blk_queue_max_discard_segments - set max segments for discard requests - * @q: the request queue for the device - * @max_segments: max number of segments - * - * Description: - * Enables a low level driver to set an upper limit on the number of - * segments in a discard request. 
- **/ -void blk_queue_max_discard_segments(struct request_queue *q, - unsigned short max_segments) -{ - q->limits.max_discard_segments = max_segments; -} -EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments); - -/** - * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg - * @q: the request queue for the device - * @max_size: max size of segment in bytes - * - * Description: - * Enables a low level driver to set an upper limit on the size of a - * coalesced segment - **/ -void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) -{ - if (max_size < PAGE_SIZE) { - max_size = PAGE_SIZE; - pr_info("%s: set to minimum %u\n", __func__, max_size); - } - - /* see blk_queue_virt_boundary() for the explanation */ - WARN_ON_ONCE(q->limits.virt_boundary_mask); - - q->limits.max_segment_size = max_size; -} -EXPORT_SYMBOL(blk_queue_max_segment_size); - /** * blk_queue_logical_block_size - set logical block size for the queue * @q: the request queue for the device @@ -661,29 +536,6 @@ void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt) } EXPORT_SYMBOL(blk_limits_io_opt); -/** - * blk_queue_io_opt - set optimal request size for the queue - * @q: the request queue for the device - * @opt: optimal request size in bytes - * - * Description: - * Storage devices may report an optimal I/O size, which is the - * device's preferred unit for sustained I/O. This is rarely reported - * for disk drives. For RAID arrays it is usually the stripe width or - * the internal track size. A properly aligned multiple of - * optimal_io_size is the preferred request size for workloads where - * sustained throughput is desired. - */ -void blk_queue_io_opt(struct request_queue *q, unsigned int opt) -{ - blk_limits_io_opt(&q->limits, opt); - if (!q->disk) - return; - q->disk->bdi->ra_pages = - max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES); -} -EXPORT_SYMBOL(blk_queue_io_opt); - static int queue_limit_alignment_offset(const struct queue_limits *lim, sector_t sector) { @@ -933,81 +785,6 @@ void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask) } EXPORT_SYMBOL(blk_queue_update_dma_pad); -/** - * blk_queue_segment_boundary - set boundary rules for segment merging - * @q: the request queue for the device - * @mask: the memory boundary mask - **/ -void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) -{ - if (mask < PAGE_SIZE - 1) { - mask = PAGE_SIZE - 1; - pr_info("%s: set to minimum %lx\n", __func__, mask); - } - - q->limits.seg_boundary_mask = mask; -} -EXPORT_SYMBOL(blk_queue_segment_boundary); - -/** - * blk_queue_virt_boundary - set boundary rules for bio merging - * @q: the request queue for the device - * @mask: the memory boundary mask - **/ -void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask) -{ - q->limits.virt_boundary_mask = mask; - - /* - * Devices that require a virtual boundary do not support scatter/gather - * I/O natively, but instead require a descriptor list entry for each - * page (which might not be idential to the Linux PAGE_SIZE). Because - * of that they are not limited by our notion of "segment size". - */ - if (mask) - q->limits.max_segment_size = UINT_MAX; -} -EXPORT_SYMBOL(blk_queue_virt_boundary); - -/** - * blk_queue_dma_alignment - set dma length and memory alignment - * @q: the request queue for the device - * @mask: alignment mask - * - * description: - * set required memory and length alignment for direct dma transactions. 
- * this is used when building direct io requests for the queue. - * - **/ -void blk_queue_dma_alignment(struct request_queue *q, int mask) -{ - q->limits.dma_alignment = mask; -} -EXPORT_SYMBOL(blk_queue_dma_alignment); - -/** - * blk_queue_update_dma_alignment - update dma length and memory alignment - * @q: the request queue for the device - * @mask: alignment mask - * - * description: - * update required memory and length alignment for direct dma transactions. - * If the requested alignment is larger than the current alignment, then - * the current queue alignment is updated to the new value, otherwise it - * is left alone. The design of this is to allow multiple objects - * (driver, device, transport etc) to set their respective - * alignments without having them interfere. - * - **/ -void blk_queue_update_dma_alignment(struct request_queue *q, int mask) -{ - BUG_ON(mask > PAGE_SIZE); - - if (mask > q->limits.dma_alignment) - q->limits.dma_alignment = mask; -} -EXPORT_SYMBOL(blk_queue_update_dma_alignment); - /** * blk_set_queue_depth - tell the block layer about the device queue depth * @q: the request queue for the device @@ -1061,28 +838,6 @@ void blk_queue_required_elevator_features(struct request_queue *q, } EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features); -/** - * blk_queue_can_use_dma_map_merging - configure queue for merging segments. - * @q: the request queue for the device - * @dev: the device pointer for dma - * - * Tell the block layer about merging the segments by dma map of @q. - */ -bool blk_queue_can_use_dma_map_merging(struct request_queue *q, - struct device *dev) -{ - unsigned long boundary = dma_get_merge_boundary(dev); - - if (!boundary) - return false; - - /* No need to update max_segment_size. see blk_queue_virt_boundary() */ - blk_queue_virt_boundary(q, boundary); - - return true; -} -EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging); - /** * disk_set_zoned - inidicate a zoned device * @disk: gendisk to configure diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 180a008d38ea..2f16f543079b 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -4561,9 +4561,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track( len_to_track_end = 0; /* * A tidaw can address 4k of memory, but must not cross page boundaries - * We can let the block layer handle this by setting - * blk_queue_segment_boundary to page boundaries and - * blk_max_segment_size to page size when setting up the request queue. + * We can let the block layer handle this by setting seg_boundary_mask + * to page boundaries and max_segment_size to page size when setting up + * the request queue. * For write requests, a TIDAW must not cross track boundaries, because * we have to set the CBC flag on the last tidaw for each track. 
*/ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ded7f66dc4b9..e3c7082efa39 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -908,15 +908,9 @@ static inline void queue_limits_cancel_update(struct request_queue *q) /* * Access functions for manipulating queue properties */ -void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit); -extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); -extern void blk_queue_max_segments(struct request_queue *, unsigned short); -extern void blk_queue_max_discard_segments(struct request_queue *, - unsigned short); void blk_queue_max_secure_erase_sectors(struct request_queue *q, unsigned int max_sectors); -extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); extern void blk_queue_max_discard_sectors(struct request_queue *q, unsigned int max_discard_sectors); extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q, @@ -933,7 +927,6 @@ void disk_update_readahead(struct gendisk *disk); extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); extern void blk_queue_io_min(struct request_queue *q, unsigned int min); extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); -extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); extern void blk_set_stacking_limits(struct queue_limits *lim); extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, @@ -941,10 +934,6 @@ extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev, sector_t offset, const char *pfx); extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); -extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); -extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); -extern void blk_queue_dma_alignment(struct request_queue *, int); -extern void blk_queue_update_dma_alignment(struct request_queue *, int); extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); @@ -961,8 +950,6 @@ void disk_set_independent_access_ranges(struct gendisk *disk, extern void blk_queue_required_elevator_features(struct request_queue *q, unsigned int features); -extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, - struct device *dev); bool __must_check blk_get_queue(struct request_queue *); extern void blk_put_queue(struct request_queue *); diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 5894bf912f7b..88c6a76042ee 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -433,8 +433,8 @@ struct mmc_host { mmc_pm_flag_t pm_caps; /* supported pm features */ /* host specific block data */ - unsigned int max_seg_size; /* see blk_queue_max_segment_size */ - unsigned short max_segs; /* see blk_queue_max_segments */ + unsigned int max_seg_size; /* lim->max_segment_size */ + unsigned short max_segs; /* lim->max_segments */ unsigned short unused; unsigned int max_req_size; /* maximum number of bytes in one req */ unsigned int max_blk_size; /* maximum size of one mmc block */ -- cgit From 86d2a2f51fbada84e377665df06b5a479a1edc99 Mon Sep 17 00:00:00 2001 From: Bitao Hu Date: Thu, 11 Apr 
2024 15:41:30 +0800 Subject: genirq: Convert kstat_irqs to a struct The irq_desc::kstat_irqs member is a per-CPU variable of type int, which is only capable of counting. A snapshot mechanism for interrupt statistics will be added soon, which requires an additional variable to store the snapshot. To facilitate expansion, convert kstat_irqs here to a struct containing only the count. Originally-by: Thomas Gleixner Signed-off-by: Bitao Hu Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240411074134.30922-2-yaoma@linux.alibaba.com --- arch/mips/dec/setup.c | 2 +- arch/parisc/kernel/smp.c | 2 +- arch/powerpc/kvm/book3s_hv_rm_xics.c | 2 +- include/linux/irqdesc.h | 12 ++++++++++-- kernel/irq/internals.h | 2 +- kernel/irq/irqdesc.c | 9 ++++----- kernel/irq/proc.c | 5 ++--- scripts/gdb/linux/interrupts.py | 6 +++--- 8 files changed, 23 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c index 6c3704f51d0d..87f0a1436bf9 100644 --- a/arch/mips/dec/setup.c +++ b/arch/mips/dec/setup.c @@ -756,7 +756,7 @@ void __init arch_init_irq(void) NULL)) pr_err("Failed to register fpu interrupt\n"); desc_fpu = irq_to_desc(irq_fpu); - fpu_kstat_irq = this_cpu_ptr(desc_fpu->kstat_irqs); + fpu_kstat_irq = this_cpu_ptr(&desc_fpu->kstat_irqs->cnt); } if (dec_interrupt[DEC_IRQ_CASCADE] >= 0) { if (request_irq(dec_interrupt[DEC_IRQ_CASCADE], no_action, diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 444154271f23..800eb64e91ad 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -344,7 +344,7 @@ static int smp_boot_one_cpu(int cpuid, struct task_struct *idle) struct irq_desc *desc = irq_to_desc(i); if (desc && desc->kstat_irqs) - *per_cpu_ptr(desc->kstat_irqs, cpuid) = 0; + *per_cpu_ptr(desc->kstat_irqs, cpuid) = (struct irqstat) { }; } #endif diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c index e42984878503..f2636414d82a 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_xics.c +++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c @@ -837,7 +837,7 @@ static inline void this_cpu_inc_rm(unsigned int __percpu *addr) */ static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc) { - this_cpu_inc_rm(desc->kstat_irqs); + this_cpu_inc_rm(&desc->kstat_irqs->cnt); __this_cpu_inc(kstat.irqs_sum); } diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index d9451d456a73..c28612674acb 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -17,6 +17,14 @@ struct irq_desc; struct irq_domain; struct pt_regs; +/** + * struct irqstat - interrupt statistics + * @cnt: real-time interrupt count + */ +struct irqstat { + unsigned int cnt; +}; + /** * struct irq_desc - interrupt descriptor * @irq_common_data: per irq and chip data passed down to chip functions @@ -55,7 +63,7 @@ struct pt_regs; struct irq_desc { struct irq_common_data irq_common_data; struct irq_data irq_data; - unsigned int __percpu *kstat_irqs; + struct irqstat __percpu *kstat_irqs; irq_flow_handler_t handle_irq; struct irqaction *action; /* IRQ action list */ unsigned int status_use_accessors; @@ -119,7 +127,7 @@ extern struct irq_desc irq_desc[NR_IRQS]; static inline unsigned int irq_desc_kstat_cpu(struct irq_desc *desc, unsigned int cpu) { - return desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; + return desc->kstat_irqs ? 
per_cpu(desc->kstat_irqs->cnt, cpu) : 0; } static inline struct irq_desc *irq_data_to_desc(struct irq_data *data) diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index bcc7f21db9ee..1d92532c2aae 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -258,7 +258,7 @@ static inline void irq_state_set_masked(struct irq_desc *desc) static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc) { - __this_cpu_inc(*desc->kstat_irqs); + __this_cpu_inc(desc->kstat_irqs->cnt); __this_cpu_inc(kstat.irqs_sum); } diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 4c6b32318ce3..b59b79200ad7 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -134,7 +134,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, desc->name = NULL; desc->owner = owner; for_each_possible_cpu(cpu) - *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; + *per_cpu_ptr(desc->kstat_irqs, cpu) = (struct irqstat) { }; desc_smp_init(desc, node, affinity); } @@ -186,7 +186,7 @@ static int init_desc(struct irq_desc *desc, int irq, int node, const struct cpumask *affinity, struct module *owner) { - desc->kstat_irqs = alloc_percpu(unsigned int); + desc->kstat_irqs = alloc_percpu(struct irqstat); if (!desc->kstat_irqs) return -ENOMEM; @@ -968,8 +968,7 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) { struct irq_desc *desc = irq_to_desc(irq); - return desc && desc->kstat_irqs ? - *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; + return desc && desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0; } static bool irq_is_nmi(struct irq_desc *desc) @@ -991,7 +990,7 @@ static unsigned int kstat_irqs(unsigned int irq) return data_race(desc->tot_count); for_each_possible_cpu(cpu) - sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu)); + sum += data_race(per_cpu(desc->kstat_irqs->cnt, cpu)); return sum; } diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 623b8136e9af..6954e0a02047 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -490,7 +490,7 @@ int show_interrupts(struct seq_file *p, void *v) if (desc->kstat_irqs) { for_each_online_cpu(j) - any_count |= data_race(*per_cpu_ptr(desc->kstat_irqs, j)); + any_count |= data_race(per_cpu(desc->kstat_irqs->cnt, j)); } if ((!desc->action || irq_desc_is_chained(desc)) && !any_count) @@ -498,8 +498,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, "%*d: ", prec, i); for_each_online_cpu(j) - seq_printf(p, "%10u ", desc->kstat_irqs ? - *per_cpu_ptr(desc->kstat_irqs, j) : 0); + seq_printf(p, "%10u ", desc->kstat_irqs ? 
per_cpu(desc->kstat_irqs->cnt, j) : 0); raw_spin_lock_irqsave(&desc->lock, flags); if (desc->irq_data.chip) { diff --git a/scripts/gdb/linux/interrupts.py b/scripts/gdb/linux/interrupts.py index 66ae5c7690cf..616a5f26377a 100644 --- a/scripts/gdb/linux/interrupts.py +++ b/scripts/gdb/linux/interrupts.py @@ -37,7 +37,7 @@ def show_irq_desc(prec, irq): any_count = 0 if desc['kstat_irqs']: for cpu in cpus.each_online_cpu(): - any_count += cpus.per_cpu(desc['kstat_irqs'], cpu) + any_count += cpus.per_cpu(desc['kstat_irqs'], cpu)['cnt'] if (desc['action'] == 0 or irq_desc_is_chained(desc)) and any_count == 0: return text; @@ -45,7 +45,7 @@ def show_irq_desc(prec, irq): text += "%*d: " % (prec, irq) for cpu in cpus.each_online_cpu(): if desc['kstat_irqs']: - count = cpus.per_cpu(desc['kstat_irqs'], cpu) + count = cpus.per_cpu(desc['kstat_irqs'], cpu)['cnt'] else: count = 0 text += "%10u" % (count) @@ -177,7 +177,7 @@ def arm_common_show_interrupts(prec): if desc == 0: continue for cpu in cpus.each_online_cpu(): - text += "%10u" % (cpus.per_cpu(desc['kstat_irqs'], cpu)) + text += "%10u" % (cpus.per_cpu(desc['kstat_irqs'], cpu)['cnt']) text += " %s" % (ipi_types[ipi].string()) text += "\n" return text -- cgit From 99cf63c56661be0a0c42f79b56f37a4aa34b4779 Mon Sep 17 00:00:00 2001 From: Bitao Hu Date: Thu, 11 Apr 2024 15:41:31 +0800 Subject: genirq: Provide a snapshot mechanism for interrupt statistics The soft lockup detector lacks a mechanism to identify interrupt storms as root cause of a lockup. To enable this the detector needs a mechanism to snapshot the interrupt count statistics on a CPU when the detector observes a potential lockup scenario and compare that against the interrupt count when it warns about the lockup later on. The number of interrupts in that period give a hint whether the lockup might have been caused by an interrupt storm. Instead of having extra storage in the lockup detector and accessing the internals of the interrupt descriptor directly, add a snapshot member to the per CPU irq_desc::kstat_irq structure and provide interfaces to take a snapshot of all interrupts on the current CPU and to retrieve the delta of a specific interrupt later on. 
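For context, a minimal sketch of how a consumer of the two new interfaces might look; only kstat_snapshot_irqs() and kstat_get_irq_since_snapshot() come from this series, while the detector_* wrappers and the threshold handling are made up for illustration:

        #include <linux/kernel_stat.h>
        #include <linux/printk.h>

        static void detector_suspect_lockup(void)
        {
                /* Remember the per-IRQ counts on this CPU at suspicion time. */
                kstat_snapshot_irqs();
        }

        static void detector_report_lockup(unsigned int irq, unsigned int threshold)
        {
                /* Interrupts on this CPU since the snapshot was taken. */
                unsigned int delta = kstat_get_irq_since_snapshot(irq);

                if (delta > threshold)
                        pr_warn("irq %u fired %u times since the snapshot, possible storm\n",
                                irq, delta);
        }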
Originally-by: Thomas Gleixner Signed-off-by: Bitao Hu Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240411074134.30922-3-yaoma@linux.alibaba.com --- include/linux/irqdesc.h | 4 ++++ include/linux/kernel_stat.h | 8 ++++++++ kernel/irq/Kconfig | 4 ++++ kernel/irq/irqdesc.c | 25 +++++++++++++++++++++++++ 4 files changed, 41 insertions(+) (limited to 'include/linux') diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index c28612674acb..fd091c35d572 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -20,9 +20,13 @@ struct pt_regs; /** * struct irqstat - interrupt statistics * @cnt: real-time interrupt count + * @ref: snapshot of interrupt count */ struct irqstat { unsigned int cnt; +#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT + unsigned int ref; +#endif }; /** diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 9935f7ecbfb9..9c042c6384bb 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -79,6 +79,14 @@ static inline unsigned int kstat_cpu_softirqs_sum(int cpu) return sum; } +#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT +extern void kstat_snapshot_irqs(void); +extern unsigned int kstat_get_irq_since_snapshot(unsigned int irq); +#else +static inline void kstat_snapshot_irqs(void) { } +static inline unsigned int kstat_get_irq_since_snapshot(unsigned int irq) { return 0; } +#endif + /* * Number of interrupts per specific IRQ source, since bootup */ diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 2531f3496ab6..529adb1f5859 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -108,6 +108,10 @@ config GENERIC_IRQ_MATRIX_ALLOCATOR config GENERIC_IRQ_RESERVATION_MODE bool +# Snapshot for interrupt statistics +config GENERIC_IRQ_STAT_SNAPSHOT + bool + # Support forced irq threading config IRQ_FORCED_THREADING bool diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index b59b79200ad7..f348faffa7b4 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -994,6 +994,31 @@ static unsigned int kstat_irqs(unsigned int irq) return sum; } +#ifdef CONFIG_GENERIC_IRQ_STAT_SNAPSHOT + +void kstat_snapshot_irqs(void) +{ + struct irq_desc *desc; + unsigned int irq; + + for_each_irq_desc(irq, desc) { + if (!desc->kstat_irqs) + continue; + this_cpu_write(desc->kstat_irqs->ref, this_cpu_read(desc->kstat_irqs->cnt)); + } +} + +unsigned int kstat_get_irq_since_snapshot(unsigned int irq) +{ + struct irq_desc *desc = irq_to_desc(irq); + + if (!desc || !desc->kstat_irqs) + return 0; + return this_cpu_read(desc->kstat_irqs->cnt) - this_cpu_read(desc->kstat_irqs->ref); +} + +#endif + /** * kstat_irqs_usr - Get the statistics for an interrupt from thread context * @irq: The interrupt number -- cgit From cda30c6542c8bb445bc84f6616cac8d012547f0a Mon Sep 17 00:00:00 2001 From: Tim Schumacher Date: Thu, 28 Mar 2024 21:50:33 +0100 Subject: efi: Clear up misconceptions about a maximum variable name size The UEFI specification does not make any mention of a maximum variable name size, so the headers and implementation shouldn't claim that one exists either. Comments referring to this limit have been removed or rewritten, as this is an implementation detail local to the Linux kernel. Where appropriate, the magic value of 1024 has been replaced with EFI_VAR_NAME_LEN, as this is used for the efi_variable struct definition. This in itself does not change any behavior, but should serve as points of interest when making future changes in the same area. 
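As a hedged illustration of the pattern described above (ucs2_strsize() and BUILD_BUG_ON() are existing kernel interfaces; the efi_name_bytes() helper is made up for the example):

        #include <linux/efi.h>
        #include <linux/ucs2_string.h>
        #include <linux/build_bug.h>

        static unsigned long efi_name_bytes(const efi_char16_t *name)
        {
                /* A 512-byte enumeration buffer must stay within the limit. */
                BUILD_BUG_ON(EFI_VAR_NAME_LEN < 512);

                /* Bounded walk: safe even for names lacking a terminator. */
                return ucs2_strsize(name, EFI_VAR_NAME_LEN);
        }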
A related build-time check has been added to ensure that the special 512 byte sized buffer will not overflow with a potentially decreased EFI_VAR_NAME_LEN. Signed-off-by: Tim Schumacher Signed-off-by: Ard Biesheuvel --- drivers/firmware/efi/vars.c | 2 +- fs/efivarfs/vars.c | 5 +++-- include/linux/efi.h | 9 ++++----- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index f654e6f6af87..4056ba7f3440 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -215,7 +215,7 @@ efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor, if (data_size > 0) { status = check_var_size(nonblocking, attr, - data_size + ucs2_strsize(name, 1024)); + data_size + ucs2_strsize(name, EFI_VAR_NAME_LEN)); if (status != EFI_SUCCESS) return status; } diff --git a/fs/efivarfs/vars.c b/fs/efivarfs/vars.c index 4d722af1014f..3cc89bb624f0 100644 --- a/fs/efivarfs/vars.c +++ b/fs/efivarfs/vars.c @@ -295,9 +295,9 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor, unsigned long strsize1, strsize2; bool found = false; - strsize1 = ucs2_strsize(variable_name, 1024); + strsize1 = ucs2_strsize(variable_name, EFI_VAR_NAME_LEN); list_for_each_entry_safe(entry, n, head, list) { - strsize2 = ucs2_strsize(entry->var.VariableName, 1024); + strsize2 = ucs2_strsize(entry->var.VariableName, EFI_VAR_NAME_LEN); if (strsize1 == strsize2 && !memcmp(variable_name, &(entry->var.VariableName), strsize2) && @@ -396,6 +396,7 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *, do { variable_name_size = 512; + BUILD_BUG_ON(EFI_VAR_NAME_LEN < 512); status = efivar_get_next_variable(&variable_name_size, variable_name, diff --git a/include/linux/efi.h b/include/linux/efi.h index d59b0947fba0..418e555459da 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -1072,12 +1072,11 @@ static inline u64 efivar_reserved_space(void) { return 0; } #endif /* - * The maximum size of VariableName + Data = 1024 - * Therefore, it's reasonable to save that much - * space in each part of the structure, - * and we use a page for reading/writing. + * There is no actual upper limit specified for the variable name size. + * + * This limit exists only for practical purposes, since name conversions + * are bounds-checked and name data is occasionally stored in-line. */ - #define EFI_VAR_NAME_LEN 1024 int efivars_register(struct efivars *efivars, -- cgit From 42bd2af5950456d46fdaa91c3a8fb02e680f19f5 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Wed, 10 Apr 2024 17:10:12 -0700 Subject: vfs: relax linkat() AT_EMPTY_PATH - aka flink() - requirements MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit "The definition of insanity is doing the same thing over and over again and expecting different results” We've tried to do this before, most recently with commit bb2314b47996 ("fs: Allow unprivileged linkat(..., AT_EMPTY_PATH) aka flink") about a decade ago. But the effort goes back even further than that, eg this thread back from 1998 that is so old that we don't even have it archived in lore: https://lkml.org/lkml/1998/3/10/108 which also points out some of the reasons why it's dangerous. Or, how about then in 2003: https://lkml.org/lkml/2003/4/6/112 where we went through some of the same arguments, just wirh different people involved. 
In particular, having access to a file descriptor does not necessarily mean that you have access to the path that was used for lookup, and there may be very good reasons why you absolutely must not have access to a path to said file. For example, if we were passed a file descriptor from the outside into some limited environment (think chroot, but also user namespaces etc) a 'flink()' system call could now make that file visible inside a context where it's not supposed to be visible. In the process the user may also be able to re-open it with permissions that the original file descriptor did not have (eg a read-only file descriptor may be associated with an underlying file that is writable). Another variation on this is if somebody else (typically root) opens a file in a directory that is not accessible to others, and passes the file descriptor on as a read-only file. Again, the access to the file descriptor does not imply that you should have access to a path to the file in the filesystem. So while we have tried this several times in the past, it never works. The last time we did this, that commit bb2314b47996 quickly got reverted again in commit f0cc6ffb8ce8 (Revert "fs: Allow unprivileged linkat(..., AT_EMPTY_PATH) aka flink"), with a note saying "We may re-do this once the whole discussion about the interface is done". Well, the discussion is long done, and didn't come to any resolution. There's no question that 'flink()' would be a useful operation, but it's a dangerous one. However, it does turn out that since 2008 (commit d76b0d9b2d87: "CRED: Use creds in file structs") we have had a fairly straightforward way to check whether the file descriptor was opened by the same credentials as the credentials of the flink(). That allows the most common patterns that people want to use, which tend to be to either open the source carefully (ie using the openat2() RESOLVE_xyz flags, and/or checking ownership with fstat() before linking), or to use O_TMPFILE and fill in the file contents before it's exposed to the world with linkat(). But it also means that if the file descriptor was opened by somebody else, or we've gone through a credentials change since, the operation no longer works (unless we have CAP_DAC_READ_SEARCH capabilities in the opener's user namespace, as before). Note that the credential equality check is done by using pointer equality, which means that it's not enough that you have effectively the same user - they have to be literally identical, since our credentials are using copy-on-write semantics. So you can't change your credentials to something else and try to change it back to the same ones between the open() and the linkat(). This is not meant to be some kind of generic permission check, this is literally meant as a "the open and link calls are 'atomic' wrt user credentials" check. It also means that you can't just move things between namespaces, because the credentials aren't just a list of uid's and gid's: they includes the pointer to the user_ns that the capabilities are relative to. So let's try this one more time and see if maybe this approach ends up being workable after all. 
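For reference, a userspace sketch of the O_TMPFILE plus linkat(AT_EMPTY_PATH) pattern the message refers to; with this change it no longer needs CAP_DAC_READ_SEARCH as long as the descriptor was opened with the caller's current credentials (the paths are illustrative only):

        #define _GNU_SOURCE
        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("/tmp", O_TMPFILE | O_WRONLY, 0600);

                if (fd < 0)
                        return 1;

                /* Fill in the contents before the file becomes visible. */
                if (write(fd, "hello\n", 6) != 6)
                        return 1;

                /* Expose it under a name; an empty path means "this fd". */
                if (linkat(fd, "", AT_FDCWD, "/tmp/now-visible", AT_EMPTY_PATH) != 0) {
                        perror("linkat");
                        return 1;
                }
                close(fd);
                return 0;
        }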
Cc: Andrew Lutomirski Cc: Al Viro Cc: Christian Brauner Cc: Peter Anvin Cc: Jan Kara Signed-off-by: Linus Torvalds Link: https://lore.kernel.org/r/20240411001012.12513-1-torvalds@linux-foundation.org [brauner: relax capability check to opener of the file] Link: https://lore.kernel.org/all/20231113-undenkbar-gediegen-efde5f1c34bc@brauner Signed-off-by: Christian Brauner --- fs/namei.c | 19 +++++++++++++------ include/linux/namei.h | 1 + 2 files changed, 14 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/fs/namei.c b/fs/namei.c index ceb9ddf8dfdd..8c1ff0b388e2 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2422,6 +2422,14 @@ static const char *path_init(struct nameidata *nd, unsigned flags) if (!f.file) return ERR_PTR(-EBADF); + if (flags & LOOKUP_LINKAT_EMPTY) { + if (f.file->f_cred != current_cred() && + !ns_capable(f.file->f_cred->user_ns, CAP_DAC_READ_SEARCH)) { + fdput(f); + return ERR_PTR(-ENOENT); + } + } + dentry = f.file->f_path.dentry; if (*s && unlikely(!d_can_lookup(dentry))) { @@ -4644,14 +4652,13 @@ int do_linkat(int olddfd, struct filename *old, int newdfd, goto out_putnames; } /* - * To use null names we require CAP_DAC_READ_SEARCH + * To use null names we require CAP_DAC_READ_SEARCH or + * that the open-time creds of the dfd matches current. * This ensures that not everyone will be able to create - * handlink using the passed filedescriptor. + * a hardlink using the passed file descriptor. */ - if (flags & AT_EMPTY_PATH && !capable(CAP_DAC_READ_SEARCH)) { - error = -ENOENT; - goto out_putnames; - } + if (flags & AT_EMPTY_PATH) + how |= LOOKUP_LINKAT_EMPTY; if (flags & AT_SYMLINK_FOLLOW) how |= LOOKUP_FOLLOW; diff --git a/include/linux/namei.h b/include/linux/namei.h index 74e0cc14ebf8..967aa9ea9f96 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -44,6 +44,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT}; #define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */ #define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */ #define LOOKUP_CACHED 0x200000 /* Only do cached lookup */ +#define LOOKUP_LINKAT_EMPTY 0x400000 /* Linkat request with empty path. */ /* LOOKUP_* flags which do scope-related checks based on the dirfd. */ #define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT) -- cgit From 058e87782c91696020bdb0aa28ddf77d89aed266 Mon Sep 17 00:00:00 2001 From: "Uladzislau Rezki (Sony)" Date: Wed, 14 Feb 2024 14:45:53 -0800 Subject: rcu: Update lockdep while in RCU read-side critical section With Ankur's lazy-/auto-preemption patches applied and with a lazy-preemptible kernel in combination with a non-preemptible RCU, lockdep sometimes complains about context switches within RCU read-side critical sections. This is a false positive due to rcu_read_unlock() updating lockdep state too late: __release(RCU); __rcu_read_unlock(); // Context switch here results in lockdep false positive!!! rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */ Although this complaint could also happen with preemptible RCU in a preemptible kernel, the odds of that happening are quite low. In contrast, with non-preemptible RCU, a long critical section has a high probability of performing a context switch from the preempt_enable() in __rcu_read_unlock(). The fix is straightforward, just move the rcu_lock_release() within rcu_read_unlock() to obtain the reverse order from that of rcu_read_lock(): rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags.
*/ __release(RCU); __rcu_read_unlock(); This commit makes this change. Co-developed-by: Frederic Weisbecker Signed-off-by: Frederic Weisbecker Co-developed-by: Joel Fernandes (Google) Signed-off-by: Joel Fernandes (Google) Co-developed-by: Boqun Feng Signed-off-by: Boqun Feng Signed-off-by: Uladzislau Rezki (Sony) Reviewed-by: Paul E. McKenney Cc: Ankur Arora Cc: Thomas Gleixner --- include/linux/rcupdate.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 17d7ed5f3ae6..2c54750e36a0 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -809,9 +809,9 @@ static inline void rcu_read_unlock(void) { RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock() used illegally while idle"); + rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */ __release(RCU); __rcu_read_unlock(); - rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */ } /** -- cgit From 65b4a59557f6fb5d4355093ad5a2c1bd5598ee41 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 15 Feb 2024 09:04:30 -0800 Subject: srcu: Make Tiny SRCU explicitly disable preemption Because Tiny SRCU is used only in kernels built with either CONFIG_PREEMPT_NONE=y or CONFIG_PREEMPT_VOLUNTARY=y, there has not been any need for TINY SRCU to explicitly disable preemption. However, the prospect of lazy preemption changes that, and the lazy-preemption patches do result in rcutorture runs finding both too-short grace periods and grace-period hangs for Tiny SRCU. This commit therefore adds the needed preempt_disable() and preempt_enable() calls to Tiny SRCU. Signed-off-by: Paul E. McKenney Cc: Ankur Arora Cc: Thomas Gleixner Signed-off-by: Uladzislau Rezki (Sony) --- include/linux/srcutiny.h | 2 ++ kernel/rcu/srcutiny.c | 31 ++++++++++++++++++++++++++----- 2 files changed, 28 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index 447133171d95..4d96bbdb45f0 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -64,8 +64,10 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp) { int idx; + preempt_disable(); // Needed for PREEMPT_AUTO idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1; WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1); + preempt_enable(); return idx; } diff --git a/kernel/rcu/srcutiny.c b/kernel/rcu/srcutiny.c index c38e5933a5d6..5afd5cf494db 100644 --- a/kernel/rcu/srcutiny.c +++ b/kernel/rcu/srcutiny.c @@ -96,9 +96,12 @@ EXPORT_SYMBOL_GPL(cleanup_srcu_struct); */ void __srcu_read_unlock(struct srcu_struct *ssp, int idx) { - int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1; + int newval; + preempt_disable(); // Needed for PREEMPT_AUTO + newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1; WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval); + preempt_enable(); if (!newval && READ_ONCE(ssp->srcu_gp_waiting) && in_task()) swake_up_one(&ssp->srcu_wq); } @@ -117,8 +120,11 @@ void srcu_drive_gp(struct work_struct *wp) struct srcu_struct *ssp; ssp = container_of(wp, struct srcu_struct, srcu_work); - if (ssp->srcu_gp_running || ULONG_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) + preempt_disable(); // Needed for PREEMPT_AUTO + if (ssp->srcu_gp_running || ULONG_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) { return; /* Already running or nothing to do. */ + preempt_enable(); + } /* Remove recently arrived callbacks and wait for readers. 
*/ WRITE_ONCE(ssp->srcu_gp_running, true); @@ -130,9 +136,12 @@ void srcu_drive_gp(struct work_struct *wp) idx = (ssp->srcu_idx & 0x2) / 2; WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); WRITE_ONCE(ssp->srcu_gp_waiting, true); /* srcu_read_unlock() wakes! */ + preempt_enable(); swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx])); + preempt_disable(); // Needed for PREEMPT_AUTO WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */ WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1); + preempt_enable(); /* Invoke the callbacks we removed above. */ while (lh) { @@ -150,8 +159,11 @@ void srcu_drive_gp(struct work_struct *wp) * at interrupt level, but the ->srcu_gp_running checks will * straighten that out. */ + preempt_disable(); // Needed for PREEMPT_AUTO WRITE_ONCE(ssp->srcu_gp_running, false); - if (ULONG_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) + idx = ULONG_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max)); + preempt_enable(); + if (idx) schedule_work(&ssp->srcu_work); } EXPORT_SYMBOL_GPL(srcu_drive_gp); @@ -160,9 +172,12 @@ static void srcu_gp_start_if_needed(struct srcu_struct *ssp) { unsigned long cookie; + preempt_disable(); // Needed for PREEMPT_AUTO cookie = get_state_synchronize_srcu(ssp); - if (ULONG_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie)) + if (ULONG_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie)) { + preempt_enable(); return; + } WRITE_ONCE(ssp->srcu_idx_max, cookie); if (!READ_ONCE(ssp->srcu_gp_running)) { if (likely(srcu_init_done)) @@ -170,6 +185,7 @@ static void srcu_gp_start_if_needed(struct srcu_struct *ssp) else if (list_empty(&ssp->srcu_work.entry)) list_add(&ssp->srcu_work.entry, &srcu_boot_list); } + preempt_enable(); } /* @@ -183,11 +199,13 @@ void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, rhp->func = func; rhp->next = NULL; + preempt_disable(); // Needed for PREEMPT_AUTO local_irq_save(flags); *ssp->srcu_cb_tail = rhp; ssp->srcu_cb_tail = &rhp->next; local_irq_restore(flags); srcu_gp_start_if_needed(ssp); + preempt_enable(); } EXPORT_SYMBOL_GPL(call_srcu); @@ -241,9 +259,12 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_srcu); */ unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp) { - unsigned long ret = get_state_synchronize_srcu(ssp); + unsigned long ret; + preempt_disable(); // Needed for PREEMPT_AUTO + ret = get_state_synchronize_srcu(ssp); srcu_gp_start_if_needed(ssp); + preempt_enable(); return ret; } EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu); -- cgit From bd3520a93a84cd8c3897283e5891a9106fcf5acc Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Sat, 13 Apr 2024 00:25:21 +0000 Subject: iommu: observability of the IOMMU allocations Add NR_IOMMU_PAGES into node_stat_item that counts number of pages that are allocated by the IOMMU subsystem. The allocations can be view per-node via: /sys/devices/system/node/nodeN/vmstat. For example: $ grep iommu /sys/devices/system/node/node*/vmstat /sys/devices/system/node/node0/vmstat:nr_iommu_pages 106025 /sys/devices/system/node/node1/vmstat:nr_iommu_pages 3464 The value is in page-count, therefore, in the above example the iommu allocations amount to ~428M. 
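(Spelling out the arithmetic, assuming 4 KiB pages: (106025 + 3464) pages * 4096 bytes = 448,466,944 bytes, which is roughly 428 MiB, the ~428M quoted above.)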
Signed-off-by: Pasha Tatashin Acked-by: David Rientjes Tested-by: Bagas Sanjaya Link: https://lore.kernel.org/r/20240413002522.1101315-11-pasha.tatashin@soleen.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu-pages.h | 30 ++++++++++++++++++++++++++++++ include/linux/mmzone.h | 3 +++ mm/vmstat.c | 3 +++ 3 files changed, 36 insertions(+) (limited to 'include/linux') diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h index 5a222d0ad25c..1264b0f6b6c3 100644 --- a/drivers/iommu/iommu-pages.h +++ b/drivers/iommu/iommu-pages.h @@ -20,6 +20,30 @@ * large, i.e. multiple gigabytes in size. */ +/** + * __iommu_alloc_account - account for newly allocated page. + * @page: head struct page of the page. + * @order: order of the page + */ +static inline void __iommu_alloc_account(struct page *page, int order) +{ + const long pgcnt = 1l << order; + + mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, pgcnt); +} + +/** + * __iommu_free_account - account a page that is about to be freed. + * @page: head struct page of the page. + * @order: order of the page + */ +static inline void __iommu_free_account(struct page *page, int order) +{ + const long pgcnt = 1l << order; + + mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, -pgcnt); +} + /** * __iommu_alloc_pages - allocate a zeroed page of a given order. * @gfp: buddy allocator flags @@ -35,6 +59,8 @@ static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order) if (unlikely(!page)) return NULL; + __iommu_alloc_account(page, order); + return page; } @@ -48,6 +74,7 @@ static inline void __iommu_free_pages(struct page *page, int order) if (!page) return; + __iommu_free_account(page, order); __free_pages(page, order); } @@ -67,6 +94,8 @@ static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order) if (unlikely(!page)) return NULL; + __iommu_alloc_account(page, order); + return page_address(page); } @@ -147,6 +176,7 @@ static inline void iommu_put_pages_list(struct list_head *page) struct page *p = list_entry(page->prev, struct page, lru); list_del(&p->lru); + __iommu_free_account(p, 0); put_page(p); } } diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index c11b7cde81ef..be17195f6f86 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -206,6 +206,9 @@ enum node_stat_item { #endif NR_PAGETABLE, /* used for pagetables */ NR_SECONDARY_PAGETABLE, /* secondary pagetables, e.g. KVM pagetables */ +#ifdef CONFIG_IOMMU_SUPPORT + NR_IOMMU_PAGES, /* # of pages allocated by IOMMU */ +#endif #ifdef CONFIG_SWAP NR_SWAPCACHE, #endif diff --git a/mm/vmstat.c b/mm/vmstat.c index db79935e4a54..8507c497218b 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -1242,6 +1242,9 @@ const char * const vmstat_text[] = { #endif "nr_page_table_pages", "nr_sec_page_table_pages", +#ifdef CONFIG_IOMMU_SUPPORT + "nr_iommu_pages", +#endif #ifdef CONFIG_SWAP "nr_swapcached", #endif -- cgit From 212c5c078d83d780cf2873ca931df135771e8bb7 Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Sat, 13 Apr 2024 00:25:22 +0000 Subject: iommu: account IOMMU allocated memory In order to be able to limit the amount of memory that is allocated by IOMMU subsystem, the memory must be accounted. Account IOMMU as part of the secondary pagetables as it was discussed at LPC. The value of SecPageTables now contains mmeory allocation by IOMMU and KVM. There is a difference between GFP_ACCOUNT and what NR_IOMMU_PAGES shows. GFP_ACCOUNT is set only where it makes sense to charge to user processes, i.e. 
IOMMU Page Tables, but there more IOMMU shared data that should not really be charged to a specific process. Signed-off-by: Pasha Tatashin Acked-by: David Rientjes Tested-by: Bagas Sanjaya Link: https://lore.kernel.org/r/20240413002522.1101315-12-pasha.tatashin@soleen.com Signed-off-by: Joerg Roedel --- Documentation/admin-guide/cgroup-v2.rst | 2 +- Documentation/filesystems/proc.rst | 4 ++-- drivers/iommu/iommu-pages.h | 2 ++ include/linux/mmzone.h | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index 17e6e9565156..15f80fea8df7 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1432,7 +1432,7 @@ PAGE_SIZE multiple when read back. sec_pagetables Amount of memory allocated for secondary page tables, this currently includes KVM mmu allocations on x86 - and arm64. + and arm64 and IOMMU page tables. percpu (npn) Amount of memory used for storing per-cpu kernel diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index c6a6b9df2104..707e39280a9a 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -1110,8 +1110,8 @@ KernelStack PageTables Memory consumed by userspace page tables SecPageTables - Memory consumed by secondary page tables, this currently - currently includes KVM mmu allocations on x86 and arm64. + Memory consumed by secondary page tables, this currently includes + KVM mmu and IOMMU allocations on x86 and arm64. NFS_Unstable Always zero. Previous counted pages which had been written to the server, but has not been committed to stable storage. diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h index 1264b0f6b6c3..82ebf0033081 100644 --- a/drivers/iommu/iommu-pages.h +++ b/drivers/iommu/iommu-pages.h @@ -30,6 +30,7 @@ static inline void __iommu_alloc_account(struct page *page, int order) const long pgcnt = 1l << order; mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, pgcnt); + mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, pgcnt); } /** @@ -42,6 +43,7 @@ static inline void __iommu_free_account(struct page *page, int order) const long pgcnt = 1l << order; mod_node_page_state(page_pgdat(page), NR_IOMMU_PAGES, -pgcnt); + mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, -pgcnt); } /** diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index be17195f6f86..8f9c9590a42c 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -205,7 +205,7 @@ enum node_stat_item { NR_KERNEL_SCS_KB, /* measured in KiB */ #endif NR_PAGETABLE, /* used for pagetables */ - NR_SECONDARY_PAGETABLE, /* secondary pagetables, e.g. KVM pagetables */ + NR_SECONDARY_PAGETABLE, /* secondary pagetables, KVM & IOMMU */ #ifdef CONFIG_IOMMU_SUPPORT NR_IOMMU_PAGES, /* # of pages allocated by IOMMU */ #endif -- cgit From 57b60ec4b30df188ca31bdd95971a6ef5dfc5094 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 11 Apr 2024 20:22:32 +0300 Subject: gpiolib: acpi: Pass con_id instead of property into acpi_dev_gpio_irq_get_by() Pass the con_id instead of property so that callers won't repeat the GPIO suffixes to try. 
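A hedged driver-side sketch of the new calling convention, mirroring the pca953x/cy8c95x0 hunks that follow (the example_get_irq() wrapper is made up for illustration):

        #include <linux/acpi.h>
        #include <linux/device.h>

        static int example_get_irq(struct device *dev)
        {
                /* Before this change the caller had to spell out "irq-gpios". */
                return acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq", 0);
        }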
Acked-by: Mika Westerberg Signed-off-by: Andy Shevchenko --- drivers/gpio/gpio-pca953x.c | 2 +- drivers/gpio/gpiolib-acpi.c | 11 +++++------ drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c | 2 +- drivers/pinctrl/pinctrl-cy8c95x0.c | 2 +- include/linux/acpi.h | 8 ++++---- 5 files changed, 12 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 00ffa168e405..77a2812f2974 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -144,7 +144,7 @@ static int pca953x_acpi_get_irq(struct device *dev) if (ret) dev_warn(dev, "can't add GPIO ACPI mapping\n"); - ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq-gpios", 0); + ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq", 0); if (ret < 0) return ret; diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 2b3fd43b13fc..909113312a1b 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -1013,7 +1013,7 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode, /** * acpi_dev_gpio_irq_wake_get_by() - Find GpioInt and translate it to Linux IRQ number * @adev: pointer to a ACPI device to get IRQ from - * @name: optional name of GpioInt resource + * @con_id: optional name of GpioInt resource * @index: index of GpioInt resource (starting from %0) * @wake_capable: Set to true if the IRQ is wake capable * @@ -1024,15 +1024,15 @@ struct gpio_desc *acpi_find_gpio(struct fwnode_handle *fwnode, * The function is idempotent, though each time it runs it will configure GPIO * pin direction according to the flags in GpioInt resource. * - * The function takes optional @name parameter. If the resource has a property - * name, then only those will be taken into account. + * The function takes optional @con_id parameter. If the resource has + * a @con_id in a property, then only those will be taken into account. * * The GPIO is considered wake capable if the GpioInt resource specifies * SharedAndWake or ExclusiveAndWake. * * Return: Linux IRQ number (> %0) on success, negative errno on failure. */ -int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, int index, +int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index, bool *wake_capable) { int idx, i; @@ -1043,9 +1043,8 @@ int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, in struct acpi_gpio_info info; struct gpio_desc *desc; - desc = acpi_get_gpiod_by_index(adev, name, i, &info); - /* Ignore -EPROBE_DEFER, it only matters if idx matches */ + desc = __acpi_find_gpio(acpi_fwnode_handle(adev), con_id, i, true, &info); if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) return PTR_ERR(desc); diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index 3d09fa54598f..28a0016f1a55 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -428,7 +428,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev) priv->rx_irq = platform_get_irq(pdev, MLXBF_GIGE_RECEIVE_PKT_INTR_IDX); priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX); - phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy-gpios", 0); + phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy", 0); if (phy_irq < 0) { dev_err(&pdev->dev, "Error getting PHY irq. 
Use polling instead"); phy_irq = PHY_POLL; diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c index 67b5d160c027..981c569bd671 100644 --- a/drivers/pinctrl/pinctrl-cy8c95x0.c +++ b/drivers/pinctrl/pinctrl-cy8c95x0.c @@ -95,7 +95,7 @@ static int cy8c95x0_acpi_get_irq(struct device *dev) if (ret) dev_warn(dev, "can't add GPIO ACPI mapping\n"); - ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq-gpios", 0); + ret = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(dev), "irq", 0); if (ret < 0) return ret; diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 34829f2c517a..35aff121f418 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1233,7 +1233,7 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio); bool acpi_gpio_get_io_resource(struct acpi_resource *ares, struct acpi_resource_gpio **agpio); -int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, int index, +int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index, bool *wake_capable); #else static inline bool acpi_gpio_get_irq_resource(struct acpi_resource *ares, @@ -1246,7 +1246,7 @@ static inline bool acpi_gpio_get_io_resource(struct acpi_resource *ares, { return false; } -static inline int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *name, +static inline int acpi_dev_gpio_irq_wake_get_by(struct acpi_device *adev, const char *con_id, int index, bool *wake_capable) { return -ENXIO; @@ -1259,10 +1259,10 @@ static inline int acpi_dev_gpio_irq_wake_get(struct acpi_device *adev, int index return acpi_dev_gpio_irq_wake_get_by(adev, NULL, index, wake_capable); } -static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *name, +static inline int acpi_dev_gpio_irq_get_by(struct acpi_device *adev, const char *con_id, int index) { - return acpi_dev_gpio_irq_wake_get_by(adev, name, index, NULL); + return acpi_dev_gpio_irq_wake_get_by(adev, con_id, index, NULL); } static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) -- cgit From ba95eb44676d68049490af617049a7bf68946527 Mon Sep 17 00:00:00 2001 From: Gergo Koteles Date: Mon, 8 Apr 2024 19:35:10 +0200 Subject: ACPI: platform-profile: add platform_profile_cycle() Some laptops have a key to switch platform profiles. Add a platform_profile_cycle() function to cycle between the enabled profiles. Signed-off-by: Gergo Koteles Acked-by: Rafael J. 
Wysocki Link: https://lore.kernel.org/r/5a97deddf72aa5e764d881eb39a7ba35c01a903e.1712597199.git.soyer@irl.hu Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede --- drivers/acpi/platform_profile.c | 39 +++++++++++++++++++++++++++++++++++++++ include/linux/platform_profile.h | 1 + 2 files changed, 40 insertions(+) (limited to 'include/linux') diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c index d418462ab791..4a9704730224 100644 --- a/drivers/acpi/platform_profile.c +++ b/drivers/acpi/platform_profile.c @@ -136,6 +136,45 @@ void platform_profile_notify(void) } EXPORT_SYMBOL_GPL(platform_profile_notify); +int platform_profile_cycle(void) +{ + enum platform_profile_option profile; + enum platform_profile_option next; + int err; + + err = mutex_lock_interruptible(&profile_lock); + if (err) + return err; + + if (!cur_profile) { + mutex_unlock(&profile_lock); + return -ENODEV; + } + + err = cur_profile->profile_get(cur_profile, &profile); + if (err) { + mutex_unlock(&profile_lock); + return err; + } + + next = find_next_bit_wrap(cur_profile->choices, PLATFORM_PROFILE_LAST, + profile + 1); + + if (WARN_ON(next == PLATFORM_PROFILE_LAST)) { + mutex_unlock(&profile_lock); + return -EINVAL; + } + + err = cur_profile->profile_set(cur_profile, next); + mutex_unlock(&profile_lock); + + if (!err) + sysfs_notify(acpi_kobj, NULL, "platform_profile"); + + return err; +} +EXPORT_SYMBOL_GPL(platform_profile_cycle); + int platform_profile_register(struct platform_profile_handler *pprof) { int err; diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h index e5cbb6841f3a..f5492ed413f3 100644 --- a/include/linux/platform_profile.h +++ b/include/linux/platform_profile.h @@ -36,6 +36,7 @@ struct platform_profile_handler { int platform_profile_register(struct platform_profile_handler *pprof); int platform_profile_remove(void); +int platform_profile_cycle(void); void platform_profile_notify(void); #endif /*_PLATFORM_PROFILE_H_*/ -- cgit From 36a005b9c66eade68f4235e612d733510a8d52ff Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 18 Mar 2024 22:00:26 +0000 Subject: io_uring/cmd: document some uring_cmd related helpers Add comments warning users that they're only allowed to pass issue_flags that were given from io_uring. Signed-off-by: Pavel Begunkov Reviewed-by: Ming Lei Tested-by: Ming Lei Link: https://lore.kernel.org/r/82ff8a45f2c3eb5f3a04a33f0692e5e4a1320455.1710799188.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- include/linux/io_uring/cmd.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include/linux') diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h index e453a997c060..01b95505538a 100644 --- a/include/linux/io_uring/cmd.h +++ b/include/linux/io_uring/cmd.h @@ -26,12 +26,25 @@ static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe) #if defined(CONFIG_IO_URING) int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, struct iov_iter *iter, void *ioucmd); + +/* + * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd + * and the corresponding io_uring request. + * + * Note: the caller should never hard code @issue_flags and is only allowed + * to pass the mask provided by the core io_uring code. 
+ */ void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2, unsigned issue_flags); + void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, void (*task_work_cb)(struct io_uring_cmd *, unsigned), unsigned flags); +/* + * Note: the caller should never hard code @issue_flags and only use the + * mask provided by the core io_uring code. + */ void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd, unsigned int issue_flags); -- cgit From 1afdb76038e27a3a4dd4cf522f6457868051db84 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 19 Mar 2024 17:10:50 -0600 Subject: nvme/io_uring: use helper for polled completions NVMe is making up issue_flags, which is a no-no in general, and to make matters worse, they are completely the wrong ones. For a pure polled request, which it does check for, we're already inside the ctx->uring_lock when the completions are run off io_do_iopoll(). Hence the correct flag would be '0' rather than IO_URING_F_UNLOCKED. Reviewed-by: Pavel Begunkov Signed-off-by: Jens Axboe --- drivers/nvme/host/ioctl.c | 15 +++++++++++---- include/linux/io_uring/cmd.h | 11 +++++++++++ 2 files changed, 22 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c index 3dfd5ae99ae0..499a8bb7cac7 100644 --- a/drivers/nvme/host/ioctl.c +++ b/drivers/nvme/host/ioctl.c @@ -423,13 +423,20 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req, pdu->result = le64_to_cpu(nvme_req(req)->result.u64); /* - * For iopoll, complete it directly. + * For iopoll, complete it directly. Note that using the uring_cmd + * helper for this is safe only because we check blk_rq_is_poll(). + * As that returns false if we're NOT on a polled queue, then it's + * safe to use the polled completion helper. + * * Otherwise, move the completion to task work. */ - if (blk_rq_is_poll(req)) - nvme_uring_task_cb(ioucmd, IO_URING_F_UNLOCKED); - else + if (blk_rq_is_poll(req)) { + if (pdu->bio) + blk_rq_unmap_user(pdu->bio); + io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status); + } else { io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb); + } return RQ_END_IO_FREE; } diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h index 01b95505538a..447fbfd32215 100644 --- a/include/linux/io_uring/cmd.h +++ b/include/linux/io_uring/cmd.h @@ -69,6 +69,17 @@ static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd, } #endif +/* + * Polled completions must ensure they are coming from a poll queue, and + * hence are completed inside the usual poll handling loops. + */ +static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd, + ssize_t ret, ssize_t res2) +{ + lockdep_assert(in_task()); + io_uring_cmd_done(ioucmd, ret, res2, 0); +} + /* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */ static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd, void (*task_work_cb)(struct io_uring_cmd *, unsigned)) -- cgit From 8e5b3b89ecaf6d9295e561c225b35c574a5e0fe7 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 18 Mar 2024 22:00:30 +0000 Subject: io_uring: remove struct io_tw_state::locked ctx is always locked for task_work now, so get rid of struct io_tw_state::locked. Note I'm stopping one step before removing io_tw_state altogether, which is not empty, because it still serves the purpose of indicating which function is a tw callback and forcing users not to invoke them carelessly out of a wrong context. The removal can always be done later. 
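A hedged sketch of what a task_work callback can now assume (io_uring-internal types; the callback name is made up for illustration):

        static void example_tw_cb(struct io_kiocb *req, struct io_tw_state *ts)
        {
                /* io_tw_lock() reduces to this assertion after the change. */
                lockdep_assert_held(&req->ctx->uring_lock);

                /* Completion may use the deferred (locked) completion path. */
                io_req_task_complete(req, ts);
        }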
Signed-off-by: Pavel Begunkov Tested-by: Ming Lei Link: https://lore.kernel.org/r/e95e1ea116d0bfa54b656076e6a977bc221392a4.1710799188.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 2 -- io_uring/io_uring.c | 31 ++++++++----------------------- io_uring/io_uring.h | 5 +---- io_uring/poll.c | 2 +- io_uring/rw.c | 6 ++---- io_uring/timeout.c | 8 ++------ io_uring/uring_cmd.c | 8 ++------ io_uring/waitid.c | 2 +- 8 files changed, 17 insertions(+), 47 deletions(-) (limited to 'include/linux') diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index ac333ea81d31..90e5af401800 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -438,8 +438,6 @@ struct io_ring_ctx { }; struct io_tw_state { - /* ->uring_lock is taken, callbacks can use io_tw_lock to lock it */ - bool locked; }; enum { diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 13fb5958454f..ecb59bb2418b 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -247,14 +247,12 @@ static __cold void io_fallback_req_func(struct work_struct *work) fallback_work.work); struct llist_node *node = llist_del_all(&ctx->fallback_llist); struct io_kiocb *req, *tmp; - struct io_tw_state ts = { .locked = true, }; + struct io_tw_state ts = {}; percpu_ref_get(&ctx->refs); mutex_lock(&ctx->uring_lock); llist_for_each_entry_safe(req, tmp, node, io_task_work.node) req->io_task_work.func(req, &ts); - if (WARN_ON_ONCE(!ts.locked)) - return; io_submit_flush_completions(ctx); mutex_unlock(&ctx->uring_lock); percpu_ref_put(&ctx->refs); @@ -1157,11 +1155,9 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts) return; if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); - if (ts->locked) { - io_submit_flush_completions(ctx); - mutex_unlock(&ctx->uring_lock); - ts->locked = false; - } + + io_submit_flush_completions(ctx); + mutex_unlock(&ctx->uring_lock); percpu_ref_put(&ctx->refs); } @@ -1185,8 +1181,6 @@ struct llist_node *io_handle_tw_list(struct llist_node *node, if (req->ctx != ctx) { ctx_flush_and_put(ctx, &ts); ctx = req->ctx; - - ts.locked = true; mutex_lock(&ctx->uring_lock); percpu_ref_get(&ctx->refs); } @@ -1459,22 +1453,16 @@ again: static inline int io_run_local_work_locked(struct io_ring_ctx *ctx, int min_events) { - struct io_tw_state ts = { .locked = true, }; - int ret; + struct io_tw_state ts = {}; if (llist_empty(&ctx->work_llist)) return 0; - - ret = __io_run_local_work(ctx, &ts, min_events); - /* shouldn't happen! 
*/ - if (WARN_ON_ONCE(!ts.locked)) - mutex_lock(&ctx->uring_lock); - return ret; + return __io_run_local_work(ctx, &ts, min_events); } static int io_run_local_work(struct io_ring_ctx *ctx, int min_events) { - struct io_tw_state ts = { .locked = true }; + struct io_tw_state ts = {}; int ret; mutex_lock(&ctx->uring_lock); @@ -1702,10 +1690,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, long min) void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts) { - if (ts->locked) - io_req_complete_defer(req); - else - io_req_complete_post(req, IO_URING_F_UNLOCKED); + io_req_complete_defer(req); } /* diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h index 0861d49e83de..7f921daae9c3 100644 --- a/io_uring/io_uring.h +++ b/io_uring/io_uring.h @@ -351,10 +351,7 @@ static inline bool io_task_work_pending(struct io_ring_ctx *ctx) static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts) { - if (!ts->locked) { - mutex_lock(&ctx->uring_lock); - ts->locked = true; - } + lockdep_assert_held(&ctx->uring_lock); } /* diff --git a/io_uring/poll.c b/io_uring/poll.c index 6db1dcb2c797..8901dd118e50 100644 --- a/io_uring/poll.c +++ b/io_uring/poll.c @@ -322,7 +322,7 @@ static int io_poll_check_events(struct io_kiocb *req, struct io_tw_state *ts) __poll_t mask = mangle_poll(req->cqe.res & req->apoll_events); - if (!io_fill_cqe_req_aux(req, ts->locked, mask, + if (!io_fill_cqe_req_aux(req, true, mask, IORING_CQE_F_MORE)) { io_req_set_res(req, mask, 0); return IOU_POLL_REMOVE_POLL_USE_RES; diff --git a/io_uring/rw.c b/io_uring/rw.c index 0afe4f9e0e3f..674581bda8d7 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -305,11 +305,9 @@ void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts) io_req_io_end(req); - if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) { - unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED; + if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) + req->cqe.flags |= io_put_kbuf(req, 0); - req->cqe.flags |= io_put_kbuf(req, issue_flags); - } io_req_task_complete(req, ts); } diff --git a/io_uring/timeout.c b/io_uring/timeout.c index 7fd7dbb211d6..0a48e6acd0b2 100644 --- a/io_uring/timeout.c +++ b/io_uring/timeout.c @@ -72,10 +72,7 @@ static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts) struct io_ring_ctx *ctx = req->ctx; if (!io_timeout_finish(timeout, data)) { - bool filled; - filled = io_fill_cqe_req_aux(req, ts->locked, -ETIME, - IORING_CQE_F_MORE); - if (filled) { + if (io_fill_cqe_req_aux(req, true, -ETIME, IORING_CQE_F_MORE)) { /* re-arm timer */ spin_lock_irq(&ctx->timeout_lock); list_add(&timeout->list, ctx->timeout_list.prev); @@ -301,7 +298,6 @@ int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd) static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts) { - unsigned issue_flags = ts->locked ? 
0 : IO_URING_F_UNLOCKED; struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout); struct io_kiocb *prev = timeout->prev; int ret = -ENOENT; @@ -313,7 +309,7 @@ static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *t .data = prev->cqe.user_data, }; - ret = io_try_cancel(req->task->io_uring, &cd, issue_flags); + ret = io_try_cancel(req->task->io_uring, &cd, 0); } io_req_set_res(req, ret ?: -ETIME, 0); io_req_task_complete(req, ts); diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c index 759f919b14a9..4614ce734fee 100644 --- a/io_uring/uring_cmd.c +++ b/io_uring/uring_cmd.c @@ -87,13 +87,9 @@ EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable); static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts) { struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd); - unsigned issue_flags = IO_URING_F_UNLOCKED; - /* locked task_work executor checks the deffered list completion */ - if (ts->locked) - issue_flags = IO_URING_F_COMPLETE_DEFER; - - ioucmd->task_work_cb(ioucmd, issue_flags); + /* task_work executor checks the deffered list completion */ + ioucmd->task_work_cb(ioucmd, IO_URING_F_COMPLETE_DEFER); } void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, diff --git a/io_uring/waitid.c b/io_uring/waitid.c index 77d340666cb9..6362ec20abc0 100644 --- a/io_uring/waitid.c +++ b/io_uring/waitid.c @@ -118,7 +118,7 @@ static int io_waitid_finish(struct io_kiocb *req, int ret) static void io_waitid_complete(struct io_kiocb *req, int ret) { struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid); - struct io_tw_state ts = { .locked = true }; + struct io_tw_state ts = {}; /* anyone completing better be holding a reference */ WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK)); -- cgit From 902ce82c2aa130bea5e3feca2d4ae62781865da7 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Mon, 18 Mar 2024 22:00:32 +0000 Subject: io_uring: get rid of intermediate aux cqe caches io_post_aux_cqe(), which is used for multishot requests, delays completions by putting CQEs into a temporary array for the purpose completion lock/flush batching. DEFER_TASKRUN doesn't need any locking, so for it we can put completions directly into the CQ and defer post completion handling with a flag. That leaves !DEFER_TASKRUN, which is not that interesting / hot for multishot requests, so have conditional locking with deferred flush for them. 
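A condensed, hedged restatement of the new multishot posting path (not a new interface, it mirrors io_req_post_cqe() in the hunks below; the function name is made up):

        static bool example_post_mshot_cqe(struct io_kiocb *req, s32 res, u32 cflags)
        {
                struct io_ring_ctx *ctx = req->ctx;
                bool posted;

                __io_cq_lock(ctx);              /* no-op for lockless_cq rings */
                posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
                ctx->submit_state.cq_flush = true;      /* flushed later by __io_submit_flush_completions() */
                __io_cq_unlock_post(ctx);
                return posted;
        }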
Signed-off-by: Pavel Begunkov Tested-by: Ming Lei Link: https://lore.kernel.org/r/b1d05a81fd27aaa2a07f9860af13059e7ad7a890.1710799188.git.asml.silence@gmail.com Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 3 +- io_uring/io_uring.c | 62 +++++++++--------------------------------- io_uring/io_uring.h | 2 +- 3 files changed, 15 insertions(+), 52 deletions(-) (limited to 'include/linux') diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 90e5af401800..75b46119d4c8 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -205,6 +205,7 @@ struct io_submit_state { bool plug_started; bool need_plug; + bool cq_flush; unsigned short submit_nr; unsigned int cqes_count; struct blk_plug plug; @@ -341,8 +342,6 @@ struct io_ring_ctx { unsigned cq_last_tm_flush; } ____cacheline_aligned_in_smp; - struct io_uring_cqe completion_cqes[16]; - spinlock_t completion_lock; /* IRQ completion list, under ->completion_lock */ diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 9ae5e7db7c6a..77e26b037fdd 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -630,6 +630,12 @@ static inline void __io_cq_lock(struct io_ring_ctx *ctx) spin_lock(&ctx->completion_lock); } +static inline void __io_cq_unlock(struct io_ring_ctx *ctx) +{ + if (!ctx->lockless_cq) + spin_unlock(&ctx->completion_lock); +} + static inline void io_cq_lock(struct io_ring_ctx *ctx) __acquires(ctx->completion_lock) { @@ -882,31 +888,6 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, return false; } -static void __io_flush_post_cqes(struct io_ring_ctx *ctx) - __must_hold(&ctx->uring_lock) -{ - struct io_submit_state *state = &ctx->submit_state; - unsigned int i; - - lockdep_assert_held(&ctx->uring_lock); - for (i = 0; i < state->cqes_count; i++) { - struct io_uring_cqe *cqe = &ctx->completion_cqes[i]; - - if (!io_fill_cqe_aux(ctx, cqe->user_data, cqe->res, cqe->flags)) { - if (ctx->lockless_cq) { - spin_lock(&ctx->completion_lock); - io_cqring_event_overflow(ctx, cqe->user_data, - cqe->res, cqe->flags, 0, 0); - spin_unlock(&ctx->completion_lock); - } else { - io_cqring_event_overflow(ctx, cqe->user_data, - cqe->res, cqe->flags, 0, 0); - } - } - } - state->cqes_count = 0; -} - bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags) { bool filled; @@ -927,31 +908,16 @@ bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags) { struct io_ring_ctx *ctx = req->ctx; - u64 user_data = req->cqe.user_data; - struct io_uring_cqe *cqe; + bool posted; lockdep_assert(!io_wq_current_is_worker()); lockdep_assert_held(&ctx->uring_lock); - if (ctx->submit_state.cqes_count == ARRAY_SIZE(ctx->completion_cqes)) { - __io_cq_lock(ctx); - __io_flush_post_cqes(ctx); - /* no need to flush - flush is deferred */ - __io_cq_unlock_post(ctx); - } - - /* For defered completions this is not as strict as it is otherwise, - * however it's main job is to prevent unbounded posted completions, - * and in that it works just as well. 
- */ - if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) - return false; - - cqe = &ctx->completion_cqes[ctx->submit_state.cqes_count++]; - cqe->user_data = user_data; - cqe->res = res; - cqe->flags = cflags; - return true; + __io_cq_lock(ctx); + posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags); + ctx->submit_state.cq_flush = true; + __io_cq_unlock_post(ctx); + return posted; } static void __io_req_complete_post(struct io_kiocb *req, unsigned issue_flags) @@ -1545,9 +1511,6 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx) struct io_wq_work_node *node; __io_cq_lock(ctx); - /* must come first to preserve CQE ordering in failure cases */ - if (state->cqes_count) - __io_flush_post_cqes(ctx); __wq_list_for_each(node, &state->compl_reqs) { struct io_kiocb *req = container_of(node, struct io_kiocb, comp_list); @@ -1569,6 +1532,7 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx) io_free_batch_list(ctx, state->compl_reqs.first); INIT_WQ_LIST(&state->compl_reqs); } + ctx->submit_state.cq_flush = false; } static unsigned io_cqring_events(struct io_ring_ctx *ctx) diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h index 460290e1bdec..5119265a11c2 100644 --- a/io_uring/io_uring.h +++ b/io_uring/io_uring.h @@ -156,7 +156,7 @@ static inline void io_req_task_work_add(struct io_kiocb *req) static inline void io_submit_flush_completions(struct io_ring_ctx *ctx) { if (!wq_list_empty(&ctx->submit_state.compl_reqs) || - ctx->submit_state.cqes_count) + ctx->submit_state.cq_flush) __io_submit_flush_completions(ctx); } -- cgit From a9165b83c1937eeed1f0c731468216d6371d647f Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Mar 2024 16:13:01 -0600 Subject: io_uring/rw: always setup io_async_rw for read/write requests read/write requests try to put everything on the stack, and then alloc and copy if a retry is needed. This necessitates a bunch of nasty code that deals with intermediate state. Get rid of this, and have the prep side setup everything that is needed upfront, which greatly simplifies the opcode handlers. This includes adding an alloc cache for io_async_rw, to make it cheap to handle. In terms of cost, this should be basically free and transparent. For the worst case of {READ,WRITE}_FIXED which didn't need it before, performance is unaffected in the normal peak workload that is being used to test that. Still runs at 122M IOPS. 
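A hedged restatement of the recycling step that makes the per-ring io_async_rw cache cheap (it mirrors io_rw_recycle() in the hunks below; the function name is made up):

        static void example_rw_recycle(struct io_kiocb *req, unsigned int issue_flags)
        {
                struct io_async_rw *rw = req->async_data;

                if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
                        io_rw_iovec_free(rw);           /* unlocked: cannot touch the cache */
                        return;
                }
                if (io_alloc_cache_put(&req->ctx->rw_cache, &rw->cache)) {
                        req->async_data = NULL;         /* ownership moved to the cache */
                        req->flags &= ~REQ_F_ASYNC_DATA;
                }
        }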
Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 1 + io_uring/io_uring.c | 3 + io_uring/opdef.c | 15 +- io_uring/rw.c | 538 +++++++++++++++++++---------------------- io_uring/rw.h | 19 +- 5 files changed, 278 insertions(+), 298 deletions(-) (limited to 'include/linux') diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 75b46119d4c8..60d7e35fc303 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -300,6 +300,7 @@ struct io_ring_ctx { struct io_hash_table cancel_table_locked; struct io_alloc_cache apoll_cache; struct io_alloc_cache netmsg_cache; + struct io_alloc_cache rw_cache; /* * Any cancelable uring_cmd is added to this list in diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index e6e7794d497f..b0554d122931 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -311,6 +311,8 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) sizeof(struct async_poll)); io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX, sizeof(struct io_async_msghdr)); + io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX, + sizeof(struct io_async_rw)); io_futex_cache_init(ctx); init_completion(&ctx->ref_comp); xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1); @@ -2823,6 +2825,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) io_eventfd_unregister(ctx); io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free); io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free); + io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free); io_futex_cache_free(ctx); io_destroy_buffers(ctx); mutex_unlock(&ctx->uring_lock); diff --git a/io_uring/opdef.c b/io_uring/opdef.c index dd4a1e1425e1..fcae75a08f2c 100644 --- a/io_uring/opdef.c +++ b/io_uring/opdef.c @@ -67,7 +67,7 @@ const struct io_issue_def io_issue_defs[] = { .iopoll = 1, .iopoll_queue = 1, .vectored = 1, - .prep = io_prep_rwv, + .prep = io_prep_readv, .issue = io_read, }, [IORING_OP_WRITEV] = { @@ -81,7 +81,7 @@ const struct io_issue_def io_issue_defs[] = { .iopoll = 1, .iopoll_queue = 1, .vectored = 1, - .prep = io_prep_rwv, + .prep = io_prep_writev, .issue = io_write, }, [IORING_OP_FSYNC] = { @@ -99,7 +99,7 @@ const struct io_issue_def io_issue_defs[] = { .ioprio = 1, .iopoll = 1, .iopoll_queue = 1, - .prep = io_prep_rw_fixed, + .prep = io_prep_read_fixed, .issue = io_read, }, [IORING_OP_WRITE_FIXED] = { @@ -112,7 +112,7 @@ const struct io_issue_def io_issue_defs[] = { .ioprio = 1, .iopoll = 1, .iopoll_queue = 1, - .prep = io_prep_rw_fixed, + .prep = io_prep_write_fixed, .issue = io_write, }, [IORING_OP_POLL_ADD] = { @@ -239,7 +239,7 @@ const struct io_issue_def io_issue_defs[] = { .ioprio = 1, .iopoll = 1, .iopoll_queue = 1, - .prep = io_prep_rw, + .prep = io_prep_read, .issue = io_read, }, [IORING_OP_WRITE] = { @@ -252,7 +252,7 @@ const struct io_issue_def io_issue_defs[] = { .ioprio = 1, .iopoll = 1, .iopoll_queue = 1, - .prep = io_prep_rw, + .prep = io_prep_write, .issue = io_write, }, [IORING_OP_FADVISE] = { @@ -490,14 +490,12 @@ const struct io_cold_def io_cold_defs[] = { [IORING_OP_READV] = { .async_size = sizeof(struct io_async_rw), .name = "READV", - .prep_async = io_readv_prep_async, .cleanup = io_readv_writev_cleanup, .fail = io_rw_fail, }, [IORING_OP_WRITEV] = { .async_size = sizeof(struct io_async_rw), .name = "WRITEV", - .prep_async = io_writev_prep_async, .cleanup = io_readv_writev_cleanup, .fail = io_rw_fail, }, @@ -699,6 +697,7 @@ const struct io_cold_def io_cold_defs[] = { #endif }, 
[IORING_OP_READ_MULTISHOT] = { + .async_size = sizeof(struct io_async_rw), .name = "READ_MULTISHOT", }, [IORING_OP_WAITID] = { diff --git a/io_uring/rw.c b/io_uring/rw.c index b5165d1c9686..2625e177c9bc 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -75,7 +75,153 @@ static int io_iov_buffer_select_prep(struct io_kiocb *req) return 0; } -int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int __io_import_iovec(int ddir, struct io_kiocb *req, + struct io_async_rw *io, + unsigned int issue_flags) +{ + const struct io_issue_def *def = &io_issue_defs[req->opcode]; + struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); + void __user *buf; + size_t sqe_len; + + buf = u64_to_user_ptr(rw->addr); + sqe_len = rw->len; + + if (!def->vectored || req->flags & REQ_F_BUFFER_SELECT) { + if (io_do_buffer_select(req)) { + buf = io_buffer_select(req, &sqe_len, issue_flags); + if (!buf) + return -ENOBUFS; + rw->addr = (unsigned long) buf; + rw->len = sqe_len; + } + + return import_ubuf(ddir, buf, sqe_len, &io->s.iter); + } + + io->free_iovec = io->s.fast_iov; + return __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &io->free_iovec, + &io->s.iter, req->ctx->compat); +} + +static inline int io_import_iovec(int rw, struct io_kiocb *req, + struct io_async_rw *io, + unsigned int issue_flags) +{ + int ret; + + ret = __io_import_iovec(rw, req, io, issue_flags); + if (unlikely(ret < 0)) + return ret; + + iov_iter_save_state(&io->s.iter, &io->s.iter_state); + return 0; +} + +static void io_rw_iovec_free(struct io_async_rw *rw) +{ + if (rw->free_iovec) { + kfree(rw->free_iovec); + rw->free_iovec = NULL; + } +} + +static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_async_rw *rw = req->async_data; + + if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) { + io_rw_iovec_free(rw); + return; + } + if (io_alloc_cache_put(&req->ctx->rw_cache, &rw->cache)) { + req->async_data = NULL; + req->flags &= ~REQ_F_ASYNC_DATA; + } +} + +static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags) +{ + /* + * Disable quick recycling for anything that's gone through io-wq. + * In theory, this should be fine to cleanup. However, some read or + * write iter handling touches the iovec AFTER having called into the + * handler, eg to reexpand or revert. This means we can have: + * + * task io-wq + * issue + * punt to io-wq + * issue + * blkdev_write_iter() + * ->ki_complete() + * io_complete_rw() + * queue tw complete + * run tw + * req_rw_cleanup + * iov_iter_count() <- look at iov_iter again + * + * which can lead to a UAF. This is only possible for io-wq offload + * as the cleanup can run in parallel. As io-wq is not the fast path, + * just leave cleanup to the end. + * + * This is really a bug in the core code that does this, any issue + * path should assume that a successful (or -EIOCBQUEUED) return can + * mean that the underlying data can be gone at any time. But that + * should be fixed seperately, and then this check could be killed. 
+ */ + if (!(req->flags & REQ_F_REFCOUNT)) { + req->flags &= ~REQ_F_NEED_CLEANUP; + io_rw_recycle(req, issue_flags); + } +} + +static int io_rw_alloc_async(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + struct io_cache_entry *entry; + struct io_async_rw *rw; + + entry = io_alloc_cache_get(&ctx->rw_cache); + if (entry) { + rw = container_of(entry, struct io_async_rw, cache); + req->flags |= REQ_F_ASYNC_DATA; + req->async_data = rw; + goto done; + } + + if (!io_alloc_async_data(req)) { + rw = req->async_data; +done: + rw->free_iovec = NULL; + rw->bytes_done = 0; + return 0; + } + + return -ENOMEM; +} + +static int io_prep_rw_setup(struct io_kiocb *req, int ddir, bool do_import) +{ + struct io_async_rw *rw; + int ret; + + if (io_rw_alloc_async(req)) + return -ENOMEM; + + if (!do_import || io_do_buffer_select(req)) + return 0; + + rw = req->async_data; + ret = io_import_iovec(ddir, req, rw, 0); + if (unlikely(ret < 0)) + return ret; + + iov_iter_save_state(&rw->s.iter, &rw->s.iter_state); + return 0; +} + +static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, + int ddir, bool do_import) { struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); unsigned ioprio; @@ -100,34 +246,58 @@ int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) rw->addr = READ_ONCE(sqe->addr); rw->len = READ_ONCE(sqe->len); rw->flags = READ_ONCE(sqe->rw_flags); - return 0; + return io_prep_rw_setup(req, ddir, do_import); } -int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe) +int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe) { + return io_prep_rw(req, sqe, ITER_DEST, true); +} + +int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + return io_prep_rw(req, sqe, ITER_SOURCE, true); +} + +static int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe, + int ddir) +{ + const bool do_import = !(req->flags & REQ_F_BUFFER_SELECT); int ret; - ret = io_prep_rw(req, sqe); + ret = io_prep_rw(req, sqe, ddir, do_import); if (unlikely(ret)) return ret; + if (do_import) + return 0; /* * Have to do this validation here, as this is in io_read() rw->len * might have chanaged due to buffer selection */ - if (req->flags & REQ_F_BUFFER_SELECT) - return io_iov_buffer_select_prep(req); + return io_iov_buffer_select_prep(req); +} - return 0; +int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + return io_prep_rwv(req, sqe, ITER_DEST); +} + +int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + return io_prep_rwv(req, sqe, ITER_SOURCE); } -int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe, + int ddir) { + struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); struct io_ring_ctx *ctx = req->ctx; + struct io_async_rw *io; u16 index; int ret; - ret = io_prep_rw(req, sqe); + ret = io_prep_rw(req, sqe, ddir, false); if (unlikely(ret)) return ret; @@ -136,7 +306,21 @@ int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe) index = array_index_nospec(req->buf_index, ctx->nr_user_bufs); req->imu = ctx->user_bufs[index]; io_req_set_rsrc_node(req, ctx, 0); - return 0; + + io = req->async_data; + ret = io_import_fixed(ddir, &io->s.iter, req->imu, rw->addr, rw->len); + iov_iter_save_state(&io->s.iter, &io->s.iter_state); + return ret; +} + +int io_prep_read_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + return io_prep_rw_fixed(req, sqe, 
ITER_DEST); +} + +int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe) +{ + return io_prep_rw_fixed(req, sqe, ITER_SOURCE); } /* @@ -152,7 +336,7 @@ int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (!(req->flags & REQ_F_BUFFER_SELECT)) return -EINVAL; - ret = io_prep_rw(req, sqe); + ret = io_prep_rw(req, sqe, ITER_DEST, false); if (unlikely(ret)) return ret; @@ -165,9 +349,7 @@ int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) void io_readv_writev_cleanup(struct io_kiocb *req) { - struct io_async_rw *io = req->async_data; - - kfree(io->free_iovec); + io_rw_iovec_free(req->async_data); } static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req) @@ -188,14 +370,11 @@ static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req) } #ifdef CONFIG_BLOCK -static bool io_resubmit_prep(struct io_kiocb *req) +static void io_resubmit_prep(struct io_kiocb *req) { struct io_async_rw *io = req->async_data; - if (!req_has_async_data(req)) - return !io_req_prep_async(req); iov_iter_restore(&io->s.iter, &io->s.iter_state); - return true; } static bool io_rw_should_reissue(struct io_kiocb *req) @@ -224,9 +403,8 @@ static bool io_rw_should_reissue(struct io_kiocb *req) return true; } #else -static bool io_resubmit_prep(struct io_kiocb *req) +static void io_resubmit_prep(struct io_kiocb *req) { - return false; } static bool io_rw_should_reissue(struct io_kiocb *req) { @@ -308,6 +486,7 @@ void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts) if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) req->cqe.flags |= io_put_kbuf(req, 0); + io_req_rw_cleanup(req, 0); io_req_task_complete(req, ts); } @@ -388,6 +567,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret, io_req_io_end(req); io_req_set_res(req, final_ret, io_put_kbuf(req, issue_flags)); + io_req_rw_cleanup(req, issue_flags); return IOU_OK; } } else { @@ -396,71 +576,12 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret, if (req->flags & REQ_F_REISSUE) { req->flags &= ~REQ_F_REISSUE; - if (io_resubmit_prep(req)) - return -EAGAIN; - else - io_req_task_queue_fail(req, final_ret); + io_resubmit_prep(req); + return -EAGAIN; } return IOU_ISSUE_SKIP_COMPLETE; } -static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req, - struct io_rw_state *s, - unsigned int issue_flags) -{ - struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); - struct iov_iter *iter = &s->iter; - u8 opcode = req->opcode; - struct iovec *iovec; - void __user *buf; - size_t sqe_len; - ssize_t ret; - - if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) { - ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len); - if (ret) - return ERR_PTR(ret); - return NULL; - } - - buf = u64_to_user_ptr(rw->addr); - sqe_len = rw->len; - - if (!io_issue_defs[opcode].vectored || req->flags & REQ_F_BUFFER_SELECT) { - if (io_do_buffer_select(req)) { - buf = io_buffer_select(req, &sqe_len, issue_flags); - if (!buf) - return ERR_PTR(-ENOBUFS); - rw->addr = (unsigned long) buf; - rw->len = sqe_len; - } - - ret = import_ubuf(ddir, buf, sqe_len, iter); - if (ret) - return ERR_PTR(ret); - return NULL; - } - - iovec = s->fast_iov; - ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter, - req->ctx->compat); - if (unlikely(ret < 0)) - return ERR_PTR(ret); - return iovec; -} - -static inline int io_import_iovec(int rw, struct io_kiocb *req, - struct iovec **iovec, struct io_rw_state *s, - unsigned int issue_flags) -{ - *iovec = 
__io_import_iovec(rw, req, s, issue_flags); - if (IS_ERR(*iovec)) - return PTR_ERR(*iovec); - - iov_iter_save_state(&s->iter, &s->iter_state); - return 0; -} - static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb) { return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos; @@ -532,89 +653,6 @@ static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter) return ret; } -static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, - const struct iovec *fast_iov, struct iov_iter *iter) -{ - struct io_async_rw *io = req->async_data; - - memcpy(&io->s.iter, iter, sizeof(*iter)); - io->free_iovec = iovec; - io->bytes_done = 0; - /* can only be fixed buffers, no need to do anything */ - if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter)) - return; - if (!iovec) { - unsigned iov_off = 0; - - io->s.iter.__iov = io->s.fast_iov; - if (iter->__iov != fast_iov) { - iov_off = iter_iov(iter) - fast_iov; - io->s.iter.__iov += iov_off; - } - if (io->s.fast_iov != fast_iov) - memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off, - sizeof(struct iovec) * iter->nr_segs); - } else { - req->flags |= REQ_F_NEED_CLEANUP; - } -} - -static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, - struct io_rw_state *s, bool force) -{ - if (!force && !io_cold_defs[req->opcode].prep_async) - return 0; - /* opcode type doesn't need async data */ - if (!io_cold_defs[req->opcode].async_size) - return 0; - if (!req_has_async_data(req)) { - struct io_async_rw *iorw; - - if (io_alloc_async_data(req)) { - kfree(iovec); - return -ENOMEM; - } - - io_req_map_rw(req, iovec, s->fast_iov, &s->iter); - iorw = req->async_data; - /* we've copied and mapped the iter, ensure state is saved */ - iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state); - } - return 0; -} - -static inline int io_rw_prep_async(struct io_kiocb *req, int rw) -{ - struct io_async_rw *iorw = req->async_data; - struct iovec *iov; - int ret; - - iorw->bytes_done = 0; - iorw->free_iovec = NULL; - - /* submission path, ->uring_lock should already be taken */ - ret = io_import_iovec(rw, req, &iov, &iorw->s, 0); - if (unlikely(ret < 0)) - return ret; - - if (iov) { - iorw->free_iovec = iov; - req->flags |= REQ_F_NEED_CLEANUP; - } - - return 0; -} - -int io_readv_prep_async(struct io_kiocb *req) -{ - return io_rw_prep_async(req, ITER_DEST); -} - -int io_writev_prep_async(struct io_kiocb *req) -{ - return io_rw_prep_async(req, ITER_SOURCE); -} - /* * This is our waitqueue callback handler, registered through __folio_lock_async() * when we initially tried to do the IO with the iocb armed our waitqueue. 
@@ -754,54 +792,28 @@ static int io_rw_init_file(struct io_kiocb *req, fmode_t mode) static int __io_read(struct io_kiocb *req, unsigned int issue_flags) { + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); - struct io_rw_state __s, *s = &__s; - struct iovec *iovec; + struct io_async_rw *io = req->async_data; struct kiocb *kiocb = &rw->kiocb; - bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; - struct io_async_rw *io; - ssize_t ret, ret2; + ssize_t ret; loff_t *ppos; - if (!req_has_async_data(req)) { - ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags); + if (io_do_buffer_select(req)) { + ret = io_import_iovec(ITER_DEST, req, io, issue_flags); if (unlikely(ret < 0)) return ret; - } else { - io = req->async_data; - s = &io->s; - - /* - * Safe and required to re-import if we're using provided - * buffers, as we dropped the selected one before retry. - */ - if (io_do_buffer_select(req)) { - ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags); - if (unlikely(ret < 0)) - return ret; - } - - /* - * We come here from an earlier attempt, restore our state to - * match in case it doesn't. It's cheap enough that we don't - * need to make this conditional. - */ - iov_iter_restore(&s->iter, &s->iter_state); - iovec = NULL; } + ret = io_rw_init_file(req, FMODE_READ); - if (unlikely(ret)) { - kfree(iovec); + if (unlikely(ret)) return ret; - } - req->cqe.res = iov_iter_count(&s->iter); + req->cqe.res = iov_iter_count(&io->s.iter); if (force_nonblock) { /* If the file doesn't support async, just async punt */ - if (unlikely(!io_file_supports_nowait(req))) { - ret = io_setup_async_rw(req, iovec, s, true); - return ret ?: -EAGAIN; - } + if (unlikely(!io_file_supports_nowait(req))) + return -EAGAIN; kiocb->ki_flags |= IOCB_NOWAIT; } else { /* Ensure we clear previously set non-block flag */ @@ -811,20 +823,15 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags) ppos = io_kiocb_update_pos(req); ret = rw_verify_area(READ, req->file, ppos, req->cqe.res); - if (unlikely(ret)) { - kfree(iovec); + if (unlikely(ret)) return ret; - } - ret = io_iter_do_read(rw, &s->iter); + ret = io_iter_do_read(rw, &io->s.iter); if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { req->flags &= ~REQ_F_REISSUE; - /* - * If we can poll, just do that. For a vectored read, we'll - * need to copy state first. - */ - if (io_file_can_poll(req) && !io_issue_defs[req->opcode].vectored) + /* If we can poll, just do that. */ + if (io_file_can_poll(req)) return -EAGAIN; /* IOPOLL retry should happen for io-wq threads */ if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) @@ -834,8 +841,6 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags) goto done; ret = 0; } else if (ret == -EIOCBQUEUED) { - if (iovec) - kfree(iovec); return IOU_ISSUE_SKIP_COMPLETE; } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock || (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) { @@ -848,21 +853,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags) * untouched in case of error. Restore it and we'll advance it * manually if we need to. */ - iov_iter_restore(&s->iter, &s->iter_state); - - ret2 = io_setup_async_rw(req, iovec, s, true); - iovec = NULL; - if (ret2) { - ret = ret > 0 ? ret : ret2; - goto done; - } - - io = req->async_data; - s = &io->s; - /* - * Now use our persistent iterator and state, if we aren't already. - * We've restored and mapped the iter to match. 
- */ + iov_iter_restore(&io->s.iter, &io->s.iter_state); do { /* @@ -870,11 +861,11 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags) * above or inside this loop. Advance the iter by the bytes * that were consumed. */ - iov_iter_advance(&s->iter, ret); - if (!iov_iter_count(&s->iter)) + iov_iter_advance(&io->s.iter, ret); + if (!iov_iter_count(&io->s.iter)) break; io->bytes_done += ret; - iov_iter_save_state(&s->iter, &s->iter_state); + iov_iter_save_state(&io->s.iter, &io->s.iter_state); /* if we can retry, do so with the callbacks armed */ if (!io_rw_should_retry(req)) { @@ -882,24 +873,22 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags) return -EAGAIN; } - req->cqe.res = iov_iter_count(&s->iter); + req->cqe.res = iov_iter_count(&io->s.iter); /* * Now retry read with the IOCB_WAITQ parts set in the iocb. If * we get -EIOCBQUEUED, then we'll get a notification when the * desired page gets unlocked. We can also get a partial read * here, and if we do, then just retry at the new offset. */ - ret = io_iter_do_read(rw, &s->iter); + ret = io_iter_do_read(rw, &io->s.iter); if (ret == -EIOCBQUEUED) return IOU_ISSUE_SKIP_COMPLETE; /* we got some bytes, but not all. retry. */ kiocb->ki_flags &= ~IOCB_WAITQ; - iov_iter_restore(&s->iter, &s->iter_state); + iov_iter_restore(&io->s.iter, &io->s.iter_state); } while (ret > 0); done: /* it's faster to check here then delegate to kfree */ - if (iovec) - kfree(iovec); return ret; } @@ -908,8 +897,9 @@ int io_read(struct io_kiocb *req, unsigned int issue_flags) int ret; ret = __io_read(req, issue_flags); - if (ret >= 0) - return kiocb_done(req, ret, issue_flags); + if (ret >= 0) { + ret = kiocb_done(req, ret, issue_flags); + } return ret; } @@ -981,6 +971,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags) * multishot request, hitting overflow will terminate it. */ io_req_set_res(req, ret, cflags); + io_req_rw_cleanup(req, issue_flags); if (issue_flags & IO_URING_F_MULTISHOT) return IOU_STOP_MULTISHOT; return IOU_OK; @@ -988,42 +979,28 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags) int io_write(struct io_kiocb *req, unsigned int issue_flags) { + bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); - struct io_rw_state __s, *s = &__s; - struct iovec *iovec; + struct io_async_rw *io = req->async_data; struct kiocb *kiocb = &rw->kiocb; - bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; ssize_t ret, ret2; loff_t *ppos; - if (!req_has_async_data(req)) { - ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags); - if (unlikely(ret < 0)) - return ret; - } else { - struct io_async_rw *io = req->async_data; - - s = &io->s; - iov_iter_restore(&s->iter, &s->iter_state); - iovec = NULL; - } ret = io_rw_init_file(req, FMODE_WRITE); - if (unlikely(ret)) { - kfree(iovec); + if (unlikely(ret)) return ret; - } - req->cqe.res = iov_iter_count(&s->iter); + req->cqe.res = iov_iter_count(&io->s.iter); if (force_nonblock) { /* If the file doesn't support async, just async punt */ if (unlikely(!io_file_supports_nowait(req))) - goto copy_iov; + goto ret_eagain; /* File path supports NOWAIT for non-direct_IO only for block devices. 
*/ if (!(kiocb->ki_flags & IOCB_DIRECT) && !(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) && (req->flags & REQ_F_ISREG)) - goto copy_iov; + goto ret_eagain; kiocb->ki_flags |= IOCB_NOWAIT; } else { @@ -1034,19 +1011,17 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags) ppos = io_kiocb_update_pos(req); ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res); - if (unlikely(ret)) { - kfree(iovec); + if (unlikely(ret)) return ret; - } if (req->flags & REQ_F_ISREG) kiocb_start_write(kiocb); kiocb->ki_flags |= IOCB_WRITE; if (likely(req->file->f_op->write_iter)) - ret2 = call_write_iter(req->file, kiocb, &s->iter); + ret2 = call_write_iter(req->file, kiocb, &io->s.iter); else if (req->file->f_op->write) - ret2 = loop_rw_iter(WRITE, rw, &s->iter); + ret2 = loop_rw_iter(WRITE, rw, &io->s.iter); else ret2 = -EINVAL; @@ -1067,11 +1042,9 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags) if (!force_nonblock || ret2 != -EAGAIN) { /* IOPOLL retry should happen for io-wq threads */ if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL)) - goto copy_iov; + goto ret_eagain; if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) { - struct io_async_rw *io; - trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2, req->cqe.res, ret2); @@ -1080,33 +1053,22 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags) * in the worker. Also update bytes_done to account for * the bytes already written. */ - iov_iter_save_state(&s->iter, &s->iter_state); - ret = io_setup_async_rw(req, iovec, s, true); - - io = req->async_data; - if (io) - io->bytes_done += ret2; + iov_iter_save_state(&io->s.iter, &io->s.iter_state); + io->bytes_done += ret2; if (kiocb->ki_flags & IOCB_WRITE) io_req_end_write(req); - return ret ? ret : -EAGAIN; + return -EAGAIN; } done: ret = kiocb_done(req, ret2, issue_flags); } else { -copy_iov: - iov_iter_restore(&s->iter, &s->iter_state); - ret = io_setup_async_rw(req, iovec, s, false); - if (!ret) { - if (kiocb->ki_flags & IOCB_WRITE) - io_req_end_write(req); - return -EAGAIN; - } - return ret; +ret_eagain: + iov_iter_restore(&io->s.iter, &io->s.iter_state); + if (kiocb->ki_flags & IOCB_WRITE) + io_req_end_write(req); + return -EAGAIN; } - /* it's reportedly faster than delegating the null check to kfree() */ - if (iovec) - kfree(iovec); return ret; } @@ -1181,6 +1143,8 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) break; nr_events++; req->cqe.flags = io_put_kbuf(req, 0); + if (req->opcode != IORING_OP_URING_CMD) + io_req_rw_cleanup(req, 0); } if (unlikely(!nr_events)) return 0; @@ -1194,3 +1158,11 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) __io_submit_flush_completions(ctx); return nr_events; } + +void io_rw_cache_free(struct io_cache_entry *entry) +{ + struct io_async_rw *rw; + + rw = container_of(entry, struct io_async_rw, cache); + kfree(rw); +} diff --git a/io_uring/rw.h b/io_uring/rw.h index f9e89b4fe4da..f7905070d10b 100644 --- a/io_uring/rw.h +++ b/io_uring/rw.h @@ -9,21 +9,26 @@ struct io_rw_state { }; struct io_async_rw { + union { + size_t bytes_done; + struct io_cache_entry cache; + }; struct io_rw_state s; - const struct iovec *free_iovec; - size_t bytes_done; + struct iovec *free_iovec; struct wait_page_queue wpq; }; -int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe); -int io_prep_rwv(struct io_kiocb *req, const struct io_uring_sqe *sqe); -int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe); +int io_prep_read_fixed(struct io_kiocb *req, const 
struct io_uring_sqe *sqe); +int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe); +int io_prep_readv(struct io_kiocb *req, const struct io_uring_sqe *sqe); +int io_prep_writev(struct io_kiocb *req, const struct io_uring_sqe *sqe); +int io_prep_read(struct io_kiocb *req, const struct io_uring_sqe *sqe); +int io_prep_write(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_read(struct io_kiocb *req, unsigned int issue_flags); -int io_readv_prep_async(struct io_kiocb *req); int io_write(struct io_kiocb *req, unsigned int issue_flags); -int io_writev_prep_async(struct io_kiocb *req); void io_readv_writev_cleanup(struct io_kiocb *req); void io_rw_fail(struct io_kiocb *req); void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts); int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags); +void io_rw_cache_free(struct io_cache_entry *entry); -- cgit From d10f19dff56eac5ae44dc270336b18071a8bd51c Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Mon, 18 Mar 2024 20:41:58 -0600 Subject: io_uring/uring_cmd: switch to always allocating async data Basic conversion ensuring async_data is allocated off the prep path. Adds a basic alloc cache as well, as passthrough IO can be quite high in rate. Tested-by: Anuj Gupta Reviewed-by: Anuj Gupta Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 1 + io_uring/io_uring.c | 3 ++ io_uring/opdef.c | 1 - io_uring/uring_cmd.c | 77 ++++++++++++++++++++++++++++++------------ io_uring/uring_cmd.h | 10 +++++- 5 files changed, 69 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 60d7e35fc303..0f24fdad19c2 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -301,6 +301,7 @@ struct io_ring_ctx { struct io_alloc_cache apoll_cache; struct io_alloc_cache netmsg_cache; struct io_alloc_cache rw_cache; + struct io_alloc_cache uring_cache; /* * Any cancelable uring_cmd is added to this list in diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index b0554d122931..a868c0a253a6 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -313,6 +313,8 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) sizeof(struct io_async_msghdr)); io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX, sizeof(struct io_async_rw)); + io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX, + sizeof(struct uring_cache)); io_futex_cache_init(ctx); init_completion(&ctx->ref_comp); xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1); @@ -2826,6 +2828,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free); io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free); io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free); + io_alloc_cache_free(&ctx->uring_cache, io_uring_cache_free); io_futex_cache_free(ctx); io_destroy_buffers(ctx); mutex_unlock(&ctx->uring_lock); diff --git a/io_uring/opdef.c b/io_uring/opdef.c index 1951107210d4..745246086c23 100644 --- a/io_uring/opdef.c +++ b/io_uring/opdef.c @@ -677,7 +677,6 @@ const struct io_cold_def io_cold_defs[] = { [IORING_OP_URING_CMD] = { .name = "URING_CMD", .async_size = 2 * sizeof(struct io_uring_sqe), - .prep_async = io_uring_cmd_prep_async, }, [IORING_OP_SEND_ZC] = { .name = "SEND_ZC", diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c index 4614ce734fee..9bd0ba87553f 
100644 --- a/io_uring/uring_cmd.c +++ b/io_uring/uring_cmd.c @@ -14,6 +14,38 @@ #include "rsrc.h" #include "uring_cmd.h" +static struct uring_cache *io_uring_async_get(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + struct io_cache_entry *entry; + struct uring_cache *cache; + + entry = io_alloc_cache_get(&ctx->uring_cache); + if (entry) { + cache = container_of(entry, struct uring_cache, cache); + req->flags |= REQ_F_ASYNC_DATA; + req->async_data = cache; + return cache; + } + if (!io_alloc_async_data(req)) + return req->async_data; + return NULL; +} + +static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd); + struct uring_cache *cache = req->async_data; + + if (issue_flags & IO_URING_F_UNLOCKED) + return; + if (io_alloc_cache_put(&req->ctx->uring_cache, &cache->cache)) { + ioucmd->sqe = NULL; + req->async_data = NULL; + req->flags &= ~REQ_F_ASYNC_DATA; + } +} + bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx, struct task_struct *task, bool cancel_all) { @@ -128,6 +160,7 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2, io_req_set_res(req, ret, 0); if (req->ctx->flags & IORING_SETUP_CQE32) io_req_set_cqe32_extra(req, res2, 0); + io_req_uring_cleanup(req, issue_flags); if (req->ctx->flags & IORING_SETUP_IOPOLL) { /* order with io_iopoll_req_issued() checking ->iopoll_complete */ smp_store_release(&req->iopoll_completed, 1); @@ -142,13 +175,19 @@ void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2, } EXPORT_SYMBOL_GPL(io_uring_cmd_done); -int io_uring_cmd_prep_async(struct io_kiocb *req) +static int io_uring_cmd_prep_setup(struct io_kiocb *req, + const struct io_uring_sqe *sqe) { struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd); + struct uring_cache *cache; - memcpy(req->async_data, ioucmd->sqe, uring_sqe_size(req->ctx)); - ioucmd->sqe = req->async_data; - return 0; + cache = io_uring_async_get(req); + if (cache) { + memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx)); + ioucmd->sqe = req->async_data; + return 0; + } + return -ENOMEM; } int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) @@ -173,9 +212,9 @@ int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) req->imu = ctx->user_bufs[index]; io_req_set_rsrc_node(req, ctx, 0); } - ioucmd->sqe = sqe; ioucmd->cmd_op = READ_ONCE(sqe->cmd_op); - return 0; + + return io_uring_cmd_prep_setup(req, sqe); } int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags) @@ -206,23 +245,14 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags) } ret = file->f_op->uring_cmd(ioucmd, issue_flags); - if (ret == -EAGAIN) { - if (!req_has_async_data(req)) { - if (io_alloc_async_data(req)) - return -ENOMEM; - io_uring_cmd_prep_async(req); - } - return -EAGAIN; - } - - if (ret != -EIOCBQUEUED) { - if (ret < 0) - req_set_fail(req); - io_req_set_res(req, ret, 0); + if (ret == -EAGAIN || ret == -EIOCBQUEUED) return ret; - } - return IOU_ISSUE_SKIP_COMPLETE; + if (ret < 0) + req_set_fail(req); + io_req_uring_cleanup(req, issue_flags); + io_req_set_res(req, ret, 0); + return ret; } int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, @@ -311,3 +341,8 @@ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags) } EXPORT_SYMBOL_GPL(io_uring_cmd_sock); #endif + +void io_uring_cache_free(struct io_cache_entry *entry) +{ + kfree(container_of(entry, struct uring_cache, 
cache)); +} diff --git a/io_uring/uring_cmd.h b/io_uring/uring_cmd.h index 7356bf9aa655..b0ccff7091ee 100644 --- a/io_uring/uring_cmd.h +++ b/io_uring/uring_cmd.h @@ -1,8 +1,16 @@ // SPDX-License-Identifier: GPL-2.0 +struct uring_cache { + union { + struct io_cache_entry cache; + struct io_uring_sqe sqes[2]; + }; +}; + int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags); int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_uring_cmd_prep_async(struct io_kiocb *req); +void io_uring_cache_free(struct io_cache_entry *entry); bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx, - struct task_struct *task, bool cancel_all); \ No newline at end of file + struct task_struct *task, bool cancel_all); -- cgit From 414d0f45c316221acbf066658afdbae5b354a5cc Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Wed, 20 Mar 2024 15:19:44 -0600 Subject: io_uring/alloc_cache: switch to array based caching Currently lists are being used to manage this, but best practice is usually to have these in an array instead as that it cheaper to manage. Outside of that detail, games are also played with KASAN as the list is inside the cached entry itself. Finally, all users of this need a struct io_cache_entry embedded in their struct, which is union'ized with something else in there that isn't used across the free -> realloc cycle. Get rid of all of that, and simply have it be an array. This will not change the memory used, as we're just trading an 8-byte member entry for the per-elem array size. This reduces the overhead of the recycled allocations, and it reduces the amount of code code needed to support recycling to about half of what it currently is. Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 2 +- io_uring/alloc_cache.h | 57 +++++++++++++++++++----------------------- io_uring/futex.c | 30 +++++++++------------- io_uring/futex.h | 5 ++-- io_uring/io_uring.c | 34 ++++++++++++++----------- io_uring/net.c | 13 ++++------ io_uring/net.h | 18 ++++--------- io_uring/poll.c | 12 +++------ io_uring/poll.h | 9 +------ io_uring/rsrc.c | 10 +++----- io_uring/rsrc.h | 7 +----- io_uring/rw.c | 14 +++++------ io_uring/rw.h | 7 ++---- io_uring/uring_cmd.c | 14 +++-------- io_uring/uring_cmd.h | 6 +---- 15 files changed, 93 insertions(+), 145 deletions(-) (limited to 'include/linux') diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 0f24fdad19c2..ef45b8bd1b35 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -220,7 +220,7 @@ struct io_ev_fd { }; struct io_alloc_cache { - struct io_wq_work_node list; + void **entries; unsigned int nr_cached; unsigned int max_cached; size_t elem_size; diff --git a/io_uring/alloc_cache.h b/io_uring/alloc_cache.h index 138ad14b0b12..b7a38a2069cf 100644 --- a/io_uring/alloc_cache.h +++ b/io_uring/alloc_cache.h @@ -6,61 +6,56 @@ */ #define IO_ALLOC_CACHE_MAX 128 -struct io_cache_entry { - struct io_wq_work_node node; -}; - static inline bool io_alloc_cache_put(struct io_alloc_cache *cache, - struct io_cache_entry *entry) + void *entry) { if (cache->nr_cached < cache->max_cached) { - cache->nr_cached++; - wq_stack_add_head(&entry->node, &cache->list); - kasan_mempool_poison_object(entry); + if (!kasan_mempool_poison_object(entry)) + return false; + cache->entries[cache->nr_cached++] = entry; return true; } return false; } -static inline bool io_alloc_cache_empty(struct io_alloc_cache *cache) -{ - return !cache->list.next; -} - -static inline struct io_cache_entry 
*io_alloc_cache_get(struct io_alloc_cache *cache) +static inline void *io_alloc_cache_get(struct io_alloc_cache *cache) { - if (cache->list.next) { - struct io_cache_entry *entry; + if (cache->nr_cached) { + void *entry = cache->entries[--cache->nr_cached]; - entry = container_of(cache->list.next, struct io_cache_entry, node); kasan_mempool_unpoison_object(entry, cache->elem_size); - cache->list.next = cache->list.next->next; - cache->nr_cached--; return entry; } return NULL; } -static inline void io_alloc_cache_init(struct io_alloc_cache *cache, +/* returns false if the cache was initialized properly */ +static inline bool io_alloc_cache_init(struct io_alloc_cache *cache, unsigned max_nr, size_t size) { - cache->list.next = NULL; - cache->nr_cached = 0; - cache->max_cached = max_nr; - cache->elem_size = size; + cache->entries = kvmalloc_array(max_nr, sizeof(void *), GFP_KERNEL); + if (cache->entries) { + cache->nr_cached = 0; + cache->max_cached = max_nr; + cache->elem_size = size; + return false; + } + return true; } static inline void io_alloc_cache_free(struct io_alloc_cache *cache, - void (*free)(struct io_cache_entry *)) + void (*free)(const void *)) { - while (1) { - struct io_cache_entry *entry = io_alloc_cache_get(cache); + void *entry; + + if (!cache->entries) + return; - if (!entry) - break; + while ((entry = io_alloc_cache_get(cache)) != NULL) free(entry); - } - cache->nr_cached = 0; + + kvfree(cache->entries); + cache->entries = NULL; } #endif diff --git a/io_uring/futex.c b/io_uring/futex.c index 792a03df58de..914848f46beb 100644 --- a/io_uring/futex.c +++ b/io_uring/futex.c @@ -9,7 +9,7 @@ #include "../kernel/futex/futex.h" #include "io_uring.h" -#include "rsrc.h" +#include "alloc_cache.h" #include "futex.h" struct io_futex { @@ -27,27 +27,21 @@ struct io_futex { }; struct io_futex_data { - union { - struct futex_q q; - struct io_cache_entry cache; - }; + struct futex_q q; struct io_kiocb *req; }; -void io_futex_cache_init(struct io_ring_ctx *ctx) -{ - io_alloc_cache_init(&ctx->futex_cache, IO_NODE_ALLOC_CACHE_MAX, - sizeof(struct io_futex_data)); -} +#define IO_FUTEX_ALLOC_CACHE_MAX 32 -static void io_futex_cache_entry_free(struct io_cache_entry *entry) +bool io_futex_cache_init(struct io_ring_ctx *ctx) { - kfree(container_of(entry, struct io_futex_data, cache)); + return io_alloc_cache_init(&ctx->futex_cache, IO_FUTEX_ALLOC_CACHE_MAX, + sizeof(struct io_futex_data)); } void io_futex_cache_free(struct io_ring_ctx *ctx) { - io_alloc_cache_free(&ctx->futex_cache, io_futex_cache_entry_free); + io_alloc_cache_free(&ctx->futex_cache, kfree); } static void __io_futex_complete(struct io_kiocb *req, struct io_tw_state *ts) @@ -63,7 +57,7 @@ static void io_futex_complete(struct io_kiocb *req, struct io_tw_state *ts) struct io_ring_ctx *ctx = req->ctx; io_tw_lock(ctx, ts); - if (!io_alloc_cache_put(&ctx->futex_cache, &ifd->cache)) + if (!io_alloc_cache_put(&ctx->futex_cache, ifd)) kfree(ifd); __io_futex_complete(req, ts); } @@ -259,11 +253,11 @@ static void io_futex_wake_fn(struct wake_q_head *wake_q, struct futex_q *q) static struct io_futex_data *io_alloc_ifd(struct io_ring_ctx *ctx) { - struct io_cache_entry *entry; + struct io_futex_data *ifd; - entry = io_alloc_cache_get(&ctx->futex_cache); - if (entry) - return container_of(entry, struct io_futex_data, cache); + ifd = io_alloc_cache_get(&ctx->futex_cache); + if (ifd) + return ifd; return kmalloc(sizeof(struct io_futex_data), GFP_NOWAIT); } diff --git a/io_uring/futex.h b/io_uring/futex.h index 0847e9e8a127..b8bb09873d57 100644 
--- a/io_uring/futex.h +++ b/io_uring/futex.h @@ -13,7 +13,7 @@ int io_futex_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd, unsigned int issue_flags); bool io_futex_remove_all(struct io_ring_ctx *ctx, struct task_struct *task, bool cancel_all); -void io_futex_cache_init(struct io_ring_ctx *ctx); +bool io_futex_cache_init(struct io_ring_ctx *ctx); void io_futex_cache_free(struct io_ring_ctx *ctx); #else static inline int io_futex_cancel(struct io_ring_ctx *ctx, @@ -27,8 +27,9 @@ static inline bool io_futex_remove_all(struct io_ring_ctx *ctx, { return false; } -static inline void io_futex_cache_init(struct io_ring_ctx *ctx) +static inline bool io_futex_cache_init(struct io_ring_ctx *ctx) { + return false; } static inline void io_futex_cache_free(struct io_ring_ctx *ctx) { diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 579618fad833..1d453eb8e49f 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -276,6 +276,7 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) { struct io_ring_ctx *ctx; int hash_bits; + bool ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) @@ -305,17 +306,19 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) INIT_LIST_HEAD(&ctx->cq_overflow_list); INIT_LIST_HEAD(&ctx->io_buffers_cache); INIT_HLIST_HEAD(&ctx->io_buf_list); - io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX, + ret = io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX, sizeof(struct io_rsrc_node)); - io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX, + ret |= io_alloc_cache_init(&ctx->apoll_cache, IO_ALLOC_CACHE_MAX, sizeof(struct async_poll)); - io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX, + ret |= io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX, sizeof(struct io_async_msghdr)); - io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX, + ret |= io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX, sizeof(struct io_async_rw)); - io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX, + ret |= io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX, sizeof(struct uring_cache)); - io_futex_cache_init(ctx); + ret |= io_futex_cache_init(ctx); + if (ret) + goto err; init_completion(&ctx->ref_comp); xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1); mutex_init(&ctx->uring_lock); @@ -345,6 +348,12 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) return ctx; err: + io_alloc_cache_free(&ctx->rsrc_node_cache, kfree); + io_alloc_cache_free(&ctx->apoll_cache, kfree); + io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free); + io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free); + io_alloc_cache_free(&ctx->uring_cache, kfree); + io_futex_cache_free(ctx); kfree(ctx->cancel_table.hbs); kfree(ctx->cancel_table_locked.hbs); xa_destroy(&ctx->io_bl_xa); @@ -1482,7 +1491,7 @@ static void io_free_batch_list(struct io_ring_ctx *ctx, if (apoll->double_poll) kfree(apoll->double_poll); - if (!io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache)) + if (!io_alloc_cache_put(&ctx->apoll_cache, apoll)) kfree(apoll); req->flags &= ~REQ_F_POLLED; } @@ -2778,11 +2787,6 @@ static void io_req_caches_free(struct io_ring_ctx *ctx) mutex_unlock(&ctx->uring_lock); } -static void io_rsrc_node_cache_free(struct io_cache_entry *entry) -{ - kfree(container_of(entry, struct io_rsrc_node, cache)); -} - static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) { io_sq_thread_finish(ctx); @@ -2797,10 +2801,10 @@ static __cold void 
io_ring_ctx_free(struct io_ring_ctx *ctx) __io_sqe_files_unregister(ctx); io_cqring_overflow_kill(ctx); io_eventfd_unregister(ctx); - io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free); + io_alloc_cache_free(&ctx->apoll_cache, kfree); io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free); io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free); - io_alloc_cache_free(&ctx->uring_cache, io_uring_cache_free); + io_alloc_cache_free(&ctx->uring_cache, kfree); io_futex_cache_free(ctx); io_destroy_buffers(ctx); mutex_unlock(&ctx->uring_lock); @@ -2816,7 +2820,7 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)); WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list)); - io_alloc_cache_free(&ctx->rsrc_node_cache, io_rsrc_node_cache_free); + io_alloc_cache_free(&ctx->rsrc_node_cache, kfree); if (ctx->mm_account) { mmdrop(ctx->mm_account); ctx->mm_account = NULL; diff --git a/io_uring/net.c b/io_uring/net.c index 3bef562b67de..d0abc5689066 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -137,7 +137,7 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags) /* Let normal cleanup path reap it if we fail adding to the cache */ iov = hdr->free_iov; - if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) { + if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) { if (iov) kasan_mempool_poison_object(iov); req->async_data = NULL; @@ -148,12 +148,10 @@ static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags) static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; - struct io_cache_entry *entry; struct io_async_msghdr *hdr; - entry = io_alloc_cache_get(&ctx->netmsg_cache); - if (entry) { - hdr = container_of(entry, struct io_async_msghdr, cache); + hdr = io_alloc_cache_get(&ctx->netmsg_cache); + if (hdr) { if (hdr->free_iov) { kasan_mempool_unpoison_object(hdr->free_iov, hdr->free_iov_nr * sizeof(struct iovec)); @@ -1490,11 +1488,10 @@ out: return IOU_OK; } -void io_netmsg_cache_free(struct io_cache_entry *entry) +void io_netmsg_cache_free(const void *entry) { - struct io_async_msghdr *kmsg; + struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry; - kmsg = container_of(entry, struct io_async_msghdr, cache); if (kmsg->free_iov) { kasan_mempool_unpoison_object(kmsg->free_iov, kmsg->free_iov_nr * sizeof(struct iovec)); diff --git a/io_uring/net.h b/io_uring/net.h index b47b43ec6459..0eb1c1920fc9 100644 --- a/io_uring/net.h +++ b/io_uring/net.h @@ -3,23 +3,15 @@ #include #include -#include "alloc_cache.h" - struct io_async_msghdr { #if defined(CONFIG_NET) - union { - struct iovec fast_iov; - struct { - struct io_cache_entry cache; - /* entry size of ->free_iov, if valid */ - int free_iov_nr; - }; - }; + struct iovec fast_iov; /* points to an allocated iov, if NULL we use fast_iov instead */ struct iovec *free_iov; + int free_iov_nr; + int namelen; __kernel_size_t controllen; __kernel_size_t payloadlen; - int namelen; struct sockaddr __user *uaddr; struct msghdr msg; struct sockaddr_storage addr; @@ -57,9 +49,9 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags); int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); void io_send_zc_cleanup(struct io_kiocb *req); -void io_netmsg_cache_free(struct io_cache_entry *entry); +void io_netmsg_cache_free(const void *entry); #else -static inline void io_netmsg_cache_free(struct io_cache_entry *entry) +static inline void io_netmsg_cache_free(const void 
*entry) { } #endif diff --git a/io_uring/poll.c b/io_uring/poll.c index 5d55bbf1de15..0a8e02944689 100644 --- a/io_uring/poll.c +++ b/io_uring/poll.c @@ -14,6 +14,7 @@ #include #include "io_uring.h" +#include "alloc_cache.h" #include "refs.h" #include "napi.h" #include "opdef.h" @@ -686,17 +687,15 @@ static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req, unsigned issue_flags) { struct io_ring_ctx *ctx = req->ctx; - struct io_cache_entry *entry; struct async_poll *apoll; if (req->flags & REQ_F_POLLED) { apoll = req->apoll; kfree(apoll->double_poll); } else if (!(issue_flags & IO_URING_F_UNLOCKED)) { - entry = io_alloc_cache_get(&ctx->apoll_cache); - if (entry == NULL) + apoll = io_alloc_cache_get(&ctx->apoll_cache); + if (!apoll) goto alloc_apoll; - apoll = container_of(entry, struct async_poll, cache); apoll->poll.retries = APOLL_MAX_RETRY; } else { alloc_apoll: @@ -1055,8 +1054,3 @@ out: io_req_set_res(req, ret, 0); return IOU_OK; } - -void io_apoll_cache_free(struct io_cache_entry *entry) -{ - kfree(container_of(entry, struct async_poll, cache)); -} diff --git a/io_uring/poll.h b/io_uring/poll.h index 1dacae9e816c..5c240f11069a 100644 --- a/io_uring/poll.h +++ b/io_uring/poll.h @@ -1,7 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -#include "alloc_cache.h" - enum { IO_APOLL_OK, IO_APOLL_ABORTED, @@ -17,10 +15,7 @@ struct io_poll { }; struct async_poll { - union { - struct io_poll poll; - struct io_cache_entry cache; - }; + struct io_poll poll; struct io_poll *double_poll; }; @@ -46,6 +41,4 @@ int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags); bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk, bool cancel_all); -void io_apoll_cache_free(struct io_cache_entry *entry); - void io_poll_task_func(struct io_kiocb *req, struct io_tw_state *ts); diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c index 4818b79231dd..7b8a056f98ed 100644 --- a/io_uring/rsrc.c +++ b/io_uring/rsrc.c @@ -13,6 +13,7 @@ #include #include "io_uring.h" +#include "alloc_cache.h" #include "openclose.h" #include "rsrc.h" @@ -169,7 +170,7 @@ static void io_rsrc_put_work(struct io_rsrc_node *node) void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node) { - if (!io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache)) + if (!io_alloc_cache_put(&ctx->rsrc_node_cache, node)) kfree(node); } @@ -197,12 +198,9 @@ void io_rsrc_node_ref_zero(struct io_rsrc_node *node) struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx) { struct io_rsrc_node *ref_node; - struct io_cache_entry *entry; - entry = io_alloc_cache_get(&ctx->rsrc_node_cache); - if (entry) { - ref_node = container_of(entry, struct io_rsrc_node, cache); - } else { + ref_node = io_alloc_cache_get(&ctx->rsrc_node_cache); + if (!ref_node) { ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL); if (!ref_node) return NULL; diff --git a/io_uring/rsrc.h b/io_uring/rsrc.h index e21000238954..83c079a707f8 100644 --- a/io_uring/rsrc.h +++ b/io_uring/rsrc.h @@ -2,8 +2,6 @@ #ifndef IOU_RSRC_H #define IOU_RSRC_H -#include "alloc_cache.h" - #define IO_NODE_ALLOC_CACHE_MAX 32 #define IO_RSRC_TAG_TABLE_SHIFT (PAGE_SHIFT - 3) @@ -36,10 +34,7 @@ struct io_rsrc_data { }; struct io_rsrc_node { - union { - struct io_cache_entry cache; - struct io_ring_ctx *ctx; - }; + struct io_ring_ctx *ctx; int refs; bool empty; u16 type; diff --git a/io_uring/rw.c b/io_uring/rw.c index e84d322a6150..3134a6ece1be 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -18,6 +18,7 @@ #include "io_uring.h" #include "opdef.h" #include "kbuf.h" 
+#include "alloc_cache.h" #include "rsrc.h" #include "poll.h" #include "rw.h" @@ -154,7 +155,7 @@ static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags) return; } iov = rw->free_iovec; - if (io_alloc_cache_put(&req->ctx->rw_cache, &rw->cache)) { + if (io_alloc_cache_put(&req->ctx->rw_cache, rw)) { if (iov) kasan_mempool_poison_object(iov); req->async_data = NULL; @@ -200,12 +201,10 @@ static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags) static int io_rw_alloc_async(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; - struct io_cache_entry *entry; struct io_async_rw *rw; - entry = io_alloc_cache_get(&ctx->rw_cache); - if (entry) { - rw = container_of(entry, struct io_async_rw, cache); + rw = io_alloc_cache_get(&ctx->rw_cache); + if (rw) { if (rw->free_iovec) { kasan_mempool_unpoison_object(rw->free_iovec, rw->free_iov_nr * sizeof(struct iovec)); @@ -1168,11 +1167,10 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin) return nr_events; } -void io_rw_cache_free(struct io_cache_entry *entry) +void io_rw_cache_free(const void *entry) { - struct io_async_rw *rw; + struct io_async_rw *rw = (struct io_async_rw *) entry; - rw = container_of(entry, struct io_async_rw, cache); if (rw->free_iovec) { kasan_mempool_unpoison_object(rw->free_iovec, rw->free_iov_nr * sizeof(struct iovec)); diff --git a/io_uring/rw.h b/io_uring/rw.h index cf51d0eb407a..3f432dc75441 100644 --- a/io_uring/rw.h +++ b/io_uring/rw.h @@ -3,10 +3,7 @@ #include struct io_async_rw { - union { - size_t bytes_done; - struct io_cache_entry cache; - }; + size_t bytes_done; struct iov_iter iter; struct iov_iter_state iter_state; struct iovec fast_iov; @@ -28,4 +25,4 @@ void io_rw_fail(struct io_kiocb *req); void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts); int io_read_mshot_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags); -void io_rw_cache_free(struct io_cache_entry *entry); +void io_rw_cache_free(const void *entry); diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c index 92346b5d9f5b..334d31dd6628 100644 --- a/io_uring/uring_cmd.c +++ b/io_uring/uring_cmd.c @@ -11,18 +11,17 @@ #include #include "io_uring.h" +#include "alloc_cache.h" #include "rsrc.h" #include "uring_cmd.h" static struct uring_cache *io_uring_async_get(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; - struct io_cache_entry *entry; struct uring_cache *cache; - entry = io_alloc_cache_get(&ctx->uring_cache); - if (entry) { - cache = container_of(entry, struct uring_cache, cache); + cache = io_alloc_cache_get(&ctx->uring_cache); + if (cache) { req->flags |= REQ_F_ASYNC_DATA; req->async_data = cache; return cache; @@ -39,7 +38,7 @@ static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags) if (issue_flags & IO_URING_F_UNLOCKED) return; - if (io_alloc_cache_put(&req->ctx->uring_cache, &cache->cache)) { + if (io_alloc_cache_put(&req->ctx->uring_cache, cache)) { ioucmd->sqe = NULL; req->async_data = NULL; req->flags &= ~REQ_F_ASYNC_DATA; @@ -354,8 +353,3 @@ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags) } EXPORT_SYMBOL_GPL(io_uring_cmd_sock); #endif - -void io_uring_cache_free(struct io_cache_entry *entry) -{ - kfree(container_of(entry, struct uring_cache, cache)); -} diff --git a/io_uring/uring_cmd.h b/io_uring/uring_cmd.h index 477ea8865639..a361f98664d2 100644 --- a/io_uring/uring_cmd.h +++ b/io_uring/uring_cmd.h @@ -1,15 +1,11 @@ // 
SPDX-License-Identifier: GPL-2.0 struct uring_cache { - union { - struct io_cache_entry cache; - struct io_uring_sqe sqes[2]; - }; + struct io_uring_sqe sqes[2]; }; int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags); int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); -void io_uring_cache_free(struct io_cache_entry *entry); bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx, struct task_struct *task, bool cancel_all); -- cgit From 87585b05757dc70545efb434669708d276125559 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 12 Mar 2024 20:24:21 -0600 Subject: io_uring/kbuf: use vm_insert_pages() for mmap'ed pbuf ring Rather than use remap_pfn_range() for this and manually free later, switch to using vm_insert_page() and have it Just Work. This requires a bit of effort on the mmap lookup side, as the ctx uring_lock isn't held, which otherwise protects buffer_lists from being torn down, and it's not safe to grab from mmap context that would introduce an ABBA deadlock between the mmap lock and the ctx uring_lock. Instead, lookup the buffer_list under RCU, as the the list is RCU freed already. Use the existing reference count to determine whether it's possible to safely grab a reference to it (eg if it's not zero already), and drop that reference when done with the mapping. If the mmap reference is the last one, the buffer_list and the associated memory can go away, since the vma insertion has references to the inserted pages at that point. Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 3 - io_uring/io_uring.c | 58 +++++------------- io_uring/io_uring.h | 6 +- io_uring/kbuf.c | 134 ++++++++--------------------------------- io_uring/kbuf.h | 3 +- 5 files changed, 47 insertions(+), 157 deletions(-) (limited to 'include/linux') diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index ef45b8bd1b35..d34c8433caf9 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -372,9 +372,6 @@ struct io_ring_ctx { struct list_head io_buffers_cache; - /* deferred free list, protected by ->uring_lock */ - struct hlist_head io_buf_list; - /* Keep this last, we don't need it for the fast path */ struct wait_queue_head poll_wq; struct io_restriction restrictions; diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 0ef418faac33..50e859da59e1 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -303,7 +303,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) INIT_LIST_HEAD(&ctx->sqd_list); INIT_LIST_HEAD(&ctx->cq_overflow_list); INIT_LIST_HEAD(&ctx->io_buffers_cache); - INIT_HLIST_HEAD(&ctx->io_buf_list); ret = io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX, sizeof(struct io_rsrc_node)); ret |= io_alloc_cache_init(&ctx->apoll_cache, IO_POLL_ALLOC_CACHE_MAX, @@ -2598,15 +2597,15 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? 
ret : 0; } -static void io_pages_unmap(void *ptr, struct page ***pages, - unsigned short *npages) +void io_pages_unmap(void *ptr, struct page ***pages, unsigned short *npages, + bool put_pages) { bool do_vunmap = false; if (!ptr) return; - if (*npages) { + if (put_pages && *npages) { struct page **to_free = *pages; int i; @@ -2628,14 +2627,6 @@ static void io_pages_unmap(void *ptr, struct page ***pages, *npages = 0; } -void io_mem_free(void *ptr) -{ - if (!ptr) - return; - - folio_put(virt_to_folio(ptr)); -} - static void io_pages_free(struct page ***pages, int npages) { struct page **page_array = *pages; @@ -2730,8 +2721,10 @@ static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr, static void io_rings_free(struct io_ring_ctx *ctx) { if (!(ctx->flags & IORING_SETUP_NO_MMAP)) { - io_pages_unmap(ctx->rings, &ctx->ring_pages, &ctx->n_ring_pages); - io_pages_unmap(ctx->sq_sqes, &ctx->sqe_pages, &ctx->n_sqe_pages); + io_pages_unmap(ctx->rings, &ctx->ring_pages, &ctx->n_ring_pages, + true); + io_pages_unmap(ctx->sq_sqes, &ctx->sqe_pages, &ctx->n_sqe_pages, + true); } else { io_pages_free(&ctx->ring_pages, ctx->n_ring_pages); ctx->n_ring_pages = 0; @@ -2788,8 +2781,8 @@ err: return ERR_PTR(-ENOMEM); } -static void *io_pages_map(struct page ***out_pages, unsigned short *npages, - size_t size) +void *io_pages_map(struct page ***out_pages, unsigned short *npages, + size_t size) { gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN; struct page **pages; @@ -2819,17 +2812,6 @@ done: return ret; } -void *io_mem_alloc(size_t size) -{ - gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP; - void *ret; - - ret = (void *) __get_free_pages(gfp, get_order(size)); - if (ret) - return ret; - return ERR_PTR(-ENOMEM); -} - static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries, unsigned int cq_entries, size_t *sq_offset) { @@ -2926,7 +2908,6 @@ static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) ctx->mm_account = NULL; } io_rings_free(ctx); - io_kbuf_mmap_list_free(ctx); percpu_ref_exit(&ctx->refs); free_uid(ctx->user); @@ -3396,10 +3377,8 @@ static void *io_uring_validate_mmap_request(struct file *file, { struct io_ring_ctx *ctx = file->private_data; loff_t offset = pgoff << PAGE_SHIFT; - struct page *page; - void *ptr; - switch (offset & IORING_OFF_MMAP_MASK) { + switch ((pgoff << PAGE_SHIFT) & IORING_OFF_MMAP_MASK) { case IORING_OFF_SQ_RING: case IORING_OFF_CQ_RING: /* Don't allow mmap if the ring was setup without it */ @@ -3414,6 +3393,7 @@ static void *io_uring_validate_mmap_request(struct file *file, case IORING_OFF_PBUF_RING: { struct io_buffer_list *bl; unsigned int bgid; + void *ptr; bgid = (offset & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT; bl = io_pbuf_get_bl(ctx, bgid); @@ -3421,17 +3401,11 @@ static void *io_uring_validate_mmap_request(struct file *file, return bl; ptr = bl->buf_ring; io_put_bl(ctx, bl); - break; + return ptr; } - default: - return ERR_PTR(-EINVAL); } - page = virt_to_head_page(ptr); - if (sz > page_size(page)) - return ERR_PTR(-EINVAL); - - return ptr; + return ERR_PTR(-EINVAL); } int io_uring_mmap_pages(struct io_ring_ctx *ctx, struct vm_area_struct *vma, @@ -3450,7 +3424,6 @@ static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma) struct io_ring_ctx *ctx = file->private_data; size_t sz = vma->vm_end - vma->vm_start; long offset = vma->vm_pgoff << PAGE_SHIFT; - unsigned long pfn; void *ptr; ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz); @@ -3465,10 +3438,11 
@@ static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma) case IORING_OFF_SQES: return io_uring_mmap_pages(ctx, vma, ctx->sqe_pages, ctx->n_sqe_pages); + case IORING_OFF_PBUF_RING: + return io_pbuf_mmap(file, vma); } - pfn = virt_to_phys(ptr) >> PAGE_SHIFT; - return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot); + return -EINVAL; } static unsigned long io_uring_mmu_get_unmapped_area(struct file *filp, diff --git a/io_uring/io_uring.h b/io_uring/io_uring.h index 75230d914007..dec996a1c789 100644 --- a/io_uring/io_uring.h +++ b/io_uring/io_uring.h @@ -109,8 +109,10 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx); bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task, bool cancel_all); -void *io_mem_alloc(size_t size); -void io_mem_free(void *ptr); +void *io_pages_map(struct page ***out_pages, unsigned short *npages, + size_t size); +void io_pages_unmap(void *ptr, struct page ***pages, unsigned short *npages, + bool put_pages); enum { IO_EVENTFD_OP_SIGNAL_BIT, diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c index 4289f4a92693..820ac599d003 100644 --- a/io_uring/kbuf.c +++ b/io_uring/kbuf.c @@ -32,25 +32,12 @@ struct io_provide_buf { __u16 bid; }; -struct io_buf_free { - struct hlist_node list; - void *mem; - size_t size; - int inuse; -}; - -static inline struct io_buffer_list *__io_buffer_get_list(struct io_ring_ctx *ctx, - unsigned int bgid) -{ - return xa_load(&ctx->io_bl_xa, bgid); -} - static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx, unsigned int bgid) { lockdep_assert_held(&ctx->uring_lock); - return __io_buffer_get_list(ctx, bgid); + return xa_load(&ctx->io_bl_xa, bgid); } static int io_buffer_add_list(struct io_ring_ctx *ctx, @@ -191,24 +178,6 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len, return ret; } -/* - * Mark the given mapped range as free for reuse - */ -static void io_kbuf_mark_free(struct io_ring_ctx *ctx, struct io_buffer_list *bl) -{ - struct io_buf_free *ibf; - - hlist_for_each_entry(ibf, &ctx->io_buf_list, list) { - if (bl->buf_ring == ibf->mem) { - ibf->inuse = 0; - return; - } - } - - /* can't happen... */ - WARN_ON_ONCE(1); -} - static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer_list *bl, unsigned nbufs) { @@ -220,23 +189,16 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, if (bl->is_buf_ring) { i = bl->buf_ring->tail - bl->head; - if (bl->is_mmap) { - /* - * io_kbuf_list_free() will free the page(s) at - * ->release() time. - */ - io_kbuf_mark_free(ctx, bl); - bl->buf_ring = NULL; - bl->is_mmap = 0; - } else if (bl->buf_nr_pages) { + if (bl->buf_nr_pages) { int j; - for (j = 0; j < bl->buf_nr_pages; j++) - unpin_user_page(bl->buf_pages[j]); - kvfree(bl->buf_pages); - vunmap(bl->buf_ring); - bl->buf_pages = NULL; - bl->buf_nr_pages = 0; + if (!bl->is_mmap) { + for (j = 0; j < bl->buf_nr_pages; j++) + unpin_user_page(bl->buf_pages[j]); + } + io_pages_unmap(bl->buf_ring, &bl->buf_pages, + &bl->buf_nr_pages, bl->is_mmap); + bl->is_mmap = 0; } /* make sure it's seen as empty */ INIT_LIST_HEAD(&bl->buf_list); @@ -537,63 +499,18 @@ error_unpin: return ret; } -/* - * See if we have a suitable region that we can reuse, rather than allocate - * both a new io_buf_free and mem region again. We leave it on the list as - * even a reused entry will need freeing at ring release. 
- */ -static struct io_buf_free *io_lookup_buf_free_entry(struct io_ring_ctx *ctx, - size_t ring_size) -{ - struct io_buf_free *ibf, *best = NULL; - size_t best_dist; - - hlist_for_each_entry(ibf, &ctx->io_buf_list, list) { - size_t dist; - - if (ibf->inuse || ibf->size < ring_size) - continue; - dist = ibf->size - ring_size; - if (!best || dist < best_dist) { - best = ibf; - if (!dist) - break; - best_dist = dist; - } - } - - return best; -} - static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx, struct io_uring_buf_reg *reg, struct io_buffer_list *bl) { - struct io_buf_free *ibf; size_t ring_size; - void *ptr; ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring); - /* Reuse existing entry, if we can */ - ibf = io_lookup_buf_free_entry(ctx, ring_size); - if (!ibf) { - ptr = io_mem_alloc(ring_size); - if (IS_ERR(ptr)) - return PTR_ERR(ptr); - - /* Allocate and store deferred free entry */ - ibf = kmalloc(sizeof(*ibf), GFP_KERNEL_ACCOUNT); - if (!ibf) { - io_mem_free(ptr); - return -ENOMEM; - } - ibf->mem = ptr; - ibf->size = ring_size; - hlist_add_head(&ibf->list, &ctx->io_buf_list); - } - ibf->inuse = 1; - bl->buf_ring = ibf->mem; + bl->buf_ring = io_pages_map(&bl->buf_pages, &bl->buf_nr_pages, ring_size); + if (!bl->buf_ring) + return -ENOMEM; + bl->is_buf_ring = 1; bl->is_mmap = 1; return 0; @@ -741,18 +658,19 @@ struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx, return ERR_PTR(-EINVAL); } -/* - * Called at or after ->release(), free the mmap'ed buffers that we used - * for memory mapped provided buffer rings. - */ -void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx) +int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma) { - struct io_buf_free *ibf; - struct hlist_node *tmp; + struct io_ring_ctx *ctx = file->private_data; + loff_t pgoff = vma->vm_pgoff << PAGE_SHIFT; + struct io_buffer_list *bl; + int bgid, ret; - hlist_for_each_entry_safe(ibf, tmp, &ctx->io_buf_list, list) { - hlist_del(&ibf->list); - io_mem_free(ibf->mem); - kfree(ibf); - } + bgid = (pgoff & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT; + bl = io_pbuf_get_bl(ctx, bgid); + if (IS_ERR(bl)) + return PTR_ERR(bl); + + ret = io_uring_mmap_pages(ctx, vma, bl->buf_pages, bl->buf_nr_pages); + io_put_bl(ctx, bl); + return ret; } diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h index df365b8860cf..53c141d9a8b2 100644 --- a/io_uring/kbuf.h +++ b/io_uring/kbuf.h @@ -55,8 +55,6 @@ int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg); int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg); int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg); -void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx); - void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags); bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags); @@ -64,6 +62,7 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags); void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl); struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx, unsigned long bgid); +int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma); static inline bool io_kbuf_recycle_ring(struct io_kiocb *req) { -- cgit From d9713ad3fa227726a0b4d544c5a4cdd393c1933e Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 5 Apr 2024 16:50:04 +0100 Subject: io_uring: remove async request cache io_req_complete_post() was a sole user of ->locked_free_list, but since we just gutted the function, the cache is not used anymore and can be removed. 
->locked_free_list served as an asynchronous counterpart of the main request (i.e. struct io_kiocb) cache for all unlocked cases like io-wq. Now they're all forced to be completed into the main cache directly, off of the normal completion path or via io_free_req(). Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/7bffccd213e370abd4de480e739d8b08ab6c1326.1712331455.git.asml.silence@gmail.com Reviewed-by: Ming Lei Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 4 ---- io_uring/io_uring.c | 22 ---------------------- 2 files changed, 26 deletions(-) (limited to 'include/linux') diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index d34c8433caf9..c47f412cf18e 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -346,10 +346,6 @@ struct io_ring_ctx { spinlock_t completion_lock; - /* IRQ completion list, under ->completion_lock */ - unsigned int locked_free_nr; - struct io_wq_work_list locked_free_list; - struct list_head io_buffers_comp; struct list_head cq_overflow_list; struct io_hash_table cancel_table; diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 8078d6944411..e291f227a1a0 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -334,7 +334,6 @@ static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) init_llist_head(&ctx->work_llist); INIT_LIST_HEAD(&ctx->tctx_list); ctx->submit_state.free_list.next = NULL; - INIT_WQ_LIST(&ctx->locked_free_list); INIT_HLIST_HEAD(&ctx->waitid_list); #ifdef CONFIG_FUTEX INIT_HLIST_HEAD(&ctx->futex_list); @@ -988,15 +987,6 @@ static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx) memset(&req->big_cqe, 0, sizeof(req->big_cqe)); } -static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx, - struct io_submit_state *state) -{ - spin_lock(&ctx->completion_lock); - wq_list_splice(&ctx->locked_free_list, &state->free_list); - ctx->locked_free_nr = 0; - spin_unlock(&ctx->completion_lock); -} - /* * A request might get retired back into the request caches even before opcode * handlers and io_issue_sqe() are done with it, e.g. inline completion path. @@ -1010,17 +1000,6 @@ __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx) void *reqs[IO_REQ_ALLOC_BATCH]; int ret; - /* - * If we have more than a batch's worth of requests in our IRQ side - * locked cache, grab the lock and move them over to our submission - * side cache. - */ - if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) { - io_flush_cached_locked_reqs(ctx, &ctx->submit_state); - if (!io_req_cache_empty(ctx)) - return true; - } - ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs); /* @@ -2654,7 +2633,6 @@ static void io_req_caches_free(struct io_ring_ctx *ctx) int nr = 0; mutex_lock(&ctx->uring_lock); - io_flush_cached_locked_reqs(ctx, &ctx->submit_state); while (!io_req_cache_empty(ctx)) { req = io_extract_req(ctx); -- cgit From 8c9a6f549e65912825e31dc1e0e3f7995984649d Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Tue, 9 Apr 2024 14:05:53 -0700 Subject: io_uring: separate header for exported net bits We're exporting some io_uring bits to networking, e.g. for implementing a net callback for io_uring cmds, but we don't want to expose more than needed. Add a separate header for networking.
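As a quick illustration of what the new header buys consumers (a hypothetical call site with an invented function name, not part of the patch): a networking user includes only the slim header, and the -EOPNOTSUPP stub it carries for !CONFIG_IO_URING builds keeps the caller free of #ifdefs:

#include <linux/io_uring/net.h>	/* io_uring_cmd_sock() */

/*
 * Hypothetical socket uring_cmd hook: builds the same whether or not
 * io_uring is enabled, since the header stubs io_uring_cmd_sock() out
 * to return -EOPNOTSUPP when CONFIG_IO_URING is not set.
 */
static int example_sock_uring_cmd(struct io_uring_cmd *cmd,
				  unsigned int issue_flags)
{
	return io_uring_cmd_sock(cmd, issue_flags);
}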
Signed-off-by: Pavel Begunkov Signed-off-by: David Wei Link: https://lore.kernel.org/r/20240409210554.1878789-1-dw@davidwei.uk Signed-off-by: Jens Axboe --- include/linux/io_uring.h | 6 ------ include/linux/io_uring/net.h | 18 ++++++++++++++++++ io_uring/uring_cmd.c | 1 + net/socket.c | 2 +- 4 files changed, 20 insertions(+), 7 deletions(-) create mode 100644 include/linux/io_uring/net.h (limited to 'include/linux') diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 68ed6697fece..e123d5e17b52 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -11,7 +11,6 @@ void __io_uring_cancel(bool cancel_all); void __io_uring_free(struct task_struct *tsk); void io_uring_unreg_ringfd(void); const char *io_uring_get_opcode(u8 opcode); -int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags); bool io_is_uring_fops(struct file *file); static inline void io_uring_files_cancel(void) @@ -45,11 +44,6 @@ static inline const char *io_uring_get_opcode(u8 opcode) { return ""; } -static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd, - unsigned int issue_flags) -{ - return -EOPNOTSUPP; -} static inline bool io_is_uring_fops(struct file *file) { return false; diff --git a/include/linux/io_uring/net.h b/include/linux/io_uring/net.h new file mode 100644 index 000000000000..b58f39fed4d5 --- /dev/null +++ b/include/linux/io_uring/net.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _LINUX_IO_URING_NET_H +#define _LINUX_IO_URING_NET_H + +struct io_uring_cmd; + +#if defined(CONFIG_IO_URING) +int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags); + +#else +static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd, + unsigned int issue_flags) +{ + return -EOPNOTSUPP; +} +#endif + +#endif diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c index 334d31dd6628..21ac5fb2d5f0 100644 --- a/io_uring/uring_cmd.c +++ b/io_uring/uring_cmd.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include diff --git a/net/socket.c b/net/socket.c index e5f3af49a8b6..01a71ae10c35 100644 --- a/net/socket.c +++ b/net/socket.c @@ -88,7 +88,7 @@ #include #include #include -#include +#include #include #include -- cgit From e964fc77577a9afe528e54b50527cf49e24aa211 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Thu, 11 Apr 2024 20:51:07 +0300 Subject: vfs, swap: compile out IS_SWAPFILE() on swapless configs No swap support -- no swapfiles possible. Signed-off-by: Alexey Dobriyan (Yandex) Link: https://lore.kernel.org/r/2391c7f5-0f83-4188-ae56-4ec7ccbf2576@p183 Signed-off-by: Christian Brauner --- include/linux/fs.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index b053f98393e6..d376dd7057d5 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2260,7 +2260,13 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags #define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD) #define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME) + +#ifdef CONFIG_SWAP #define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE) +#else +#define IS_SWAPFILE(inode) ((void)(inode), 0U) +#endif + #define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE) #define IS_IMA(inode) ((inode)->i_flags & S_IMA) #define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) -- cgit From 10c9e40f297d3f1b0d4df6e73618d4466a0b2cfc Mon Sep 17 00:00:00 2001 From: "Paul E. 
McKenney" Date: Fri, 15 Mar 2024 23:29:59 -0700 Subject: rcu: Remove redundant CONFIG_PROVE_RCU #if condition The #if condition controlling the rcu_preempt_sleep_check() definition has a redundant check for CONFIG_PROVE_RCU, which is already guaranteed by an enclosing #ifdef. This commit therefore removes this redundant condition from the inner #if. Signed-off-by: Paul E. McKenney Signed-off-by: Uladzislau Rezki (Sony) --- include/linux/rcupdate.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 2c54750e36a0..382780bb60f4 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -401,15 +401,15 @@ static inline int debug_lockdep_rcu_enabled(void) } \ } while (0) -#if defined(CONFIG_PROVE_RCU) && !defined(CONFIG_PREEMPT_RCU) +#ifndef CONFIG_PREEMPT_RCU static inline void rcu_preempt_sleep_check(void) { RCU_LOCKDEP_WARN(lock_is_held(&rcu_lock_map), "Illegal context switch in RCU read-side critical section"); } -#else /* #ifdef CONFIG_PROVE_RCU */ +#else // #ifndef CONFIG_PREEMPT_RCU static inline void rcu_preempt_sleep_check(void) { } -#endif /* #else #ifdef CONFIG_PROVE_RCU */ +#endif // #else // #ifndef CONFIG_PREEMPT_RCU #define rcu_sleep_check() \ do { \ -- cgit From bfc7bc539392f681194de341dd2bb9e83f31f588 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Thu, 28 Mar 2024 14:53:23 +0000 Subject: dma-buf: Do not build debugfs related code when !CONFIG_DEBUG_FS There is no point in compiling in the list and mutex operations which are only used from the dma-buf debugfs code, if debugfs is not compiled in. Put the code in question behind some kconfig guards and so save some text and maybe even a pointer per object at runtime when not enabled. Signed-off-by: Tvrtko Ursulin Cc: Sumit Semwal Cc: Christian König Cc: linux-media@vger.kernel.org Cc: dri-devel@lists.freedesktop.org Cc: linaro-mm-sig@lists.linaro.org Cc: linux-kernel@vger.kernel.org Cc: kernel-dev@igalia.com Reviewed-by: T.J.
Mercier Reviewed-by: Christian König Signed-off-by: Maíra Canal Link: https://patchwork.freedesktop.org/patch/msgid/20240328145323.68872-1-tursulin@igalia.com --- drivers/dma-buf/dma-buf.c | 56 ++++++++++++++++++++++++++++------------------- include/linux/dma-buf.h | 2 ++ 2 files changed, 36 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 8fe5aa67b167..8892bc701a66 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -35,12 +35,35 @@ static inline int is_dma_buf_file(struct file *); -struct dma_buf_list { - struct list_head head; - struct mutex lock; -}; +#if IS_ENABLED(CONFIG_DEBUG_FS) +static DEFINE_MUTEX(debugfs_list_mutex); +static LIST_HEAD(debugfs_list); -static struct dma_buf_list db_list; +static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf) +{ + mutex_lock(&debugfs_list_mutex); + list_add(&dmabuf->list_node, &debugfs_list); + mutex_unlock(&debugfs_list_mutex); +} + +static void __dma_buf_debugfs_list_del(struct dma_buf *dmabuf) +{ + if (!dmabuf) + return; + + mutex_lock(&debugfs_list_mutex); + list_del(&dmabuf->list_node); + mutex_unlock(&debugfs_list_mutex); +} +#else +static void __dma_buf_debugfs_list_add(struct dma_buf *dmabuf) +{ +} + +static void __dma_buf_debugfs_list_del(struct file *file) +{ +} +#endif static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen) { @@ -89,17 +112,10 @@ static void dma_buf_release(struct dentry *dentry) static int dma_buf_file_release(struct inode *inode, struct file *file) { - struct dma_buf *dmabuf; - if (!is_dma_buf_file(file)) return -EINVAL; - dmabuf = file->private_data; - if (dmabuf) { - mutex_lock(&db_list.lock); - list_del(&dmabuf->list_node); - mutex_unlock(&db_list.lock); - } + __dma_buf_debugfs_list_del(file->private_data); return 0; } @@ -672,9 +688,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) file->f_path.dentry->d_fsdata = dmabuf; dmabuf->file = file; - mutex_lock(&db_list.lock); - list_add(&dmabuf->list_node, &db_list.head); - mutex_unlock(&db_list.lock); + __dma_buf_debugfs_list_add(dmabuf); return dmabuf; @@ -1611,7 +1625,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) size_t size = 0; int ret; - ret = mutex_lock_interruptible(&db_list.lock); + ret = mutex_lock_interruptible(&debugfs_list_mutex); if (ret) return ret; @@ -1620,7 +1634,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n", "size", "flags", "mode", "count", "ino"); - list_for_each_entry(buf_obj, &db_list.head, list_node) { + list_for_each_entry(buf_obj, &debugfs_list, list_node) { ret = dma_resv_lock_interruptible(buf_obj->resv, NULL); if (ret) @@ -1657,11 +1671,11 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused) seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size); - mutex_unlock(&db_list.lock); + mutex_unlock(&debugfs_list_mutex); return 0; error_unlock: - mutex_unlock(&db_list.lock); + mutex_unlock(&debugfs_list_mutex); return ret; } @@ -1718,8 +1732,6 @@ static int __init dma_buf_init(void) if (IS_ERR(dma_buf_mnt)) return PTR_ERR(dma_buf_mnt); - mutex_init(&db_list.lock); - INIT_LIST_HEAD(&db_list.head); dma_buf_init_debugfs(); return 0; } diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 8ff4add71f88..36216d28d8bd 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -370,8 +370,10 @@ struct dma_buf { */ struct module *owner; +#if 
IS_ENABLED(CONFIG_DEBUG_FS) /** @list_node: node for dma_buf accounting and debugging. */ struct list_head list_node; +#endif /** @priv: exporter specific private data for this buffer object. */ void *priv; -- cgit From 80cd613a9ae091dbf52e27a409d58da988ffc8f3 Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Mon, 25 Mar 2024 18:39:08 +0100 Subject: rcu: Mollify sparse with RCU guard When using "guard(rcu)();" sparse will complain, because even though it now understands the cleanup attribute, it doesn't evaluate the calls from it at function exit, and thus doesn't count the context correctly. Given that there's a conditional in the resulting code: static inline void class_rcu_destructor(class_rcu_t *_T) { if (_T->lock) { rcu_read_unlock(); } } it seems that even trying to teach sparse to evalulate the cleanup attribute function it'd still be difficult to really make it understand the full context here. Suppress the sparse warning by just releasing the context in the acquisition part of the function, after all we know it's safe with the guard, that's the whole point of it. Signed-off-by: Johannes Berg Reviewed-by: Boqun Feng Signed-off-by: Paul E. McKenney Signed-off-by: Uladzislau Rezki (Sony) --- include/linux/rcupdate.h | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 382780bb60f4..dfd2399f2cde 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -1090,6 +1090,18 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) extern int rcu_expedited; extern int rcu_normal; -DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock()) +DEFINE_LOCK_GUARD_0(rcu, + do { + rcu_read_lock(); + /* + * sparse doesn't call the cleanup function, + * so just release immediately and don't track + * the context. We don't need to anyway, since + * the whole point of the guard is to not need + * the explicit unlock. + */ + __release(RCU); + } while (0), + rcu_read_unlock()) #endif /* __LINUX_RCUPDATE_H */ -- cgit From a9c3475dd67bd828d999e95d0ba985e7ac4cbbb7 Mon Sep 17 00:00:00 2001 From: Vignesh Balasubramanian Date: Fri, 12 Apr 2024 11:51:39 +0530 Subject: Replace macro "ARCH_HAVE_EXTRA_ELF_NOTES" with kconfig "ARCH_HAVE_EXTRA_ELF_NOTES" enables an extra note section in the core dump. Kconfig variable is preferred over ARCH_HAVE_* macro. Co-developed-by: Jini Susan George Signed-off-by: Jini Susan George Signed-off-by: Vignesh Balasubramanian Acked-by: Michael Ellerman Link: https://lore.kernel.org/r/20240412062138.1132841-2-vigbalas@amd.com Signed-off-by: Kees Cook --- arch/Kconfig | 9 +++++++++ arch/powerpc/Kconfig | 1 + arch/powerpc/include/asm/elf.h | 2 -- include/linux/elf.h | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/arch/Kconfig b/arch/Kconfig index 9f066785bb71..143f021c8a76 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -502,6 +502,15 @@ config MMU_LAZY_TLB_SHOOTDOWN config ARCH_HAVE_NMI_SAFE_CMPXCHG bool +config ARCH_HAVE_EXTRA_ELF_NOTES + bool + help + An architecture should select this in order to enable adding an + arch-specific ELF note section to core files. It must provide two + functions: elf_coredump_extra_notes_size() and + elf_coredump_extra_notes_write() which are invoked by the ELF core + dumper. 
+ config ARCH_HAS_NMI_SAFE_THIS_CPU_OPS bool diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 1c4be3373686..c45fa9d7fb76 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -156,6 +156,7 @@ config PPC select ARCH_HAS_UACCESS_FLUSHCACHE select ARCH_HAS_UBSAN select ARCH_HAVE_NMI_SAFE_CMPXCHG + select ARCH_HAVE_EXTRA_ELF_NOTES if SPU_BASE select ARCH_KEEP_MEMBLOCK select ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE if PPC_RADIX_MMU select ARCH_MIGHT_HAVE_PC_PARPORT diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index 79f1c480b5eb..bb4b94444d3e 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -127,8 +127,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, /* Notes used in ET_CORE. Note name is "SPU//". */ #define NT_SPU 1 -#define ARCH_HAVE_EXTRA_ELF_NOTES - #endif /* CONFIG_SPU_BASE */ #ifdef CONFIG_PPC64 diff --git a/include/linux/elf.h b/include/linux/elf.h index c9a46c4e183b..5c402788da19 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h @@ -65,7 +65,7 @@ extern Elf64_Dyn _DYNAMIC []; struct file; struct coredump_params; -#ifndef ARCH_HAVE_EXTRA_ELF_NOTES +#ifndef CONFIG_ARCH_HAVE_EXTRA_ELF_NOTES static inline int elf_coredump_extra_notes_size(void) { return 0; } static inline int elf_coredump_extra_notes_write(struct coredump_params *cprm) { return 0; } #else -- cgit From 4854b463c4b27c94a7de86d16ad84f235f4c1a72 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Thu, 11 Apr 2024 12:22:29 -0700 Subject: net: dql: Avoid calling BUG() when WARN() is enough If the dql_queued() function receives an invalid argument, WARN about it and continue, instead of crashing the kernel. This was raised by checkpatch, when I am refactoring this code (see following patch/commit) WARNING: Do not crash the kernel unless it is absolutely unavoidable--use WARN_ON_ONCE() plus recovery code (if feasible) instead of BUG() or variants Signed-off-by: Breno Leitao Link: https://lore.kernel.org/r/20240411192241.2498631-2-leitao@debian.org Signed-off-by: Jakub Kicinski --- include/linux/dynamic_queue_limits.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index 5693a4be0d9a..ff9c65841ae8 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h @@ -91,7 +91,8 @@ static inline void dql_queued(struct dql *dql, unsigned int count) { unsigned long map, now, now_hi, i; - BUG_ON(count > DQL_MAX_OBJECT); + if (WARN_ON_ONCE(count > DQL_MAX_OBJECT)) + return; dql->last_obj_cnt = count; -- cgit From cbe481a1b7410b0f2f303e8fd4867ece388a9729 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Thu, 11 Apr 2024 12:22:30 -0700 Subject: net: dql: Separate queue function responsibilities The dql_queued() function currently handles both queuing object counts and populating bitmaps for reporting stalls. This commit splits the bitmap population into a separate function, allowing for conditional invocation in scenarios where the feature is disabled. This refactor maintains functionality while improving code organization. 
Signed-off-by: Breno Leitao Link: https://lore.kernel.org/r/20240411192241.2498631-3-leitao@debian.org Signed-off-by: Jakub Kicinski --- include/linux/dynamic_queue_limits.h | 44 ++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index ff9c65841ae8..9980df0b7247 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h @@ -83,28 +83,11 @@ struct dql { #define DQL_MAX_OBJECT (UINT_MAX / 16) #define DQL_MAX_LIMIT ((UINT_MAX / 2) - DQL_MAX_OBJECT) -/* - * Record number of objects queued. Assumes that caller has already checked - * availability in the queue with dql_avail. - */ -static inline void dql_queued(struct dql *dql, unsigned int count) +/* Populate the bitmap to be processed later in dql_check_stall() */ +static inline void dql_queue_stall(struct dql *dql) { unsigned long map, now, now_hi, i; - if (WARN_ON_ONCE(count > DQL_MAX_OBJECT)) - return; - - dql->last_obj_cnt = count; - - /* We want to force a write first, so that cpu do not attempt - * to get cache line containing last_obj_cnt, num_queued, adj_limit - * in Shared state, but directly does a Request For Ownership - * It is only a hint, we use barrier() only. - */ - barrier(); - - dql->num_queued += count; - now = jiffies; now_hi = now / BITS_PER_LONG; @@ -134,6 +117,29 @@ static inline void dql_queued(struct dql *dql, unsigned int count) WRITE_ONCE(DQL_HIST_ENT(dql, now_hi), map | BIT_MASK(now)); } +/* + * Record number of objects queued. Assumes that caller has already checked + * availability in the queue with dql_avail. + */ +static inline void dql_queued(struct dql *dql, unsigned int count) +{ + if (WARN_ON_ONCE(count > DQL_MAX_OBJECT)) + return; + + dql->last_obj_cnt = count; + + /* We want to force a write first, so that cpu do not attempt + * to get cache line containing last_obj_cnt, num_queued, adj_limit + * in Shared state, but directly does a Request For Ownership + * It is only a hint, we use barrier() only. + */ + barrier(); + + dql->num_queued += count; + + dql_queue_stall(dql); +} + /* Returns how many objects can be queued, < 0 indicates over limit. */ static inline int dql_avail(const struct dql *dql) { -- cgit From 721f076b62cb05108565adf17c27875ef5015307 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Thu, 11 Apr 2024 12:22:31 -0700 Subject: net: dql: Optimize stall information population When Dynamic Queue Limit (DQL) is set, it always populate stall information through dql_queue_stall(). However, this information is only necessary if a stall threshold is set, stored in struct dql->stall_thrs. dql_queue_stall() is cheap, but not free, since it does have memory barriers and so forth. Do not call dql_queue_stall() if there is no stall threshold set, and save some CPU cycles. 
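As a minimal sketch of the shape of this optimization (plain user-space C with invented names, not the kernel code itself): the fast path pays a single relaxed load of the optional threshold, and all of the stall bookkeeping stays behind that check:

#include <stdatomic.h>

struct tx_queue {
	_Atomic unsigned short stall_thrs;	/* 0 means stall tracking is off */
	unsigned long num_queued;
};

/* Stand-in for the expensive part: timestamps, history bitmap, barriers. */
static void track_stall(struct tx_queue *q)
{
	(void)q;
}

static void queue_objects(struct tx_queue *q, unsigned int count)
{
	q->num_queued += count;

	/* The common (disabled) case costs one cheap read and a branch. */
	if (atomic_load_explicit(&q->stall_thrs, memory_order_relaxed))
		track_stall(q);
}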
Signed-off-by: Breno Leitao Link: https://lore.kernel.org/r/20240411192241.2498631-4-leitao@debian.org Signed-off-by: Jakub Kicinski --- include/linux/dynamic_queue_limits.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index 9980df0b7247..869afb800ea1 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h @@ -137,7 +137,9 @@ static inline void dql_queued(struct dql *dql, unsigned int count) dql->num_queued += count; - dql_queue_stall(dql); + /* Only populate stall information if the threshold is set */ + if (READ_ONCE(dql->stall_thrs)) + dql_queue_stall(dql); } /* Returns how many objects can be queued, < 0 indicates over limit. */ -- cgit From 4ba67ef3a1fbb7d8dc5f00de9b93a583d05b38cc Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Thu, 11 Apr 2024 12:22:32 -0700 Subject: net: dqs: make struct dql more cache efficient With the previous change, struct dql->stall_thrs will be in the hot path (at queue side), even if DQS is disabled. The other fields accessed in this function (last_obj_cnt and num_queued) are in the first cache line, so let's move this field (stall_thrs) to the very first cache line, since there is a hole there. This does not change the structure size, since it moves a short (2 bytes) into a 4-byte hole in the first cache line. This is the new structure format now: struct dql { unsigned int num_queued; unsigned int last_obj_cnt; ... short unsigned int stall_thrs; /* XXX 2 bytes hole, try to pack */ ... /* --- cacheline 1 boundary (64 bytes) --- */ ... /* Longest stall detected, reported to user */ short unsigned int stall_max; /* XXX 2 bytes hole, try to pack */ }; Also, read the stall_thrs (now in the very first cache line) earlier, together with dql->num_queued (also in the first cache line). Suggested-by: Jakub Kicinski Suggested-by: Eric Dumazet Signed-off-by: Breno Leitao Link: https://lore.kernel.org/r/20240411192241.2498631-5-leitao@debian.org Signed-off-by: Jakub Kicinski --- include/linux/dynamic_queue_limits.h | 5 +++-- lib/dynamic_queue_limits.c | 13 +++++++++---- 2 files changed, 12 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index 869afb800ea1..281298e77a15 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h @@ -50,6 +50,9 @@ struct dql { unsigned int adj_limit; /* limit + num_completed */ unsigned int last_obj_cnt; /* Count at last queuing */ + /* Stall threshold (in jiffies), defined by user */ + unsigned short stall_thrs; + unsigned long history_head; /* top 58 bits of jiffies */ /* stall entries, a bit per entry */ unsigned long history[DQL_HIST_LEN]; @@ -71,8 +74,6 @@ struct dql { unsigned int min_limit; /* Minimum limit */ unsigned int slack_hold_time; /* Time to measure slack */ - /* Stall threshold (in jiffies), defined by user */ - unsigned short stall_thrs; /* Longest stall detected, reported to user */ unsigned short stall_max; unsigned long last_reap; /* Last reap (in jiffies) */ diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c index a1389db1c30a..e49deddd3de9 100644 --- a/lib/dynamic_queue_limits.c +++ b/lib/dynamic_queue_limits.c @@ -15,12 +15,10 @@ #define POSDIFF(A, B) ((int)((A) - (B)) > 0 ?
(A) - (B) : 0) #define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0) -static void dql_check_stall(struct dql *dql) +static void dql_check_stall(struct dql *dql, unsigned short stall_thrs) { - unsigned short stall_thrs; unsigned long now; - stall_thrs = READ_ONCE(dql->stall_thrs); if (!stall_thrs) return; @@ -86,9 +84,16 @@ void dql_completed(struct dql *dql, unsigned int count) { unsigned int inprogress, prev_inprogress, limit; unsigned int ovlimit, completed, num_queued; + unsigned short stall_thrs; bool all_prev_completed; num_queued = READ_ONCE(dql->num_queued); + /* Read stall_thrs in advance since it belongs to the same (first) + * cache line as ->num_queued. This way, dql_check_stall() does not + * need to touch the first cache line again later, reducing the window + * of possible false sharing. + */ + stall_thrs = READ_ONCE(dql->stall_thrs); /* Can't complete more than what's in queue */ BUG_ON(count > num_queued - dql->num_completed); @@ -178,7 +183,7 @@ void dql_completed(struct dql *dql, unsigned int count) dql->num_completed = completed; dql->prev_num_queued = num_queued; - dql_check_stall(dql); + dql_check_stall(dql, stall_thrs); } EXPORT_SYMBOL(dql_completed); -- cgit From f60d374d2cc88034385265d193a38e3f4a4b430c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 4 Jan 2024 21:35:38 -0500 Subject: close_on_exec(): pass files_struct instead of fdtable both callers are happier that way... Reviewed-by: Christian Brauner Signed-off-by: Al Viro --- fs/file.c | 5 +---- fs/proc/fd.c | 4 +--- include/linux/fdtable.h | 10 +++++----- 3 files changed, 7 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/fs/file.c b/fs/file.c index 3b683b9101d8..eff5ce79f66a 100644 --- a/fs/file.c +++ b/fs/file.c @@ -1219,12 +1219,9 @@ void set_close_on_exec(unsigned int fd, int flag) bool get_close_on_exec(unsigned int fd) { - struct files_struct *files = current->files; - struct fdtable *fdt; bool res; rcu_read_lock(); - fdt = files_fdtable(files); - res = close_on_exec(fd, fdt); + res = close_on_exec(fd, current->files); rcu_read_unlock(); return res; } diff --git a/fs/proc/fd.c b/fs/proc/fd.c index 6e72e5ad42bc..0139aae19651 100644 --- a/fs/proc/fd.c +++ b/fs/proc/fd.c @@ -39,10 +39,8 @@ static int seq_show(struct seq_file *m, void *v) spin_lock(&files->file_lock); file = files_lookup_fd_locked(files, fd); if (file) { - struct fdtable *fdt = files_fdtable(files); - f_flags = file->f_flags; - if (close_on_exec(fd, fdt)) + if (close_on_exec(fd, files)) f_flags |= O_CLOEXEC; get_file(file); diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 78c8326d74ae..cc060b20502b 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -33,11 +33,6 @@ struct fdtable { struct rcu_head rcu; }; -static inline bool close_on_exec(unsigned int fd, const struct fdtable *fdt) -{ - return test_bit(fd, fdt->close_on_exec); -} - static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt) { return test_bit(fd, fdt->open_fds); @@ -107,6 +102,11 @@ struct file *lookup_fdget_rcu(unsigned int fd); struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd); struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *fd); +static inline bool close_on_exec(unsigned int fd, const struct files_struct *files) +{ + return test_bit(fd, files_fdtable(files)->close_on_exec); +} + struct task_struct; void put_files_struct(struct files_struct *fs); -- cgit From c4aab26253cd1f302279b8d6b5b66ccf1b120520 Mon Sep 17 00:00:00 2001 From: Al Viro Date: 
Thu, 4 Jan 2024 21:45:47 -0500 Subject: fd_is_open(): move to fs/file.c no users outside that... Reviewed-by: Christian Brauner Signed-off-by: Al Viro --- fs/file.c | 5 +++++ include/linux/fdtable.h | 5 ----- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/fs/file.c b/fs/file.c index eff5ce79f66a..ab38b005633c 100644 --- a/fs/file.c +++ b/fs/file.c @@ -271,6 +271,11 @@ static inline void __clear_open_fd(unsigned int fd, struct fdtable *fdt) __clear_bit(fd / BITS_PER_LONG, fdt->full_fds_bits); } +static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt) +{ + return test_bit(fd, fdt->open_fds); +} + static unsigned int count_open_files(struct fdtable *fdt) { unsigned int size = fdt->max_fds; diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index cc060b20502b..2944d4aa413b 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -33,11 +33,6 @@ struct fdtable { struct rcu_head rcu; }; -static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt) -{ - return test_bit(fd, fdt->open_fds); -} - /* * Open file table structure */ -- cgit From af58dc1f50c1946018773beca23ebaad587b9cc9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 20 Jan 2024 06:24:55 -0500 Subject: kernel_file_open(): get rid of inode argument always equal to ->dentry->d_inode of the path argument these days. Reviewed-by: Christian Brauner Signed-off-by: Al Viro --- fs/cachefiles/namei.c | 3 +-- fs/open.c | 5 ++--- fs/overlayfs/util.c | 2 +- include/linux/fs.h | 2 +- 4 files changed, 5 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index 7ade836beb58..f53977169db4 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -563,8 +563,7 @@ static bool cachefiles_open_file(struct cachefiles_object *object, */ path.mnt = cache->mnt; path.dentry = dentry; - file = kernel_file_open(&path, O_RDWR | O_LARGEFILE | O_DIRECT, - d_backing_inode(dentry), cache->cache_cred); + file = kernel_file_open(&path, O_RDWR | O_LARGEFILE | O_DIRECT, cache->cache_cred); if (IS_ERR(file)) { trace_cachefiles_vfs_error(object, d_backing_inode(dentry), PTR_ERR(file), diff --git a/fs/open.c b/fs/open.c index ee8460c83c77..ec287ac67e7f 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1155,7 +1155,6 @@ EXPORT_SYMBOL(dentry_create); * kernel_file_open - open a file for kernel internal use * @path: path of the file to open * @flags: open flags - * @inode: the inode * @cred: credentials for open * * Open a file for use by in-kernel consumers. The file is not accounted @@ -1165,7 +1164,7 @@ EXPORT_SYMBOL(dentry_create); * Return: Opened file on success, an error pointer on failure. */ struct file *kernel_file_open(const struct path *path, int flags, - struct inode *inode, const struct cred *cred) + const struct cred *cred) { struct file *f; int error; @@ -1175,7 +1174,7 @@ struct file *kernel_file_open(const struct path *path, int flags, return f; f->f_path = *path; - error = do_dentry_open(f, inode, NULL); + error = do_dentry_open(f, d_inode(path->dentry), NULL); if (error) { fput(f); f = ERR_PTR(error); diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index d285d1d7baad..edc9216f6e27 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c @@ -1376,7 +1376,7 @@ int ovl_ensure_verity_loaded(struct path *datapath) * If this inode was not yet opened, the verity info hasn't been * loaded yet, so we need to do that here to force it into memory. 
*/ - filp = kernel_file_open(datapath, O_RDONLY, inode, current_cred()); + filp = kernel_file_open(datapath, O_RDONLY, current_cred()); if (IS_ERR(filp)) return PTR_ERR(filp); fput(filp); diff --git a/include/linux/fs.h b/include/linux/fs.h index 00fc429b0af0..143e967a3af2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1906,7 +1906,7 @@ struct file *kernel_tmpfile_open(struct mnt_idmap *idmap, umode_t mode, int open_flag, const struct cred *cred); struct file *kernel_file_open(const struct path *path, int flags, - struct inode *inode, const struct cred *cred); + const struct cred *cred); int vfs_mkobj(struct dentry *, umode_t, int (*f)(struct dentry *, umode_t, void *), -- cgit From 7c98f7cb8fda964fbc60b9307ad35e94735fa35f Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Mon, 28 Aug 2023 17:13:18 +0200 Subject: remove call_{read,write}_iter() functions These have no clear purpose. This is effectively a revert of commit bb7462b6fd64 ("vfs: use helpers for calling f_op->{read,write}_iter()"). The patch was created with the help of a coccinelle script. Fixes: bb7462b6fd64 ("vfs: use helpers for calling f_op->{read,write}_iter()") Reviewed-by: Christian Brauner Signed-off-by: Miklos Szeredi Signed-off-by: Al Viro --- drivers/block/loop.c | 4 ++-- drivers/target/target_core_file.c | 4 ++-- fs/aio.c | 4 ++-- fs/read_write.c | 12 ++++++------ fs/splice.c | 4 ++-- include/linux/fs.h | 12 ------------ io_uring/rw.c | 4 ++-- 7 files changed, 16 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 28a95fd366fe..93780f41646b 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -445,9 +445,9 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); if (rw == ITER_SOURCE) - ret = call_write_iter(file, &cmd->iocb, &iter); + ret = file->f_op->write_iter(&cmd->iocb, &iter); else - ret = call_read_iter(file, &cmd->iocb, &iter); + ret = file->f_op->read_iter(&cmd->iocb, &iter); lo_rw_aio_do_completion(cmd); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 4d447520bab8..94e6cd4e7e43 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -299,9 +299,9 @@ fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, aio_cmd->iocb.ki_flags |= IOCB_DSYNC; if (is_write) - ret = call_write_iter(file, &aio_cmd->iocb, &iter); + ret = file->f_op->write_iter(&aio_cmd->iocb, &iter); else - ret = call_read_iter(file, &aio_cmd->iocb, &iter); + ret = file->f_op->read_iter(&aio_cmd->iocb, &iter); if (ret != -EIOCBQUEUED) cmd_rw_aio_complete(&aio_cmd->iocb, ret); diff --git a/fs/aio.c b/fs/aio.c index 9cdaa2faa536..13ca81c7c744 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -1605,7 +1605,7 @@ static int aio_read(struct kiocb *req, const struct iocb *iocb, return ret; ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter)); if (!ret) - aio_rw_done(req, call_read_iter(file, req, &iter)); + aio_rw_done(req, file->f_op->read_iter(req, &iter)); kfree(iovec); return ret; } @@ -1636,7 +1636,7 @@ static int aio_write(struct kiocb *req, const struct iocb *iocb, if (S_ISREG(file_inode(file)->i_mode)) kiocb_start_write(req); req->ki_flags |= IOCB_WRITE; - aio_rw_done(req, call_write_iter(file, req, &iter)); + aio_rw_done(req, file->f_op->write_iter(req, &iter)); } kfree(iovec); return ret; diff --git a/fs/read_write.c b/fs/read_write.c index d4c036e82b6c..2de7f6adb33d 
100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -392,7 +392,7 @@ static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo kiocb.ki_pos = (ppos ? *ppos : 0); iov_iter_ubuf(&iter, ITER_DEST, buf, len); - ret = call_read_iter(filp, &kiocb, &iter); + ret = filp->f_op->read_iter(&kiocb, &iter); BUG_ON(ret == -EIOCBQUEUED); if (ppos) *ppos = kiocb.ki_pos; @@ -494,7 +494,7 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t kiocb.ki_pos = (ppos ? *ppos : 0); iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)buf, len); - ret = call_write_iter(filp, &kiocb, &iter); + ret = filp->f_op->write_iter(&kiocb, &iter); BUG_ON(ret == -EIOCBQUEUED); if (ret > 0 && ppos) *ppos = kiocb.ki_pos; @@ -736,9 +736,9 @@ static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter, kiocb.ki_pos = (ppos ? *ppos : 0); if (type == READ) - ret = call_read_iter(filp, &kiocb, iter); + ret = filp->f_op->read_iter(&kiocb, iter); else - ret = call_write_iter(filp, &kiocb, iter); + ret = filp->f_op->write_iter(&kiocb, iter); BUG_ON(ret == -EIOCBQUEUED); if (ppos) *ppos = kiocb.ki_pos; @@ -799,7 +799,7 @@ ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb, if (ret < 0) return ret; - ret = call_read_iter(file, iocb, iter); + ret = file->f_op->read_iter(iocb, iter); out: if (ret >= 0) fsnotify_access(file); @@ -860,7 +860,7 @@ ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb, return ret; kiocb_start_write(iocb); - ret = call_write_iter(file, iocb, iter); + ret = file->f_op->write_iter(iocb, iter); if (ret != -EIOCBQUEUED) kiocb_end_write(iocb); if (ret > 0) diff --git a/fs/splice.c b/fs/splice.c index 218e24b1ac40..60aed8de21f8 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -362,7 +362,7 @@ ssize_t copy_splice_read(struct file *in, loff_t *ppos, iov_iter_bvec(&to, ITER_DEST, bv, npages, len); init_sync_kiocb(&kiocb, in); kiocb.ki_pos = *ppos; - ret = call_read_iter(in, &kiocb, &to); + ret = in->f_op->read_iter(&kiocb, &to); if (ret > 0) { keep = DIV_ROUND_UP(ret, PAGE_SIZE); @@ -740,7 +740,7 @@ iter_file_splice_write(struct pipe_inode_info *pipe, struct file *out, iov_iter_bvec(&from, ITER_SOURCE, array, n, sd.total_len - left); init_sync_kiocb(&kiocb, out); kiocb.ki_pos = sd.pos; - ret = call_write_iter(out, &kiocb, &from); + ret = out->f_op->write_iter(&kiocb, &from); sd.pos = kiocb.ki_pos; if (ret <= 0) break; diff --git a/include/linux/fs.h b/include/linux/fs.h index 143e967a3af2..a94343f567cb 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2096,18 +2096,6 @@ struct inode_operations { struct offset_ctx *(*get_offset_ctx)(struct inode *inode); } ____cacheline_aligned; -static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio, - struct iov_iter *iter) -{ - return file->f_op->read_iter(kio, iter); -} - -static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio, - struct iov_iter *iter) -{ - return file->f_op->write_iter(kio, iter); -} - static inline int call_mmap(struct file *file, struct vm_area_struct *vma) { return file->f_op->mmap(file, vma); diff --git a/io_uring/rw.c b/io_uring/rw.c index 0585ebcc9773..7d335b7e00ed 100644 --- a/io_uring/rw.c +++ b/io_uring/rw.c @@ -701,7 +701,7 @@ static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter) struct file *file = rw->kiocb.ki_filp; if (likely(file->f_op->read_iter)) - return call_read_iter(file, &rw->kiocb, iter); + return file->f_op->read_iter(&rw->kiocb, iter); else if (file->f_op->read) return 
loop_rw_iter(READ, rw, iter); else @@ -1047,7 +1047,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags) kiocb->ki_flags |= IOCB_WRITE; if (likely(req->file->f_op->write_iter)) - ret2 = call_write_iter(req->file, kiocb, &s->iter); + ret2 = req->file->f_op->write_iter(kiocb, &s->iter); else if (req->file->f_op->write) ret2 = loop_rw_iter(WRITE, rw, &s->iter); else -- cgit From 5b9932f6001c70b984e8c9c2fe09e443beb4baba Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Mon, 15 Apr 2024 14:54:13 -0700 Subject: vfs: export remap and write check helpers Export these functions so that the next patch can use them to check the file ranges being passed to the XFS_IOC_EXCHANGE_RANGE operation. Cc: linux-fsdevel@vger.kernel.org Signed-off-by: Darrick J. Wong Reviewed-by: Christoph Hellwig --- fs/read_write.c | 1 + fs/remap_range.c | 4 ++-- include/linux/fs.h | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/read_write.c b/fs/read_write.c index d4c036e82b6c..85c096f2c0d0 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -1667,6 +1667,7 @@ int generic_write_check_limits(struct file *file, loff_t pos, loff_t *count) return 0; } +EXPORT_SYMBOL_GPL(generic_write_check_limits); /* Like generic_write_checks(), but takes size of write instead of iter. */ int generic_write_checks_count(struct kiocb *iocb, loff_t *count) diff --git a/fs/remap_range.c b/fs/remap_range.c index de07f978ce3e..28246dfc8485 100644 --- a/fs/remap_range.c +++ b/fs/remap_range.c @@ -99,8 +99,7 @@ static int generic_remap_checks(struct file *file_in, loff_t pos_in, return 0; } -static int remap_verify_area(struct file *file, loff_t pos, loff_t len, - bool write) +int remap_verify_area(struct file *file, loff_t pos, loff_t len, bool write) { int mask = write ? MAY_WRITE : MAY_READ; loff_t tmp; @@ -118,6 +117,7 @@ static int remap_verify_area(struct file *file, loff_t pos, loff_t len, return fsnotify_file_area_perm(file, mask, &pos, len); } +EXPORT_SYMBOL_GPL(remap_verify_area); /* * Ensure that we don't remap a partial EOF block in the middle of something diff --git a/include/linux/fs.h b/include/linux/fs.h index 8dfd53b52744..0835faeebe7b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2119,6 +2119,7 @@ extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); extern ssize_t vfs_copy_file_range(struct file *, loff_t , struct file *, loff_t, size_t, unsigned int); +int remap_verify_area(struct file *file, loff_t pos, loff_t len, bool write); int __generic_remap_file_range_prep(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t *len, unsigned int remap_flags, -- cgit From 075b7cd7ad7dec8651a6a6654fa5ebae436ac00f Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 14 Mar 2024 11:28:36 +0530 Subject: coresight: Add helpers registering/removing both AMBA and platform drivers This adds two different helpers i.e coresight_init_driver()/remove_driver() enabling coresight devices to register or remove AMBA and platform drivers. This changes replicator and funnel devices to use above new helpers. 
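For illustration, a hypothetical CoreSight component driver (invented names; the two driver structs are assumed to be defined elsewhere in that driver) would now register and unregister both flavours with one call each:

#include <linux/coresight.h>
#include <linux/module.h>

static int __init example_comp_init(void)
{
	/*
	 * Registers the AMBA driver first and then the platform driver,
	 * unregistering the AMBA driver again if the second step fails.
	 */
	return coresight_init_driver("example-comp", &dynamic_example_driver,
				     &static_example_driver);
}
module_init(example_comp_init);

static void __exit example_comp_exit(void)
{
	coresight_remove_driver(&dynamic_example_driver, &static_example_driver);
}
module_exit(example_comp_exit);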
Cc: Suzuki K Poulose Cc: Mike Leach Cc: James Clark Cc: Leo Yan Cc: Alexander Shishkin Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Cc: coresight@lists.linaro.org Reviewed-by: James Clark Signed-off-by: Anshuman Khandual Signed-off-by: Suzuki K Poulose Link: https://lore.kernel.org/r/20240314055843.2625883-5-anshuman.khandual@arm.com --- drivers/hwtracing/coresight/coresight-core.c | 29 ++++++++++++++++++++++ drivers/hwtracing/coresight/coresight-funnel.c | 19 ++------------ drivers/hwtracing/coresight/coresight-replicator.c | 20 +++------------ include/linux/coresight.h | 7 ++++++ 4 files changed, 41 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index b83613e34289..9fc6f6b863e0 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -1398,6 +1398,35 @@ static void __exit coresight_exit(void) module_init(coresight_init); module_exit(coresight_exit); +int coresight_init_driver(const char *drv, struct amba_driver *amba_drv, + struct platform_driver *pdev_drv) +{ + int ret; + + ret = amba_driver_register(amba_drv); + if (ret) { + pr_err("%s: error registering AMBA driver\n", drv); + return ret; + } + + ret = platform_driver_register(pdev_drv); + if (!ret) + return 0; + + pr_err("%s: error registering platform driver\n", drv); + amba_driver_unregister(amba_drv); + return ret; +} +EXPORT_SYMBOL_GPL(coresight_init_driver); + +void coresight_remove_driver(struct amba_driver *amba_drv, + struct platform_driver *pdev_drv) +{ + amba_driver_unregister(amba_drv); + platform_driver_unregister(pdev_drv); +} +EXPORT_SYMBOL_GPL(coresight_remove_driver); + MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Pratik Patel "); MODULE_AUTHOR("Mathieu Poirier "); diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c index ef1a0abfee4e..ff3ea0670a5b 100644 --- a/drivers/hwtracing/coresight/coresight-funnel.c +++ b/drivers/hwtracing/coresight/coresight-funnel.c @@ -410,27 +410,12 @@ static struct amba_driver dynamic_funnel_driver = { static int __init funnel_init(void) { - int ret; - - ret = platform_driver_register(&static_funnel_driver); - if (ret) { - pr_info("Error registering platform driver\n"); - return ret; - } - - ret = amba_driver_register(&dynamic_funnel_driver); - if (ret) { - pr_info("Error registering amba driver\n"); - platform_driver_unregister(&static_funnel_driver); - } - - return ret; + return coresight_init_driver("funnel", &dynamic_funnel_driver, &static_funnel_driver); } static void __exit funnel_exit(void) { - platform_driver_unregister(&static_funnel_driver); - amba_driver_unregister(&dynamic_funnel_driver); + coresight_remove_driver(&dynamic_funnel_driver, &static_funnel_driver); } module_init(funnel_init); diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index 73452d9dc13b..ddb530a8436f 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ b/drivers/hwtracing/coresight/coresight-replicator.c @@ -416,27 +416,13 @@ static struct amba_driver dynamic_replicator_driver = { static int __init replicator_init(void) { - int ret; - - ret = platform_driver_register(&static_replicator_driver); - if (ret) { - pr_info("Error registering platform driver\n"); - return ret; - } - - ret = amba_driver_register(&dynamic_replicator_driver); - if (ret) { - pr_info("Error registering 
amba driver\n"); - platform_driver_unregister(&static_replicator_driver); - } - - return ret; + return coresight_init_driver("replicator", &dynamic_replicator_driver, + &static_replicator_driver); } static void __exit replicator_exit(void) { - platform_driver_unregister(&static_replicator_driver); - amba_driver_unregister(&dynamic_replicator_driver); + coresight_remove_driver(&dynamic_replicator_driver, &static_replicator_driver); } module_init(replicator_init); diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 5f288d475490..653f1712eb77 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -12,6 +12,8 @@ #include #include #include +#include +#include /* Peripheral id registers (0xFD0-0xFEC) */ #define CORESIGHT_PERIPHIDR4 0xfd0 @@ -658,4 +660,9 @@ coresight_find_output_type(struct coresight_platform_data *pdata, enum coresight_dev_type type, union coresight_dev_subtype subtype); +int coresight_init_driver(const char *drv, struct amba_driver *amba_drv, + struct platform_driver *pdev_drv); + +void coresight_remove_driver(struct amba_driver *amba_drv, + struct platform_driver *pdev_drv); #endif /* _LINUX_COREISGHT_H */ -- cgit From 9d50f95bc0d5df56f2591b950a251d90bffad094 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Tue, 16 Apr 2024 11:09:01 +0100 Subject: gpio: swnode: Add ability to specify native chip selects for SPI SPI devices can specify a cs-gpios property to enumerate their chip selects. Under device tree, a zero entry in this property can be used to specify that a particular chip select is using the SPI controllers native chip select, for example: cs-gpios = <&gpio1 0 0>, <0>; Here, the second chip select is native. However, when using swnodes there is currently no way to specify a native chip select. The proposal here is to register a swnode_gpio_undefined software node, that can be specified to allow the indication of a native chip select. For example: static const struct software_node_ref_args device_cs_refs[] = { { .node = &device_gpiochip_swnode, .nargs = 2, .args = { 0, GPIO_ACTIVE_LOW }, }, { .node = &swnode_gpio_undefined, .nargs = 0, }, }; Register the swnode as the gpiolib is initialised and check in swnode_get_gpio_device() if the returned node matches swnode_gpio_undefined and return -ENOENT, which matches the behaviour of the device tree system when it encounters a 0 phandle. Reviewed-by: Linus Walleij Reviewed-by: Andy Shevchenko Signed-off-by: Charles Keepax Link: https://lore.kernel.org/r/20240416100904.3738093-2-ckeepax@opensource.cirrus.com Signed-off-by: Mark Brown --- drivers/gpio/Kconfig | 9 +++++++++ drivers/gpio/gpiolib-swnode.c | 44 +++++++++++++++++++++++++++++++++++++++++++ include/linux/gpio/property.h | 4 ++++ 3 files changed, 57 insertions(+) (limited to 'include/linux') diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index b50d0b470849..00b5c007a2bb 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -103,6 +103,15 @@ config GPIO_REGMAP select REGMAP tristate +config GPIO_SWNODE_UNDEFINED + bool + help + This adds a special place holder for software nodes to contain an + undefined GPIO reference, this is primarily used by SPI to allow a + list of GPIO chip selects to mark a certain chip select as being + controlled the SPI device's internal chip select mechanism and not + a GPIO. 
+ # put drivers in the right section, in alphabetical order # This symbol is selected by both I2C and SPI expanders diff --git a/drivers/gpio/gpiolib-swnode.c b/drivers/gpio/gpiolib-swnode.c index fa52bdb1a29a..cec1ab878af8 100644 --- a/drivers/gpio/gpiolib-swnode.c +++ b/drivers/gpio/gpiolib-swnode.c @@ -4,8 +4,13 @@ * * Copyright 2022 Google LLC */ + +#define pr_fmt(fmt) "gpiolib: swnode: " fmt + #include #include +#include +#include #include #include #include @@ -17,6 +22,8 @@ #include "gpiolib.h" #include "gpiolib-swnode.h" +#define GPIOLIB_SWNODE_UNDEFINED_NAME "swnode-gpio-undefined" + static void swnode_format_propname(const char *con_id, char *propname, size_t max_size) { @@ -40,6 +47,14 @@ static struct gpio_device *swnode_get_gpio_device(struct fwnode_handle *fwnode) if (!gdev_node || !gdev_node->name) return ERR_PTR(-EINVAL); + /* + * Check for a special node that identifies undefined GPIOs, this is + * primarily used as a key for internal chip selects in SPI bindings. + */ + if (IS_ENABLED(CONFIG_GPIO_SWNODE_UNDEFINED) && + !strcmp(gdev_node->name, GPIOLIB_SWNODE_UNDEFINED_NAME)) + return ERR_PTR(-ENOENT); + gdev = gpio_device_find_by_label(gdev_node->name); return gdev ?: ERR_PTR(-EPROBE_DEFER); } @@ -121,3 +136,32 @@ int swnode_gpio_count(const struct fwnode_handle *fwnode, const char *con_id) return count ?: -ENOENT; } + +#if IS_ENABLED(CONFIG_GPIO_SWNODE_UNDEFINED) +/* + * A special node that identifies undefined GPIOs, this is primarily used as + * a key for internal chip selects in SPI bindings. + */ +const struct software_node swnode_gpio_undefined = { + .name = GPIOLIB_SWNODE_UNDEFINED_NAME, +}; +EXPORT_SYMBOL_NS_GPL(swnode_gpio_undefined, GPIO_SWNODE); + +static int __init swnode_gpio_init(void) +{ + int ret; + + ret = software_node_register(&swnode_gpio_undefined); + if (ret < 0) + pr_err("failed to register swnode: %d\n", ret); + + return ret; +} +subsys_initcall(swnode_gpio_init); + +static void __exit swnode_gpio_cleanup(void) +{ + software_node_unregister(&swnode_gpio_undefined); +} +__exitcall(swnode_gpio_cleanup); +#endif diff --git a/include/linux/gpio/property.h b/include/linux/gpio/property.h index 6c75c8bd44a0..832a60c2e0b9 100644 --- a/include/linux/gpio/property.h +++ b/include/linux/gpio/property.h @@ -5,7 +5,11 @@ #include /* for GPIO_* flags */ #include +struct software_node; + #define PROPERTY_ENTRY_GPIO(_name_, _chip_node_, _idx_, _flags_) \ PROPERTY_ENTRY_REF(_name_, _chip_node_, _idx_, _flags_) +extern const struct software_node swnode_gpio_undefined; + #endif /* __LINUX_GPIO_PROPERTY_H */ -- cgit From fc29e04ae1ad4c99422c0b8ae4b43cfe99c70429 Mon Sep 17 00:00:00 2001 From: Jesper Dangaard Brouer Date: Tue, 16 Apr 2024 19:51:26 +0200 Subject: cgroup/rstat: add cgroup_rstat_lock helpers and tracepoints This commit enhances the ability to troubleshoot the global cgroup_rstat_lock by introducing wrapper helper functions for the lock along with associated tracepoints. Although global, the cgroup_rstat_lock helper APIs and tracepoints take arguments such as cgroup pointer and cpu_in_loop variable. This adjustment is made because flushing occurs per cgroup despite the lock being global. Hence, when troubleshooting, it's important to identify the relevant cgroup. The cpu_in_loop variable is necessary because the global lock may be released within the main flushing loop that traverses CPUs. In the tracepoints, the cpu_in_loop value is set to -1 when acquiring the main lock; otherwise, it denotes the CPU number processed last. 
The new feature in this patchset is detecting when lock is contended. The tracepoints are implemented with production in mind. For minimum overhead attach to cgroup:cgroup_rstat_lock_contended, which only gets activated when trylock detects lock is contended. A quick production check for issues could be done via this perf commands: perf record -g -e cgroup:cgroup_rstat_lock_contended Next natural question would be asking how long time do lock contenders wait for obtaining the lock. This can be answered by measuring the time between cgroup:cgroup_rstat_lock_contended and cgroup:cgroup_rstat_locked when args->contended is set. Like this bpftrace script: bpftrace -e ' tracepoint:cgroup:cgroup_rstat_lock_contended {@start[tid]=nsecs} tracepoint:cgroup:cgroup_rstat_locked { if (args->contended) { @wait_ns=hist(nsecs-@start[tid]); delete(@start[tid]);}} interval:s:1 {time("%H:%M:%S "); print(@wait_ns); }' Extending with time spend holding the lock will be more expensive as this also looks at all the non-contended cases. Like this bpftrace script: bpftrace -e ' tracepoint:cgroup:cgroup_rstat_lock_contended {@start[tid]=nsecs} tracepoint:cgroup:cgroup_rstat_locked { @locked[tid]=nsecs; if (args->contended) { @wait_ns=hist(nsecs-@start[tid]); delete(@start[tid]);}} tracepoint:cgroup:cgroup_rstat_unlock { @locked_ns=hist(nsecs-@locked[tid]); delete(@locked[tid]);} interval:s:1 {time("%H:%M:%S "); print(@wait_ns);print(@locked_ns); }' Signed-off-by: Jesper Dangaard Brouer Signed-off-by: Tejun Heo --- include/linux/cgroup.h | 2 +- include/trace/events/cgroup.h | 48 +++++++++++++++++++++++++++++++++++++++++++ kernel/cgroup/rstat.c | 47 ++++++++++++++++++++++++++++++++++-------- 3 files changed, 88 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 34aaf0e87def..2150ca60394b 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -690,7 +690,7 @@ static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen) void cgroup_rstat_updated(struct cgroup *cgrp, int cpu); void cgroup_rstat_flush(struct cgroup *cgrp); void cgroup_rstat_flush_hold(struct cgroup *cgrp); -void cgroup_rstat_flush_release(void); +void cgroup_rstat_flush_release(struct cgroup *cgrp); /* * Basic resource stats. 
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h index dd7d7c9efecd..13f375800135 100644 --- a/include/trace/events/cgroup.h +++ b/include/trace/events/cgroup.h @@ -204,6 +204,54 @@ DEFINE_EVENT(cgroup_event, cgroup_notify_frozen, TP_ARGS(cgrp, path, val) ); +DECLARE_EVENT_CLASS(cgroup_rstat, + + TP_PROTO(struct cgroup *cgrp, int cpu_in_loop, bool contended), + + TP_ARGS(cgrp, cpu_in_loop, contended), + + TP_STRUCT__entry( + __field( int, root ) + __field( int, level ) + __field( u64, id ) + __field( int, cpu_in_loop ) + __field( bool, contended ) + ), + + TP_fast_assign( + __entry->root = cgrp->root->hierarchy_id; + __entry->id = cgroup_id(cgrp); + __entry->level = cgrp->level; + __entry->cpu_in_loop = cpu_in_loop; + __entry->contended = contended; + ), + + TP_printk("root=%d id=%llu level=%d cpu_in_loop=%d lock contended:%d", + __entry->root, __entry->id, __entry->level, + __entry->cpu_in_loop, __entry->contended) +); + +DEFINE_EVENT(cgroup_rstat, cgroup_rstat_lock_contended, + + TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), + + TP_ARGS(cgrp, cpu, contended) +); + +DEFINE_EVENT(cgroup_rstat, cgroup_rstat_locked, + + TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), + + TP_ARGS(cgrp, cpu, contended) +); + +DEFINE_EVENT(cgroup_rstat, cgroup_rstat_unlock, + + TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), + + TP_ARGS(cgrp, cpu, contended) +); + #endif /* _TRACE_CGROUP_H */ /* This part must be outside protection */ diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index 07e2284bb499..ff68c904e647 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -7,6 +7,8 @@ #include #include +#include + static DEFINE_SPINLOCK(cgroup_rstat_lock); static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock); @@ -222,6 +224,35 @@ __weak noinline void bpf_rstat_flush(struct cgroup *cgrp, __bpf_hook_end(); +/* + * Helper functions for locking cgroup_rstat_lock. + * + * This makes it easier to diagnose locking issues and contention in + * production environments. The parameter @cpu_in_loop indicate lock + * was released and re-taken when collection data from the CPUs. The + * value -1 is used when obtaining the main lock else this is the CPU + * number processed last. 
+ */ +static inline void __cgroup_rstat_lock(struct cgroup *cgrp, int cpu_in_loop) + __acquires(&cgroup_rstat_lock) +{ + bool contended; + + contended = !spin_trylock_irq(&cgroup_rstat_lock); + if (contended) { + trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, contended); + spin_lock_irq(&cgroup_rstat_lock); + } + trace_cgroup_rstat_locked(cgrp, cpu_in_loop, contended); +} + +static inline void __cgroup_rstat_unlock(struct cgroup *cgrp, int cpu_in_loop) + __releases(&cgroup_rstat_lock) +{ + trace_cgroup_rstat_unlock(cgrp, cpu_in_loop, false); + spin_unlock_irq(&cgroup_rstat_lock); +} + /* see cgroup_rstat_flush() */ static void cgroup_rstat_flush_locked(struct cgroup *cgrp) __releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock) @@ -248,10 +279,10 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp) /* play nice and yield if necessary */ if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) { - spin_unlock_irq(&cgroup_rstat_lock); + __cgroup_rstat_unlock(cgrp, cpu); if (!cond_resched()) cpu_relax(); - spin_lock_irq(&cgroup_rstat_lock); + __cgroup_rstat_lock(cgrp, cpu); } } } @@ -273,9 +304,9 @@ __bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp) { might_sleep(); - spin_lock_irq(&cgroup_rstat_lock); + __cgroup_rstat_lock(cgrp, -1); cgroup_rstat_flush_locked(cgrp); - spin_unlock_irq(&cgroup_rstat_lock); + __cgroup_rstat_unlock(cgrp, -1); } /** @@ -291,17 +322,17 @@ void cgroup_rstat_flush_hold(struct cgroup *cgrp) __acquires(&cgroup_rstat_lock) { might_sleep(); - spin_lock_irq(&cgroup_rstat_lock); + __cgroup_rstat_lock(cgrp, -1); cgroup_rstat_flush_locked(cgrp); } /** * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold() */ -void cgroup_rstat_flush_release(void) +void cgroup_rstat_flush_release(struct cgroup *cgrp) __releases(&cgroup_rstat_lock) { - spin_unlock_irq(&cgroup_rstat_lock); + __cgroup_rstat_unlock(cgrp, -1); } int cgroup_rstat_init(struct cgroup *cgrp) @@ -533,7 +564,7 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) #ifdef CONFIG_SCHED_CORE forceidle_time = cgrp->bstat.forceidle_sum; #endif - cgroup_rstat_flush_release(); + cgroup_rstat_flush_release(cgrp); } else { root_cgroup_cputime(&bstat); usage = bstat.cputime.sum_exec_runtime; -- cgit From b010bf72da5ada39f4e722fa28d4275df8673229 Mon Sep 17 00:00:00 2001 From: "Kory Maincent (Dent Project)" Date: Sun, 14 Apr 2024 16:21:52 +0200 Subject: net: pse-pd: Rectify and adapt the naming of admin_cotrol member of struct pse_control_config In commit 18ff0bcda6d1 ("ethtool: add interface to interact with Ethernet Power Equipment"), the 'pse_control_config' structure was introduced, housing a single member labeled 'admin_cotrol' responsible for maintaining the operational state of the PoDL PSE functions. A noticeable typographical error exists in the naming of this field ('cotrol' should be corrected to 'control'), which this commit aims to rectify. Furthermore, with upcoming extensions of this structure to encompass PoE functionalities, the field is being renamed to 'podl_admin_state' to distinctly indicate that this state is tailored specifically for PoDL." 
Reviewed-by: Oleksij Rempel Reviewed-by: Andrew Lunn Signed-off-by: Kory Maincent Link: https://lore.kernel.org/r/20240414-feature_poe-v8-3-e4bf1e860da5@bootlin.com Signed-off-by: Jakub Kicinski --- drivers/net/pse-pd/pse_regulator.c | 8 ++++---- include/linux/pse-pd/pse.h | 4 ++-- net/ethtool/pse-pd.c | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/pse-pd/pse_regulator.c b/drivers/net/pse-pd/pse_regulator.c index e2bf8306ca90..1dedf4de296e 100644 --- a/drivers/net/pse-pd/pse_regulator.c +++ b/drivers/net/pse-pd/pse_regulator.c @@ -31,10 +31,10 @@ pse_reg_ethtool_set_config(struct pse_controller_dev *pcdev, unsigned long id, struct pse_reg_priv *priv = to_pse_reg(pcdev); int ret; - if (priv->admin_state == config->admin_cotrol) + if (priv->admin_state == config->podl_admin_control) return 0; - switch (config->admin_cotrol) { + switch (config->podl_admin_control) { case ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED: ret = regulator_enable(priv->ps); break; @@ -43,14 +43,14 @@ pse_reg_ethtool_set_config(struct pse_controller_dev *pcdev, unsigned long id, break; default: dev_err(pcdev->dev, "Unknown admin state %i\n", - config->admin_cotrol); + config->podl_admin_control); ret = -ENOTSUPP; } if (ret) return ret; - priv->admin_state = config->admin_cotrol; + priv->admin_state = config->podl_admin_control; return 0; } diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h index fb724c65c77b..199cf4ae3cf2 100644 --- a/include/linux/pse-pd/pse.h +++ b/include/linux/pse-pd/pse.h @@ -15,11 +15,11 @@ struct pse_controller_dev; /** * struct pse_control_config - PSE control/channel configuration. * - * @admin_cotrol: set PoDL PSE admin control as described in + * @podl_admin_control: set PoDL PSE admin control as described in * IEEE 802.3-2018 30.15.1.2.1 acPoDLPSEAdminControl */ struct pse_control_config { - enum ethtool_podl_pse_admin_state admin_cotrol; + enum ethtool_podl_pse_admin_state podl_admin_control; }; /** diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c index cc478af77111..aef57a058f0d 100644 --- a/net/ethtool/pse-pd.c +++ b/net/ethtool/pse-pd.c @@ -130,7 +130,7 @@ ethnl_set_pse(struct ethnl_req_info *req_info, struct genl_info *info) struct phy_device *phydev; /* this values are already validated by the ethnl_pse_set_policy */ - config.admin_cotrol = nla_get_u32(tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]); + config.podl_admin_control = nla_get_u32(tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]); phydev = dev->phydev; if (!phydev) { -- cgit From 2a49b45cd0e7e8c9a0cd5e2f3993b558469ed744 Mon Sep 17 00:00:00 2001 From: Guanbing Huang Date: Tue, 16 Apr 2024 11:16:18 +0800 Subject: PNP: Add dev_is_pnp() macro Add dev_is_pnp() macro to determine whether the device is a PNP device. Signed-off-by: Guanbing Huang Suggested-by: Andy Shevchenko Reviewed-by: Bing Fan Tested-by: Linheng Du Reviewed-by: Andy Shevchenko Acked-by: Rafael J. 
Wysocki Link: https://lore.kernel.org/r/4e68f5557ad53b671ca8103e572163eca52a8f29.1713234515.git.albanhuang@tencent.com Signed-off-by: Greg Kroah-Hartman --- include/linux/pnp.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/pnp.h b/include/linux/pnp.h index ddbe7c3ca4ce..82561242cda4 100644 --- a/include/linux/pnp.h +++ b/include/linux/pnp.h @@ -469,6 +469,8 @@ int compare_pnp_id(struct pnp_id *pos, const char *id); int pnp_register_driver(struct pnp_driver *drv); void pnp_unregister_driver(struct pnp_driver *drv); +#define dev_is_pnp(d) ((d)->bus == &pnp_bus_type) + #else /* device management */ @@ -500,6 +502,8 @@ static inline int compare_pnp_id(struct pnp_id *pos, const char *id) { return -E static inline int pnp_register_driver(struct pnp_driver *drv) { return -ENODEV; } static inline void pnp_unregister_driver(struct pnp_driver *drv) { } +#define dev_is_pnp(d) false + #endif /* CONFIG_PNP */ /** -- cgit From 37f36cab53c9e903823b327f964227563766071a Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Wed, 10 Apr 2024 17:09:44 +0200 Subject: sched/vtime: Remove confusing arch_vtime_task_switch() declaration Callback arch_vtime_task_switch() is only defined when CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is selected. Yet, the function prototype forward declaration is present for CONFIG_VIRT_CPU_ACCOUNTING_GEN variant. Remove it. Signed-off-by: Alexander Gordeev Signed-off-by: Ingo Molnar Reviewed-by: Frederic Weisbecker Reviewed-by: Nicholas Piggin Link: https://lore.kernel.org/r/783d7c611864f82b0fb9edf01890b9396f3a549a.1712760275.git.agordeev@linux.ibm.com --- include/linux/vtime.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/vtime.h b/include/linux/vtime.h index 3684487d01e1..593466ceebed 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -18,7 +18,6 @@ extern void vtime_account_idle(struct task_struct *tsk); #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */ #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -extern void arch_vtime_task_switch(struct task_struct *tsk); extern void vtime_user_enter(struct task_struct *tsk); extern void vtime_user_exit(struct task_struct *tsk); extern void vtime_guest_enter(struct task_struct *tsk); -- cgit From 08a36a48544d73bf153960245aec6c5fa23960de Mon Sep 17 00:00:00 2001 From: Alexander Gordeev Date: Wed, 10 Apr 2024 17:09:48 +0200 Subject: sched/vtime: Do not include header There is no architecture-specific code or data left that generic needs to know about. Thus, avoid the inclusion of header. 
Signed-off-by: Alexander Gordeev Signed-off-by: Ingo Molnar Reviewed-by: Frederic Weisbecker Acked-by: Nicholas Piggin Link: https://lore.kernel.org/r/f7cd245668b9ae61a55184871aec494ec9199c4a.1712760275.git.agordeev@linux.ibm.com --- arch/powerpc/include/asm/Kbuild | 1 - include/asm-generic/vtime.h | 1 - include/linux/vtime.h | 4 ---- 3 files changed, 6 deletions(-) delete mode 100644 include/asm-generic/vtime.h (limited to 'include/linux') diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild index 61a8d5555cd7..e5fdc336c9b2 100644 --- a/arch/powerpc/include/asm/Kbuild +++ b/arch/powerpc/include/asm/Kbuild @@ -6,5 +6,4 @@ generic-y += agp.h generic-y += kvm_types.h generic-y += mcs_spinlock.h generic-y += qrwlock.h -generic-y += vtime.h generic-y += early_ioremap.h diff --git a/include/asm-generic/vtime.h b/include/asm-generic/vtime.h deleted file mode 100644 index b1a49677fe25..000000000000 --- a/include/asm-generic/vtime.h +++ /dev/null @@ -1 +0,0 @@ -/* no content, but patch(1) dislikes empty files */ diff --git a/include/linux/vtime.h b/include/linux/vtime.h index 593466ceebed..29dd5b91dd7d 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h @@ -5,10 +5,6 @@ #include #include -#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE -#include -#endif - /* * Common vtime APIs */ -- cgit From 5a1a25be995e1014abd01600479915683e356f5c Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 15 Apr 2024 11:20:55 -0400 Subject: libfs: Add simple_offset_rename() API I'm about to fix a tmpfs rename bug that requires the use of internal simple_offset helpers that are not available in mm/shmem.c Signed-off-by: Chuck Lever Link: https://lore.kernel.org/r/20240415152057.4605-3-cel@kernel.org Signed-off-by: Christian Brauner --- fs/libfs.c | 21 +++++++++++++++++++++ include/linux/fs.h | 2 ++ mm/shmem.c | 3 +-- 3 files changed, 24 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/libfs.c b/fs/libfs.c index ab61fae92cde..c392a6edd393 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -357,6 +357,27 @@ int simple_offset_empty(struct dentry *dentry) return ret; } +/** + * simple_offset_rename - handle directory offsets for rename + * @old_dir: parent directory of source entry + * @old_dentry: dentry of source entry + * @new_dir: parent_directory of destination entry + * @new_dentry: dentry of destination + * + * Caller provides appropriate serialization. + * + * Returns zero on success, a negative errno value on failure. 
+ */ +int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) +{ + struct offset_ctx *old_ctx = old_dir->i_op->get_offset_ctx(old_dir); + struct offset_ctx *new_ctx = new_dir->i_op->get_offset_ctx(new_dir); + + simple_offset_remove(old_ctx, old_dentry); + return simple_offset_add(new_ctx, old_dentry); +} + /** * simple_offset_rename_exchange - exchange rename with directory offsets * @old_dir: parent of dentry being moved diff --git a/include/linux/fs.h b/include/linux/fs.h index 00fc429b0af0..82c707e40fea 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3338,6 +3338,8 @@ void simple_offset_init(struct offset_ctx *octx); int simple_offset_add(struct offset_ctx *octx, struct dentry *dentry); void simple_offset_remove(struct offset_ctx *octx, struct dentry *dentry); int simple_offset_empty(struct dentry *dentry); +int simple_offset_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry); int simple_offset_rename_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, diff --git a/mm/shmem.c b/mm/shmem.c index 0aad0d9a621b..c0fb65223963 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3473,8 +3473,7 @@ static int shmem_rename2(struct mnt_idmap *idmap, return error; } - simple_offset_remove(shmem_get_offset_ctx(old_dir), old_dentry); - error = simple_offset_add(shmem_get_offset_ctx(new_dir), old_dentry); + error = simple_offset_rename(old_dir, old_dentry, new_dir, new_dentry); if (error) return error; -- cgit From 795bb82d12a16a4cee42845b0e4c7e3276574e5d Mon Sep 17 00:00:00 2001 From: Amir Goldstein Date: Tue, 16 Apr 2024 21:14:52 +0300 Subject: fsnotify: fix UAF from FS_ERROR event on a shutting down filesystem Protect against use after free when filesystem calls fsnotify_sb_error() during fs shutdown. Move freeing of sb->s_fsnotify_info to destroy_super_work(), because it may be accessed from fs shutdown context. 
Reported-by: syzbot+5e3f9b2a67b45f16d4e6@syzkaller.appspotmail.com Suggested-by: Jan Kara Link: https://lore.kernel.org/linux-fsdevel/20240416173211.4lnmgctyo4jn5fha@quack3/ Fixes: 07a3b8d0bf72 ("fsnotify: lazy attach fsnotify_sb_info state to sb") Reviewed-by: Christian Brauner Signed-off-by: Amir Goldstein Signed-off-by: Jan Kara Message-Id: <20240416181452.567070-1-amir73il@gmail.com> --- fs/notify/fsnotify.c | 6 +++++- fs/super.c | 1 + include/linux/fsnotify_backend.h | 4 ++++ 3 files changed, 10 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 2ae965ef37e8..ff69ae24c4e8 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -103,7 +103,11 @@ void fsnotify_sb_delete(struct super_block *sb) WARN_ON(fsnotify_sb_has_priority_watchers(sb, FSNOTIFY_PRIO_CONTENT)); WARN_ON(fsnotify_sb_has_priority_watchers(sb, FSNOTIFY_PRIO_PRE_CONTENT)); - kfree(sbinfo); +} + +void fsnotify_sb_free(struct super_block *sb) +{ + kfree(sb->s_fsnotify_info); } /* diff --git a/fs/super.c b/fs/super.c index 71d9779c42b1..81656c7807db 100644 --- a/fs/super.c +++ b/fs/super.c @@ -274,6 +274,7 @@ static void destroy_super_work(struct work_struct *work) { struct super_block *s = container_of(work, struct super_block, destroy_work); + fsnotify_sb_free(s); security_sb_free(s); put_user_ns(s->s_user_ns); kfree(s->s_subtype); diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 7f1ab8264e41..4dd6143db271 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h @@ -576,6 +576,7 @@ extern int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data extern void __fsnotify_inode_delete(struct inode *inode); extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); extern void fsnotify_sb_delete(struct super_block *sb); +extern void fsnotify_sb_free(struct super_block *sb); extern u32 fsnotify_get_cookie(void); static inline __u32 fsnotify_parent_needed_mask(__u32 mask) @@ -880,6 +881,9 @@ static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt) static inline void fsnotify_sb_delete(struct super_block *sb) {} +static inline void fsnotify_sb_free(struct super_block *sb) +{} + static inline void fsnotify_update_flags(struct dentry *dentry) {} -- cgit From b85a3c1b7978f942fa5bf8cfe22b6a6aaa49d3b7 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Mon, 8 Apr 2024 10:41:04 +0900 Subject: block: Introduce bio_straddles_zones() and bio_offset_from_zone_start() Implement the inline helper functions bio_straddles_zones() and bio_offset_from_zone_start() to respectively test if a BIO crosses a zone boundary (the start sector and last sector belong to different zones) and to obtain the offset of a BIO from the start sector of its target zone. Signed-off-by: Damien Le Moal Reviewed-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Reviewed-by: Bart Van Assche Tested-by: Hans Holmberg Tested-by: Dennis Maisenbacher Reviewed-by: Martin K. 
Petersen Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20240408014128.205141-5-dlemoal@kernel.org Signed-off-by: Jens Axboe --- include/linux/blkdev.h | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'include/linux') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c3e8f7cf96be..ec7bd7091467 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -853,6 +853,13 @@ static inline unsigned int bio_zone_no(struct bio *bio) return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector); } +static inline bool bio_straddles_zones(struct bio *bio) +{ + return bio_sectors(bio) && + bio_zone_no(bio) != + disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1); +} + static inline unsigned int bio_zone_is_seq(struct bio *bio) { return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector); @@ -1328,6 +1335,12 @@ static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev, return sector & (bdev_zone_sectors(bdev) - 1); } +static inline sector_t bio_offset_from_zone_start(struct bio *bio) +{ + return bdev_offset_from_zone_start(bio->bi_bdev, + bio->bi_iter.bi_sector); +} + static inline bool bdev_is_zone_start(struct block_device *bdev, sector_t sector) { -- cgit From ecfe43b11b02ffeb24c203af7d3947417d412cf7 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Mon, 8 Apr 2024 10:41:06 +0900 Subject: block: Remember zone capacity when revalidating zones In preparation for adding zone write plugging, modify blk_revalidate_disk_zones() to get the capacity of zones of a zoned block device. This capacity value as a number of 512B sectors is stored in the gendisk zone_capacity field. Given that host-managed SMR disks (including zoned UFS drives) and all known NVMe ZNS devices have the same zone capacity for all zones blk_revalidate_disk_zones() returns an error if different capacities are detected for different zones. This also adds check to verify that the values reported by the device for zone capacities are correct, that is, that the zone capacity is never 0, does not exceed the zone size and is equal to the zone size for conventional zones. Signed-off-by: Damien Le Moal Reviewed-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Reviewed-by: Bart Van Assche Tested-by: Hans Holmberg Tested-by: Dennis Maisenbacher Reviewed-by: Martin K. 
Petersen Reviewed-by: Johannes Thumshirn Link: https://lore.kernel.org/r/20240408014128.205141-7-dlemoal@kernel.org Signed-off-by: Jens Axboe --- block/blk-zoned.c | 26 ++++++++++++++++++++++++++ include/linux/blkdev.h | 1 + 2 files changed, 27 insertions(+) (limited to 'include/linux') diff --git a/block/blk-zoned.c b/block/blk-zoned.c index da0f4b2a8fa0..23d9bb21c459 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -438,6 +438,7 @@ struct blk_revalidate_zone_args { unsigned long *conv_zones_bitmap; unsigned long *seq_zones_wlock; unsigned int nr_zones; + unsigned int zone_capacity; sector_t sector; }; @@ -482,9 +483,20 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx, return -ENODEV; } + if (!zone->capacity || zone->capacity > zone->len) { + pr_warn("%s: Invalid zone capacity\n", + disk->disk_name); + return -ENODEV; + } + /* Check zone type */ switch (zone->type) { case BLK_ZONE_TYPE_CONVENTIONAL: + if (zone->capacity != zone->len) { + pr_warn("%s: Invalid conventional zone capacity\n", + disk->disk_name); + return -ENODEV; + } if (!args->conv_zones_bitmap) { args->conv_zones_bitmap = blk_alloc_zone_bitmap(q->node, args->nr_zones); @@ -500,6 +512,18 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx, if (!args->seq_zones_wlock) return -ENOMEM; } + + /* + * Remember the capacity of the first sequential zone and check + * if it is constant for all zones. + */ + if (!args->zone_capacity) + args->zone_capacity = zone->capacity; + if (zone->capacity != args->zone_capacity) { + pr_warn("%s: Invalid variable zone capacity\n", + disk->disk_name); + return -ENODEV; + } break; case BLK_ZONE_TYPE_SEQWRITE_PREF: default: @@ -595,6 +619,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk, blk_mq_freeze_queue(q); if (ret > 0) { disk->nr_zones = args.nr_zones; + disk->zone_capacity = args.zone_capacity; swap(disk->seq_zones_wlock, args.seq_zones_wlock); swap(disk->conv_zones_bitmap, args.conv_zones_bitmap); if (update_driver_data) @@ -608,6 +633,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk, kfree(args.seq_zones_wlock); kfree(args.conv_zones_bitmap); + return ret; } EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ec7bd7091467..4e81f714cca7 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -191,6 +191,7 @@ struct gendisk { * blk_mq_unfreeze_queue(). */ unsigned int nr_zones; + unsigned int zone_capacity; unsigned long *conv_zones_bitmap; unsigned long *seq_zones_wlock; #endif /* CONFIG_BLK_DEV_ZONED */ -- cgit From dd291d77cc90eb6a86e9860ba8e6e38eebd57d12 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Mon, 8 Apr 2024 10:41:07 +0900 Subject: block: Introduce zone write plugging Zone write plugging implements a per-zone "plug" for write operations to control the submission and execution order of write operations to sequential write required zones of a zoned block device. Per-zone plugging guarantees that at any time there is at most only one write request per zone being executed. This mechanism is intended to replace zone write locking which implements a similar per-zone write throttling at the scheduler level, but is implemented only by mq-deadline. Unlike zone write locking which operates on requests, zone write plugging operates on BIOs. A zone write plug is simply a BIO list that is atomically manipulated using a spinlock and a kblockd submission work. 
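As a rough mental model of that description (a simplified sketch only, not the code added by this patch; the names zone_wplug_model and zone_wplug_submit_or_queue are made up for illustration, and the real structure is the struct blk_zone_wplug shown in the diff below), a per-zone write plug boils down to a spinlock-protected BIO list plus a "plugged" state:

    /* Simplified model of a per-zone write plug. */
    struct zone_wplug_model {
            spinlock_t          lock;      /* protects the fields below */
            bool                plugged;   /* a write is in flight for this zone */
            struct bio_list     bio_list;  /* write BIOs waiting for the zone */
            struct work_struct  bio_work;  /* kblockd work resubmitting plugged BIOs */
    };

    /* Return true if @bio was queued and must not be issued by the caller. */
    static bool zone_wplug_submit_or_queue(struct zone_wplug_model *zwplug,
                                           struct bio *bio)
    {
            unsigned long flags;
            bool was_plugged;

            spin_lock_irqsave(&zwplug->lock, flags);
            was_plugged = zwplug->plugged;
            if (was_plugged)
                    bio_list_add(&zwplug->bio_list, bio); /* wait for the in-flight write */
            else
                    zwplug->plugged = true;               /* caller issues @bio now */
            spin_unlock_irqrestore(&zwplug->lock, flags);

            return was_plugged;
    }

When the in-flight write completes, the next BIO on bio_list (if any) is handed to the kblockd work for resubmission, which is what keeps at most one write per zone in flight at any time.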
A write BIO to a zone is "plugged" to delay its execution if a write BIO for the same zone was already issued, that is, if a write request for the same zone is being executed. The next plugged BIO is unplugged and issued once the write request completes.

This mechanism makes it possible to:

- Untangle zone write ordering from block IO schedulers. This allows removing the restriction on using mq-deadline for writing to zoned block devices. Any block IO scheduler, including "none", can be used.

- Zone write plugging operates on BIOs instead of requests. Plugged BIOs waiting for execution thus do not hold scheduling tags and thus are not preventing other BIOs from executing (reads or writes to other zones). Depending on the workload, this can significantly improve the device use (higher queue depth operation) and performance.

- Both blk-mq (request based) zoned devices and BIO-based zoned devices (e.g. device mapper) can use zone write plugging. It is mandatory for the former but optional for the latter. BIO-based drivers can use zone write plugging to implement write ordering guarantees, or the drivers can implement their own if needed.

- The code is less invasive in the block layer and is mostly limited to blk-zoned.c with some small changes in blk-mq.c, blk-merge.c and bio.c.

Zone write plugging is implemented using struct blk_zone_wplug. This structure includes a spinlock, a BIO list and a work structure to handle the submission of plugged BIOs. Zone write plug structures are managed using a per-disk hash table. Plugging of zone write BIOs is done using the function blk_zone_write_plug_bio() which returns false if a BIO execution does not need to be delayed and true otherwise. This function is called from blk_mq_submit_bio() after a BIO is split to avoid large BIOs spanning multiple zones which would cause mishandling of zone write plugs. This change enables zone write plugging by default for any mq request-based block device. BIO-based device drivers can also use zone write plugging by explicitly calling blk_zone_write_plug_bio() in their ->submit_bio method. For such devices, the driver must ensure that a BIO passed to blk_zone_write_plug_bio() is already split and not straddling zone boundaries.

Only write and write zeroes BIOs are plugged. Zone write plugging does not introduce any significant overhead for other operations. A BIO that is being handled through zone write plugging is flagged using the new BIO flag BIO_ZONE_WRITE_PLUGGING. A request handling a BIO flagged with this new flag is flagged with the new RQF_ZONE_WRITE_PLUGGING flag. The completion of BIOs and requests carrying these flags triggers, respectively, calls to the functions blk_zone_write_bio_endio() and blk_zone_write_complete_request(). The latter function is used to trigger submission of the next plugged BIO using the zone plug work. blk_zone_write_bio_endio() does the same for BIO-based devices. This ensures that at any time, at most one request (blk-mq devices) or one BIO (BIO-based devices) is being executed for any zone. The handling of zone write plugs using a per-zone plug spinlock maximizes parallelism and device usage by allowing multiple zones to be written simultaneously without lock contention.

Zone write plugging ignores flush BIOs without data. However, any flush BIO that has data is always plugged so that the write part of the flush sequence is serialized with other regular writes.
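As mentioned above, BIO-based drivers opt in from their ->submit_bio method; the exported entry point added by the diff below is blk_zone_plug_bio(). A minimal sketch of such a driver hook, assuming the driver has already split the BIO so it does not cross a zone boundary, could look like this (my_zoned_submit_bio() and my_issue_bio() are made-up names, and passing 0 for the segment count is an assumption based on that count only being needed by the blk-mq merging path):

    static void my_zoned_submit_bio(struct bio *bio)
    {
            /*
             * If another write to the same zone is in flight, the BIO is
             * added to the zone write plug and true is returned; the plug's
             * work item will resubmit it once the current write completes.
             */
            if (blk_zone_plug_bio(bio, 0))
                    return;

            /* The target zone has no write in flight: issue the BIO now. */
            my_issue_bio(bio);
    }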
Given that any BIO handled through zone write plugging will be the only BIO in flight for the target zone when it is executed, the unplugging and submission of a BIO will have no chance of successfully merging with plugged requests or requests in the scheduler. To overcome this potential performance degradation, blk_mq_submit_bio() calls the function blk_zone_write_plug_attempt_merge() to try to merge other plugged BIOs with the one just unplugged and submitted. Successful merging is signaled using blk_zone_write_plug_bio_merged(), called from bio_attempt_back_merge(). Furthermore, to avoid recalculating the number of segments of plugged BIOs to attempt merging, the number of segments of a plugged BIO is saved using the new struct bio field __bi_nr_segments. To avoid growing the size of struct bio, this field is added as a union with the bio_cookie field. This is safe to do as polling is always disabled for plugged BIOs. When BIOs are plugged in a zone write plug, the device request queue usage counter is always incremented. This reference is kept and reused for blk-mq devices when the plugged BIO is unplugged and submitted again using submit_bio_noacct_nocheck(). For this case, the unplugged BIO is already flagged with BIO_ZONE_WRITE_PLUGGING and blk_mq_submit_bio() proceeds directly to allocating a new request for the BIO, re-using the usage reference count taken when the BIO was plugged. This extra reference count is dropped in blk_zone_write_plug_attempt_merge() for any plugged BIO that is successfully merged. Given that BIO-based devices will not take this path, the extra reference is dropped after a plugged BIO is unplugged and submitted. Zone write plugs are dynamically allocated and managed using a hash table (an array of struct hlist_head) with RCU protection. A zone write plug is allocated when a write BIO is received for the zone and not freed until the zone is fully written, reset or finished. To detect when a zone write plug can be freed, the write state of each zone is tracked using a write pointer offset which corresponds to the offset of a zone write pointer relative to the zone start. Write operations always increment this write pointer offset. Zone reset operations set it to 0 and zone finish operations set it to the zone size. If a write error happens, the wp_offset value of a zone write plug may become incorrect and out of sync with the device managed write pointer. This is handled using the zone write plug flag BLK_ZONE_WPLUG_ERROR. The function blk_zone_wplug_handle_error() is called from the new disk zone write plug work when this flag is set. This function executes a report zone to update the zone write pointer offset to the current value as indicated by the device. The disk zone write plug work is scheduled whenever a BIO flagged with BIO_ZONE_WRITE_PLUGGING completes with an error or when bio_zone_wplug_prepare_bio() detects an unaligned write. Once scheduled, the disk zone write plugs work keeps running until all zone errors are handled. To match the new data structures used for zoned disks, the function disk_free_zone_bitmaps() is renamed to the more generic disk_free_zone_resources(). The function disk_init_zone_resources() is also introduced to initialize zone write plugs resources when a gendisk is allocated. In order to guarantee that the user can simultaneously write up to a number of zones equal to a device max active zone limit or max open zone limit, zone write plugs are allocated using a mempool sized to the maximum of these 2 device limits. 
For a device that does not have active and open zone limits, 128 is used as the default mempool size. If a change to the device active and open zone limits is detected, the disk mempool is resized when blk_revalidate_disk_zones() is executed. This commit contains contributions from Christoph Hellwig . Signed-off-by: Damien Le Moal Reviewed-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Tested-by: Hans Holmberg Tested-by: Dennis Maisenbacher Reviewed-by: Martin K. Petersen Reviewed-by: Bart Van Assche Link: https://lore.kernel.org/r/20240408014128.205141-8-dlemoal@kernel.org Signed-off-by: Jens Axboe --- block/bio.c | 2 + block/blk-merge.c | 12 + block/blk-mq.c | 32 +- block/blk-zoned.c | 1091 ++++++++++++++++++++++++++++++++++++++++++++- block/blk.h | 51 ++- block/genhd.c | 3 +- include/linux/blk-mq.h | 2 + include/linux/blk_types.h | 8 +- include/linux/blkdev.h | 11 + 9 files changed, 1201 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/block/bio.c b/block/bio.c index d24420ed1c4c..38baedb39c6f 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1576,6 +1576,8 @@ again: if (!bio_integrity_endio(bio)) return; + blk_zone_bio_endio(bio); + rq_qos_done_bio(bio); if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { diff --git a/block/blk-merge.c b/block/blk-merge.c index 3363b1321908..7f8a808b74c1 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -377,6 +377,7 @@ struct bio *__bio_split_to_limits(struct bio *bio, blkcg_bio_issue_init(split); bio_chain(split, bio); trace_block_split(split, bio->bi_iter.bi_sector); + WARN_ON_ONCE(bio_zone_write_plugging(bio)); submit_bio_noacct(bio); return split; } @@ -988,6 +989,9 @@ enum bio_merge_status bio_attempt_back_merge(struct request *req, blk_update_mixed_merge(req, bio, false); + if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING) + blk_zone_write_plug_bio_merged(bio); + req->biotail->bi_next = bio; req->biotail = bio; req->__data_len += bio->bi_iter.bi_size; @@ -1003,6 +1007,14 @@ static enum bio_merge_status bio_attempt_front_merge(struct request *req, { const blk_opf_t ff = bio_failfast(bio); + /* + * A front merge for writes to sequential zones of a zoned block device + * can happen only if the user submitted writes out of order. Do not + * merge such write to let it fail. + */ + if (req->rq_flags & RQF_ZONE_WRITE_PLUGGING) + return BIO_MERGE_FAILED; + if (!ll_front_merge_fn(req, bio, nr_segs)) return BIO_MERGE_FAILED; diff --git a/block/blk-mq.c b/block/blk-mq.c index 88b541e8873f..48eb7dd049d1 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -828,6 +828,8 @@ static void blk_complete_request(struct request *req) bio = next; } while (bio); + blk_zone_complete_request(req); + /* * Reset counters so that the request stacking driver * can find how many bytes remain in the request @@ -939,6 +941,7 @@ bool blk_update_request(struct request *req, blk_status_t error, * completely done */ if (!req->bio) { + blk_zone_complete_request(req); /* * Reset counters so that the request stacking driver * can find how many bytes remain in the request @@ -2963,15 +2966,30 @@ void blk_mq_submit_bio(struct bio *bio) struct request *rq; blk_status_t ret; + /* + * If the plug has a cached request for this queue, try to use it. + */ + rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf); + + /* + * A BIO that was released from a zone write plug has already been + * through the preparation in this function, already holds a reference + * on the queue usage counter, and is the only write BIO in-flight for + * the target zone. 
Go straight to preparing a request for it. + */ + if (bio_zone_write_plugging(bio)) { + nr_segs = bio->__bi_nr_segments; + if (rq) + blk_queue_exit(q); + goto new_request; + } + bio = blk_queue_bounce(bio, q); /* - * If the plug has a cached request for this queue, try use it. - * * The cached request already holds a q_usage_counter reference and we * don't have to acquire a new one if we use it. */ - rq = blk_mq_peek_cached_request(plug, q, bio->bi_opf); if (!rq) { if (unlikely(bio_queue_enter(bio))) return; @@ -2988,6 +3006,10 @@ void blk_mq_submit_bio(struct bio *bio) if (blk_mq_attempt_bio_merge(q, bio, nr_segs)) goto queue_exit; + if (blk_queue_is_zoned(q) && blk_zone_plug_bio(bio, nr_segs)) + goto queue_exit; + +new_request: if (!rq) { rq = blk_mq_get_new_requests(q, plug, bio, nr_segs); if (unlikely(!rq)) @@ -3006,6 +3028,7 @@ void blk_mq_submit_bio(struct bio *bio) if (ret != BLK_STS_OK) { bio->bi_status = ret; bio_endio(bio); + blk_zone_complete_request(rq); blk_mq_free_request(rq); return; } @@ -3013,6 +3036,9 @@ void blk_mq_submit_bio(struct bio *bio) if (op_is_flush(bio->bi_opf) && blk_insert_flush(rq)) return; + if (bio_zone_write_plugging(bio)) + blk_zone_write_plug_attempt_merge(rq); + if (plug) { blk_add_rq_to_plug(plug, rq); return; diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 23d9bb21c459..fefcebd70445 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -7,6 +7,7 @@ * * Copyright (c) 2016, Damien Le Moal * Copyright (c) 2016, Western Digital + * Copyright (c) 2024, Western Digital Corporation or its affiliates. */ #include @@ -16,8 +17,12 @@ #include #include #include +#include +#include +#include #include "blk.h" +#include "blk-mq-sched.h" #define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name static const char *const zone_cond_name[] = { @@ -32,6 +37,64 @@ static const char *const zone_cond_name[] = { }; #undef ZONE_COND_NAME +/* + * Per-zone write plug. + * @node: hlist_node structure for managing the plug using a hash table. + * @link: To list the plug in the zone write plug error list of the disk. + * @ref: Zone write plug reference counter. A zone write plug reference is + * always at least 1 when the plug is hashed in the disk plug hash table. + * The reference is incremented whenever a new BIO needing plugging is + * submitted and when a function needs to manipulate a plug. The + * reference count is decremented whenever a plugged BIO completes and + * when a function that referenced the plug returns. The initial + * reference is dropped whenever the zone of the zone write plug is reset, + * finished and when the zone becomes full (last write BIO to the zone + * completes). + * @lock: Spinlock to atomically manipulate the plug. + * @flags: Flags indicating the plug state. + * @zone_no: The number of the zone the plug is managing. + * @wp_offset: The zone write pointer location relative to the start of the zone + * as a number of 512B sectors. + * @bio_list: The list of BIOs that are currently plugged. + * @bio_work: Work struct to handle issuing of plugged BIOs + * @rcu_head: RCU head to free zone write plugs with an RCU grace period. + * @disk: The gendisk the plug belongs to. 
+ */ +struct blk_zone_wplug { + struct hlist_node node; + struct list_head link; + atomic_t ref; + spinlock_t lock; + unsigned int flags; + unsigned int zone_no; + unsigned int wp_offset; + struct bio_list bio_list; + struct work_struct bio_work; + struct rcu_head rcu_head; + struct gendisk *disk; +}; + +/* + * Zone write plug flags bits: + * - BLK_ZONE_WPLUG_PLUGGED: Indicates that the zone write plug is plugged, + * that is, that write BIOs are being throttled due to a write BIO already + * being executed or the zone write plug bio list is not empty. + * - BLK_ZONE_WPLUG_ERROR: Indicates that a write error happened which will be + * recovered with a report zone to update the zone write pointer offset. + * - BLK_ZONE_WPLUG_UNHASHED: Indicates that the zone write plug was removed + * from the disk hash table and that the initial reference to the zone + * write plug set when the plug was first added to the hash table has been + * dropped. This flag is set when a zone is reset, finished or become full, + * to prevent new references to the zone write plug to be taken for + * newly incoming BIOs. A zone write plug flagged with this flag will be + * freed once all remaining references from BIOs or functions are dropped. + */ +#define BLK_ZONE_WPLUG_PLUGGED (1U << 0) +#define BLK_ZONE_WPLUG_ERROR (1U << 1) +#define BLK_ZONE_WPLUG_UNHASHED (1U << 2) + +#define BLK_ZONE_WPLUG_BUSY (BLK_ZONE_WPLUG_PLUGGED | BLK_ZONE_WPLUG_ERROR) + /** * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX. * @zone_cond: BLK_ZONE_COND_XXX. @@ -425,12 +488,1010 @@ fail: return ret; } -void disk_free_zone_bitmaps(struct gendisk *disk) +static inline bool disk_zone_is_conv(struct gendisk *disk, sector_t sector) +{ + if (!disk->conv_zones_bitmap) + return false; + return test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap); +} + +static bool disk_insert_zone_wplug(struct gendisk *disk, + struct blk_zone_wplug *zwplug) +{ + struct blk_zone_wplug *zwplg; + unsigned long flags; + unsigned int idx = + hash_32(zwplug->zone_no, disk->zone_wplugs_hash_bits); + + /* + * Add the new zone write plug to the hash table, but carefully as we + * are racing with other submission context, so we may already have a + * zone write plug for the same zone. + */ + spin_lock_irqsave(&disk->zone_wplugs_lock, flags); + hlist_for_each_entry_rcu(zwplg, &disk->zone_wplugs_hash[idx], node) { + if (zwplg->zone_no == zwplug->zone_no) { + spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); + return false; + } + } + hlist_add_head_rcu(&zwplug->node, &disk->zone_wplugs_hash[idx]); + spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); + + return true; +} + +static void disk_remove_zone_wplug(struct gendisk *disk, + struct blk_zone_wplug *zwplug) +{ + unsigned long flags; + + spin_lock_irqsave(&disk->zone_wplugs_lock, flags); + zwplug->flags |= BLK_ZONE_WPLUG_UNHASHED; + atomic_dec(&zwplug->ref); + hlist_del_init_rcu(&zwplug->node); + spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); +} + +static inline bool disk_should_remove_zone_wplug(struct gendisk *disk, + struct blk_zone_wplug *zwplug) +{ + /* If the zone is still busy, the plug cannot be removed. */ + if (zwplug->flags & BLK_ZONE_WPLUG_BUSY) + return false; + + /* We can remove zone write plugs for zones that are empty or full. 
*/ + return !zwplug->wp_offset || zwplug->wp_offset >= disk->zone_capacity; +} + +static struct blk_zone_wplug *disk_get_zone_wplug(struct gendisk *disk, + sector_t sector) +{ + unsigned int zno = disk_zone_no(disk, sector); + unsigned int idx = hash_32(zno, disk->zone_wplugs_hash_bits); + struct blk_zone_wplug *zwplug; + + rcu_read_lock(); + + hlist_for_each_entry_rcu(zwplug, &disk->zone_wplugs_hash[idx], node) { + if (zwplug->zone_no == zno && + atomic_inc_not_zero(&zwplug->ref)) { + rcu_read_unlock(); + return zwplug; + } + } + + rcu_read_unlock(); + + return NULL; +} + +static void disk_free_zone_wplug_rcu(struct rcu_head *rcu_head) +{ + struct blk_zone_wplug *zwplug = + container_of(rcu_head, struct blk_zone_wplug, rcu_head); + + mempool_free(zwplug, zwplug->disk->zone_wplugs_pool); +} + +static inline void disk_put_zone_wplug(struct blk_zone_wplug *zwplug) +{ + if (atomic_dec_and_test(&zwplug->ref)) { + WARN_ON_ONCE(!bio_list_empty(&zwplug->bio_list)); + WARN_ON_ONCE(!list_empty(&zwplug->link)); + + call_rcu(&zwplug->rcu_head, disk_free_zone_wplug_rcu); + } +} + +static void blk_zone_wplug_bio_work(struct work_struct *work); + +/* + * Get a reference on the write plug for the zone containing @sector. + * If the plug does not exist, it is allocated and hashed. + * Return a pointer to the zone write plug with the plug spinlock held. + */ +static struct blk_zone_wplug *disk_get_and_lock_zone_wplug(struct gendisk *disk, + sector_t sector, gfp_t gfp_mask, + unsigned long *flags) { + unsigned int zno = disk_zone_no(disk, sector); + struct blk_zone_wplug *zwplug; + +again: + zwplug = disk_get_zone_wplug(disk, sector); + if (zwplug) { + /* + * Check that a BIO completion or a zone reset or finish + * operation has not already removed the zone write plug from + * the hash table and dropped its reference count. In such case, + * we need to get a new plug so start over from the beginning. + */ + spin_lock_irqsave(&zwplug->lock, *flags); + if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED) { + spin_unlock_irqrestore(&zwplug->lock, *flags); + disk_put_zone_wplug(zwplug); + goto again; + } + return zwplug; + } + + /* + * Allocate and initialize a zone write plug with an extra reference + * so that it is not freed when the zone write plug becomes idle without + * the zone being full. + */ + zwplug = mempool_alloc(disk->zone_wplugs_pool, gfp_mask); + if (!zwplug) + return NULL; + + INIT_HLIST_NODE(&zwplug->node); + INIT_LIST_HEAD(&zwplug->link); + atomic_set(&zwplug->ref, 2); + spin_lock_init(&zwplug->lock); + zwplug->flags = 0; + zwplug->zone_no = zno; + zwplug->wp_offset = sector & (disk->queue->limits.chunk_sectors - 1); + bio_list_init(&zwplug->bio_list); + INIT_WORK(&zwplug->bio_work, blk_zone_wplug_bio_work); + zwplug->disk = disk; + + spin_lock_irqsave(&zwplug->lock, *flags); + + /* + * Insert the new zone write plug in the hash table. This can fail only + * if another context already inserted a plug. Retry from the beginning + * in such case. + */ + if (!disk_insert_zone_wplug(disk, zwplug)) { + spin_unlock_irqrestore(&zwplug->lock, *flags); + mempool_free(zwplug, disk->zone_wplugs_pool); + goto again; + } + + return zwplug; +} + +static inline void blk_zone_wplug_bio_io_error(struct bio *bio) +{ + struct request_queue *q = bio->bi_bdev->bd_disk->queue; + + bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING); + bio_io_error(bio); + blk_queue_exit(q); +} + +/* + * Abort (fail) all plugged BIOs of a zone write plug. 
+ */ +static void disk_zone_wplug_abort(struct blk_zone_wplug *zwplug) +{ + struct bio *bio; + + while ((bio = bio_list_pop(&zwplug->bio_list))) { + blk_zone_wplug_bio_io_error(bio); + disk_put_zone_wplug(zwplug); + } +} + +/* + * Abort (fail) all plugged BIOs of a zone write plug that are not aligned + * with the assumed write pointer location of the zone when the BIO will + * be unplugged. + */ +static void disk_zone_wplug_abort_unaligned(struct gendisk *disk, + struct blk_zone_wplug *zwplug) +{ + unsigned int zone_capacity = disk->zone_capacity; + unsigned int wp_offset = zwplug->wp_offset; + struct bio_list bl = BIO_EMPTY_LIST; + struct bio *bio; + + while ((bio = bio_list_pop(&zwplug->bio_list))) { + if (wp_offset >= zone_capacity || + bio_offset_from_zone_start(bio) != wp_offset) { + blk_zone_wplug_bio_io_error(bio); + disk_put_zone_wplug(zwplug); + continue; + } + + wp_offset += bio_sectors(bio); + bio_list_add(&bl, bio); + } + + bio_list_merge(&zwplug->bio_list, &bl); +} + +/* + * Set a zone write plug write pointer offset to either 0 (zone reset case) + * or to the zone size (zone finish case). This aborts all plugged BIOs, which + * is fine to do as doing a zone reset or zone finish while writes are in-flight + * is a mistake from the user which will most likely cause all plugged BIOs to + * fail anyway. + */ +static void disk_zone_wplug_set_wp_offset(struct gendisk *disk, + struct blk_zone_wplug *zwplug, + unsigned int wp_offset) +{ + unsigned long flags; + + spin_lock_irqsave(&zwplug->lock, flags); + + /* + * Make sure that a BIO completion or another zone reset or finish + * operation has not already removed the plug from the hash table. + */ + if (zwplug->flags & BLK_ZONE_WPLUG_UNHASHED) { + spin_unlock_irqrestore(&zwplug->lock, flags); + return; + } + + /* Update the zone write pointer and abort all plugged BIOs. */ + zwplug->wp_offset = wp_offset; + disk_zone_wplug_abort(zwplug); + + /* + * Updating the write pointer offset puts back the zone + * in a good state. So clear the error flag and decrement the + * error count if we were in error state. + */ + if (zwplug->flags & BLK_ZONE_WPLUG_ERROR) { + zwplug->flags &= ~BLK_ZONE_WPLUG_ERROR; + spin_lock(&disk->zone_wplugs_lock); + list_del_init(&zwplug->link); + spin_unlock(&disk->zone_wplugs_lock); + } + + /* + * The zone write plug now has no BIO plugged: remove it from the + * hash table so that it cannot be seen. The plug will be freed + * when the last reference is dropped. + */ + if (disk_should_remove_zone_wplug(disk, zwplug)) + disk_remove_zone_wplug(disk, zwplug); + + spin_unlock_irqrestore(&zwplug->lock, flags); +} + +static bool blk_zone_wplug_handle_reset_or_finish(struct bio *bio, + unsigned int wp_offset) +{ + struct gendisk *disk = bio->bi_bdev->bd_disk; + sector_t sector = bio->bi_iter.bi_sector; + struct blk_zone_wplug *zwplug; + + /* Conventional zones cannot be reset nor finished. */ + if (disk_zone_is_conv(disk, sector)) { + bio_io_error(bio); + return true; + } + + /* + * If we have a zone write plug, set its write pointer offset to 0 + * (reset case) or to the zone size (finish case). This will abort all + * BIOs plugged for the target zone. It is fine as resetting or + * finishing zones while writes are still in-flight will result in the + * writes failing anyway. 
+ */ + zwplug = disk_get_zone_wplug(disk, sector); + if (zwplug) { + disk_zone_wplug_set_wp_offset(disk, zwplug, wp_offset); + disk_put_zone_wplug(zwplug); + } + + return false; +} + +static bool blk_zone_wplug_handle_reset_all(struct bio *bio) +{ + struct gendisk *disk = bio->bi_bdev->bd_disk; + struct blk_zone_wplug *zwplug; + sector_t sector; + + /* + * Set the write pointer offset of all zone write plugs to 0. This will + * abort all plugged BIOs. It is fine as resetting zones while writes + * are still in-flight will result in the writes failing anyway. + */ + for (sector = 0; sector < get_capacity(disk); + sector += disk->queue->limits.chunk_sectors) { + zwplug = disk_get_zone_wplug(disk, sector); + if (zwplug) { + disk_zone_wplug_set_wp_offset(disk, zwplug, 0); + disk_put_zone_wplug(zwplug); + } + } + + return false; +} + +static inline void blk_zone_wplug_add_bio(struct blk_zone_wplug *zwplug, + struct bio *bio, unsigned int nr_segs) +{ + /* + * Grab an extra reference on the BIO request queue usage counter. + * This reference will be reused to submit a request for the BIO for + * blk-mq devices and dropped when the BIO is failed and after + * it is issued in the case of BIO-based devices. + */ + percpu_ref_get(&bio->bi_bdev->bd_disk->queue->q_usage_counter); + + /* + * The BIO is being plugged and thus will have to wait for the on-going + * write and for all other writes already plugged. So polling makes + * no sense. + */ + bio_clear_polled(bio); + + /* + * Reuse the poll cookie field to store the number of segments when + * split to the hardware limits. + */ + bio->__bi_nr_segments = nr_segs; + + /* + * We always receive BIOs after they are split and ready to be issued. + * The block layer passes the parts of a split BIO in order, and the + * user must also issue write sequentially. So simply add the new BIO + * at the tail of the list to preserve the sequential write order. + */ + bio_list_add(&zwplug->bio_list, bio); +} + +/* + * Called from bio_attempt_back_merge() when a BIO was merged with a request. + */ +void blk_zone_write_plug_bio_merged(struct bio *bio) +{ + struct blk_zone_wplug *zwplug; + unsigned long flags; + + /* + * If the BIO was already plugged, then we were called through + * blk_zone_write_plug_attempt_merge() -> blk_attempt_bio_merge(). + * For this case, blk_zone_write_plug_attempt_merge() will handle the + * zone write pointer offset update. + */ + if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING)) + return; + + bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING); + + /* + * Increase the plug reference count and advance the zone write + * pointer offset. + */ + zwplug = disk_get_zone_wplug(bio->bi_bdev->bd_disk, + bio->bi_iter.bi_sector); + spin_lock_irqsave(&zwplug->lock, flags); + zwplug->wp_offset += bio_sectors(bio); + spin_unlock_irqrestore(&zwplug->lock, flags); +} + +/* + * Attempt to merge plugged BIOs with a newly prepared request for a BIO that + * already went through zone write plugging (either a new BIO or one that was + * unplugged). + */ +void blk_zone_write_plug_attempt_merge(struct request *req) +{ + sector_t req_back_sector = blk_rq_pos(req) + blk_rq_sectors(req); + struct request_queue *q = req->q; + struct gendisk *disk = q->disk; + unsigned int zone_capacity = disk->zone_capacity; + struct blk_zone_wplug *zwplug = + disk_get_zone_wplug(disk, blk_rq_pos(req)); + unsigned long flags; + struct bio *bio; + + /* + * Completion of this request needs to be handled with + * blk_zone_write_plug_complete_request(). 
+ */ + req->rq_flags |= RQF_ZONE_WRITE_PLUGGING; + + if (blk_queue_nomerges(q)) + return; + + /* + * Walk through the list of plugged BIOs to check if they can be merged + * into the back of the request. + */ + spin_lock_irqsave(&zwplug->lock, flags); + while (zwplug->wp_offset < zone_capacity) { + bio = bio_list_peek(&zwplug->bio_list); + if (!bio) + break; + + if (bio->bi_iter.bi_sector != req_back_sector || + !blk_rq_merge_ok(req, bio)) + break; + + WARN_ON_ONCE(bio_op(bio) != REQ_OP_WRITE_ZEROES && + !bio->__bi_nr_segments); + + bio_list_pop(&zwplug->bio_list); + if (bio_attempt_back_merge(req, bio, bio->__bi_nr_segments) != + BIO_MERGE_OK) { + bio_list_add_head(&zwplug->bio_list, bio); + break; + } + + /* + * Drop the extra reference on the queue usage we got when + * plugging the BIO and advance the write pointer offset. + */ + blk_queue_exit(q); + zwplug->wp_offset += bio_sectors(bio); + + req_back_sector += bio_sectors(bio); + } + spin_unlock_irqrestore(&zwplug->lock, flags); +} + +static inline void disk_zone_wplug_set_error(struct gendisk *disk, + struct blk_zone_wplug *zwplug) +{ + if (!(zwplug->flags & BLK_ZONE_WPLUG_ERROR)) { + unsigned long flags; + + /* + * Increase the plug reference count. The reference will be + * dropped in disk_zone_wplugs_work() once the error state + * is handled. + */ + zwplug->flags |= BLK_ZONE_WPLUG_ERROR; + atomic_inc(&zwplug->ref); + + spin_lock_irqsave(&disk->zone_wplugs_lock, flags); + list_add_tail(&zwplug->link, &disk->zone_wplugs_err_list); + spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); + } +} + +/* + * Check and prepare a BIO for submission by incrementing the write pointer + * offset of its zone write plug. + */ +static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug, + struct bio *bio) +{ + struct gendisk *disk = bio->bi_bdev->bd_disk; + + /* + * Check that the user is not attempting to write to a full zone. + * We know such BIO will fail, and that would potentially overflow our + * write pointer offset beyond the end of the zone. + */ + if (zwplug->wp_offset >= disk->zone_capacity) + goto err; + + /* + * Check for non-sequential writes early because we avoid a + * whole lot of error handling trouble if we don't send it off + * to the driver. + */ + if (bio_offset_from_zone_start(bio) != zwplug->wp_offset) + goto err; + + /* Advance the zone write pointer offset. */ + zwplug->wp_offset += bio_sectors(bio); + + return true; + +err: + /* We detected an invalid write BIO: schedule error recovery. */ + disk_zone_wplug_set_error(disk, zwplug); + kblockd_schedule_work(&disk->zone_wplugs_work); + return false; +} + +static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs) +{ + struct gendisk *disk = bio->bi_bdev->bd_disk; + sector_t sector = bio->bi_iter.bi_sector; + struct blk_zone_wplug *zwplug; + gfp_t gfp_mask = GFP_NOIO; + unsigned long flags; + + /* + * BIOs must be fully contained within a zone so that we use the correct + * zone write plug for the entire BIO. For blk-mq devices, the block + * layer should already have done any splitting required to ensure this + * and this BIO should thus not be straddling zone boundaries. For + * BIO-based devices, it is the responsibility of the driver to split + * the bio before submitting it. + */ + if (WARN_ON_ONCE(bio_straddles_zones(bio))) { + bio_io_error(bio); + return true; + } + + /* Conventional zones do not need write plugging. 
*/ + if (disk_zone_is_conv(disk, sector)) + return false; + + if (bio->bi_opf & REQ_NOWAIT) + gfp_mask = GFP_NOWAIT; + + zwplug = disk_get_and_lock_zone_wplug(disk, sector, gfp_mask, &flags); + if (!zwplug) { + bio_io_error(bio); + return true; + } + + /* Indicate that this BIO is being handled using zone write plugging. */ + bio_set_flag(bio, BIO_ZONE_WRITE_PLUGGING); + + /* + * If the zone is already plugged or has a pending error, add the BIO + * to the plug BIO list. Otherwise, plug and let the BIO execute. + */ + if (zwplug->flags & BLK_ZONE_WPLUG_BUSY) + goto plug; + + /* + * If an error is detected when preparing the BIO, add it to the BIO + * list so that error recovery can deal with it. + */ + if (!blk_zone_wplug_prepare_bio(zwplug, bio)) + goto plug; + + zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED; + + spin_unlock_irqrestore(&zwplug->lock, flags); + + return false; + +plug: + zwplug->flags |= BLK_ZONE_WPLUG_PLUGGED; + blk_zone_wplug_add_bio(zwplug, bio, nr_segs); + + spin_unlock_irqrestore(&zwplug->lock, flags); + + return true; +} + +/** + * blk_zone_plug_bio - Handle a zone write BIO with zone write plugging + * @bio: The BIO being submitted + * @nr_segs: The number of physical segments of @bio + * + * Handle write and write zeroes operations using zone write plugging. + * + * Return true whenever @bio execution needs to be delayed through the zone + * write plug. Otherwise, return false to let the submission path process + * @bio normally. + */ +bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs) +{ + struct block_device *bdev = bio->bi_bdev; + + if (!bdev->bd_disk->zone_wplugs_hash) + return false; + + /* + * If the BIO already has the plugging flag set, then it was already + * handled through this path and this is a submission from the zone + * plug bio submit work. + */ + if (bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING)) + return false; + + /* + * We do not need to do anything special for empty flush BIOs, e.g + * BIOs such as issued by blkdev_issue_flush(). The is because it is + * the responsibility of the user to first wait for the completion of + * write operations for flush to have any effect on the persistence of + * the written data. + */ + if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) + return false; + + /* + * Regular writes and write zeroes need to be handled through the target + * zone write plug. This includes writes with REQ_FUA | REQ_PREFLUSH + * which may need to go through the flush machinery depending on the + * target device capabilities. Plugging such writes is fine as the flush + * machinery operates at the request level, below the plug, and + * completion of the flush sequence will go through the regular BIO + * completion, which will handle zone write plugging. + * Zone reset, reset all and finish commands need special treatment + * to correctly track the write pointer offset of zones. These commands + * are not plugged as we do not need serialization with write + * operations. It is the responsibility of the user to not issue reset + * and finish commands when write operations are in flight. 
+ */ + switch (bio_op(bio)) { + case REQ_OP_WRITE: + case REQ_OP_WRITE_ZEROES: + return blk_zone_wplug_handle_write(bio, nr_segs); + case REQ_OP_ZONE_RESET: + return blk_zone_wplug_handle_reset_or_finish(bio, 0); + case REQ_OP_ZONE_FINISH: + return blk_zone_wplug_handle_reset_or_finish(bio, + bdev_zone_sectors(bdev)); + case REQ_OP_ZONE_RESET_ALL: + return blk_zone_wplug_handle_reset_all(bio); + default: + return false; + } + + return false; +} +EXPORT_SYMBOL_GPL(blk_zone_plug_bio); + +static void disk_zone_wplug_unplug_bio(struct gendisk *disk, + struct blk_zone_wplug *zwplug) +{ + unsigned long flags; + + spin_lock_irqsave(&zwplug->lock, flags); + + /* + * If we had an error, schedule error recovery. The recovery work + * will restart submission of plugged BIOs. + */ + if (zwplug->flags & BLK_ZONE_WPLUG_ERROR) { + spin_unlock_irqrestore(&zwplug->lock, flags); + kblockd_schedule_work(&disk->zone_wplugs_work); + return; + } + + /* Schedule submission of the next plugged BIO if we have one. */ + if (!bio_list_empty(&zwplug->bio_list)) { + spin_unlock_irqrestore(&zwplug->lock, flags); + kblockd_schedule_work(&zwplug->bio_work); + return; + } + + zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED; + + /* + * If the zone is full (it was fully written or finished, or empty + * (it was reset), remove its zone write plug from the hash table. + */ + if (disk_should_remove_zone_wplug(disk, zwplug)) + disk_remove_zone_wplug(disk, zwplug); + + spin_unlock_irqrestore(&zwplug->lock, flags); +} + +void blk_zone_write_plug_bio_endio(struct bio *bio) +{ + struct gendisk *disk = bio->bi_bdev->bd_disk; + struct blk_zone_wplug *zwplug = + disk_get_zone_wplug(bio->bi_bdev->bd_disk, + bio->bi_iter.bi_sector); + unsigned long flags; + + if (WARN_ON_ONCE(!zwplug)) + return; + + /* Make sure we do not see this BIO again by clearing the plug flag. */ + bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING); + + /* + * If the BIO failed, mark the plug as having an error to trigger + * recovery. + */ + if (bio->bi_status != BLK_STS_OK) { + spin_lock_irqsave(&zwplug->lock, flags); + disk_zone_wplug_set_error(disk, zwplug); + spin_unlock_irqrestore(&zwplug->lock, flags); + } + + /* + * For BIO-based devices, blk_zone_write_plug_complete_request() + * is not called. So we need to schedule execution of the next + * plugged BIO here. + */ + if (bio->bi_bdev->bd_has_submit_bio) + disk_zone_wplug_unplug_bio(disk, zwplug); + + /* Drop the reference we took when the BIO was issued. */ + atomic_dec(&zwplug->ref); + disk_put_zone_wplug(zwplug); +} + +void blk_zone_write_plug_complete_request(struct request *req) +{ + struct gendisk *disk = req->q->disk; + struct blk_zone_wplug *zwplug = disk_get_zone_wplug(disk, req->__sector); + + if (WARN_ON_ONCE(!zwplug)) + return; + + req->rq_flags &= ~RQF_ZONE_WRITE_PLUGGING; + + disk_zone_wplug_unplug_bio(disk, zwplug); + + /* + * Drop the reference we took when the request was initialized in + * blk_zone_write_plug_attempt_merge(). + */ + atomic_dec(&zwplug->ref); + disk_put_zone_wplug(zwplug); +} + +static void blk_zone_wplug_bio_work(struct work_struct *work) +{ + struct blk_zone_wplug *zwplug = + container_of(work, struct blk_zone_wplug, bio_work); + struct block_device *bdev; + unsigned long flags; + struct bio *bio; + + /* + * Submit the next plugged BIO. If we do not have any, clear + * the plugged flag. 
+ */ + spin_lock_irqsave(&zwplug->lock, flags); + + bio = bio_list_pop(&zwplug->bio_list); + if (!bio) { + zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED; + spin_unlock_irqrestore(&zwplug->lock, flags); + return; + } + + if (!blk_zone_wplug_prepare_bio(zwplug, bio)) { + /* Error recovery will decide what to do with the BIO. */ + bio_list_add_head(&zwplug->bio_list, bio); + spin_unlock_irqrestore(&zwplug->lock, flags); + return; + } + + spin_unlock_irqrestore(&zwplug->lock, flags); + + bdev = bio->bi_bdev; + submit_bio_noacct_nocheck(bio); + + /* + * blk-mq devices will reuse the extra reference on the request queue + * usage counter we took when the BIO was plugged, but the submission + * path for BIO-based devices will not do that. So drop this extra + * reference here. + */ + if (bdev->bd_has_submit_bio) + blk_queue_exit(bdev->bd_disk->queue); +} + +static unsigned int blk_zone_wp_offset(struct blk_zone *zone) +{ + switch (zone->cond) { + case BLK_ZONE_COND_IMP_OPEN: + case BLK_ZONE_COND_EXP_OPEN: + case BLK_ZONE_COND_CLOSED: + return zone->wp - zone->start; + case BLK_ZONE_COND_FULL: + return zone->len; + case BLK_ZONE_COND_EMPTY: + return 0; + case BLK_ZONE_COND_NOT_WP: + case BLK_ZONE_COND_OFFLINE: + case BLK_ZONE_COND_READONLY: + default: + /* + * Conventional, offline and read-only zones do not have a valid + * write pointer. + */ + return UINT_MAX; + } +} + +static int blk_zone_wplug_report_zone_cb(struct blk_zone *zone, + unsigned int idx, void *data) +{ + struct blk_zone *zonep = data; + + *zonep = *zone; + return 0; +} + +static void disk_zone_wplug_handle_error(struct gendisk *disk, + struct blk_zone_wplug *zwplug) +{ + sector_t zone_start_sector = + bdev_zone_sectors(disk->part0) * zwplug->zone_no; + unsigned int noio_flag; + struct blk_zone zone; + unsigned long flags; + int ret; + + /* Get the current zone information from the device. */ + noio_flag = memalloc_noio_save(); + ret = disk->fops->report_zones(disk, zone_start_sector, 1, + blk_zone_wplug_report_zone_cb, &zone); + memalloc_noio_restore(noio_flag); + + spin_lock_irqsave(&zwplug->lock, flags); + + /* + * A zone reset or finish may have cleared the error already. In such + * case, do nothing as the report zones may have seen the "old" write + * pointer value before the reset/finish operation completed. + */ + if (!(zwplug->flags & BLK_ZONE_WPLUG_ERROR)) + goto unlock; + + zwplug->flags &= ~BLK_ZONE_WPLUG_ERROR; + + if (ret != 1) { + /* + * We failed to get the zone information, meaning that something + * is likely really wrong with the device. Abort all remaining + * plugged BIOs as otherwise we could endup waiting forever on + * plugged BIOs to complete if there is a queue freeze on-going. + */ + disk_zone_wplug_abort(zwplug); + goto unplug; + } + + /* Update the zone write pointer offset. */ + zwplug->wp_offset = blk_zone_wp_offset(&zone); + disk_zone_wplug_abort_unaligned(disk, zwplug); + + /* Restart BIO submission if we still have any BIO left. 
*/ + if (!bio_list_empty(&zwplug->bio_list)) { + WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED)); + kblockd_schedule_work(&zwplug->bio_work); + goto unlock; + } + +unplug: + zwplug->flags &= ~BLK_ZONE_WPLUG_PLUGGED; + if (disk_should_remove_zone_wplug(disk, zwplug)) + disk_remove_zone_wplug(disk, zwplug); + +unlock: + spin_unlock_irqrestore(&zwplug->lock, flags); +} + +static void disk_zone_wplugs_work(struct work_struct *work) +{ + struct gendisk *disk = + container_of(work, struct gendisk, zone_wplugs_work); + struct blk_zone_wplug *zwplug; + unsigned long flags; + + spin_lock_irqsave(&disk->zone_wplugs_lock, flags); + + while (!list_empty(&disk->zone_wplugs_err_list)) { + zwplug = list_first_entry(&disk->zone_wplugs_err_list, + struct blk_zone_wplug, link); + list_del_init(&zwplug->link); + spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); + + disk_zone_wplug_handle_error(disk, zwplug); + disk_put_zone_wplug(zwplug); + + spin_lock_irqsave(&disk->zone_wplugs_lock, flags); + } + + spin_unlock_irqrestore(&disk->zone_wplugs_lock, flags); +} + +static inline unsigned int disk_zone_wplugs_hash_size(struct gendisk *disk) +{ + return 1U << disk->zone_wplugs_hash_bits; +} + +void disk_init_zone_resources(struct gendisk *disk) +{ + spin_lock_init(&disk->zone_wplugs_lock); + INIT_LIST_HEAD(&disk->zone_wplugs_err_list); + INIT_WORK(&disk->zone_wplugs_work, disk_zone_wplugs_work); +} + +/* + * For the size of a disk zone write plug hash table, use the size of the + * zone write plug mempool, which is the maximum of the disk open zones and + * active zones limits. But do not exceed 4KB (512 hlist head entries), that is, + * 9 bits. For a disk that has no limits, mempool size defaults to 128. + */ +#define BLK_ZONE_WPLUG_MAX_HASH_BITS 9 +#define BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE 128 + +static int disk_alloc_zone_resources(struct gendisk *disk, + unsigned int pool_size) +{ + unsigned int i; + + disk->zone_wplugs_hash_bits = + min(ilog2(pool_size) + 1, BLK_ZONE_WPLUG_MAX_HASH_BITS); + + disk->zone_wplugs_hash = + kcalloc(disk_zone_wplugs_hash_size(disk), + sizeof(struct hlist_head), GFP_KERNEL); + if (!disk->zone_wplugs_hash) + return -ENOMEM; + + for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++) + INIT_HLIST_HEAD(&disk->zone_wplugs_hash[i]); + + disk->zone_wplugs_pool = mempool_create_kmalloc_pool(pool_size, + sizeof(struct blk_zone_wplug)); + if (!disk->zone_wplugs_pool) { + kfree(disk->zone_wplugs_hash); + disk->zone_wplugs_hash = NULL; + disk->zone_wplugs_hash_bits = 0; + return -ENOMEM; + } + + return 0; +} + +static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk) +{ + struct blk_zone_wplug *zwplug; + unsigned int i; + + if (!disk->zone_wplugs_hash) + return; + + /* Free all the zone write plugs we have. */ + for (i = 0; i < disk_zone_wplugs_hash_size(disk); i++) { + while (!hlist_empty(&disk->zone_wplugs_hash[i])) { + zwplug = hlist_entry(disk->zone_wplugs_hash[i].first, + struct blk_zone_wplug, node); + atomic_inc(&zwplug->ref); + disk_remove_zone_wplug(disk, zwplug); + disk_put_zone_wplug(zwplug); + } + } + + kfree(disk->zone_wplugs_hash); + disk->zone_wplugs_hash = NULL; + disk->zone_wplugs_hash_bits = 0; +} + +void disk_free_zone_resources(struct gendisk *disk) +{ + cancel_work_sync(&disk->zone_wplugs_work); + + disk_destroy_zone_wplugs_hash_table(disk); + + /* + * Wait for the zone write plugs to be RCU-freed before + * destorying the mempool. 
+ */ + rcu_barrier(); + + mempool_destroy(disk->zone_wplugs_pool); + disk->zone_wplugs_pool = NULL; + kfree(disk->conv_zones_bitmap); disk->conv_zones_bitmap = NULL; kfree(disk->seq_zones_wlock); disk->seq_zones_wlock = NULL; + + disk->zone_capacity = 0; + disk->nr_zones = 0; +} + +static int disk_revalidate_zone_resources(struct gendisk *disk, + unsigned int nr_zones) +{ + struct queue_limits *lim = &disk->queue->limits; + unsigned int pool_size; + + /* + * If the device has no limit on the maximum number of open and active + * zones, use BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE. + */ + pool_size = max(lim->max_open_zones, lim->max_active_zones); + if (!pool_size) + pool_size = min(BLK_ZONE_WPLUG_DEFAULT_POOL_SIZE, nr_zones); + + if (!disk->zone_wplugs_hash) + return disk_alloc_zone_resources(disk, pool_size); + + /* Resize the zone write plug memory pool if needed. */ + if (disk->zone_wplugs_pool->min_nr != pool_size) + return mempool_resize(disk->zone_wplugs_pool, pool_size); + + return 0; } struct blk_revalidate_zone_args { @@ -453,6 +1514,9 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx, struct request_queue *q = disk->queue; sector_t capacity = get_capacity(disk); sector_t zone_sectors = q->limits.chunk_sectors; + struct blk_zone_wplug *zwplug; + unsigned long flags; + unsigned int wp_offset; /* Check for bad zones and holes in the zone report */ if (zone->start != args->sector) { @@ -524,6 +1588,22 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx, disk->disk_name); return -ENODEV; } + + /* + * We need to track the write pointer of all zones that are not + * empty nor full. So make sure we have a zone write plug for + * such zone. + */ + wp_offset = blk_zone_wp_offset(zone); + if (wp_offset && wp_offset < zone_sectors) { + zwplug = disk_get_and_lock_zone_wplug(disk, zone->start, + GFP_NOIO, &flags); + if (!zwplug) + return -ENOMEM; + spin_unlock_irqrestore(&zwplug->lock, flags); + disk_put_zone_wplug(zwplug); + } + break; case BLK_ZONE_TYPE_SEQWRITE_PREF: default: @@ -560,7 +1640,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk, sector_t capacity = get_capacity(disk); struct blk_revalidate_zone_args args = { }; unsigned int noio_flag; - int ret; + int ret = -ENOMEM; if (WARN_ON_ONCE(!blk_queue_is_zoned(q))) return -EIO; @@ -593,6 +1673,11 @@ int blk_revalidate_disk_zones(struct gendisk *disk, args.disk = disk; args.nr_zones = (capacity + zone_sectors - 1) >> ilog2(zone_sectors); noio_flag = memalloc_noio_save(); + ret = disk_revalidate_zone_resources(disk, args.nr_zones); + if (ret) { + memalloc_noio_restore(noio_flag); + return ret; + } ret = disk->fops->report_zones(disk, 0, UINT_MAX, blk_revalidate_zone_cb, &args); if (!ret) { @@ -627,7 +1712,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk, ret = 0; } else { pr_warn("%s: failed to revalidate zones\n", disk->disk_name); - disk_free_zone_bitmaps(disk); + disk_free_zone_resources(disk); } blk_mq_unfreeze_queue(q); diff --git a/block/blk.h b/block/blk.h index bca50a9510c8..4df969f8fa28 100644 --- a/block/blk.h +++ b/block/blk.h @@ -415,7 +415,14 @@ static inline struct bio *blk_queue_bounce(struct bio *bio, } #ifdef CONFIG_BLK_DEV_ZONED -void disk_free_zone_bitmaps(struct gendisk *disk); +void disk_init_zone_resources(struct gendisk *disk); +void disk_free_zone_resources(struct gendisk *disk); +static inline bool bio_zone_write_plugging(struct bio *bio) +{ + return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING); +} +void blk_zone_write_plug_bio_merged(struct bio *bio); +void 
blk_zone_write_plug_attempt_merge(struct request *rq); static inline void blk_zone_update_request_bio(struct request *rq, struct bio *bio) { @@ -423,22 +430,60 @@ static inline void blk_zone_update_request_bio(struct request *rq, * For zone append requests, the request sector indicates the location * at which the BIO data was written. Return this value to the BIO * issuer through the BIO iter sector. + * For plugged zone writes, we need the original BIO sector so + * that blk_zone_write_plug_bio_endio() can lookup the zone write plug. */ - if (req_op(rq) == REQ_OP_ZONE_APPEND) + if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio)) bio->bi_iter.bi_sector = rq->__sector; } +void blk_zone_write_plug_bio_endio(struct bio *bio); +static inline void blk_zone_bio_endio(struct bio *bio) +{ + /* + * For write BIOs to zoned devices, signal the completion of the BIO so + * that the next write BIO can be submitted by zone write plugging. + */ + if (bio_zone_write_plugging(bio)) + blk_zone_write_plug_bio_endio(bio); +} + +void blk_zone_write_plug_complete_request(struct request *rq); +static inline void blk_zone_complete_request(struct request *rq) +{ + if (rq->rq_flags & RQF_ZONE_WRITE_PLUGGING) + blk_zone_write_plug_complete_request(rq); +} int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg); int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode, unsigned int cmd, unsigned long arg); #else /* CONFIG_BLK_DEV_ZONED */ -static inline void disk_free_zone_bitmaps(struct gendisk *disk) +static inline void disk_init_zone_resources(struct gendisk *disk) +{ +} +static inline void disk_free_zone_resources(struct gendisk *disk) +{ +} +static inline bool bio_zone_write_plugging(struct bio *bio) +{ + return false; +} +static inline void blk_zone_write_plug_bio_merged(struct bio *bio) +{ +} +static inline void blk_zone_write_plug_attempt_merge(struct request *rq) { } static inline void blk_zone_update_request_bio(struct request *rq, struct bio *bio) { } +static inline void blk_zone_bio_endio(struct bio *bio) +{ +} +static inline void blk_zone_complete_request(struct request *rq) +{ +} static inline int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd, unsigned long arg) { diff --git a/block/genhd.c b/block/genhd.c index bb29a68e1d67..eb893df56d51 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1182,7 +1182,7 @@ static void disk_release(struct device *dev) disk_release_events(disk); kfree(disk->random); - disk_free_zone_bitmaps(disk); + disk_free_zone_resources(disk); xa_destroy(&disk->part_tbl); disk->queue->disk = NULL; @@ -1364,6 +1364,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, if (blkcg_init_disk(disk)) goto out_erase_part0; + disk_init_zone_resources(disk); rand_initialize_disk(disk); disk_to_dev(disk)->class = &block_class; disk_to_dev(disk)->type = &disk_type; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index d3d8fd8e229b..60090c8366fb 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -56,6 +56,8 @@ typedef __u32 __bitwise req_flags_t; #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) /* The per-zone write lock is held for this request */ #define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19)) +/* The request completion needs to be signaled to zone write pluging. 
*/ +#define RQF_ZONE_WRITE_PLUGGING ((__force req_flags_t)(1 << 20)) /* ->timeout has been called, don't expire again */ #define RQF_TIMED_OUT ((__force req_flags_t)(1 << 21)) #define RQF_RESV ((__force req_flags_t)(1 << 23)) diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index cb1526ec44b5..ed45de07d2ef 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -234,7 +234,12 @@ struct bio { struct bvec_iter bi_iter; - blk_qc_t bi_cookie; + union { + /* for polled bios: */ + blk_qc_t bi_cookie; + /* for plugged zoned writes only: */ + unsigned int __bi_nr_segments; + }; bio_end_io_t *bi_end_io; void *bi_private; #ifdef CONFIG_BLK_CGROUP @@ -305,6 +310,7 @@ enum { BIO_QOS_MERGED, /* but went through rq_qos merge path */ BIO_REMAPPED, BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */ + BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */ BIO_FLAG_LAST }; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 4e81f714cca7..348b57ca0425 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -194,6 +194,12 @@ struct gendisk { unsigned int zone_capacity; unsigned long *conv_zones_bitmap; unsigned long *seq_zones_wlock; + unsigned int zone_wplugs_hash_bits; + spinlock_t zone_wplugs_lock; + struct mempool_s *zone_wplugs_pool; + struct hlist_head *zone_wplugs_hash; + struct list_head zone_wplugs_err_list; + struct work_struct zone_wplugs_work; #endif /* CONFIG_BLK_DEV_ZONED */ #if IS_ENABLED(CONFIG_CDROM) @@ -663,6 +669,7 @@ static inline unsigned int bdev_max_active_zones(struct block_device *bdev) return bdev->bd_disk->queue->limits.max_active_zones; } +bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs); #else /* CONFIG_BLK_DEV_ZONED */ static inline unsigned int bdev_nr_zones(struct block_device *bdev) { @@ -690,6 +697,10 @@ static inline unsigned int bdev_max_active_zones(struct block_device *bdev) { return 0; } +static inline bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs) +{ + return false; +} #endif /* CONFIG_BLK_DEV_ZONED */ static inline unsigned int blk_queue_depth(struct request_queue *q) -- cgit From ccdbf0aad2523ca133cceb22ce0f8306730e7ac3 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Mon, 8 Apr 2024 10:41:09 +0900 Subject: block: Allow zero value of max_zone_append_sectors queue limit In preparation for adding a generic zone append emulation using zone write plugging, allow device drivers supporting zoned block device to set a the max_zone_append_sectors queue limit of a device to 0 to indicate the lack of native support for zone append operations and that the block layer should emulate these operations using regular write operations. blk_queue_max_zone_append_sectors() is modified to allow passing 0 as the max_zone_append_sectors argument. The function queue_max_zone_append_sectors() is also modified to ensure that the minimum of the max_hw_sectors and chunk_sectors limit is used whenever the max_zone_append_sectors limit is 0. This minimum is consistent with the value set for the max_zone_append_sectors limit by the function blk_validate_zoned_limits() when limits for a queue are validated. The helper functions queue_emulates_zone_append() and bdev_emulates_zone_append() are added to test if a queue (or block device) emulates zone append operations. 
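As an illustrative aside, not part of this patch: a minimal sketch of how a zoned driver could use the semantics described above. The function zoned_drv_set_append_limit() and its has_native_zone_append parameter are hypothetical; only blk_queue_max_zone_append_sectors() and queue_emulates_zone_append() come from the patch.

#include <linux/blkdev.h>

/*
 * Hypothetical driver helper: advertise either a native zone append limit
 * or request zone append emulation. The queue is assumed to be zoned.
 */
static void zoned_drv_set_append_limit(struct request_queue *q,
				       bool has_native_zone_append,
				       unsigned int max_append_sectors)
{
	if (has_native_zone_append)
		/* Native support: advertise the hardware limit. */
		blk_queue_max_zone_append_sectors(q, max_append_sectors);
	else
		/* 0 asks the block layer to emulate zone append with writes. */
		blk_queue_max_zone_append_sectors(q, 0);

	/*
	 * In the emulation case, queue_emulates_zone_append(q) now returns
	 * true, while queue_max_zone_append_sectors(q) still reports a usable
	 * non-zero limit derived from chunk_sectors and max_hw_sectors.
	 */
}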
In order for blk_revalidate_disk_zones() to accept zoned block devices relying on zone append emulation, the direct check to the max_zone_append_sectors queue limit of the disk is replaced by a check using the value returned by queue_max_zone_append_sectors(). Similarly, queue_zone_append_max_show() is modified to use the same accessor so that the sysfs attribute advertizes the non-zero limit that will be used, regardless if it is for native or emulated commands. For stacking drivers, a top device should not need to care if the underlying devices have native or emulated zone append operations. blk_stack_limits() is thus modified to set the top device max_zone_append_sectors limit using the new accessor queue_limits_max_zone_append_sectors(). queue_max_zone_append_sectors() is modified to use this function as well. Stacking drivers that require zone append emulation, e.g. dm-crypt, can still request this feature by calling blk_queue_max_zone_append_sectors() with a 0 limit. Signed-off-by: Damien Le Moal Reviewed-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Reviewed-by: Bart Van Assche Tested-by: Hans Holmberg Tested-by: Dennis Maisenbacher Reviewed-by: Martin K. Petersen Link: https://lore.kernel.org/r/20240408014128.205141-10-dlemoal@kernel.org Signed-off-by: Jens Axboe --- block/blk-core.c | 2 +- block/blk-settings.c | 30 +++++++++++++++++++----------- block/blk-sysfs.c | 2 +- block/blk-zoned.c | 2 +- include/linux/blkdev.h | 23 ++++++++++++++++++++--- 5 files changed, 42 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index a16b5abdbbf5..3bf28149e104 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -602,7 +602,7 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q, return BLK_STS_IOERR; /* Make sure the BIO is small enough and will not get split */ - if (nr_sectors > q->limits.max_zone_append_sectors) + if (nr_sectors > queue_max_zone_append_sectors(q)) return BLK_STS_IOERR; bio->bi_opf |= REQ_NOMERGE; diff --git a/block/blk-settings.c b/block/blk-settings.c index cdbaef159c4b..c0197e1e7485 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -412,24 +412,32 @@ EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors); * blk_queue_max_zone_append_sectors - set max sectors for a single zone append * @q: the request queue for the device * @max_zone_append_sectors: maximum number of sectors to write per command + * + * Sets the maximum number of sectors allowed for zone append commands. If + * Specifying 0 for @max_zone_append_sectors indicates that the queue does + * not natively support zone append operations and that the block layer must + * emulate these operations using regular writes. **/ void blk_queue_max_zone_append_sectors(struct request_queue *q, unsigned int max_zone_append_sectors) { - unsigned int max_sectors; + unsigned int max_sectors = 0; if (WARN_ON(!blk_queue_is_zoned(q))) return; - max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors); - max_sectors = min(q->limits.chunk_sectors, max_sectors); + if (max_zone_append_sectors) { + max_sectors = min(q->limits.max_hw_sectors, + max_zone_append_sectors); + max_sectors = min(q->limits.chunk_sectors, max_sectors); - /* - * Signal eventual driver bugs resulting in the max_zone_append sectors limit - * being 0 due to a 0 argument, the chunk_sectors limit (zone size) not set, - * or the max_hw_sectors limit not set. 
- */ - WARN_ON(!max_sectors); + /* + * Signal eventual driver bugs resulting in the max_zone_append + * sectors limit being 0 due to the chunk_sectors limit (zone + * size) not set or the max_hw_sectors limit not set. + */ + WARN_ON_ONCE(!max_sectors); + } q->limits.max_zone_append_sectors = max_sectors; } @@ -756,8 +764,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors); t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors, b->max_write_zeroes_sectors); - t->max_zone_append_sectors = min(t->max_zone_append_sectors, - b->max_zone_append_sectors); + t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t), + queue_limits_max_zone_append_sectors(b)); t->bounce = max(t->bounce, b->bounce); t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 8c8f69d8ba48..e3ed5a921aff 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -224,7 +224,7 @@ static ssize_t queue_zone_write_granularity_show(struct request_queue *q, static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page) { - unsigned long long max_sectors = q->limits.max_zone_append_sectors; + unsigned long long max_sectors = queue_max_zone_append_sectors(q); return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT); } diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 4b21a1ec00d4..fcc1284b7c19 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -1692,7 +1692,7 @@ int blk_revalidate_disk_zones(struct gendisk *disk, return -ENODEV; } - if (!q->limits.max_zone_append_sectors) { + if (!queue_max_zone_append_sectors(q)) { pr_warn("%s: Invalid 0 maximum zone append limit\n", disk->disk_name); return -ENODEV; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 348b57ca0425..46613bf6a402 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1173,12 +1173,29 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q) return q->limits.max_segment_size; } -static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q) +static inline unsigned int queue_limits_max_zone_append_sectors(struct queue_limits *l) { + unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors); - const struct queue_limits *l = &q->limits; + return min_not_zero(l->max_zone_append_sectors, max_sectors); +} + +static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q) +{ + if (!blk_queue_is_zoned(q)) + return 0; - return min(l->max_zone_append_sectors, l->max_sectors); + return queue_limits_max_zone_append_sectors(&q->limits); +} + +static inline bool queue_emulates_zone_append(struct request_queue *q) +{ + return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors; +} + +static inline bool bdev_emulates_zone_append(struct block_device *bdev) +{ + return queue_emulates_zone_append(bdev_get_queue(bdev)); } static inline unsigned int -- cgit From 9b1ce7f0c6f82e241196febabddba5fab66c8f05 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Mon, 8 Apr 2024 10:41:10 +0900 Subject: block: Implement zone append emulation Given that zone write plugging manages all writes to zones of a zoned block device and tracks the write pointer position of all zones that are not full nor empty, emulating zone append operations using regular writes can be implemented generically, without relying on the underlying device driver to implement such emulation. 
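To make the mechanism concrete, here is a small conceptual model, not the kernel implementation and with purely illustrative names: once the zone write pointer is tracked in software, a zone append reduces to a regular write issued at the current write pointer, whose start sector is reported back to the caller.

#include <stdbool.h>

/* Simplified software view of one sequential-write-required zone. */
struct zone_model {
	unsigned long long start;	/* first sector of the zone */
	unsigned int capacity;		/* writable sectors in the zone */
	unsigned int wp_offset;		/* tracked write pointer offset */
};

/*
 * Emulate a zone append of nr_sectors: pick the target sector from the
 * tracked write pointer, advance the pointer, and report where the data
 * will land. Returns false if the zone cannot accommodate the write.
 */
static bool emulated_zone_append(struct zone_model *z, unsigned int nr_sectors,
				 unsigned long long *written_sector)
{
	if (z->wp_offset + nr_sectors > z->capacity)
		return false;				/* zone full: fail the append */

	*written_sector = z->start + z->wp_offset;	/* issue the write here */
	z->wp_offset += nr_sectors;			/* advance the soft write pointer */
	return true;
}

Serializing writes per zone, which is exactly what zone write plugging provides, is what makes this translation safe: the tracked write pointer cannot be invalidated by another in-flight write to the same zone.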
This is needed for devices that do not natively support the zone append command (e.g. SMR hard-disks). A device may request zone append emulation by setting its max_zone_append_sectors queue limit to 0. For such device, the function blk_zone_wplug_prepare_bio() changes zone append BIOs into non-mergeable regular write BIOs. Modified zone append BIOs are flagged with the new BIO flag BIO_EMULATES_ZONE_APPEND. This flag is checked on completion of the BIO in blk_zone_write_plug_bio_endio() to restore the original REQ_OP_ZONE_APPEND operation code of the BIO. The block layer internal inline helper function bio_is_zone_append() is added to test if a BIO is either a native zone append operation (REQ_OP_ZONE_APPEND operation code) or if it is flagged with BIO_EMULATES_ZONE_APPEND. Given that both native and emulated zone append BIO completion handling should be similar, The functions blk_update_request() and blk_zone_complete_request_bio() are modified to use bio_is_zone_append() to execute blk_zone_update_request_bio() for both native and emulated zone append operations. This commit contains contributions from Christoph Hellwig . Signed-off-by: Damien Le Moal Reviewed-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Reviewed-by: Bart Van Assche Tested-by: Hans Holmberg Tested-by: Dennis Maisenbacher Reviewed-by: Martin K. Petersen Link: https://lore.kernel.org/r/20240408014128.205141-11-dlemoal@kernel.org Signed-off-by: Jens Axboe --- block/blk-mq.c | 3 +-- block/blk-zoned.c | 64 +++++++++++++++++++++++++++++++++++++++-------- block/blk.h | 14 +++++++++-- include/linux/blk_types.h | 1 + 4 files changed, 67 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq.c b/block/blk-mq.c index 48eb7dd049d1..6f9cc1c4d4fb 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -906,8 +906,7 @@ bool blk_update_request(struct request *req, blk_status_t error, if (bio_bytes == bio->bi_iter.bi_size) { req->bio = bio->bi_next; - } else if (req_op(req) == REQ_OP_ZONE_APPEND && - error == BLK_STS_OK) { + } else if (bio_is_zone_append(bio) && error == BLK_STS_OK) { /* * Partial zone append completions cannot be supported * as the BIO fragments may end up not being written diff --git a/block/blk-zoned.c b/block/blk-zoned.c index fcc1284b7c19..a60ac5b3e637 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -689,7 +689,8 @@ static void disk_zone_wplug_abort_unaligned(struct gendisk *disk, while ((bio = bio_list_pop(&zwplug->bio_list))) { if (wp_offset >= zone_capacity || - bio_offset_from_zone_start(bio) != wp_offset) { + (bio_op(bio) != REQ_OP_ZONE_APPEND && + bio_offset_from_zone_start(bio) != wp_offset)) { blk_zone_wplug_bio_io_error(bio); disk_put_zone_wplug(zwplug); continue; @@ -951,7 +952,8 @@ static inline void disk_zone_wplug_set_error(struct gendisk *disk, /* * Check and prepare a BIO for submission by incrementing the write pointer - * offset of its zone write plug. + * offset of its zone write plug and changing zone append operations into + * regular write when zone append emulation is needed. */ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug, struct bio *bio) @@ -966,13 +968,30 @@ static bool blk_zone_wplug_prepare_bio(struct blk_zone_wplug *zwplug, if (zwplug->wp_offset >= disk->zone_capacity) goto err; - /* - * Check for non-sequential writes early because we avoid a - * whole lot of error handling trouble if we don't send it off - * to the driver. 
- */ - if (bio_offset_from_zone_start(bio) != zwplug->wp_offset) - goto err; + if (bio_op(bio) == REQ_OP_ZONE_APPEND) { + /* + * Use a regular write starting at the current write pointer. + * Similarly to native zone append operations, do not allow + * merging. + */ + bio->bi_opf &= ~REQ_OP_MASK; + bio->bi_opf |= REQ_OP_WRITE | REQ_NOMERGE; + bio->bi_iter.bi_sector += zwplug->wp_offset; + + /* + * Remember that this BIO is in fact a zone append operation + * so that we can restore its operation code on completion. + */ + bio_set_flag(bio, BIO_EMULATES_ZONE_APPEND); + } else { + /* + * Check for non-sequential writes early because we avoid a + * whole lot of error handling trouble if we don't send it off + * to the driver. + */ + if (bio_offset_from_zone_start(bio) != zwplug->wp_offset) + goto err; + } /* Advance the zone write pointer offset. */ zwplug->wp_offset += bio_sectors(bio); @@ -1008,8 +1027,14 @@ static bool blk_zone_wplug_handle_write(struct bio *bio, unsigned int nr_segs) } /* Conventional zones do not need write plugging. */ - if (disk_zone_is_conv(disk, sector)) + if (disk_zone_is_conv(disk, sector)) { + /* Zone append to conventional zones is not allowed. */ + if (bio_op(bio) == REQ_OP_ZONE_APPEND) { + bio_io_error(bio); + return true; + } return false; + } if (bio->bi_opf & REQ_NOWAIT) gfp_mask = GFP_NOWAIT; @@ -1057,7 +1082,8 @@ plug: * @bio: The BIO being submitted * @nr_segs: The number of physical segments of @bio * - * Handle write and write zeroes operations using zone write plugging. + * Handle write, write zeroes and zone append operations requiring emulation + * using zone write plugging. * * Return true whenever @bio execution needs to be delayed through the zone * write plug. Otherwise, return false to let the submission path process @@ -1096,6 +1122,9 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs) * machinery operates at the request level, below the plug, and * completion of the flush sequence will go through the regular BIO * completion, which will handle zone write plugging. + * Zone append operations for devices that requested emulation must + * also be plugged so that these BIOs can be changed into regular + * write BIOs. * Zone reset, reset all and finish commands need special treatment * to correctly track the write pointer offset of zones. These commands * are not plugged as we do not need serialization with write @@ -1103,6 +1132,10 @@ bool blk_zone_plug_bio(struct bio *bio, unsigned int nr_segs) * and finish commands when write operations are in flight. */ switch (bio_op(bio)) { + case REQ_OP_ZONE_APPEND: + if (!bdev_emulates_zone_append(bdev)) + return false; + fallthrough; case REQ_OP_WRITE: case REQ_OP_WRITE_ZEROES: return blk_zone_wplug_handle_write(bio, nr_segs); @@ -1171,6 +1204,15 @@ void blk_zone_write_plug_bio_endio(struct bio *bio) /* Make sure we do not see this BIO again by clearing the plug flag. */ bio_clear_flag(bio, BIO_ZONE_WRITE_PLUGGING); + /* + * If this is a regular write emulating a zone append operation, + * restore the original operation code. + */ + if (bio_flagged(bio, BIO_EMULATES_ZONE_APPEND)) { + bio->bi_opf &= ~REQ_OP_MASK; + bio->bi_opf |= REQ_OP_ZONE_APPEND; + } + /* * If the BIO failed, mark the plug as having an error to trigger * recovery. 
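Before the header changes below, a short issuer-side sketch, hedged and not part of the patch: whether the device executes a native zone append or the block layer emulates it with a plugged regular write, the completion handler reads the actual write location from bi_iter.bi_sector. The my_append_ctx structure and my_append_end_io() are hypothetical names.

#include <linux/bio.h>
#include <linux/completion.h>

/* Hypothetical issuer context for one zone append BIO. */
struct my_append_ctx {
	sector_t written_sector;
	struct completion done;
};

static void my_append_end_io(struct bio *bio)
{
	struct my_append_ctx *ctx = bio->bi_private;

	/* On success, bi_sector holds the sector the data was written at. */
	if (!bio->bi_status)
		ctx->written_sector = bio->bi_iter.bi_sector;

	complete(&ctx->done);
	bio_put(bio);
}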
diff --git a/block/blk.h b/block/blk.h index 4df969f8fa28..1140c4a0be03 100644 --- a/block/blk.h +++ b/block/blk.h @@ -421,6 +421,11 @@ static inline bool bio_zone_write_plugging(struct bio *bio) { return bio_flagged(bio, BIO_ZONE_WRITE_PLUGGING); } +static inline bool bio_is_zone_append(struct bio *bio) +{ + return bio_op(bio) == REQ_OP_ZONE_APPEND || + bio_flagged(bio, BIO_EMULATES_ZONE_APPEND); +} void blk_zone_write_plug_bio_merged(struct bio *bio); void blk_zone_write_plug_attempt_merge(struct request *rq); static inline void blk_zone_update_request_bio(struct request *rq, @@ -430,8 +435,9 @@ static inline void blk_zone_update_request_bio(struct request *rq, * For zone append requests, the request sector indicates the location * at which the BIO data was written. Return this value to the BIO * issuer through the BIO iter sector. - * For plugged zone writes, we need the original BIO sector so - * that blk_zone_write_plug_bio_endio() can lookup the zone write plug. + * For plugged zone writes, which include emulated zone append, we need + * the original BIO sector so that blk_zone_write_plug_bio_endio() can + * lookup the zone write plug. */ if (req_op(rq) == REQ_OP_ZONE_APPEND || bio_zone_write_plugging(bio)) bio->bi_iter.bi_sector = rq->__sector; @@ -468,6 +474,10 @@ static inline bool bio_zone_write_plugging(struct bio *bio) { return false; } +static inline bool bio_is_zone_append(struct bio *bio) +{ + return false; +} static inline void blk_zone_write_plug_bio_merged(struct bio *bio) { } diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index ed45de07d2ef..29b3170431e7 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -311,6 +311,7 @@ enum { BIO_REMAPPED, BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */ BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */ + BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */ BIO_FLAG_LAST }; -- cgit From 63b5385e781417e73bda3fd652c2199826afda6e Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Mon, 8 Apr 2024 10:41:19 +0900 Subject: block: Remove BLK_STS_ZONE_RESOURCE The zone append emulation of the scsi disk driver was the only driver using BLK_STS_ZONE_RESOURCE. With this code removed, BLK_STS_ZONE_RESOURCE is now unused. Remove this macro definition and simplify blk_mq_dispatch_rq_list() where this status code was handled. Signed-off-by: Damien Le Moal Reviewed-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Reviewed-by: Bart Van Assche Tested-by: Hans Holmberg Tested-by: Dennis Maisenbacher Reviewed-by: Martin K. Petersen Link: https://lore.kernel.org/r/20240408014128.205141-20-dlemoal@kernel.org Signed-off-by: Jens Axboe --- block/blk-mq.c | 26 -------------------------- drivers/scsi/scsi_lib.c | 1 - include/linux/blk_types.h | 20 ++++---------------- 3 files changed, 4 insertions(+), 43 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq.c b/block/blk-mq.c index 6f9cc1c4d4fb..9f2d9970eeba 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1921,19 +1921,6 @@ static void blk_mq_handle_dev_resource(struct request *rq, __blk_mq_requeue_request(rq); } -static void blk_mq_handle_zone_resource(struct request *rq, - struct list_head *zone_list) -{ - /* - * If we end up here it is because we cannot dispatch a request to a - * specific zone due to LLD level zone-write locking or other zone - * related resource not being available. In this case, set the request - * aside in zone_list for retrying it later. 
- */ - list_add(&rq->queuelist, zone_list); - __blk_mq_requeue_request(rq); -} - enum prep_dispatch { PREP_DISPATCH_OK, PREP_DISPATCH_NO_TAG, @@ -2019,7 +2006,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, struct request *rq; int queued; blk_status_t ret = BLK_STS_OK; - LIST_HEAD(zone_list); bool needs_resource = false; if (list_empty(list)) @@ -2061,23 +2047,11 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list, case BLK_STS_DEV_RESOURCE: blk_mq_handle_dev_resource(rq, list); goto out; - case BLK_STS_ZONE_RESOURCE: - /* - * Move the request to zone_list and keep going through - * the dispatch list to find more requests the drive can - * accept. - */ - blk_mq_handle_zone_resource(rq, &zone_list); - needs_resource = true; - break; default: blk_mq_end_request(rq, ret); } } while (!list_empty(list)); out: - if (!list_empty(&zone_list)) - list_splice_tail_init(&zone_list, list); - /* If we didn't flush the entire list, we could have told the driver * there was more coming, but that turned out to be a lie. */ diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 2e28e2360c85..9ca96116bd33 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1870,7 +1870,6 @@ out_put_budget: case BLK_STS_OK: break; case BLK_STS_RESOURCE: - case BLK_STS_ZONE_RESOURCE: if (scsi_device_blocked(sdev)) ret = BLK_STS_DEV_RESOURCE; break; diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 29b3170431e7..ffe0c112b128 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -136,18 +136,6 @@ typedef u16 blk_short_t; */ #define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13) -/* - * BLK_STS_ZONE_RESOURCE is returned from the driver to the block layer if zone - * related resources are unavailable, but the driver can guarantee the queue - * will be rerun in the future once the resources become available again. - * - * This is different from BLK_STS_DEV_RESOURCE in that it explicitly references - * a zone specific resource and IO to a different zone on the same device could - * still be served. Examples of that are zones that are write-locked, but a read - * to the same zone could be served. - */ -#define BLK_STS_ZONE_RESOURCE ((__force blk_status_t)14) - /* * BLK_STS_ZONE_OPEN_RESOURCE is returned from the driver in the completion * path if the device returns a status indicating that too many zone resources @@ -155,7 +143,7 @@ typedef u16 blk_short_t; * after the number of open zones decreases below the device's limits, which is * reported in the request_queue's max_open_zones. */ -#define BLK_STS_ZONE_OPEN_RESOURCE ((__force blk_status_t)15) +#define BLK_STS_ZONE_OPEN_RESOURCE ((__force blk_status_t)14) /* * BLK_STS_ZONE_ACTIVE_RESOURCE is returned from the driver in the completion @@ -164,20 +152,20 @@ typedef u16 blk_short_t; * after the number of active zones decreases below the device's limits, which * is reported in the request_queue's max_active_zones. */ -#define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)16) +#define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)15) /* * BLK_STS_OFFLINE is returned from the driver when the target device is offline * or is being taken offline. This could help differentiate the case where a * device is intentionally being shut down from a real I/O error. 
*/ -#define BLK_STS_OFFLINE ((__force blk_status_t)17) +#define BLK_STS_OFFLINE ((__force blk_status_t)16) /* * BLK_STS_DURATION_LIMIT is returned from the driver when the target device * aborted the command because it exceeded one of its Command Duration Limits. */ -#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)18) +#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)17) /** * blk_path_error - returns true if error may be path related -- cgit From 9b3c08b90fc212de58c34621d83e74977170b2cd Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Mon, 8 Apr 2024 10:41:20 +0900 Subject: block: Simplify blk_revalidate_disk_zones() interface The only user of blk_revalidate_disk_zones() second argument was the SCSI disk driver (sd). Now that this driver does not require this update_driver_data argument, remove it to simplify the interface of blk_revalidate_disk_zones(). Also update the function kdoc comment to be more accurate (i.e. there is no gendisk ->revalidate method). Signed-off-by: Damien Le Moal Reviewed-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Reviewed-by: Bart Van Assche Tested-by: Hans Holmberg Tested-by: Dennis Maisenbacher Reviewed-by: Martin K. Petersen Link: https://lore.kernel.org/r/20240408014128.205141-21-dlemoal@kernel.org Signed-off-by: Jens Axboe --- block/blk-zoned.c | 21 +++++++-------------- drivers/block/null_blk/zoned.c | 2 +- drivers/block/ublk_drv.c | 2 +- drivers/block/virtio_blk.c | 2 +- drivers/md/dm-zone.c | 2 +- drivers/nvme/host/core.c | 2 +- drivers/scsi/sd_zbc.c | 2 +- include/linux/blkdev.h | 3 +-- 8 files changed, 14 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/block/blk-zoned.c b/block/blk-zoned.c index da0fc7e2d00a..e46d23ad2fa9 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -1713,21 +1713,17 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx, /** * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps * @disk: Target disk - * @update_driver_data: Callback to update driver data on the frozen disk * - * Helper function for low-level device drivers to check and (re) allocate and - * initialize a disk request queue zone bitmaps. This functions should normally - * be called within the disk ->revalidate method for blk-mq based drivers. + * Helper function for low-level device drivers to check, (re) allocate and + * initialize resources used for managing zoned disks. This function should + * normally be called by blk-mq based drivers when a zoned gendisk is probed + * and when the zone configuration of the gendisk changes (e.g. after a format). * Before calling this function, the device driver must already have set the * device zone size (chunk_sector limit) and the max zone append limit. * BIO based drivers can also use this function as long as the device queue * can be safely frozen. - * If the @update_driver_data callback function is not NULL, the callback is - * executed with the device request queue frozen after all zones have been - * checked. */ -int blk_revalidate_disk_zones(struct gendisk *disk, - void (*update_driver_data)(struct gendisk *disk)) +int blk_revalidate_disk_zones(struct gendisk *disk) { struct request_queue *q = disk->queue; sector_t zone_sectors = q->limits.chunk_sectors; @@ -1794,13 +1790,10 @@ int blk_revalidate_disk_zones(struct gendisk *disk, * referencing the bitmaps). 
*/ blk_mq_freeze_queue(q); - if (ret > 0) { + if (ret > 0) ret = disk_update_zone_resources(disk, &args); - if (update_driver_data) - update_driver_data(disk); - } else { + else pr_warn("%s: failed to revalidate zones\n", disk->disk_name); - } if (ret) disk_free_zone_resources(disk); blk_mq_unfreeze_queue(q); diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c index 0b2af273adaf..4ddd84752557 100644 --- a/drivers/block/null_blk/zoned.c +++ b/drivers/block/null_blk/zoned.c @@ -177,7 +177,7 @@ int null_register_zoned_dev(struct nullb *nullb) disk->disk_name, queue_emulates_zone_append(q) ? "emulated" : "native"); - return blk_revalidate_disk_zones(disk, NULL); + return blk_revalidate_disk_zones(disk); } void null_free_zoned_dev(struct nullb_device *dev) diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index ab6af84e327c..851c78913de2 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -221,7 +221,7 @@ static int ublk_get_nr_zones(const struct ublk_device *ub) static int ublk_revalidate_disk_zones(struct ublk_device *ub) { - return blk_revalidate_disk_zones(ub->ub_disk, NULL); + return blk_revalidate_disk_zones(ub->ub_disk); } static int ublk_dev_param_zoned_validate(const struct ublk_device *ub) diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 42dea7601d87..c1af0a7d56c8 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -1543,7 +1543,7 @@ static int virtblk_probe(struct virtio_device *vdev) */ if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && lim.zoned) { blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, vblk->disk->queue); - err = blk_revalidate_disk_zones(vblk->disk, NULL); + err = blk_revalidate_disk_zones(vblk->disk); if (err) goto out_cleanup_disk; } diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index 34769f3e3175..d17ae4486a6a 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -169,7 +169,7 @@ static int dm_revalidate_zones(struct mapped_device *md, struct dm_table *t) * our table for dm_blk_report_zones() to use directly. 
*/ md->zone_revalidate_map = t; - ret = blk_revalidate_disk_zones(disk, NULL); + ret = blk_revalidate_disk_zones(disk); md->zone_revalidate_map = NULL; if (ret) { diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 943d72bdd794..c9955ecd1790 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2150,7 +2150,7 @@ static int nvme_update_ns_info_block(struct nvme_ns *ns, blk_mq_unfreeze_queue(ns->disk->queue); if (blk_queue_is_zoned(ns->queue)) { - ret = blk_revalidate_disk_zones(ns->disk, NULL); + ret = blk_revalidate_disk_zones(ns->disk); if (ret && !nvme_first_scan(ns->disk)) goto out; } diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index d0ead9858954..806036e48abe 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -572,7 +572,7 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) blk_queue_max_zone_append_sectors(q, 0); flags = memalloc_noio_save(); - ret = blk_revalidate_disk_zones(disk, NULL); + ret = blk_revalidate_disk_zones(disk); memalloc_noio_restore(flags); if (ret) { sdkp->zone_info = (struct zoned_disk_info){ }; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 46613bf6a402..fbc6860b3622 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -336,8 +336,7 @@ int blkdev_report_zones(struct block_device *bdev, sector_t sector, unsigned int nr_zones, report_zones_cb cb, void *data); int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op, sector_t sectors, sector_t nr_sectors); -int blk_revalidate_disk_zones(struct gendisk *disk, - void (*update_driver_data)(struct gendisk *disk)); +int blk_revalidate_disk_zones(struct gendisk *disk); /* * Independent access ranges: struct blk_independent_access_range describes -- cgit From e4eb37cc0f3ed8971c50dddfbeb35a799e5b504e Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Mon, 8 Apr 2024 10:41:22 +0900 Subject: block: Remove elevator required features The only elevator feature ever implemented is ELEVATOR_F_ZBD_SEQ_WRITE for signaling that a scheduler implements zone write locking to tightly control the dispatching order of write operations to zoned block devices. With the removal of zone write locking support in mq-deadline and the reliance of all block device drivers on the block layer zone write plugging to control ordering of write operations to zones, the elevator feature ELEVATOR_F_ZBD_SEQ_WRITE is completely unused. Remove it, and also remove the now unused code for filtering the possible schedulers for a block device based on required features. Signed-off-by: Damien Le Moal Reviewed-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Reviewed-by: Bart Van Assche Tested-by: Hans Holmberg Tested-by: Dennis Maisenbacher Reviewed-by: Martin K. 
Petersen Link: https://lore.kernel.org/r/20240408014128.205141-23-dlemoal@kernel.org Signed-off-by: Jens Axboe --- block/blk-settings.c | 16 ---------------- block/elevator.c | 46 +++++----------------------------------------- block/elevator.h | 1 - include/linux/blkdev.h | 10 ---------- 4 files changed, 5 insertions(+), 68 deletions(-) (limited to 'include/linux') diff --git a/block/blk-settings.c b/block/blk-settings.c index c0197e1e7485..715f4b6356c4 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -1052,22 +1052,6 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua) } EXPORT_SYMBOL_GPL(blk_queue_write_cache); -/** - * blk_queue_required_elevator_features - Set a queue required elevator features - * @q: the request queue for the target device - * @features: Required elevator features OR'ed together - * - * Tell the block layer that for the device controlled through @q, only the - * only elevators that can be used are those that implement at least the set of - * features specified by @features. - */ -void blk_queue_required_elevator_features(struct request_queue *q, - unsigned int features) -{ - q->required_elevator_features = features; -} -EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features); - /** * blk_queue_can_use_dma_map_merging - configure queue for merging segments. * @q: the request queue for the device diff --git a/block/elevator.c b/block/elevator.c index 5ff093cb3cf8..f64ebd726e58 100644 --- a/block/elevator.c +++ b/block/elevator.c @@ -83,13 +83,6 @@ bool elv_bio_merge_ok(struct request *rq, struct bio *bio) } EXPORT_SYMBOL(elv_bio_merge_ok); -static inline bool elv_support_features(struct request_queue *q, - const struct elevator_type *e) -{ - return (q->required_elevator_features & e->elevator_features) == - q->required_elevator_features; -} - /** * elevator_match - Check whether @e's name or alias matches @name * @e: Scheduler to test @@ -120,7 +113,7 @@ static struct elevator_type *elevator_find_get(struct request_queue *q, spin_lock(&elv_list_lock); e = __elevator_find(name); - if (e && (!elv_support_features(q, e) || !elevator_tryget(e))) + if (e && (!elevator_tryget(e))) e = NULL; spin_unlock(&elv_list_lock); return e; @@ -580,34 +573,8 @@ static struct elevator_type *elevator_get_default(struct request_queue *q) } /* - * Get the first elevator providing the features required by the request queue. - * Default to "none" if no matching elevator is found. - */ -static struct elevator_type *elevator_get_by_features(struct request_queue *q) -{ - struct elevator_type *e, *found = NULL; - - spin_lock(&elv_list_lock); - - list_for_each_entry(e, &elv_list, list) { - if (elv_support_features(q, e)) { - found = e; - break; - } - } - - if (found && !elevator_tryget(found)) - found = NULL; - - spin_unlock(&elv_list_lock); - return found; -} - -/* - * For a device queue that has no required features, use the default elevator - * settings. Otherwise, use the first elevator available matching the required - * features. If no suitable elevator is find or if the chosen elevator - * initialization fails, fall back to the "none" elevator (no elevator). + * Use the default elevator settings. If the chosen elevator initialization + * fails, fall back to the "none" elevator (no elevator). 
*/ void elevator_init_mq(struct request_queue *q) { @@ -622,10 +589,7 @@ void elevator_init_mq(struct request_queue *q) if (unlikely(q->elevator)) return; - if (!q->required_elevator_features) - e = elevator_get_default(q); - else - e = elevator_get_by_features(q); + e = elevator_get_default(q); if (!e) return; @@ -781,7 +745,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name) list_for_each_entry(e, &elv_list, list) { if (e == cur) len += sprintf(name+len, "[%s] ", e->elevator_name); - else if (elv_support_features(q, e)) + else len += sprintf(name+len, "%s ", e->elevator_name); } spin_unlock(&elv_list_lock); diff --git a/block/elevator.h b/block/elevator.h index 7ca3d7b6ed82..e9a050a96e53 100644 --- a/block/elevator.h +++ b/block/elevator.h @@ -74,7 +74,6 @@ struct elevator_type struct elv_fs_entry *elevator_attrs; const char *elevator_name; const char *elevator_alias; - const unsigned int elevator_features; struct module *elevator_owner; #ifdef CONFIG_BLK_DEBUG_FS const struct blk_mq_debugfs_attr *queue_debugfs_attrs; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index fbc6860b3622..017e9d064177 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -453,8 +453,6 @@ struct request_queue { atomic_t nr_active_requests_shared_tags; - unsigned int required_elevator_features; - struct blk_mq_tags *sched_shared_tags; struct list_head icq_list; @@ -958,14 +956,6 @@ disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges); void disk_set_independent_access_ranges(struct gendisk *disk, struct blk_independent_access_ranges *iars); -/* - * Elevator features for blk_queue_required_elevator_features: - */ -/* Supports zoned block devices sequential write constraint */ -#define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0) - -extern void blk_queue_required_elevator_features(struct request_queue *q, - unsigned int features); extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, struct device *dev); -- cgit From 02ccd7c360b1692da164842f211d41fab7d83adc Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Mon, 8 Apr 2024 10:41:26 +0900 Subject: block: Remove zone write locking Zone write locking is now unused and replaced with zone write plugging. Remove all code that was implementing zone write locking, that is, the various helper functions controlling request zone write locking and the gendisk attached zone bitmaps. Signed-off-by: Damien Le Moal Reviewed-by: Bart Van Assche Reviewed-by: Christoph Hellwig Reviewed-by: Hannes Reinecke Tested-by: Hans Holmberg Tested-by: Dennis Maisenbacher Reviewed-by: Martin K. 
Petersen Link: https://lore.kernel.org/r/20240408014128.205141-27-dlemoal@kernel.org Signed-off-by: Jens Axboe --- block/blk-mq-debugfs.c | 1 - block/blk-zoned.c | 66 ++----------------------------------- include/linux/blk-mq.h | 83 ----------------------------------------------- include/linux/blk_types.h | 1 - include/linux/blkdev.h | 35 +++----------------- 5 files changed, 7 insertions(+), 179 deletions(-) (limited to 'include/linux') diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index ca1f2b9422d5..770c0c2b72fa 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -256,7 +256,6 @@ static const char *const rqf_name[] = { RQF_NAME(HASHED), RQF_NAME(STATS), RQF_NAME(SPECIAL_PAYLOAD), - RQF_NAME(ZONE_WRITE_LOCKED), RQF_NAME(TIMED_OUT), RQF_NAME(RESV), }; diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 44699b431ad0..3befebe6b319 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -115,52 +115,6 @@ const char *blk_zone_cond_str(enum blk_zone_cond zone_cond) } EXPORT_SYMBOL_GPL(blk_zone_cond_str); -/* - * Return true if a request is a write requests that needs zone write locking. - */ -bool blk_req_needs_zone_write_lock(struct request *rq) -{ - if (!rq->q->disk->seq_zones_wlock) - return false; - - return blk_rq_is_seq_zoned_write(rq); -} -EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock); - -bool blk_req_zone_write_trylock(struct request *rq) -{ - unsigned int zno = blk_rq_zone_no(rq); - - if (test_and_set_bit(zno, rq->q->disk->seq_zones_wlock)) - return false; - - WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED); - rq->rq_flags |= RQF_ZONE_WRITE_LOCKED; - - return true; -} -EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock); - -void __blk_req_zone_write_lock(struct request *rq) -{ - if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq), - rq->q->disk->seq_zones_wlock))) - return; - - WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED); - rq->rq_flags |= RQF_ZONE_WRITE_LOCKED; -} -EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock); - -void __blk_req_zone_write_unlock(struct request *rq) -{ - rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED; - if (rq->q->disk->seq_zones_wlock) - WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq), - rq->q->disk->seq_zones_wlock)); -} -EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock); - /** * bdev_nr_zones - Get number of zones * @bdev: Target device @@ -1506,9 +1460,6 @@ void disk_free_zone_resources(struct gendisk *disk) kfree(disk->conv_zones_bitmap); disk->conv_zones_bitmap = NULL; - kfree(disk->seq_zones_wlock); - disk->seq_zones_wlock = NULL; - disk->zone_capacity = 0; disk->nr_zones = 0; } @@ -1556,7 +1507,6 @@ static int disk_revalidate_zone_resources(struct gendisk *disk, struct blk_revalidate_zone_args { struct gendisk *disk; unsigned long *conv_zones_bitmap; - unsigned long *seq_zones_wlock; unsigned int nr_zones; unsigned int zone_capacity; sector_t sector; @@ -1574,7 +1524,6 @@ static int disk_update_zone_resources(struct gendisk *disk, disk->nr_zones = args->nr_zones; disk->zone_capacity = args->zone_capacity; - swap(disk->seq_zones_wlock, args->seq_zones_wlock); swap(disk->conv_zones_bitmap, args->conv_zones_bitmap); /* @@ -1664,13 +1613,6 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx, set_bit(idx, args->conv_zones_bitmap); break; case BLK_ZONE_TYPE_SEQWRITE_REQ: - if (!args->seq_zones_wlock) { - args->seq_zones_wlock = - blk_alloc_zone_bitmap(q->node, args->nr_zones); - if (!args->seq_zones_wlock) - return -ENOMEM; - } - /* * Remember the capacity of the first sequential zone and check * if it 
is constant for all zones. @@ -1712,7 +1654,7 @@ static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx, } /** - * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps + * blk_revalidate_disk_zones - (re)allocate and initialize zone write plugs * @disk: Target disk * * Helper function for low-level device drivers to check, (re) allocate and @@ -1786,9 +1728,8 @@ int blk_revalidate_disk_zones(struct gendisk *disk) } /* - * Install the new bitmaps and update nr_zones only once the queue is - * stopped and all I/Os are completed (i.e. a scheduler is not - * referencing the bitmaps). + * Set the new disk zone parameters only once the queue is frozen and + * all I/Os are completed. */ blk_mq_freeze_queue(q); if (ret > 0) @@ -1799,7 +1740,6 @@ int blk_revalidate_disk_zones(struct gendisk *disk) disk_free_zone_resources(disk); blk_mq_unfreeze_queue(q); - kfree(args.seq_zones_wlock); kfree(args.conv_zones_bitmap); return ret; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 60090c8366fb..89ba6b16fe8b 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -54,8 +54,6 @@ typedef __u32 __bitwise req_flags_t; /* Look at ->special_vec for the actual data payload instead of the bio chain. */ #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) -/* The per-zone write lock is held for this request */ -#define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19)) /* The request completion needs to be signaled to zone write pluging. */ #define RQF_ZONE_WRITE_PLUGGING ((__force req_flags_t)(1 << 20)) /* ->timeout has been called, don't expire again */ @@ -1152,85 +1150,4 @@ static inline int blk_rq_map_sg(struct request_queue *q, struct request *rq, } void blk_dump_rq_flags(struct request *, char *); -#ifdef CONFIG_BLK_DEV_ZONED -static inline unsigned int blk_rq_zone_no(struct request *rq) -{ - return disk_zone_no(rq->q->disk, blk_rq_pos(rq)); -} - -static inline unsigned int blk_rq_zone_is_seq(struct request *rq) -{ - return disk_zone_is_seq(rq->q->disk, blk_rq_pos(rq)); -} - -/** - * blk_rq_is_seq_zoned_write() - Check if @rq requires write serialization. - * @rq: Request to examine. - * - * Note: REQ_OP_ZONE_APPEND requests do not require serialization. 
- */ -static inline bool blk_rq_is_seq_zoned_write(struct request *rq) -{ - return op_needs_zoned_write_locking(req_op(rq)) && - blk_rq_zone_is_seq(rq); -} - -bool blk_req_needs_zone_write_lock(struct request *rq); -bool blk_req_zone_write_trylock(struct request *rq); -void __blk_req_zone_write_lock(struct request *rq); -void __blk_req_zone_write_unlock(struct request *rq); - -static inline void blk_req_zone_write_lock(struct request *rq) -{ - if (blk_req_needs_zone_write_lock(rq)) - __blk_req_zone_write_lock(rq); -} - -static inline void blk_req_zone_write_unlock(struct request *rq) -{ - if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) - __blk_req_zone_write_unlock(rq); -} - -static inline bool blk_req_zone_is_write_locked(struct request *rq) -{ - return rq->q->disk->seq_zones_wlock && - test_bit(blk_rq_zone_no(rq), rq->q->disk->seq_zones_wlock); -} - -static inline bool blk_req_can_dispatch_to_zone(struct request *rq) -{ - if (!blk_req_needs_zone_write_lock(rq)) - return true; - return !blk_req_zone_is_write_locked(rq); -} -#else /* CONFIG_BLK_DEV_ZONED */ -static inline bool blk_rq_is_seq_zoned_write(struct request *rq) -{ - return false; -} - -static inline bool blk_req_needs_zone_write_lock(struct request *rq) -{ - return false; -} - -static inline void blk_req_zone_write_lock(struct request *rq) -{ -} - -static inline void blk_req_zone_write_unlock(struct request *rq) -{ -} -static inline bool blk_req_zone_is_write_locked(struct request *rq) -{ - return false; -} - -static inline bool blk_req_can_dispatch_to_zone(struct request *rq) -{ - return true; -} -#endif /* CONFIG_BLK_DEV_ZONED */ - #endif /* BLK_MQ_H */ diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index ffe0c112b128..5751292fee6a 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -297,7 +297,6 @@ enum { BIO_QOS_THROTTLED, /* bio went through rq_qos throttle path */ BIO_QOS_MERGED, /* but went through rq_qos merge path */ BIO_REMAPPED, - BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */ BIO_ZONE_WRITE_PLUGGING, /* bio handled through zone write plugging */ BIO_EMULATES_ZONE_APPEND, /* bio emulates a zone append operation */ BIO_FLAG_LAST diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 017e9d064177..7e8a68805324 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -177,23 +177,14 @@ struct gendisk { #ifdef CONFIG_BLK_DEV_ZONED /* - * Zoned block device information for request dispatch control. - * nr_zones is the total number of zones of the device. This is always - * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones - * bits which indicates if a zone is conventional (bit set) or - * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones - * bits which indicates if a zone is write locked, that is, if a write - * request targeting the zone was dispatched. - * - * Reads of this information must be protected with blk_queue_enter() / - * blk_queue_exit(). Modifying this information is only allowed while - * no requests are being processed. See also blk_mq_freeze_queue() and - * blk_mq_unfreeze_queue(). + * Zoned block device information. Reads of this information must be + * protected with blk_queue_enter() / blk_queue_exit(). Modifying this + * information is only allowed while no requests are being processed. + * See also blk_mq_freeze_queue() and blk_mq_unfreeze_queue(). 
*/ unsigned int nr_zones; unsigned int zone_capacity; unsigned long *conv_zones_bitmap; - unsigned long *seq_zones_wlock; unsigned int zone_wplugs_hash_bits; spinlock_t zone_wplugs_lock; struct mempool_s *zone_wplugs_pool; @@ -635,15 +626,6 @@ static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector) return sector >> ilog2(disk->queue->limits.chunk_sectors); } -static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector) -{ - if (!blk_queue_is_zoned(disk->queue)) - return false; - if (!disk->conv_zones_bitmap) - return true; - return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap); -} - static inline void disk_set_max_open_zones(struct gendisk *disk, unsigned int max_open_zones) { @@ -677,10 +659,6 @@ static inline unsigned int disk_nr_zones(struct gendisk *disk) { return 0; } -static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector) -{ - return false; -} static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector) { return 0; @@ -869,11 +847,6 @@ static inline bool bio_straddles_zones(struct bio *bio) disk_zone_no(bio->bi_bdev->bd_disk, bio_end_sector(bio) - 1); } -static inline unsigned int bio_zone_is_seq(struct bio *bio) -{ - return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector); -} - /* * Return how much of the chunk is left to be used for I/O at a given offset. */ -- cgit From 99a9476b27e89525cef653b91e542baf61f105d2 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Mon, 8 Apr 2024 10:41:28 +0900 Subject: block: Do not special-case plugging of zone write operations With the block layer zone write plugging being automatically done for any write operation to a zone of a zoned block device, a regular request plugging handled through current->plug can only ever see at most a single write request per zone. In such case, any potential reordering of the plugged requests will be harmless. We can thus remove the special casing for write operations to zones and have these requests plugged as well. This allows removing the function blk_mq_plug and instead directly using current->plug where needed. Signed-off-by: Damien Le Moal Reviewed-by: Hannes Reinecke Reviewed-by: Christoph Hellwig Reviewed-by: Bart Van Assche Tested-by: Hans Holmberg Tested-by: Dennis Maisenbacher Reviewed-by: Martin K. Petersen Link: https://lore.kernel.org/r/20240408014128.205141-29-dlemoal@kernel.org Signed-off-by: Jens Axboe --- block/blk-core.c | 6 ------ block/blk-merge.c | 3 +-- block/blk-mq.c | 7 +------ block/blk-mq.h | 31 ------------------------------- include/linux/blkdev.h | 12 ------------ 5 files changed, 2 insertions(+), 57 deletions(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index e1a5344c2257..47400a4fe851 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -907,12 +907,6 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags) !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) return 0; - /* - * As the requests that require a zone lock are not plugged in the - * first place, directly accessing the plug instead of using - * blk_mq_plug() should not have any consequences during flushing for - * zoned devices. 
- */ blk_flush_plug(current->plug, false); /* diff --git a/block/blk-merge.c b/block/blk-merge.c index 7f8a808b74c1..f64115d72f3d 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -1113,10 +1113,9 @@ static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q, bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio, unsigned int nr_segs) { - struct blk_plug *plug; + struct blk_plug *plug = current->plug; struct request *rq; - plug = blk_mq_plug(bio); if (!plug || rq_list_empty(plug->mq_list)) return false; diff --git a/block/blk-mq.c b/block/blk-mq.c index 9f2d9970eeba..434d45219e23 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1330,11 +1330,6 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head) blk_account_io_start(rq); - /* - * As plugging can be enabled for passthrough requests on a zoned - * device, directly accessing the plug instead of using blk_mq_plug() - * should not have any consequences. - */ if (current->plug && !at_head) { blk_add_rq_to_plug(current->plug, rq); return; @@ -2932,7 +2927,7 @@ static void blk_mq_use_cached_rq(struct request *rq, struct blk_plug *plug, void blk_mq_submit_bio(struct bio *bio) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); - struct blk_plug *plug = blk_mq_plug(bio); + struct blk_plug *plug = current->plug; const int is_sync = op_is_sync(bio->bi_opf); struct blk_mq_hw_ctx *hctx; unsigned int nr_segs = 1; diff --git a/block/blk-mq.h b/block/blk-mq.h index f75a9ecfebde..260beea8e332 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -365,37 +365,6 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap) qmap->mq_map[cpu] = 0; } -/* - * blk_mq_plug() - Get caller context plug - * @bio : the bio being submitted by the caller context - * - * Plugging, by design, may delay the insertion of BIOs into the elevator in - * order to increase BIO merging opportunities. This however can cause BIO - * insertion order to change from the order in which submit_bio() is being - * executed in the case of multiple contexts concurrently issuing BIOs to a - * device, even if these context are synchronized to tightly control BIO issuing - * order. While this is not a problem with regular block devices, this ordering - * change can cause write BIO failures with zoned block devices as these - * require sequential write patterns to zones. Prevent this from happening by - * ignoring the plug state of a BIO issuing context if it is for a zoned block - * device and the BIO to plug is a write operation. - * - * Return current->plug if the bio can be plugged and NULL otherwise - */ -static inline struct blk_plug *blk_mq_plug( struct bio *bio) -{ - /* Zoned block device write operation case: do not plug the BIO */ - if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) && - bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio))) - return NULL; - - /* - * For regular block devices or read operations, use the context plug - * which may be NULL if blk_start_plug() was not executed. - */ - return current->plug; -} - /* Free all requests on the list */ static inline void blk_mq_free_requests(struct list_head *list) { diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 7e8a68805324..1ce8ba08e318 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1299,18 +1299,6 @@ static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec) return disk_zone_no(bdev->bd_disk, sec); } -/* Whether write serialization is required for @op on zoned devices. 
*/ -static inline bool op_needs_zoned_write_locking(enum req_op op) -{ - return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES; -} - -static inline bool bdev_op_is_zoned_write(struct block_device *bdev, - enum req_op op) -{ - return bdev_is_zoned(bdev) && op_needs_zoned_write_locking(op); -} - static inline sector_t bdev_zone_sectors(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); -- cgit From 8a7a6103258715857310253ec2193bcc4d1d7082 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 21 Feb 2024 23:31:56 +0200 Subject: gpiolib: Get rid of never false gpio_is_valid() calls In the cases when gpio_is_valid() is called with unsigned parameter the result is always true in the GPIO library code, hence the check for false won't ever be true. Get rid of such calls. While at it, move GPIO device base to be unsigned to clearly show it won't ever be negative. This requires a new definition for the maximum GPIO number in the system. Signed-off-by: Andy Shevchenko Reviewed-by: Linus Walleij Signed-off-by: Bartosz Golaszewski --- drivers/gpio/gpiolib-legacy.c | 10 +++++----- drivers/gpio/gpiolib-sysfs.c | 2 +- drivers/gpio/gpiolib.c | 19 +++++++++---------- drivers/gpio/gpiolib.h | 2 +- include/linux/gpio.h | 6 ++++++ 5 files changed, 22 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c index 616d60187f85..5a9911ae9125 100644 --- a/drivers/gpio/gpiolib-legacy.c +++ b/drivers/gpio/gpiolib-legacy.c @@ -28,10 +28,9 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) struct gpio_desc *desc; int err; - desc = gpio_to_desc(gpio); - /* Compatibility: assume unavailable "valid" GPIOs will appear later */ - if (!desc && gpio_is_valid(gpio)) + desc = gpio_to_desc(gpio); + if (!desc) return -EPROBE_DEFER; err = gpiod_request(desc, label); @@ -63,10 +62,11 @@ EXPORT_SYMBOL_GPL(gpio_request_one); */ int gpio_request(unsigned gpio, const char *label) { - struct gpio_desc *desc = gpio_to_desc(gpio); + struct gpio_desc *desc; /* Compatibility: assume unavailable "valid" GPIOs will appear later */ - if (!desc && gpio_is_valid(gpio)) + desc = gpio_to_desc(gpio); + if (!desc) return -EPROBE_DEFER; return gpiod_request(desc, label); diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index 6853ecd98bcb..26202586fd39 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -412,7 +412,7 @@ static ssize_t base_show(struct device *dev, { const struct gpio_device *gdev = dev_get_drvdata(dev); - return sysfs_emit(buf, "%d\n", gdev->base); + return sysfs_emit(buf, "%u\n", gdev->base); } static DEVICE_ATTR_RO(base); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index f20367b1ea8e..f749ece2d3cd 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -150,9 +150,6 @@ struct gpio_desc *gpio_to_desc(unsigned gpio) } } - if (!gpio_is_valid(gpio)) - pr_warn("invalid GPIO %d\n", gpio); - return NULL; } EXPORT_SYMBOL_GPL(gpio_to_desc); @@ -297,10 +294,10 @@ struct gpio_chip *gpio_device_get_chip(struct gpio_device *gdev) EXPORT_SYMBOL_GPL(gpio_device_get_chip); /* dynamic allocation of GPIOs, e.g. 
on a hotplugged device */ -static int gpiochip_find_base_unlocked(int ngpio) +static int gpiochip_find_base_unlocked(u16 ngpio) { + unsigned int base = GPIO_DYNAMIC_BASE; struct gpio_device *gdev; - int base = GPIO_DYNAMIC_BASE; list_for_each_entry_srcu(gdev, &gpio_devices, list, lockdep_is_held(&gpio_devices_lock)) { @@ -311,9 +308,11 @@ static int gpiochip_find_base_unlocked(int ngpio) base = gdev->base + gdev->ngpio; if (base < GPIO_DYNAMIC_BASE) base = GPIO_DYNAMIC_BASE; + if (base > GPIO_DYNAMIC_MAX - ngpio) + break; } - if (gpio_is_valid(base)) { + if (base <= GPIO_DYNAMIC_MAX - ngpio) { pr_debug("%s: found new base at %d\n", __func__, base); return base; } else { @@ -749,7 +748,7 @@ static int gpiochip_setup_dev(struct gpio_device *gdev) if (ret) goto err_remove_device; - dev_dbg(&gdev->dev, "registered GPIOs %d to %d on %s\n", gdev->base, + dev_dbg(&gdev->dev, "registered GPIOs %u to %u on %s\n", gdev->base, gdev->base + gdev->ngpio - 1, gdev->label); return 0; @@ -4788,14 +4787,14 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev) value = gpio_chip_get_value(gc, desc); is_irq = test_bit(FLAG_USED_AS_IRQ, &desc->flags); active_low = test_bit(FLAG_ACTIVE_LOW, &desc->flags); - seq_printf(s, " gpio-%-3d (%-20.20s|%-20.20s) %s %s %s%s\n", + seq_printf(s, " gpio-%-3u (%-20.20s|%-20.20s) %s %s %s%s\n", gpio, desc->name ?: "", gpiod_get_label(desc), is_out ? "out" : "in ", value >= 0 ? (value ? "hi" : "lo") : "? ", is_irq ? "IRQ " : "", active_low ? "ACTIVE LOW" : ""); } else if (desc->name) { - seq_printf(s, " gpio-%-3d (%-20.20s)\n", gpio, desc->name); + seq_printf(s, " gpio-%-3u (%-20.20s)\n", gpio, desc->name); } gpio++; @@ -4867,7 +4866,7 @@ static int gpiolib_seq_show(struct seq_file *s, void *v) return 0; } - seq_printf(s, "%s%s: GPIOs %d-%d", priv->newline ? "\n" : "", + seq_printf(s, "%s%s: GPIOs %u-%u", priv->newline ? "\n" : "", dev_name(&gdev->dev), gdev->base, gdev->base + gdev->ngpio - 1); parent = gc->parent; diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index f67d5991ab1c..7f94580efdbc 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -61,7 +61,7 @@ struct gpio_device { struct module *owner; struct gpio_chip __rcu *chip; struct gpio_desc *descs; - int base; + unsigned int base; u16 ngpio; bool can_sleep; const char *label; diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 4aaedcf424ce..56ac7e7a2889 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -74,6 +74,12 @@ static inline bool gpio_is_valid(int number) * Until they are all fixed, leave 0-512 space for them. */ #define GPIO_DYNAMIC_BASE 512 +/* + * Define the maximum of the possible GPIO in the global numberspace. + * While the GPIO base and numbers are positive, we limit it with signed + * maximum as a lot of code is using negative values for special cases. + */ +#define GPIO_DYNAMIC_MAX INT_MAX /* Always use the library code for GPIO management calls, * or when sleeping may be involved. -- cgit From 5677b17c33246fb69ecc250d493c635500b78980 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Tue, 2 Apr 2024 10:51:08 +0100 Subject: ARM: 9361/1: amba: store owner from modules with amba_driver_register() Modules registering driver with amba_driver_register() often forget to set .owner field. The field is used by some of other kernel parts for reference counting (try_module_get()), so it is expected that drivers will set it. 
Solve the problem by moving this task away from the drivers to the core amba bus code, just like we did for platform_driver in commit 9447057eaff8 ("platform_device: use a macro instead of platform_driver_register"). Link: https://lore.kernel.org/r/20240326-module-owner-amba-v1-1-4517b091385b@linaro.org Reviewed-by: Andi Shyti Acked-by: Suzuki K Poulose Signed-off-by: Krzysztof Kozlowski Signed-off-by: Russell King (Oracle) --- drivers/amba/bus.c | 11 +++++++---- include/linux/amba/bus.h | 11 +++++++++-- 2 files changed, 16 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index a24c152bfaac..aba3aa95b224 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -488,28 +488,31 @@ static int __init amba_stub_drv_init(void) * waiting on amba_match(). So, register a stub driver to make sure * amba_match() is called even if no amba driver has been registered. */ - return amba_driver_register(&amba_proxy_drv); + return __amba_driver_register(&amba_proxy_drv, NULL); } late_initcall_sync(amba_stub_drv_init); /** - * amba_driver_register - register an AMBA device driver + * __amba_driver_register - register an AMBA device driver * @drv: amba device driver structure + * @owner: owning module/driver * * Register an AMBA device driver with the Linux device model * core. If devices pre-exist, the drivers probe function will * be called. */ -int amba_driver_register(struct amba_driver *drv) +int __amba_driver_register(struct amba_driver *drv, + struct module *owner) { if (!drv->probe) return -EINVAL; + drv->drv.owner = owner; drv->drv.bus = &amba_bustype; return driver_register(&drv->drv); } -EXPORT_SYMBOL(amba_driver_register); +EXPORT_SYMBOL(__amba_driver_register); /** * amba_driver_unregister - remove an AMBA device driver diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index c60a6a14638c..958a55bcc708 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -112,11 +112,18 @@ extern struct bus_type amba_bustype; #define amba_get_drvdata(d) dev_get_drvdata(&d->dev) #define amba_set_drvdata(d,p) dev_set_drvdata(&d->dev, p) +/* + * use a macro to avoid include chaining to get THIS_MODULE + */ +#define amba_driver_register(drv) \ + __amba_driver_register(drv, THIS_MODULE) + #ifdef CONFIG_ARM_AMBA -int amba_driver_register(struct amba_driver *); +int __amba_driver_register(struct amba_driver *, struct module *); void amba_driver_unregister(struct amba_driver *); #else -static inline int amba_driver_register(struct amba_driver *drv) +static inline int __amba_driver_register(struct amba_driver *drv, + struct module *owner) { return -EINVAL; } -- cgit From bd2cd796d2859c1a7cee326d7a9c0f77c6b95c71 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 16 Apr 2024 17:53:45 +0200 Subject: usb: renesas_usbhs: Remove renesas_usbhs_get_info() wrapper The renesas_usbhs_get_info() wrapper was useful for legacy board code. Since commit 1fa59bda21c7fa36 ("ARM: shmobile: Remove legacy board code for Armadillo-800 EVA") in v4.3, it is no longer used outside the USBHS driver, and provides no added value over dev_get_platdata(), while obfuscating the real operation. Drop it, and replace it by dev_get_platdata() in its sole user. 
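As an illustration (a hypothetical out-of-tree caller, not code from this patch), any remaining user of the removed wrapper would simply switch to the generic platform-data helper:

  /* Sketch only: the probe function and error handling are made up;
   * only struct renesas_usbhs_platform_info and dev_get_platdata()
   * come from the kernel.
   */
  #include <linux/errno.h>
  #include <linux/platform_device.h>
  #include <linux/usb/renesas_usbhs.h>

  static int example_probe(struct platform_device *pdev)
  {
          struct renesas_usbhs_platform_info *info;

          /* was: info = renesas_usbhs_get_info(pdev); */
          info = dev_get_platdata(&pdev->dev);
          if (!info)
                  return -EINVAL;

          /* ... use info->driver_param as before ... */
          return 0;
  }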
Signed-off-by: Geert Uytterhoeven Reviewed-by: Biju Das Reviewed-by: Yoshihiro Shimoda Link: https://lore.kernel.org/r/fa296af4452dfe394a58b75fd44c3bb9591936eb.1713282736.git.geert+renesas@glider.be Signed-off-by: Greg Kroah-Hartman --- drivers/usb/renesas_usbhs/common.c | 2 +- include/linux/usb/renesas_usbhs.h | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index b6bef9081bf2..edc43f169d49 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -613,7 +613,7 @@ static int usbhs_probe(struct platform_device *pdev) info = of_device_get_match_data(dev); if (!info) { - info = renesas_usbhs_get_info(pdev); + info = dev_get_platdata(dev); if (!info) return dev_err_probe(dev, -EINVAL, "no platform info\n"); } diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index 372898d9eeb0..67bfcda6c7d2 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -194,9 +194,4 @@ struct renesas_usbhs_platform_info { struct renesas_usbhs_driver_param driver_param; }; -/* - * macro for platform - */ -#define renesas_usbhs_get_info(pdev)\ - ((struct renesas_usbhs_platform_info *)(pdev)->dev.platform_data) #endif /* RENESAS_USB_H */ -- cgit From decde1fa209323c77a7498e803350c5f3e992d62 Mon Sep 17 00:00:00 2001 From: Charlie Jenkins Date: Tue, 12 Mar 2024 16:53:43 -0700 Subject: cpumask: Add assign cpu Standardize an assign_cpu function for cpumasks. Signed-off-by: Charlie Jenkins Link: https://lore.kernel.org/r/20240312-fencei-v13-4-4b6bdc2bbf32@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/cacheflush.c | 2 +- include/linux/cpumask.h | 16 ++++++++++++++++ 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c index 15a95578e670..9dad35f0ce8b 100644 --- a/arch/riscv/mm/cacheflush.c +++ b/arch/riscv/mm/cacheflush.c @@ -170,7 +170,7 @@ static void set_icache_stale_mask(void) stale_cpu = cpumask_test_cpu(smp_processor_id(), mask); cpumask_setall(mask); - assign_bit(cpumask_check(smp_processor_id()), cpumask_bits(mask), stale_cpu); + cpumask_assign_cpu(smp_processor_id(), mask, stale_cpu); } #endif diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index cfb545841a2c..1b85e09c4ba5 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -492,6 +492,22 @@ static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp) __clear_bit(cpumask_check(cpu), cpumask_bits(dstp)); } +/** + * cpumask_assign_cpu - assign a cpu in a cpumask + * @cpu: cpu number (< nr_cpu_ids) + * @dstp: the cpumask pointer + * @bool: the value to assign + */ +static __always_inline void cpumask_assign_cpu(int cpu, struct cpumask *dstp, bool value) +{ + assign_bit(cpumask_check(cpu), cpumask_bits(dstp), value); +} + +static __always_inline void __cpumask_assign_cpu(int cpu, struct cpumask *dstp, bool value) +{ + __assign_bit(cpumask_check(cpu), cpumask_bits(dstp), value); +} + /** * cpumask_test_cpu - test for a cpu in a cpumask * @cpu: cpu number (< nr_cpu_ids) -- cgit From 3c258bf6bf29d8c9f9b358c64f6e9f4510c91ff9 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Wed, 17 Apr 2024 10:09:21 +0100 Subject: firmware: arm_ffa: Stash the partition properties for query purposes The properies obtained from the partition information descriptor as part of initial partitions discovery is useful as 
it contain info if the partition - Runs in AArch64 or AArch32 execution state - Can send and/or receive direct requests - Can send and receive indirect message - Does support receipt of notifications. These can be used for querying before attempting to do any of the above operations. Link: https://lore.kernel.org/r/20240417090921.2866447-1-sudeep.holla@arm.com Signed-off-by: Sudeep Holla --- drivers/firmware/arm_ffa/driver.c | 2 ++ include/linux/arm_ffa.h | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) (limited to 'include/linux') diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index b64e8ee64bc2..99a23dd3d189 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -1228,6 +1228,8 @@ static int ffa_setup_partitions(void) continue; } + ffa_dev->properties = tpbuf->properties; + if (drv_info->version > FFA_VERSION_1_0 && !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC)) ffa_mode_32bit_set(ffa_dev); diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h index c906f666ff5d..94a49612ecec 100644 --- a/include/linux/arm_ffa.h +++ b/include/linux/arm_ffa.h @@ -126,6 +126,7 @@ /* FFA Bus/Device/Driver related */ struct ffa_device { u32 id; + u32 properties; int vm_id; bool mode_32bit; uuid_t uuid; @@ -221,12 +222,29 @@ struct ffa_partition_info { #define FFA_PARTITION_DIRECT_SEND BIT(1) /* partition can send and receive indirect messages. */ #define FFA_PARTITION_INDIRECT_MSG BIT(2) +/* partition can receive notifications */ +#define FFA_PARTITION_NOTIFICATION_RECV BIT(3) /* partition runs in the AArch64 execution state. */ #define FFA_PARTITION_AARCH64_EXEC BIT(8) u32 properties; u32 uuid[4]; }; +static inline +bool ffa_partition_check_property(struct ffa_device *dev, u32 property) +{ + return dev->properties & property; +} + +#define ffa_partition_supports_notify_recv(dev) \ + ffa_partition_check_property(dev, FFA_PARTITION_NOTIFICATION_RECV) + +#define ffa_partition_supports_indirect_msg(dev) \ + ffa_partition_check_property(dev, FFA_PARTITION_INDIRECT_MSG) + +#define ffa_partition_supports_direct_recv(dev) \ + ffa_partition_check_property(dev, FFA_PARTITION_DIRECT_RECV) + /* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP} which pass data via registers */ struct ffa_send_direct_data { unsigned long data0; /* w3/x3 */ -- cgit From 02c19d84c7c5026624d181b8e4cdc8488134d013 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Wed, 17 Apr 2024 10:09:31 +0100 Subject: firmware: arm_ffa: Add support for FFA_MSG_SEND2 The FFA_MSG_SEND2 can be used to transmit a partition message from the Tx buffer of the sender(the driver in this case) endpoint to the Rx buffer of the receiver endpoint. An invocation of the FFA_MSG_SEND2 transfers the ownership of the Tx buffer to the receiver endpoint(or any intermediate consumer). Completion of an FFA_MSG_SEND2 invocation transfers the ownership of the buffer back to the sender endpoint. The framework defines the FFA_MSG_SEND2 interface to transmit a partition message from the Tx buffer of the sender to the Rx buffer of a receiver and inform the scheduler that the receiver must be run. 
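As an illustration (a hypothetical FF-A bus driver, not code from this patch, and assuming the usual ffa_dev->ops->msg_ops access path), the new operation can be combined with the partition property helper introduced earlier in this series:

  /* Sketch only: the function name and payload are made up. */
  #include <linux/arm_ffa.h>
  #include <linux/errno.h>

  static int example_send_hello(struct ffa_device *ffa_dev)
  {
          char payload[] = "hello";

          /* Only meaningful if the partition accepts indirect messages. */
          if (!ffa_partition_supports_indirect_msg(ffa_dev))
                  return -EOPNOTSUPP;

          return ffa_dev->ops->msg_ops->indirect_send(ffa_dev, payload,
                                                      sizeof(payload));
  }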
Link: https://lore.kernel.org/r/20240417090931.2866487-1-sudeep.holla@arm.com Signed-off-by: Sudeep Holla --- drivers/firmware/arm_ffa/driver.c | 38 ++++++++++++++++++++++++++++++++++++++ include/linux/arm_ffa.h | 9 +++++++++ 2 files changed, 47 insertions(+) (limited to 'include/linux') diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 99a23dd3d189..26968edac5b2 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -345,6 +345,38 @@ static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit, return -EINVAL; } +static int ffa_msg_send2(u16 src_id, u16 dst_id, void *buf, size_t sz) +{ + u32 src_dst_ids = PACK_TARGET_INFO(src_id, dst_id); + struct ffa_indirect_msg_hdr *msg; + ffa_value_t ret; + int retval = 0; + + if (sz > (RXTX_BUFFER_SIZE - sizeof(*msg))) + return -ERANGE; + + mutex_lock(&drv_info->tx_lock); + + msg = drv_info->tx_buffer; + msg->flags = 0; + msg->res0 = 0; + msg->offset = sizeof(*msg); + msg->send_recv_id = src_dst_ids; + msg->size = sz; + memcpy(msg + msg->offset, buf, sz); + + /* flags = 0, sender VMID = 0 works for both physical/virtual NS */ + invoke_ffa_fn((ffa_value_t){ + .a0 = FFA_MSG_SEND2, .a1 = 0, .a2 = 0 + }, &ret); + + if (ret.a0 == FFA_ERROR) + retval = ffa_to_linux_errno((int)ret.a2); + + mutex_unlock(&drv_info->tx_lock); + return retval; +} + static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len, u32 len, u64 *handle) { @@ -871,6 +903,11 @@ static int ffa_sync_send_receive(struct ffa_device *dev, dev->mode_32bit, data); } +static int ffa_indirect_msg_send(struct ffa_device *dev, void *buf, size_t sz) +{ + return ffa_msg_send2(drv_info->vm_id, dev->vm_id, buf, sz); +} + static int ffa_memory_share(struct ffa_mem_ops_args *args) { if (drv_info->mem_ops_native) @@ -1146,6 +1183,7 @@ static const struct ffa_info_ops ffa_drv_info_ops = { static const struct ffa_msg_ops ffa_drv_msg_ops = { .mode_32bit_set = ffa_mode_32bit_set, .sync_send_receive = ffa_sync_send_receive, + .indirect_send = ffa_indirect_msg_send, }; static const struct ffa_mem_ops ffa_drv_mem_ops = { diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h index 94a49612ecec..c82d56768101 100644 --- a/include/linux/arm_ffa.h +++ b/include/linux/arm_ffa.h @@ -254,6 +254,14 @@ struct ffa_send_direct_data { unsigned long data4; /* w7/x7 */ }; +struct ffa_indirect_msg_hdr { + u32 flags; + u32 res0; + u32 offset; + u32 send_recv_id; + u32 size; +}; + struct ffa_mem_region_addr_range { /* The base IPA of the constituent memory region, aligned to 4 kiB */ u64 address; @@ -414,6 +422,7 @@ struct ffa_msg_ops { void (*mode_32bit_set)(struct ffa_device *dev); int (*sync_send_receive)(struct ffa_device *dev, struct ffa_send_direct_data *data); + int (*indirect_send)(struct ffa_device *dev, void *buf, size_t sz); }; struct ffa_mem_ops { -- cgit From b58be8db6327b21e0c7fbee559b8eb47f5110efe Mon Sep 17 00:00:00 2001 From: "Kory Maincent (Dent Project)" Date: Wed, 17 Apr 2024 16:39:49 +0200 Subject: ethtool: Expand Ethernet Power Equipment with c33 (PoE) alongside PoDL In the current PSE interface for Ethernet Power Equipment, support is limited to PoDL. This patch extends the interface to accommodate the objects specified in IEEE 802.3-2022 145.2 for Power sourcing Equipment (PSE). 
The following objects are now supported and considered mandatory: - IEEE 802.3-2022 30.9.1.1.5 aPSEPowerDetectionStatus - IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState - IEEE 802.3-2022 30.9.1.2.1 aPSEAdminControl To avoid confusion between "PoDL PSE" and "PoE PSE", which have similar names but distinct values, we have followed the suggestion of Oleksij Rempel and Andrew Lunn to maintain separate naming schemes for each, using c33 (clause 33) prefix for "PoE PSE". You can find more details in the discussion threads here: https://lore.kernel.org/netdev/20230912110637.GI780075@pengutronix.de/ https://lore.kernel.org/netdev/2539b109-72ad-470a-9dae-9f53de4f64ec@lunn.ch/ Reviewed-by: Andrew Lunn Reviewed-by: Oleksij Rempel Signed-off-by: Kory Maincent Link: https://lore.kernel.org/r/20240417-feature_poe-v9-1-242293fd1900@bootlin.com Signed-off-by: Jakub Kicinski --- Documentation/networking/index.rst | 1 + Documentation/networking/pse-pd/index.rst | 9 +++ Documentation/networking/pse-pd/introduction.rst | 73 ++++++++++++++++++++++++ include/linux/pse-pd/pse.h | 9 +++ include/uapi/linux/ethtool.h | 43 ++++++++++++++ include/uapi/linux/ethtool_netlink.h | 3 + 6 files changed, 138 insertions(+) create mode 100644 Documentation/networking/pse-pd/index.rst create mode 100644 Documentation/networking/pse-pd/introduction.rst (limited to 'include/linux') diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 473d72c36d61..7664c0bfe461 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -93,6 +93,7 @@ Contents: plip ppp_generic proc_net_tcp + pse-pd/index radiotap-headers rds regulatory diff --git a/Documentation/networking/pse-pd/index.rst b/Documentation/networking/pse-pd/index.rst new file mode 100644 index 000000000000..18197bc7303d --- /dev/null +++ b/Documentation/networking/pse-pd/index.rst @@ -0,0 +1,9 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Power Sourcing Equipment (PSE) Documentation +============================================ + +.. toctree:: + :maxdepth: 2 + + introduction diff --git a/Documentation/networking/pse-pd/introduction.rst b/Documentation/networking/pse-pd/introduction.rst new file mode 100644 index 000000000000..e3d3faaef717 --- /dev/null +++ b/Documentation/networking/pse-pd/introduction.rst @@ -0,0 +1,73 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Power Sourcing Equipment (PSE) in IEEE 802.3 Standard +===================================================== + +Overview +-------- + +Power Sourcing Equipment (PSE) is essential in networks for delivering power +along with data over Ethernet cables. It usually refers to devices like +switches and hubs that supply power to Powered Devices (PDs) such as IP +cameras, VoIP phones, and wireless access points. + +PSE vs. PoDL PSE +---------------- + +PSE in the IEEE 802.3 standard generally refers to equipment that provides +power alongside data over Ethernet cables, typically associated with Power over +Ethernet (PoE). + +PoDL PSE, or Power over Data Lines PSE, specifically denotes PSEs operating +with single balanced twisted-pair PHYs, as per Clause 104 of IEEE 802.3. PoDL +is significant in contexts like automotive and industrial controls where power +and data delivery over a single pair is advantageous. 
+ +IEEE 802.3-2018 Addendums and Related Clauses +--------------------------------------------- + +Key addenda to the IEEE 802.3-2018 standard relevant to power delivery over +Ethernet are as follows: + +- **802.3af (Approved in 2003-06-12)**: Known as PoE in the market, detailed in + Clause 33, delivering up to 15.4W of power. +- **802.3at (Approved in 2009-09-11)**: Marketed as PoE+, enhancing PoE as + covered in Clause 33, increasing power delivery to up to 30W. +- **802.3bt (Approved in 2018-09-27)**: Known as 4PPoE in the market, outlined + in Clause 33. Type 3 delivers up to 60W, and Type 4 up to 100W. +- **802.3bu (Approved in 2016-12-07)**: Formerly referred to as PoDL, detailed + in Clause 104. Introduces Classes 0 - 9. Class 9 PoDL PSE delivers up to ~65W + +Kernel Naming Convention Recommendations +---------------------------------------- + +For clarity and consistency within the Linux kernel's networking subsystem, the +following naming conventions are recommended: + +- For general PSE (PoE) code, use "c33_pse" key words. For example: + ``enum ethtool_c33_pse_admin_state c33_admin_control;``. + This aligns with Clause 33, encompassing various PoE forms. + +- For PoDL PSE - specific code, use "podl_pse". For example: + ``enum ethtool_podl_pse_admin_state podl_admin_control;`` to differentiate + PoDL PSE settings according to Clause 104. + +Summary of Clause 33: Data Terminal Equipment (DTE) Power via Media Dependent Interface (MDI) +--------------------------------------------------------------------------------------------- + +Clause 33 of the IEEE 802.3 standard defines the functional and electrical +characteristics of Powered Device (PD) and Power Sourcing Equipment (PSE). +These entities enable power delivery using the same generic cabling as for data +transmission, integrating power with data communication for devices such as +10BASE-T, 100BASE-TX, or 1000BASE-T. + +Summary of Clause 104: Power over Data Lines (PoDL) of Single Balanced Twisted-Pair Ethernet +-------------------------------------------------------------------------------------------- + +Clause 104 of the IEEE 802.3 standard delineates the functional and electrical +characteristics of PoDL Powered Devices (PDs) and PoDL Power Sourcing Equipment +(PSEs). These are designed for use with single balanced twisted-pair Ethernet +Physical Layers. In this clause, 'PSE' refers specifically to PoDL PSE, and +'PD' to PoDL PD. The key intent is to provide devices with a unified interface +for both data and the power required to process this data over a single +balanced twisted-pair Ethernet connection. diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h index 199cf4ae3cf2..be4e5754eb24 100644 --- a/include/linux/pse-pd/pse.h +++ b/include/linux/pse-pd/pse.h @@ -17,9 +17,12 @@ struct pse_controller_dev; * * @podl_admin_control: set PoDL PSE admin control as described in * IEEE 802.3-2018 30.15.1.2.1 acPoDLPSEAdminControl + * @c33_admin_control: set PSE admin control as described in + * IEEE 802.3-2022 30.9.1.2.1 acPSEAdminControl */ struct pse_control_config { enum ethtool_podl_pse_admin_state podl_admin_control; + enum ethtool_c33_pse_admin_state c33_admin_control; }; /** @@ -29,10 +32,16 @@ struct pse_control_config { * functions. IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState * @podl_pw_status: power detection status of the PoDL PSE. * IEEE 802.3-2018 30.15.1.1.3 aPoDLPSEPowerDetectionStatus: + * @c33_admin_state: operational state of the PSE + * functions. 
IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState + * @c33_pw_status: power detection status of the PSE. + * IEEE 802.3-2022 30.9.1.1.5 aPSEPowerDetectionStatus: */ struct pse_control_status { enum ethtool_podl_pse_admin_state podl_admin_state; enum ethtool_podl_pse_pw_d_status podl_pw_status; + enum ethtool_c33_pse_admin_state c33_admin_state; + enum ethtool_c33_pse_pw_d_status c33_pw_status; }; /** diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 95c2f09f0d0a..7b9a3d890949 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -752,6 +752,49 @@ enum ethtool_module_power_mode { ETHTOOL_MODULE_POWER_MODE_HIGH, }; +/** + * enum ethtool_c33_pse_admin_state - operational state of the PoDL PSE + * functions. IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState + * @ETHTOOL_C33_PSE_ADMIN_STATE_UNKNOWN: state of PSE functions is unknown + * @ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED: PSE functions are disabled + * @ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED: PSE functions are enabled + */ +enum ethtool_c33_pse_admin_state { + ETHTOOL_C33_PSE_ADMIN_STATE_UNKNOWN = 1, + ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED, + ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED, +}; + +/** + * enum ethtool_c33_pse_pw_d_status - power detection status of the PSE. + * IEEE 802.3-2022 30.9.1.1.3 aPoDLPSEPowerDetectionStatus: + * @ETHTOOL_C33_PSE_PW_D_STATUS_UNKNOWN: PSE status is unknown + * @ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED: The enumeration "disabled" + * indicates that the PSE State diagram is in the state DISABLED. + * @ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING: The enumeration "searching" + * indicates the PSE State diagram is in a state other than those + * listed. + * @ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING: The enumeration + * "deliveringPower" indicates that the PSE State diagram is in the + * state POWER_ON. + * @ETHTOOL_C33_PSE_PW_D_STATUS_TEST: The enumeration "test" indicates that + * the PSE State diagram is in the state TEST_MODE. + * @ETHTOOL_C33_PSE_PW_D_STATUS_FAULT: The enumeration "fault" indicates that + * the PSE State diagram is in the state TEST_ERROR. + * @ETHTOOL_C33_PSE_PW_D_STATUS_OTHERFAULT: The enumeration "otherFault" + * indicates that the PSE State diagram is in the state IDLE due to + * the variable error_condition = true. + */ +enum ethtool_c33_pse_pw_d_status { + ETHTOOL_C33_PSE_PW_D_STATUS_UNKNOWN = 1, + ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED, + ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING, + ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING, + ETHTOOL_C33_PSE_PW_D_STATUS_TEST, + ETHTOOL_C33_PSE_PW_D_STATUS_FAULT, + ETHTOOL_C33_PSE_PW_D_STATUS_OTHERFAULT, +}; + /** * enum ethtool_podl_pse_admin_state - operational state of the PoDL PSE * functions. 
IEEE 802.3-2018 30.15.1.1.2 aPoDLPSEAdminState diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index b4f0d233d048..f17dbe54bf5e 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -913,6 +913,9 @@ enum { ETHTOOL_A_PODL_PSE_ADMIN_STATE, /* u32 */ ETHTOOL_A_PODL_PSE_ADMIN_CONTROL, /* u32 */ ETHTOOL_A_PODL_PSE_PW_D_STATUS, /* u32 */ + ETHTOOL_A_C33_PSE_ADMIN_STATE, /* u32 */ + ETHTOOL_A_C33_PSE_ADMIN_CONTROL, /* u32 */ + ETHTOOL_A_C33_PSE_PW_D_STATUS, /* u32 */ /* add new constants above here */ __ETHTOOL_A_PSE_CNT, -- cgit From 47e0dd53c5eb9dab66689592523f31ea39fc32fa Mon Sep 17 00:00:00 2001 From: "Kory Maincent (Dent Project)" Date: Wed, 17 Apr 2024 16:39:50 +0200 Subject: net: pse-pd: Introduce PSE types enumeration Introduce an enumeration to define PSE types (C33 or PoDL), utilizing a bitfield for potential future support of both types. Include 'pse_get_types' helper for external access to PSE type info. Reviewed-by: Andrew Lunn Signed-off-by: Kory Maincent Link: https://lore.kernel.org/r/20240417-feature_poe-v9-2-242293fd1900@bootlin.com Signed-off-by: Jakub Kicinski --- drivers/net/pse-pd/pse_core.c | 12 ++++++++++++ drivers/net/pse-pd/pse_regulator.c | 1 + include/linux/pse-pd/pse.h | 15 +++++++++++++++ include/uapi/linux/ethtool.h | 12 ++++++++++++ 4 files changed, 40 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c index 146b81f08a89..fed006cbc185 100644 --- a/drivers/net/pse-pd/pse_core.c +++ b/drivers/net/pse-pd/pse_core.c @@ -312,3 +312,15 @@ int pse_ethtool_set_config(struct pse_control *psec, return err; } EXPORT_SYMBOL_GPL(pse_ethtool_set_config); + +bool pse_has_podl(struct pse_control *psec) +{ + return psec->pcdev->types & ETHTOOL_PSE_PODL; +} +EXPORT_SYMBOL_GPL(pse_has_podl); + +bool pse_has_c33(struct pse_control *psec) +{ + return psec->pcdev->types & ETHTOOL_PSE_C33; +} +EXPORT_SYMBOL_GPL(pse_has_c33); diff --git a/drivers/net/pse-pd/pse_regulator.c b/drivers/net/pse-pd/pse_regulator.c index 1dedf4de296e..547af384764b 100644 --- a/drivers/net/pse-pd/pse_regulator.c +++ b/drivers/net/pse-pd/pse_regulator.c @@ -116,6 +116,7 @@ pse_reg_probe(struct platform_device *pdev) priv->pcdev.owner = THIS_MODULE; priv->pcdev.ops = &pse_reg_ops; priv->pcdev.dev = dev; + priv->pcdev.types = ETHTOOL_PSE_PODL; ret = devm_pse_controller_register(dev, &priv->pcdev); if (ret) { dev_err(dev, "failed to register PSE controller (%pe)\n", diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h index be4e5754eb24..19589571157f 100644 --- a/include/linux/pse-pd/pse.h +++ b/include/linux/pse-pd/pse.h @@ -77,6 +77,7 @@ struct pse_control; * device tree to id as given to the PSE control ops * @nr_lines: number of PSE controls in this controller device * @lock: Mutex for serialization access to the PSE controller + * @types: types of the PSE controller */ struct pse_controller_dev { const struct pse_controller_ops *ops; @@ -89,6 +90,7 @@ struct pse_controller_dev { const struct of_phandle_args *pse_spec); unsigned int nr_lines; struct mutex lock; + enum ethtool_pse_types types; }; #if IS_ENABLED(CONFIG_PSE_CONTROLLER) @@ -108,6 +110,9 @@ int pse_ethtool_set_config(struct pse_control *psec, struct netlink_ext_ack *extack, const struct pse_control_config *config); +bool pse_has_podl(struct pse_control *psec); +bool pse_has_c33(struct pse_control *psec); + #else static inline struct pse_control *of_pse_control_get(struct device_node 
*node) @@ -133,6 +138,16 @@ static inline int pse_ethtool_set_config(struct pse_control *psec, return -ENOTSUPP; } +static inline bool pse_has_podl(struct pse_control *psec) +{ + return false; +} + +static inline bool pse_has_c33(struct pse_control *psec) +{ + return false; +} + #endif #endif diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 7b9a3d890949..041e09c3515d 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -752,6 +752,18 @@ enum ethtool_module_power_mode { ETHTOOL_MODULE_POWER_MODE_HIGH, }; +/** + * enum ethtool_pse_types - Types of PSE controller. + * @ETHTOOL_PSE_UNKNOWN: Type of PSE controller is unknown + * @ETHTOOL_PSE_PODL: PSE controller which support PoDL + * @ETHTOOL_PSE_C33: PSE controller which support Clause 33 (PoE) + */ +enum ethtool_pse_types { + ETHTOOL_PSE_UNKNOWN = 1 << 0, + ETHTOOL_PSE_PODL = 1 << 1, + ETHTOOL_PSE_C33 = 1 << 2, +}; + /** * enum ethtool_c33_pse_admin_state - operational state of the PoDL PSE * functions. IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState -- cgit From 9be9567a7c59b7314ea776f56945fe3fc28efe99 Mon Sep 17 00:00:00 2001 From: "Kory Maincent (Dent Project)" Date: Wed, 17 Apr 2024 16:39:55 +0200 Subject: net: pse-pd: Add support for PSE PIs The Power Sourcing Equipment Power Interface (PSE PI) plays a pivotal role in the architecture of Power over Ethernet (PoE) systems. It is essentially a blueprint that outlines how one or multiple power sources are connected to the eight-pin modular jack, commonly known as the Ethernet RJ45 port. This connection scheme is crucial for enabling the delivery of power alongside data over Ethernet cables. This patch adds support for getting the PSE controller node through PSE PI device subnode. This supports adds a way to get the PSE PI id from the pse_pi devicetree subnode of a PSE controller node simply by reading the reg property. Reviewed-by: Andrew Lunn Signed-off-by: Kory Maincent Link: https://lore.kernel.org/r/20240417-feature_poe-v9-7-242293fd1900@bootlin.com Signed-off-by: Jakub Kicinski --- Documentation/networking/pse-pd/index.rst | 1 + Documentation/networking/pse-pd/pse-pi.rst | 301 +++++++++++++++++++++++++++++ drivers/net/pse-pd/pse_core.c | 262 +++++++++++++++++++++---- include/linux/pse-pd/pse.h | 38 +++- 4 files changed, 565 insertions(+), 37 deletions(-) create mode 100644 Documentation/networking/pse-pd/pse-pi.rst (limited to 'include/linux') diff --git a/Documentation/networking/pse-pd/index.rst b/Documentation/networking/pse-pd/index.rst index 18197bc7303d..de28a5aee316 100644 --- a/Documentation/networking/pse-pd/index.rst +++ b/Documentation/networking/pse-pd/index.rst @@ -7,3 +7,4 @@ Power Sourcing Equipment (PSE) Documentation :maxdepth: 2 introduction + pse-pi diff --git a/Documentation/networking/pse-pd/pse-pi.rst b/Documentation/networking/pse-pd/pse-pi.rst new file mode 100644 index 000000000000..5cad14fedc13 --- /dev/null +++ b/Documentation/networking/pse-pd/pse-pi.rst @@ -0,0 +1,301 @@ +.. SPDX-License-Identifier: GPL-2.0 + +PSE Power Interface (PSE PI) Documentation +========================================== + +The Power Sourcing Equipment Power Interface (PSE PI) plays a pivotal role in +the architecture of Power over Ethernet (PoE) systems. It is essentially a +blueprint that outlines how one or multiple power sources are connected to the +eight-pin modular jack, commonly known as the Ethernet RJ45 port. This +connection scheme is crucial for enabling the delivery of power alongside data +over Ethernet cables. 
+ +Documentation and Standards +--------------------------- + +The IEEE 802.3 standard provides detailed documentation on the PSE PI. +Specifically: + +- Section "33.2.3 PI pin assignments" covers the pin assignments for PoE + systems that utilize two pairs for power delivery. +- Section "145.2.4 PSE PI" addresses the configuration for PoE systems that + deliver power over all four pairs of an Ethernet cable. + +PSE PI and Single Pair Ethernet +------------------------------- + +Single Pair Ethernet (SPE) represents a different approach to Ethernet +connectivity, utilizing just one pair of conductors for both data and power +transmission. Unlike the configurations detailed in the PSE PI for standard +Ethernet, which can involve multiple power sourcing arrangements across four or +two pairs of wires, SPE operates on a simpler model due to its single-pair +design. As a result, the complexities of choosing between alternative pin +assignments for power delivery, as described in the PSE PI for multi-pair +Ethernet, are not applicable to SPE. + +Understanding PSE PI +-------------------- + +The Power Sourcing Equipment Power Interface (PSE PI) is a framework defining +how Power Sourcing Equipment (PSE) delivers power to Powered Devices (PDs) over +Ethernet cables. It details two main configurations for power delivery, known +as Alternative A and Alternative B, which are distinguished not only by their +method of power transmission but also by the implications for polarity and data +transmission direction. + +Alternative A and B Overview +---------------------------- + +- **Alternative A:** Utilizes RJ45 conductors 1, 2, 3 and 6. In either case of + networks 10/100BaseT or 1G/2G/5G/10GBaseT, the pairs used are carrying data. + The power delivery's polarity in this alternative can vary based on the MDI + (Medium Dependent Interface) or MDI-X (Medium Dependent Interface Crossover) + configuration. + +- **Alternative B:** Utilizes RJ45 conductors 4, 5, 7 and 8. In case of + 10/100BaseT network the pairs used are spare pairs without data and are less + influenced by data transmission direction. This is not the case for + 1G/2G/5G/10GBaseT network. Alternative B includes two configurations with + different polarities, known as variant X and variant S, to accommodate + different network requirements and device specifications. + +Table 145-3 PSE Pinout Alternatives +----------------------------------- + +The following table outlines the pin configurations for both Alternative A and +Alternative B. 
+ ++------------+-------------------+-----------------+-----------------+-----------------+ +| Conductor | Alternative A | Alternative A | Alternative B | Alternative B | +| | (MDI-X) | (MDI) | (X) | (S) | ++============+===================+=================+=================+=================+ +| 1 | Negative V | Positive V | - | - | ++------------+-------------------+-----------------+-----------------+-----------------+ +| 2 | Negative V | Positive V | - | - | ++------------+-------------------+-----------------+-----------------+-----------------+ +| 3 | Positive V | Negative V | - | - | ++------------+-------------------+-----------------+-----------------+-----------------+ +| 4 | - | - | Negative V | Positive V | ++------------+-------------------+-----------------+-----------------+-----------------+ +| 5 | - | - | Negative V | Positive V | ++------------+-------------------+-----------------+-----------------+-----------------+ +| 6 | Positive V | Negative V | - | - | ++------------+-------------------+-----------------+-----------------+-----------------+ +| 7 | - | - | Positive V | Negative V | ++------------+-------------------+-----------------+-----------------+-----------------+ +| 8 | - | - | Positive V | Negative V | ++------------+-------------------+-----------------+-----------------+-----------------+ + +.. note:: + - "Positive V" and "Negative V" indicate the voltage polarity for each pin. + - "-" indicates that the pin is not used for power delivery in that + specific configuration. + +PSE PI compatibilities +---------------------- + +The following table outlines the compatibility between the pinout alternative +and the 1000/2.5G/5G/10GBaseT in the PSE 2 pairs connection. + ++---------+---------------+---------------------+-----------------------+ +| Variant | Alternative | Power Feeding Type | Compatibility with | +| | (A/B) | (Direct/Phantom) | 1000/2.5G/5G/10GBaseT | ++=========+===============+=====================+=======================+ +| 1 | A | Phantom | Yes | ++---------+---------------+---------------------+-----------------------+ +| 2 | B | Phantom | Yes | ++---------+---------------+---------------------+-----------------------+ +| 3 | B | Direct | No | ++---------+---------------+---------------------+-----------------------+ + +.. note:: + - "Direct" indicate a variant where the power is injected directly to pairs + without using magnetics in case of spare pairs. + - "Phantom" indicate power path over coils/magnetics as it is done for + Alternative A variant. + +In case of PSE 4 pairs, a PSE supporting only 10/100BaseT (which mean Direct +Power on pinout Alternative B) is not compatible with a 4 pairs +1000/2.5G/5G/10GBaseT. + +PSE Power Interface (PSE PI) Connection Diagram +----------------------------------------------- + +The diagram below illustrates the connection architecture between the RJ45 +port, the Ethernet PHY (Physical Layer), and the PSE PI (Power Sourcing +Equipment Power Interface), demonstrating how power and data are delivered +simultaneously through an Ethernet cable. The RJ45 port serves as the physical +interface for these connections, with each of its eight pins connected to both +the Ethernet PHY for data transmission and the PSE PI for power delivery. + +.. 
code-block:: + + +--------------------------+ + | | + | RJ45 Port | + | | + +--+--+--+--+--+--+--+--+--+ +-------------+ + 1| 2| 3| 4| 5| 6| 7| 8| | | + | | | | | | | o-------------------+ | + | | | | | | o--|-------------------+ +<--- PSE 1 + | | | | | o--|--|-------------------+ | + | | | | o--|--|--|-------------------+ | + | | | o--|--|--|--|-------------------+ PSE PI | + | | o--|--|--|--|--|-------------------+ | + | o--|--|--|--|--|--|-------------------+ +<--- PSE 2 (optional) + o--|--|--|--|--|--|--|-------------------+ | + | | | | | | | | | | + +--+--+--+--+--+--+--+--+--+ +-------------+ + | | + | Ethernet PHY | + | | + +--------------------------+ + +Simple PSE PI Configuration for Alternative A +--------------------------------------------- + +The diagram below illustrates a straightforward PSE PI (Power Sourcing +Equipment Power Interface) configuration designed to support the Alternative A +setup for Power over Ethernet (PoE). This implementation is tailored to provide +power delivery through the data-carrying pairs of an Ethernet cable, suitable +for either MDI or MDI-X configurations, albeit supporting one variation at a +time. + +.. code-block:: + + +-------------+ + | PSE PI | + 8 -----+ +-------------+ + 7 -----+ Rail 1 | + 6 -----+------+----------------------+ + 5 -----+ | | + 4 -----+ | Rail 2 | PSE 1 + 3 -----+------/ +------------+ + 2 -----+--+-------------/ | + 1 -----+--/ +-------------+ + | + +-------------+ + +In this configuration: + +- Pins 1 and 2, as well as pins 3 and 6, are utilized for power delivery in + addition to data transmission. This aligns with the standard wiring for + 10/100BaseT Ethernet networks where these pairs are used for data. +- Rail 1 and Rail 2 represent the positive and negative voltage rails, with + Rail 1 connected to pins 1 and 2, and Rail 2 connected to pins 3 and 6. + More advanced PSE PI configurations may include integrated or external + switches to change the polarity of the voltage rails, allowing for + compatibility with both MDI and MDI-X configurations. + +More complex PSE PI configurations may include additional components, to support +Alternative B, or to provide additional features such as power management, or +additional power delivery capabilities such as 2-pair or 4-pair power delivery. + +.. code-block:: + + +-------------+ + | PSE PI | + | +---+ + 8 -----+--------+ | +-------------+ + 7 -----+--------+ | Rail 1 | + 6 -----+--------+ +-----------------+ + 5 -----+--------+ | | + 4 -----+--------+ | Rail 2 | PSE 1 + 3 -----+--------+ +----------------+ + 2 -----+--------+ | | + 1 -----+--------+ | +-------------+ + | +---+ + +-------------+ + +Device Tree Configuration: Describing PSE PI Configurations +----------------------------------------------------------- + +The necessity for a separate PSE PI node in the device tree is influenced by +the intricacy of the Power over Ethernet (PoE) system's setup. Here are +descriptions of both simple and complex PSE PI configurations to illustrate +this decision-making process: + +**Simple PSE PI Configuration:** +In a straightforward scenario, the PSE PI setup involves a direct, one-to-one +connection between a single PSE controller and an Ethernet port. This setup +typically supports basic PoE functionality without the need for dynamic +configuration or management of multiple power delivery modes. 
For such simple +configurations, detailing the PSE PI within the existing PSE controller's node +may suffice, as the system does not encompass additional complexity that +warrants a separate node. The primary focus here is on the clear and direct +association of power delivery to a specific Ethernet port. + +**Complex PSE PI Configuration:** +Contrastingly, a complex PSE PI setup may encompass multiple PSE controllers or +auxiliary circuits that collectively manage power delivery to one Ethernet +port. Such configurations might support a range of PoE standards and require +the capability to dynamically configure power delivery based on the operational +mode (e.g., PoE2 versus PoE4) or specific requirements of connected devices. In +these instances, a dedicated PSE PI node becomes essential for accurately +documenting the system architecture. This node would serve to detail the +interactions between different PSE controllers, the support for various PoE +modes, and any additional logic required to coordinate power delivery across +the network infrastructure. + +**Guidance:** + +For simple PSE setups, including PSE PI information in the PSE controller node +might suffice due to the straightforward nature of these systems. However, +complex configurations, involving multiple components or advanced PoE features, +benefit from a dedicated PSE PI node. This method adheres to IEEE 802.3 +specifications, improving documentation clarity and ensuring accurate +representation of the PoE system's complexity. + +PSE PI Node: Essential Information +---------------------------------- + +The PSE PI (Power Sourcing Equipment Power Interface) node in a device tree can +include several key pieces of information critical for defining the power +delivery capabilities and configurations of a PoE (Power over Ethernet) system. +Below is a list of such information, along with explanations for their +necessity and reasons why they might not be found within a PSE controller node: + +1. **Powered Pairs Configuration** + + - *Description:* Identifies the pairs used for power delivery in the + Ethernet cable. + - *Necessity:* Essential to ensure the correct pairs are powered according + to the board's design. + - *PSE Controller Node:* Typically lacks details on physical pair usage, + focusing on power regulation. + +2. **Polarity of Powered Pairs** + + - *Description:* Specifies the polarity (positive or negative) for each + powered pair. + - *Necessity:* Critical for safe and effective power transmission to PDs. + - *PSE Controller Node:* Polarity management may exceed the standard + functionalities of PSE controllers. + +3. **PSE Cells Association** + + - *Description:* Details the association of PSE cells with Ethernet ports or + pairs in multi-cell configurations. + - *Necessity:* Allows for optimized power resource allocation in complex + systems. + - *PSE Controller Node:* Controllers may not manage cell associations + directly, focusing instead on power flow regulation. + +4. **Support for PoE Standards** + + - *Description:* Lists the PoE standards and configurations supported by the + system. + - *Necessity:* Ensures system compatibility with various PDs and adherence + to industry standards. + - *PSE Controller Node:* Specific capabilities may depend on the overall PSE + PI design rather than the controller alone. Multiple PSE cells per PI + do not necessarily imply support for multiple PoE standards. + +5. 
**Protection Mechanisms** + + - *Description:* Outlines additional protection mechanisms, such as + overcurrent protection and thermal management. + - *Necessity:* Provides extra safety and stability, complementing PSE + controller protections. + - *PSE Controller Node:* Some protections may be implemented via + board-specific hardware or algorithms external to the controller. diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c index fed006cbc185..ca5ced8e0d8a 100644 --- a/drivers/net/pse-pd/pse_core.c +++ b/drivers/net/pse-pd/pse_core.c @@ -27,38 +27,182 @@ struct pse_control { struct kref refcnt; }; +static int of_load_single_pse_pi_pairset(struct device_node *node, + struct pse_pi *pi, + int pairset_num) +{ + struct device_node *pairset_np; + const char *name; + int ret; + + ret = of_property_read_string_index(node, "pairset-names", + pairset_num, &name); + if (ret) + return ret; + + if (!strcmp(name, "alternative-a")) { + pi->pairset[pairset_num].pinout = ALTERNATIVE_A; + } else if (!strcmp(name, "alternative-b")) { + pi->pairset[pairset_num].pinout = ALTERNATIVE_B; + } else { + pr_err("pse: wrong pairset-names value %s (%pOF)\n", + name, node); + return -EINVAL; + } + + pairset_np = of_parse_phandle(node, "pairsets", pairset_num); + if (!pairset_np) + return -ENODEV; + + pi->pairset[pairset_num].np = pairset_np; + + return 0; +} + /** - * of_pse_zero_xlate - dummy function for controllers with one only control - * @pcdev: a pointer to the PSE controller device - * @pse_spec: PSE line specifier as found in the device tree + * of_load_pse_pi_pairsets - load PSE PI pairsets pinout and polarity + * @node: a pointer of the device node + * @pi: a pointer of the PSE PI to fill + * @npairsets: the number of pairsets (1 or 2) used by the PI * - * This static translation function is used by default if of_xlate in - * :c:type:`pse_controller_dev` is not set. It is useful for all PSE - * controllers with #pse-cells = <0>. 
+ * Return: 0 on success and failure value on error */ -static int of_pse_zero_xlate(struct pse_controller_dev *pcdev, - const struct of_phandle_args *pse_spec) +static int of_load_pse_pi_pairsets(struct device_node *node, + struct pse_pi *pi, + int npairsets) { - return 0; + int i, ret; + + ret = of_property_count_strings(node, "pairset-names"); + if (ret != npairsets) { + pr_err("pse: amount of pairsets and pairset-names is not equal %d != %d (%pOF)\n", + npairsets, ret, node); + return -EINVAL; + } + + for (i = 0; i < npairsets; i++) { + ret = of_load_single_pse_pi_pairset(node, pi, i); + if (ret) + goto out; + } + + if (npairsets == 2 && + pi->pairset[0].pinout == pi->pairset[1].pinout) { + pr_err("pse: two PI pairsets can not have identical pinout (%pOF)", + node); + ret = -EINVAL; + } + +out: + /* If an error appears, release all the pairset device node kref */ + if (ret) { + of_node_put(pi->pairset[0].np); + pi->pairset[0].np = NULL; + of_node_put(pi->pairset[1].np); + pi->pairset[1].np = NULL; + } + + return ret; +} + +static void pse_release_pis(struct pse_controller_dev *pcdev) +{ + int i; + + for (i = 0; i <= pcdev->nr_lines; i++) { + of_node_put(pcdev->pi[i].pairset[0].np); + of_node_put(pcdev->pi[i].pairset[1].np); + of_node_put(pcdev->pi[i].np); + } + kfree(pcdev->pi); } /** - * of_pse_simple_xlate - translate pse_spec to the PSE line number + * of_load_pse_pis - load all the PSE PIs * @pcdev: a pointer to the PSE controller device - * @pse_spec: PSE line specifier as found in the device tree * - * This static translation function is used by default if of_xlate in - * :c:type:`pse_controller_dev` is not set. It is useful for all PSE - * controllers with 1:1 mapping, where PSE lines can be indexed by number - * without gaps. + * Return: 0 on success and failure value on error */ -static int of_pse_simple_xlate(struct pse_controller_dev *pcdev, - const struct of_phandle_args *pse_spec) +static int of_load_pse_pis(struct pse_controller_dev *pcdev) { - if (pse_spec->args[0] >= pcdev->nr_lines) - return -EINVAL; + struct device_node *np = pcdev->dev->of_node; + struct device_node *node, *pis; + int ret; - return pse_spec->args[0]; + if (!np) + return -ENODEV; + + pis = of_get_child_by_name(np, "pse-pis"); + if (!pis) { + /* no description of PSE PIs */ + pcdev->no_of_pse_pi = true; + return 0; + } + + pcdev->pi = kcalloc(pcdev->nr_lines, sizeof(*pcdev->pi), GFP_KERNEL); + if (!pcdev->pi) { + of_node_put(pis); + return -ENOMEM; + } + + for_each_child_of_node(pis, node) { + struct pse_pi pi = {0}; + u32 id; + + if (!of_node_name_eq(node, "pse-pi")) + continue; + + ret = of_property_read_u32(node, "reg", &id); + if (ret) { + dev_err(pcdev->dev, + "can't get reg property for node '%pOF'", + node); + goto out; + } + + if (id >= pcdev->nr_lines) { + dev_err(pcdev->dev, + "reg value (%u) is out of range (%u) (%pOF)\n", + id, pcdev->nr_lines, node); + ret = -EINVAL; + goto out; + } + + if (pcdev->pi[id].np) { + dev_err(pcdev->dev, + "other node with same reg value was already registered. %pOF : %pOF\n", + pcdev->pi[id].np, node); + ret = -EINVAL; + goto out; + } + + ret = of_count_phandle_with_args(node, "pairsets", NULL); + /* npairsets is limited to value one or two */ + if (ret == 1 || ret == 2) { + ret = of_load_pse_pi_pairsets(node, &pi, ret); + if (ret) + goto out; + } else if (ret != ENOENT) { + dev_err(pcdev->dev, + "error: wrong number of pairsets. 
Should be 1 or 2, got %d (%pOF)\n", + ret, node); + ret = -EINVAL; + goto out; + } + + of_node_get(node); + pi.np = node; + memcpy(&pcdev->pi[id], &pi, sizeof(pi)); + } + + of_node_put(pis); + return 0; + +out: + pse_release_pis(pcdev); + of_node_put(node); + of_node_put(pis); + return ret; } /** @@ -67,16 +211,18 @@ static int of_pse_simple_xlate(struct pse_controller_dev *pcdev, */ int pse_controller_register(struct pse_controller_dev *pcdev) { - if (!pcdev->of_xlate) { - if (pcdev->of_pse_n_cells == 0) - pcdev->of_xlate = of_pse_zero_xlate; - else if (pcdev->of_pse_n_cells == 1) - pcdev->of_xlate = of_pse_simple_xlate; - } + int ret; mutex_init(&pcdev->lock); INIT_LIST_HEAD(&pcdev->pse_control_head); + if (!pcdev->nr_lines) + pcdev->nr_lines = 1; + + ret = of_load_pse_pis(pcdev); + if (ret) + return ret; + mutex_lock(&pse_list_mutex); list_add(&pcdev->list, &pse_controller_list); mutex_unlock(&pse_list_mutex); @@ -91,6 +237,7 @@ EXPORT_SYMBOL_GPL(pse_controller_register); */ void pse_controller_unregister(struct pse_controller_dev *pcdev) { + pse_release_pis(pcdev); mutex_lock(&pse_list_mutex); list_del(&pcdev->list); mutex_unlock(&pse_list_mutex); @@ -203,8 +350,48 @@ pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index) return psec; } -struct pse_control * -of_pse_control_get(struct device_node *node) +/** + * of_pse_match_pi - Find the PSE PI id matching the device node phandle + * @pcdev: a pointer to the PSE controller device + * @np: a pointer to the device node + * + * Return: id of the PSE PI, -EINVAL if not found + */ +static int of_pse_match_pi(struct pse_controller_dev *pcdev, + struct device_node *np) +{ + int i; + + for (i = 0; i <= pcdev->nr_lines; i++) { + if (pcdev->pi[i].np == np) + return i; + } + + return -EINVAL; +} + +/** + * psec_id_xlate - translate pse_spec to the PSE line number according + * to the number of pse-cells in case of no pse_pi node + * @pcdev: a pointer to the PSE controller device + * @pse_spec: PSE line specifier as found in the device tree + * + * Return: 0 if #pse-cells = <0>. Return PSE line number otherwise. 
+ */ +static int psec_id_xlate(struct pse_controller_dev *pcdev, + const struct of_phandle_args *pse_spec) +{ + if (!pcdev->of_pse_n_cells) + return 0; + + if (pcdev->of_pse_n_cells > 1 || + pse_spec->args[0] >= pcdev->nr_lines) + return -EINVAL; + + return pse_spec->args[0]; +} + +struct pse_control *of_pse_control_get(struct device_node *node) { struct pse_controller_dev *r, *pcdev; struct of_phandle_args args; @@ -222,7 +409,14 @@ of_pse_control_get(struct device_node *node) mutex_lock(&pse_list_mutex); pcdev = NULL; list_for_each_entry(r, &pse_controller_list, list) { - if (args.np == r->dev->of_node) { + if (!r->no_of_pse_pi) { + ret = of_pse_match_pi(r, args.np); + if (ret >= 0) { + pcdev = r; + psec_id = ret; + break; + } + } else if (args.np == r->dev->of_node) { pcdev = r; break; } @@ -238,10 +432,12 @@ of_pse_control_get(struct device_node *node) goto out; } - psec_id = pcdev->of_xlate(pcdev, &args); - if (psec_id < 0) { - psec = ERR_PTR(psec_id); - goto out; + if (pcdev->no_of_pse_pi) { + psec_id = psec_id_xlate(pcdev, &args); + if (psec_id < 0) { + psec = ERR_PTR(psec_id); + goto out; + } } /* pse_list_mutex also protects the pcdev's pse_control list */ diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h index 19589571157f..e19d58b5e777 100644 --- a/include/linux/pse-pd/pse.h +++ b/include/linux/pse-pd/pse.h @@ -64,6 +64,36 @@ struct device_node; struct of_phandle_args; struct pse_control; +/* PSE PI pairset pinout can either be Alternative A or Alternative B */ +enum pse_pi_pairset_pinout { + ALTERNATIVE_A, + ALTERNATIVE_B, +}; + +/** + * struct pse_pi_pairset - PSE PI pairset entity describing the pinout + * alternative ant its phandle + * + * @pinout: description of the pinout alternative + * @np: device node pointer describing the pairset phandle + */ +struct pse_pi_pairset { + enum pse_pi_pairset_pinout pinout; + struct device_node *np; +}; + +/** + * struct pse_pi - PSE PI (Power Interface) entity as described in + * IEEE 802.3-2022 145.2.4 + * + * @pairset: table of the PSE PI pinout alternative for the two pairset + * @np: device node pointer of the PSE PI node + */ +struct pse_pi { + struct pse_pi_pairset pairset[2]; + struct device_node *np; +}; + /** * struct pse_controller_dev - PSE controller entity that might * provide multiple PSE controls @@ -73,11 +103,11 @@ struct pse_control; * @pse_control_head: head of internal list of requested PSE controls * @dev: corresponding driver model device struct * @of_pse_n_cells: number of cells in PSE line specifiers - * @of_xlate: translation function to translate from specifier as found in the - * device tree to id as given to the PSE control ops * @nr_lines: number of PSE controls in this controller device * @lock: Mutex for serialization access to the PSE controller * @types: types of the PSE controller + * @pi: table of PSE PIs described in this controller device + * @no_of_pse_pi: flag set if the pse_pis devicetree node is not used */ struct pse_controller_dev { const struct pse_controller_ops *ops; @@ -86,11 +116,11 @@ struct pse_controller_dev { struct list_head pse_control_head; struct device *dev; int of_pse_n_cells; - int (*of_xlate)(struct pse_controller_dev *pcdev, - const struct of_phandle_args *pse_spec); unsigned int nr_lines; struct mutex lock; enum ethtool_pse_types types; + struct pse_pi *pi; + bool no_of_pse_pi; }; #if IS_ENABLED(CONFIG_PSE_CONTROLLER) -- cgit From 29e28d1d7a16a3f6dba55c1df5cc6e0f9edf3017 Mon Sep 17 00:00:00 2001 From: "Kory Maincent (Dent Project)" Date: Wed, 17 Apr 2024 
16:39:57 +0200 Subject: net: pse-pd: Add support for setup_pi_matrix callback Implement the setup_pi_matrix callback to configure the PSE PI matrix. It is invoked after the core has parsed the pse_pis devicetree subnode and before the PSE controller is registered. Signed-off-by: Kory Maincent Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/20240417-feature_poe-v9-9-242293fd1900@bootlin.com Signed-off-by: Jakub Kicinski --- drivers/net/pse-pd/pse_core.c | 6 ++++++ include/linux/pse-pd/pse.h | 2 ++ 2 files changed, 8 insertions(+) (limited to 'include/linux') diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c index ca5ced8e0d8a..a7ff7676ab77 100644 --- a/drivers/net/pse-pd/pse_core.c +++ b/drivers/net/pse-pd/pse_core.c @@ -223,6 +223,12 @@ int pse_controller_register(struct pse_controller_dev *pcdev) if (ret) return ret; + if (pcdev->ops->setup_pi_matrix) { + ret = pcdev->ops->setup_pi_matrix(pcdev); + if (ret) + return ret; + } + mutex_lock(&pse_list_mutex); list_add(&pcdev->list, &pse_controller_list); mutex_unlock(&pse_list_mutex); diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h index e19d58b5e777..fa0c73da0cf1 100644 --- a/include/linux/pse-pd/pse.h +++ b/include/linux/pse-pd/pse.h @@ -49,6 +49,7 @@ struct pse_control_status { * * @ethtool_get_status: get PSE control status for ethtool interface * @ethtool_set_config: set PSE control configuration over ethtool interface + * @setup_pi_matrix: setup PI matrix of the PSE controller */ struct pse_controller_ops { int (*ethtool_get_status)(struct pse_controller_dev *pcdev, @@ -57,6 +58,7 @@ struct pse_controller_ops { int (*ethtool_set_config)(struct pse_controller_dev *pcdev, unsigned long id, struct netlink_ext_ack *extack, const struct pse_control_config *config); + int (*setup_pi_matrix)(struct pse_controller_dev *pcdev); }; struct module; -- cgit From d83e13761d5b0568376963729abcccf6de5a43ba Mon Sep 17 00:00:00 2001 From: "Kory Maincent (Dent Project)" Date: Wed, 17 Apr 2024 16:39:58 +0200 Subject: net: pse-pd: Use regulator framework within PSE framework Integrate the regulator framework into the PSE framework for enhanced access to features such as voltage, power measurement, and limits, which are akin to regulators. Additionally, PSE features like port priorities could potentially enhance the regulator framework. Note that this integration introduces some implementation complexity, including wrapper callbacks, but the potential benefits make it worthwhile. Regulators use an enable counter with specific behavior: two calls to regulator_disable will trigger kernel warnings, and if the counter exceeds one, a regulator_disable call won't disable the PSE PI. This behavior isn't suitable for PSE control. Add a boolean 'enabled' state to prevent multiple calls to regulator_enable/disable. These calls are only made from the PSE framework, which won't have any regulator children, so no mutex is needed to safeguard this boolean. regulator_get needs the consumer device pointer. Use the PSE as both regulator provider and consumer device until we have RJ45 ports represented in the kernel. 
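The guard described above can be sketched as follows. This is a minimal illustration, not the patch itself: the helper name pse_pi_set_enabled_once() is hypothetical, the sketch assumes it lives in pse_core.c where struct pse_control and struct pse_pi are visible, and the pcdev->lock locking used by the real code is omitted for brevity. In the actual patch the flag is updated inside the pse_pi_enable()/pse_pi_disable() regulator ops and checked in pse_ethtool_c33_set_config()/pse_ethtool_podl_set_config(), as shown in the diff below.

.. code-block:: c

	/* Minimal sketch: keep the regulator use counter balanced by only
	 * forwarding the first enable and the matching disable for a PI.
	 */
	static int pse_pi_set_enabled_once(struct pse_control *psec, bool enable)
	{
		struct pse_pi *pi = &psec->pcdev->pi[psec->id];
		int err = 0;

		if (enable && !pi->admin_state_enabled)
			err = regulator_enable(psec->ps);
		else if (!enable && pi->admin_state_enabled)
			err = regulator_disable(psec->ps);

		if (!err)
			pi->admin_state_enabled = enable;

		return err;
	}

With this pattern a second ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED request becomes a no-op instead of bumping the regulator counter to two, so a single disable always powers the PI off.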
Signed-off-by: Kory Maincent Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/20240417-feature_poe-v9-10-242293fd1900@bootlin.com Signed-off-by: Jakub Kicinski --- drivers/net/pse-pd/pse_core.c | 249 ++++++++++++++++++++++++++++++++++--- drivers/net/pse-pd/pse_regulator.c | 48 +++---- include/linux/pse-pd/pse.h | 15 ++- 3 files changed, 267 insertions(+), 45 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c index a7ff7676ab77..31f23c454678 100644 --- a/drivers/net/pse-pd/pse_core.c +++ b/drivers/net/pse-pd/pse_core.c @@ -8,6 +8,8 @@ #include #include #include +#include +#include static DEFINE_MUTEX(pse_list_mutex); static LIST_HEAD(pse_controller_list); @@ -16,12 +18,14 @@ static LIST_HEAD(pse_controller_list); * struct pse_control - a PSE control * @pcdev: a pointer to the PSE controller device * this PSE control belongs to + * @ps: PSE PI supply of the PSE control * @list: list entry for the pcdev's PSE controller list * @id: ID of the PSE line in the PSE controller device * @refcnt: Number of gets of this pse_control */ struct pse_control { struct pse_controller_dev *pcdev; + struct regulator *ps; struct list_head list; unsigned int id; struct kref refcnt; @@ -132,6 +136,10 @@ static int of_load_pse_pis(struct pse_controller_dev *pcdev) if (!np) return -ENODEV; + pcdev->pi = kcalloc(pcdev->nr_lines, sizeof(*pcdev->pi), GFP_KERNEL); + if (!pcdev->pi) + return -ENOMEM; + pis = of_get_child_by_name(np, "pse-pis"); if (!pis) { /* no description of PSE PIs */ @@ -139,12 +147,6 @@ static int of_load_pse_pis(struct pse_controller_dev *pcdev) return 0; } - pcdev->pi = kcalloc(pcdev->nr_lines, sizeof(*pcdev->pi), GFP_KERNEL); - if (!pcdev->pi) { - of_node_put(pis); - return -ENOMEM; - } - for_each_child_of_node(pis, node) { struct pse_pi pi = {0}; u32 id; @@ -205,13 +207,124 @@ out: return ret; } +static int pse_pi_is_enabled(struct regulator_dev *rdev) +{ + struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev); + const struct pse_controller_ops *ops; + int id, ret; + + ops = pcdev->ops; + if (!ops->pi_is_enabled) + return -EOPNOTSUPP; + + id = rdev_get_id(rdev); + mutex_lock(&pcdev->lock); + ret = ops->pi_is_enabled(pcdev, id); + mutex_unlock(&pcdev->lock); + + return ret; +} + +static int pse_pi_enable(struct regulator_dev *rdev) +{ + struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev); + const struct pse_controller_ops *ops; + int id, ret; + + ops = pcdev->ops; + if (!ops->pi_enable) + return -EOPNOTSUPP; + + id = rdev_get_id(rdev); + mutex_lock(&pcdev->lock); + ret = ops->pi_enable(pcdev, id); + if (!ret) + pcdev->pi[id].admin_state_enabled = 1; + mutex_unlock(&pcdev->lock); + + return ret; +} + +static int pse_pi_disable(struct regulator_dev *rdev) +{ + struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev); + const struct pse_controller_ops *ops; + int id, ret; + + ops = pcdev->ops; + if (!ops->pi_disable) + return -EOPNOTSUPP; + + id = rdev_get_id(rdev); + mutex_lock(&pcdev->lock); + ret = ops->pi_disable(pcdev, id); + if (!ret) + pcdev->pi[id].admin_state_enabled = 0; + mutex_unlock(&pcdev->lock); + + return ret; +} + +static const struct regulator_ops pse_pi_ops = { + .is_enabled = pse_pi_is_enabled, + .enable = pse_pi_enable, + .disable = pse_pi_disable, +}; + +static int +devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev, + char *name, int id) +{ + struct regulator_init_data *rinit_data; + struct regulator_config rconfig = {0}; + struct regulator_desc *rdesc; + struct regulator_dev 
*rdev; + + rinit_data = devm_kzalloc(pcdev->dev, sizeof(*rinit_data), + GFP_KERNEL); + if (!rinit_data) + return -ENOMEM; + + rdesc = devm_kzalloc(pcdev->dev, sizeof(*rdesc), GFP_KERNEL); + if (!rdesc) + return -ENOMEM; + + /* Regulator descriptor id have to be the same as its associated + * PSE PI id for the well functioning of the PSE controls. + */ + rdesc->id = id; + rdesc->name = name; + rdesc->type = REGULATOR_CURRENT; + rdesc->ops = &pse_pi_ops; + rdesc->owner = pcdev->owner; + + rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS; + rinit_data->supply_regulator = "vpwr"; + + rconfig.dev = pcdev->dev; + rconfig.driver_data = pcdev; + rconfig.init_data = rinit_data; + + rdev = devm_regulator_register(pcdev->dev, rdesc, &rconfig); + if (IS_ERR(rdev)) { + dev_err_probe(pcdev->dev, PTR_ERR(rdev), + "Failed to register regulator\n"); + return PTR_ERR(rdev); + } + + pcdev->pi[id].rdev = rdev; + + return 0; +} + /** * pse_controller_register - register a PSE controller device * @pcdev: a pointer to the initialized PSE controller device */ int pse_controller_register(struct pse_controller_dev *pcdev) { - int ret; + size_t reg_name_len; + int ret, i; mutex_init(&pcdev->lock); INIT_LIST_HEAD(&pcdev->pse_control_head); @@ -229,6 +342,31 @@ int pse_controller_register(struct pse_controller_dev *pcdev) return ret; } + /* Each regulator name len is pcdev dev name + 7 char + + * int max digit number (10) + 1 + */ + reg_name_len = strlen(dev_name(pcdev->dev)) + 18; + + /* Register PI regulators */ + for (i = 0; i < pcdev->nr_lines; i++) { + char *reg_name; + + /* Do not register regulator for PIs not described */ + if (!pcdev->no_of_pse_pi && !pcdev->pi[i].np) + continue; + + reg_name = devm_kzalloc(pcdev->dev, reg_name_len, GFP_KERNEL); + if (!reg_name) + return -ENOMEM; + + snprintf(reg_name, reg_name_len, "pse-%s_pi%d", + dev_name(pcdev->dev), i); + + ret = devm_pse_pi_regulator_register(pcdev, reg_name, i); + if (ret) + return ret; + } + mutex_lock(&pse_list_mutex); list_add(&pcdev->list, &pse_controller_list); mutex_unlock(&pse_list_mutex); @@ -297,6 +435,10 @@ static void __pse_control_release(struct kref *kref) lockdep_assert_held(&pse_list_mutex); + if (psec->pcdev->pi[psec->id].admin_state_enabled) + regulator_disable(psec->ps); + devm_regulator_put(psec->ps); + module_put(psec->pcdev->owner); list_del(&psec->list); @@ -329,6 +471,7 @@ static struct pse_control * pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index) { struct pse_control *psec; + int ret; lockdep_assert_held(&pse_list_mutex); @@ -344,16 +487,38 @@ pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index) return ERR_PTR(-ENOMEM); if (!try_module_get(pcdev->owner)) { - kfree(psec); - return ERR_PTR(-ENODEV); + ret = -ENODEV; + goto free_psec; } + psec->ps = devm_regulator_get_exclusive(pcdev->dev, + rdev_get_name(pcdev->pi[index].rdev)); + if (IS_ERR(psec->ps)) { + ret = PTR_ERR(psec->ps); + goto put_module; + } + + ret = regulator_is_enabled(psec->ps); + if (ret < 0) + goto regulator_put; + + pcdev->pi[index].admin_state_enabled = ret; + psec->pcdev = pcdev; list_add(&psec->list, &pcdev->pse_control_head); psec->id = index; kref_init(&psec->refcnt); return psec; + +regulator_put: + devm_regulator_put(psec->ps); +put_module: + module_put(pcdev->owner); +free_psec: + kfree(psec); + + return ERR_PTR(ret); } /** @@ -486,6 +651,54 @@ int pse_ethtool_get_status(struct pse_control *psec, } EXPORT_SYMBOL_GPL(pse_ethtool_get_status); +static int 
pse_ethtool_c33_set_config(struct pse_control *psec, + const struct pse_control_config *config) +{ + int err = 0; + + /* Look at admin_state_enabled status to not call regulator_enable + * or regulator_disable twice creating a regulator counter mismatch + */ + switch (config->c33_admin_control) { + case ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED: + if (!psec->pcdev->pi[psec->id].admin_state_enabled) + err = regulator_enable(psec->ps); + break; + case ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED: + if (psec->pcdev->pi[psec->id].admin_state_enabled) + err = regulator_disable(psec->ps); + break; + default: + err = -EOPNOTSUPP; + } + + return err; +} + +static int pse_ethtool_podl_set_config(struct pse_control *psec, + const struct pse_control_config *config) +{ + int err = 0; + + /* Look at admin_state_enabled status to not call regulator_enable + * or regulator_disable twice creating a regulator counter mismatch + */ + switch (config->podl_admin_control) { + case ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED: + if (!psec->pcdev->pi[psec->id].admin_state_enabled) + err = regulator_enable(psec->ps); + break; + case ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED: + if (psec->pcdev->pi[psec->id].admin_state_enabled) + err = regulator_disable(psec->ps); + break; + default: + err = -EOPNOTSUPP; + } + + return err; +} + /** * pse_ethtool_set_config - set PSE control configuration * @psec: PSE control pointer @@ -496,20 +709,16 @@ int pse_ethtool_set_config(struct pse_control *psec, struct netlink_ext_ack *extack, const struct pse_control_config *config) { - const struct pse_controller_ops *ops; - int err; - - ops = psec->pcdev->ops; + int err = 0; - if (!ops->ethtool_set_config) { - NL_SET_ERR_MSG(extack, - "PSE driver does not configuration"); - return -EOPNOTSUPP; + if (pse_has_c33(psec)) { + err = pse_ethtool_c33_set_config(psec, config); + if (err) + return err; } - mutex_lock(&psec->pcdev->lock); - err = ops->ethtool_set_config(psec->pcdev, psec->id, extack, config); - mutex_unlock(&psec->pcdev->lock); + if (pse_has_podl(psec)) + err = pse_ethtool_podl_set_config(psec, config); return err; } diff --git a/drivers/net/pse-pd/pse_regulator.c b/drivers/net/pse-pd/pse_regulator.c index 547af384764b..64ab36974fe0 100644 --- a/drivers/net/pse-pd/pse_regulator.c +++ b/drivers/net/pse-pd/pse_regulator.c @@ -24,37 +24,41 @@ static struct pse_reg_priv *to_pse_reg(struct pse_controller_dev *pcdev) } static int -pse_reg_ethtool_set_config(struct pse_controller_dev *pcdev, unsigned long id, - struct netlink_ext_ack *extack, - const struct pse_control_config *config) +pse_reg_pi_enable(struct pse_controller_dev *pcdev, int id) { struct pse_reg_priv *priv = to_pse_reg(pcdev); int ret; - if (priv->admin_state == config->podl_admin_control) - return 0; - - switch (config->podl_admin_control) { - case ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED: - ret = regulator_enable(priv->ps); - break; - case ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED: - ret = regulator_disable(priv->ps); - break; - default: - dev_err(pcdev->dev, "Unknown admin state %i\n", - config->podl_admin_control); - ret = -ENOTSUPP; - } - + ret = regulator_enable(priv->ps); if (ret) return ret; - priv->admin_state = config->podl_admin_control; + priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED; + return 0; +} + +static int +pse_reg_pi_disable(struct pse_controller_dev *pcdev, int id) +{ + struct pse_reg_priv *priv = to_pse_reg(pcdev); + int ret; + ret = regulator_disable(priv->ps); + if (ret) + return ret; + + priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED; return 0; } +static 
int +pse_reg_pi_is_enabled(struct pse_controller_dev *pcdev, int id) +{ + struct pse_reg_priv *priv = to_pse_reg(pcdev); + + return regulator_is_enabled(priv->ps); +} + static int pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id, struct netlink_ext_ack *extack, @@ -80,7 +84,9 @@ pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id, static const struct pse_controller_ops pse_reg_ops = { .ethtool_get_status = pse_reg_ethtool_get_status, - .ethtool_set_config = pse_reg_ethtool_set_config, + .pi_enable = pse_reg_pi_enable, + .pi_is_enabled = pse_reg_pi_is_enabled, + .pi_disable = pse_reg_pi_disable, }; static int diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h index fa0c73da0cf1..6d07c95dabb9 100644 --- a/include/linux/pse-pd/pse.h +++ b/include/linux/pse-pd/pse.h @@ -48,17 +48,20 @@ struct pse_control_status { * struct pse_controller_ops - PSE controller driver callbacks * * @ethtool_get_status: get PSE control status for ethtool interface - * @ethtool_set_config: set PSE control configuration over ethtool interface * @setup_pi_matrix: setup PI matrix of the PSE controller + * @pi_is_enabled: Return 1 if the PSE PI is enabled, 0 if not. + * May also return negative errno. + * @pi_enable: Configure the PSE PI as enabled. + * @pi_disable: Configure the PSE PI as disabled. */ struct pse_controller_ops { int (*ethtool_get_status)(struct pse_controller_dev *pcdev, unsigned long id, struct netlink_ext_ack *extack, struct pse_control_status *status); - int (*ethtool_set_config)(struct pse_controller_dev *pcdev, - unsigned long id, struct netlink_ext_ack *extack, - const struct pse_control_config *config); int (*setup_pi_matrix)(struct pse_controller_dev *pcdev); + int (*pi_is_enabled)(struct pse_controller_dev *pcdev, int id); + int (*pi_enable)(struct pse_controller_dev *pcdev, int id); + int (*pi_disable)(struct pse_controller_dev *pcdev, int id); }; struct module; @@ -90,10 +93,14 @@ struct pse_pi_pairset { * * @pairset: table of the PSE PI pinout alternative for the two pairset * @np: device node pointer of the PSE PI node + * @rdev: regulator represented by the PSE PI + * @admin_state_enabled: PI enabled state */ struct pse_pi { struct pse_pi_pairset pairset[2]; struct device_node *np; + struct regulator_dev *rdev; + bool admin_state_enabled; }; /** -- cgit From c936e287df26929ad6b5fd7fef22b356e434e90e Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Mon, 12 Feb 2024 10:48:34 +0200 Subject: thunderbolt: Get rid of TB_CFG_PKG_PREPARE_TO_SLEEP This is not used anywhere in the driver so remove it. No functional changes. 
Signed-off-by: Mika Westerberg --- drivers/thunderbolt/tb_msgs.h | 6 ------ include/linux/thunderbolt.h | 1 - 2 files changed, 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h index cd750e4b3440..a1670a96cbdc 100644 --- a/drivers/thunderbolt/tb_msgs.h +++ b/drivers/thunderbolt/tb_msgs.h @@ -98,12 +98,6 @@ struct cfg_reset_pkg { struct tb_cfg_header header; } __packed; -/* TB_CFG_PKG_PREPARE_TO_SLEEP */ -struct cfg_pts_pkg { - struct tb_cfg_header header; - u32 data; -} __packed; - /* ICM messages */ enum icm_pkg_code { diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index 4338ea9ac4fd..7d902d8c054b 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -33,7 +33,6 @@ enum tb_cfg_pkg_type { TB_CFG_PKG_ICM_EVENT = 10, TB_CFG_PKG_ICM_CMD = 11, TB_CFG_PKG_ICM_RESP = 12, - TB_CFG_PKG_PREPARE_TO_SLEEP = 13, }; /** -- cgit From 2887af4d22f90bcddd3ff9a6eb93ecdaab3acd94 Mon Sep 17 00:00:00 2001 From: Miri Korenblit Date: Tue, 16 Apr 2024 13:53:58 +0300 Subject: wifi: iwlwifi: mvm: implement link grading For selecting what link(s) out of the usable ones to activate, calculate a grade for a given link. Calculation of a link grade is done as follows: 1. get the estimated throughput according to the RSSI of the link, this will be the base grade 2. get the channel load from the BSS Load Element, subtracting the load caused by us. Apply the factor on the grade. 3. puncturing factor: calculate the percentage of the punctured subchannels (out of the total subchannels). Apply this on the grade. The link grading will be used by the link selection mechanism in a later patch. Also add KUnit tests for it. Signed-off-by: Miri Korenblit Reviewed-by: Johannes Berg Link: https://msgid.link/20240416134215.a6799dbd5643.If137ca6dc443606c7d8c99ec1fc38b325003a7c1@changeid Signed-off-by: Johannes Berg --- drivers/net/wireless/intel/iwlwifi/mvm/Makefile | 3 +- drivers/net/wireless/intel/iwlwifi/mvm/link.c | 172 +++++++++++++++++ drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 1 + .../net/wireless/intel/iwlwifi/mvm/tests/Makefile | 3 + .../net/wireless/intel/iwlwifi/mvm/tests/links.c | 210 +++++++++++++++++++++ .../net/wireless/intel/iwlwifi/mvm/tests/module.c | 10 + include/linux/ieee80211.h | 18 ++ 7 files changed, 416 insertions(+), 1 deletion(-) create mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile create mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c create mode 100644 drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c (limited to 'include/linux') diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile index 593fe28d89cf..5c754b87ea20 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_IWLMVM) += iwlmvm.o +obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/ iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o iwlmvm-y += scan.o time-event.o rs.o rs-fw.o @@ -15,4 +16,4 @@ iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o iwlmvm-$(CONFIG_PM) += d3.o iwlmvm-$(CONFIG_IWLMEI) += vendor-cmd.o -ccflags-y += -I $(srctree)/$(src)/../ +subdir-ccflags-y += -I $(srctree)/$(src)/../ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c index 9f69e04594e4..23660238348b 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/mvm/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c @@ -329,3 +329,175 @@ int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return ret; } + +struct iwl_mvm_rssi_to_grade { + s8 rssi[2]; + u16 grade; +}; + +#define RSSI_TO_GRADE_LINE(_lb, _hb_uhb, _grade) \ + { \ + .rssi = {_lb, _hb_uhb}, \ + .grade = _grade \ + } + +/* + * This array must be sorted by increasing RSSI for proper functionality. + * The grades are actually estimated throughput, represented as fixed-point + * with a scale factor of 1/10. + */ +static const struct iwl_mvm_rssi_to_grade rssi_to_grade_map[] = { + RSSI_TO_GRADE_LINE(-85, -89, 177), + RSSI_TO_GRADE_LINE(-83, -86, 344), + RSSI_TO_GRADE_LINE(-82, -85, 516), + RSSI_TO_GRADE_LINE(-80, -83, 688), + RSSI_TO_GRADE_LINE(-77, -79, 1032), + RSSI_TO_GRADE_LINE(-73, -76, 1376), + RSSI_TO_GRADE_LINE(-70, -74, 1548), + RSSI_TO_GRADE_LINE(-69, -72, 1750), + RSSI_TO_GRADE_LINE(-65, -68, 2064), + RSSI_TO_GRADE_LINE(-61, -66, 2294), + RSSI_TO_GRADE_LINE(-58, -61, 2580), + RSSI_TO_GRADE_LINE(-55, -58, 2868), + RSSI_TO_GRADE_LINE(-46, -55, 3098), + RSSI_TO_GRADE_LINE(-43, -54, 3442) +}; + +#define MAX_GRADE (rssi_to_grade_map[ARRAY_SIZE(rssi_to_grade_map) - 1].grade) + +#define DEFAULT_CHAN_LOAD_LB 30 +#define DEFAULT_CHAN_LOAD_HB 15 +#define DEFAULT_CHAN_LOAD_UHB 0 + +/* Factors calculation is done with fixed-point with a scaling factor of 1/256 */ +#define SCALE_FACTOR 256 + +/* Convert a percentage from [0,100] to [0,255] */ +#define NORMALIZE_PERCENT_TO_255(percentage) ((percentage) * SCALE_FACTOR / 100) + +static unsigned int +iwl_mvm_get_puncturing_factor(const struct ieee80211_bss_conf *link_conf) +{ + enum nl80211_chan_width chan_width = + link_conf->chanreq.oper.width; + int mhz = nl80211_chan_width_to_mhz(chan_width); + unsigned int n_subchannels, n_punctured, puncturing_penalty; + + if (WARN_ONCE(mhz < 20 || mhz > 320, + "Invalid channel width : (%d)\n", mhz)) + return SCALE_FACTOR; + + /* No puncturing, no penalty */ + if (mhz < 80) + return SCALE_FACTOR; + + /* total number of subchannels */ + n_subchannels = mhz / 20; + /* how many of these are punctured */ + n_punctured = hweight16(link_conf->chanreq.oper.punctured); + + puncturing_penalty = n_punctured * SCALE_FACTOR / n_subchannels; + return SCALE_FACTOR - puncturing_penalty; +} + +static unsigned int +iwl_mvm_get_chan_load_factor(struct ieee80211_bss_conf *link_conf) +{ + struct iwl_mvm_vif_link_info *mvm_link = + iwl_mvm_vif_from_mac80211(link_conf->vif)->link[link_conf->link_id]; + const struct element *bss_load_elem = + ieee80211_bss_get_elem(link_conf->bss, WLAN_EID_QBSS_LOAD); + const struct ieee80211_bss_load_elem *bss_load; + enum nl80211_band band = link_conf->chanreq.oper.chan->band; + unsigned int chan_load; + u32 chan_load_by_us; + + /* If there isn't BSS Load element, take the defaults */ + if (!bss_load_elem || + bss_load_elem->datalen != sizeof(*bss_load)) { + switch (band) { + case NL80211_BAND_2GHZ: + chan_load = DEFAULT_CHAN_LOAD_LB; + break; + case NL80211_BAND_5GHZ: + chan_load = DEFAULT_CHAN_LOAD_HB; + break; + case NL80211_BAND_6GHZ: + chan_load = DEFAULT_CHAN_LOAD_UHB; + break; + default: + chan_load = 0; + break; + } + /* The defaults are given in percentage */ + return SCALE_FACTOR - NORMALIZE_PERCENT_TO_255(chan_load); + } + + bss_load = (const void *)bss_load_elem->data; + /* Channel util is in range 0-255 */ + chan_load = bss_load->channel_util; + + if (!mvm_link || !mvm_link->active) + goto done; + + if 
(WARN_ONCE(!mvm_link->phy_ctxt, + "Active link (%u) without phy ctxt assigned!\n", + link_conf->link_id)) + goto done; + + /* channel load by us is given in percentage */ + chan_load_by_us = + NORMALIZE_PERCENT_TO_255(mvm_link->phy_ctxt->channel_load_by_us); + + /* Use only values that firmware sends that can possibly be valid */ + if (chan_load_by_us <= chan_load) + chan_load -= chan_load_by_us; +done: + return SCALE_FACTOR - chan_load; +} + +/* This function calculates the grade of a link. Returns 0 in error case */ +unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf) +{ + enum nl80211_band band; + int i, rssi_idx; + s32 link_rssi; + unsigned int grade = MAX_GRADE; + + if (WARN_ON_ONCE(!link_conf)) + return 0; + + band = link_conf->chanreq.oper.chan->band; + if (WARN_ONCE(band != NL80211_BAND_2GHZ && + band != NL80211_BAND_5GHZ && + band != NL80211_BAND_6GHZ, + "Invalid band (%u)\n", band)) + return 0; + + link_rssi = MBM_TO_DBM(link_conf->bss->signal); + /* + * For 6 GHz the RSSI of the beacons is lower than + * the RSSI of the data. + */ + if (band == NL80211_BAND_6GHZ) + link_rssi += 4; + + rssi_idx = band == NL80211_BAND_2GHZ ? 0 : 1; + + /* Get grade based on RSSI */ + for (i = 0; i < ARRAY_SIZE(rssi_to_grade_map); i++) { + const struct iwl_mvm_rssi_to_grade *line = + &rssi_to_grade_map[i]; + + if (link_rssi > line->rssi[rssi_idx]) + continue; + grade = line->grade; + break; + } + + /* apply the channel load and puncturing factors */ + grade = grade * iwl_mvm_get_chan_load_factor(link_conf) / SCALE_FACTOR; + grade = grade * iwl_mvm_get_puncturing_factor(link_conf) / SCALE_FACTOR; + return grade; +} +EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_get_link_grade); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 0dd83e0171ba..c477978d8fa3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -2449,6 +2449,7 @@ u32 iwl_mvm_get_sec_flags(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *keyconf); +unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf); bool iwl_rfi_supported(struct iwl_mvm *mvm); int iwl_rfi_send_config_cmd(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile new file mode 100644 index 000000000000..b13aebbf7d5e --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile @@ -0,0 +1,3 @@ +iwlmvm-tests-y += module.o links.o + +obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmvm-tests.o diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c new file mode 100644 index 000000000000..321d18de1ca3 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * KUnit tests for channel helper functions + * + * Copyright (C) 2024 Intel Corporation + */ +#include +#include "../mvm.h" +#include + +MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); + +static struct ieee80211_channel chan_5ghz = { + .band = NL80211_BAND_5GHZ, +}; + +static struct ieee80211_channel chan_6ghz = { + .band = NL80211_BAND_6GHZ, +}; + +static struct ieee80211_channel chan_2ghz = { + .band = NL80211_BAND_2GHZ, +}; + +static struct iwl_mvm_phy_ctxt ctx = {}; + +static struct iwl_mvm_vif_link_info mvm_link = { + .phy_ctxt = &ctx, + .active = true +}; + +static struct 
cfg80211_bss bss = {}; + +static struct ieee80211_bss_conf link_conf = {.bss = &bss}; + +static const struct link_grading_case { + const char *desc; + const struct cfg80211_chan_def chandef; + s32 signal; + s16 channel_util; + int chan_load_by_us; + unsigned int grade; +} link_grading_cases[] = { + { + .desc = "UHB, RSSI below range, no factors", + .chandef = { + .chan = &chan_6ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -100, + .grade = 177, + }, + { + .desc = "LB, RSSI in range, no factors", + .chandef = { + .chan = &chan_2ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -84, + .grade = 344, + }, + { + .desc = "HB, RSSI above range, no factors", + .chandef = { + .chan = &chan_5ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -50, + .grade = 3442, + }, + { + .desc = "HB, BSS Load IE (20 percent), inactive link, no puncturing factor", + .chandef = { + .chan = &chan_5ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -66, + .channel_util = 51, + .grade = 1836, + }, + { + .desc = "LB, BSS Load IE (20 percent), active link, chan_load_by_us=10 percent. No puncturing factor", + .chandef = { + .chan = &chan_2ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -61, + .channel_util = 51, + .chan_load_by_us = 10, + .grade = 2061, + }, + { + .desc = "UHB, BSS Load IE (40 percent), active link, chan_load_by_us=50 (invalid) percent. No puncturing factor", + .chandef = { + .chan = &chan_6ghz, + .width = NL80211_CHAN_WIDTH_20, + }, + .signal = -66, + .channel_util = 102, + .chan_load_by_us = 50, + .grade = 1552, + }, + { .desc = "HB, 80 MHz, no channel load factor, punctured percentage 0", + .chandef = { + .chan = &chan_5ghz, + .width = NL80211_CHAN_WIDTH_80, + .punctured = 0x0000 + }, + .signal = -72, + .grade = 1750, + }, + { .desc = "HB, 160 MHz, no channel load factor, punctured percentage 25", + .chandef = { + .chan = &chan_5ghz, + .width = NL80211_CHAN_WIDTH_160, + .punctured = 0x3 + }, + .signal = -72, + .grade = 1312, + }, + { .desc = "UHB, 320 MHz, no channel load factor, punctured percentage 12.5 (2/16)", + .chandef = { + .chan = &chan_6ghz, + .width = NL80211_CHAN_WIDTH_320, + .punctured = 0x3 + }, + .signal = -72, + .grade = 1806, + }, + { .desc = "HB, 160 MHz, channel load 20, channel load by us 10, punctured percentage 25", + .chandef = { + .chan = &chan_5ghz, + .width = NL80211_CHAN_WIDTH_160, + .punctured = 0x3 + }, + .channel_util = 51, + .chan_load_by_us = 10, + .signal = -72, + .grade = 1179, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(link_grading, link_grading_cases, desc) + +static void setup_link_conf(struct kunit *test) +{ + const struct link_grading_case *params = test->param_value; + size_t vif_size = sizeof(struct ieee80211_vif) + + sizeof(struct iwl_mvm_vif); + struct ieee80211_vif *vif = kunit_kzalloc(test, vif_size, GFP_KERNEL); + struct ieee80211_bss_load_elem *bss_load; + struct element *element; + size_t ies_size = sizeof(struct cfg80211_bss_ies) + sizeof(*bss_load) + sizeof(element); + struct cfg80211_bss_ies *ies; + struct iwl_mvm_vif *mvmvif; + + KUNIT_ASSERT_NOT_NULL(test, vif); + + mvmvif = iwl_mvm_vif_from_mac80211(vif); + if (params->chan_load_by_us > 0) { + ctx.channel_load_by_us = params->chan_load_by_us; + mvmvif->link[0] = &mvm_link; + } + + link_conf.vif = vif; + link_conf.chanreq.oper = params->chandef; + bss.signal = DBM_TO_MBM(params->signal); + + ies = kunit_kzalloc(test, ies_size, GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, ies); + ies->len = sizeof(*bss_load) + sizeof(struct element); + + element = (void *)ies->data; + 
element->datalen = sizeof(*bss_load); + element->id = 11; + + bss_load = (void *)element->data; + bss_load->channel_util = params->channel_util; + + rcu_assign_pointer(bss.ies, ies); +} + +static void test_link_grading(struct kunit *test) +{ + const struct link_grading_case *params = test->param_value; + unsigned int ret; + + setup_link_conf(test); + + rcu_read_lock(); + ret = iwl_mvm_get_link_grade(&link_conf); + rcu_read_unlock(); + + KUNIT_EXPECT_EQ(test, ret, params->grade); + + kunit_kfree(test, link_conf.vif); + RCU_INIT_POINTER(bss.ies, NULL); +} + +static struct kunit_case link_grading_test_cases[] = { + KUNIT_CASE_PARAM(test_link_grading, link_grading_gen_params), + {} +}; + +static struct kunit_suite link_grading = { + .name = "iwlmvm-link-grading", + .test_cases = link_grading_test_cases, +}; + +kunit_test_suite(link_grading); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c new file mode 100644 index 000000000000..f556acbac77f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This is just module boilerplate for the iwlmvm kunit module. + * + * Copyright (C) 2024 Intel Corporation + */ +#include + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("kunit tests for iwlmvm"); diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 7f9b829dcb1e..de2dce743ee2 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1287,6 +1287,24 @@ struct ieee80211_ttlm_elem { u8 optional[]; } __packed; +/** + * struct ieee80211_bss_load_elem - BSS Load elemen + * + * Defined in section 9.4.2.26 in IEEE 802.11-REVme D4.1 + * + * @sta_count: total number of STAs currently associated with the AP. + * @channel_util: Percentage of time that the access point sensed the channel + * was busy. This value is in range [0, 255], the highest value means + * 100% busy. + * @avail_admission_capa: remaining amount of medium time used for admission + * control. + */ +struct ieee80211_bss_load_elem { + __le16 sta_count; + u8 channel_util; + __le16 avail_admission_capa; +} __packed; + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; -- cgit From 2145af01b9cb326b7dd202a4281976fae614183e Mon Sep 17 00:00:00 2001 From: Peng Fan Date: Thu, 18 Apr 2024 09:32:30 +0800 Subject: firmware: arm_scmi: Add basic support for SCMI v3.2 pincontrol protocol Add basic implementation of the SCMI v3.2 pincontrol protocol. 
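The implementation below packs the pin, group and function counts of the PROTOCOL_ATTRIBUTES reply into two 32-bit words and unpacks them with the GET_PINS_NR()/GET_GROUPS_NR()/GET_FUNCTIONS_NR() helpers. As a rough, self-contained illustration of that layout (the sample values are made up; the driver itself works on the little-endian reply words via le32_get_bits()):

.. code-block:: c

	/*
	 * Illustration only: mirrors the bitfield layout used by
	 * scmi_pinctrl_attributes_get() in the diff below.
	 *   attributes_low:  bits [31:16] = number of groups,
	 *                    bits [15:0]  = number of pins
	 *   attributes_high: bits [15:0]  = number of functions
	 */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t attributes_low  = (4u << 16) | 32u; /* 4 groups, 32 pins */
		uint32_t attributes_high = 6u;               /* 6 functions */

		printf("groups=%u pins=%u functions=%u\n",
		       (attributes_low >> 16) & 0xffff,
		       attributes_low & 0xffff,
		       attributes_high & 0xffff);
		return 0;
	}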
Co-developed-by: Oleksii Moisieiev Signed-off-by: Oleksii Moisieiev Co-developed-by: Cristian Marussi Signed-off-by: Cristian Marussi Signed-off-by: Peng Fan Link: https://lore.kernel.org/r/20240418-pinctrl-scmi-v11-3-499dca9864a7@nxp.com Signed-off-by: Sudeep Holla --- drivers/firmware/arm_scmi/Makefile | 3 +- drivers/firmware/arm_scmi/driver.c | 2 + drivers/firmware/arm_scmi/pinctrl.c | 916 ++++++++++++++++++++++++++++++++++ drivers/firmware/arm_scmi/protocols.h | 1 + include/linux/scmi_protocol.h | 84 ++++ 5 files changed, 1005 insertions(+), 1 deletion(-) create mode 100644 drivers/firmware/arm_scmi/pinctrl.c (limited to 'include/linux') diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index a7bc4796519c..fd59f58ce8a2 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -10,7 +10,8 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o -scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o +scmi-protocols-y := base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o +scmi-protocols-y += pinctrl.o scmi-module-objs := $(scmi-driver-y) $(scmi-protocols-y) $(scmi-transport-y) obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-core.o diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index ab5c138e0d09..6b6957f4743f 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -3334,6 +3334,7 @@ static int __init scmi_driver_init(void) scmi_voltage_register(); scmi_system_register(); scmi_powercap_register(); + scmi_pinctrl_register(); return platform_driver_register(&scmi_driver); } @@ -3351,6 +3352,7 @@ static void __exit scmi_driver_exit(void) scmi_voltage_unregister(); scmi_system_unregister(); scmi_powercap_unregister(); + scmi_pinctrl_unregister(); scmi_transports_exit(); diff --git a/drivers/firmware/arm_scmi/pinctrl.c b/drivers/firmware/arm_scmi/pinctrl.c new file mode 100644 index 000000000000..a2a7f880d6a3 --- /dev/null +++ b/drivers/firmware/arm_scmi/pinctrl.c @@ -0,0 +1,916 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Pinctrl Protocol + * + * Copyright (C) 2024 EPAM + * Copyright 2024 NXP + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "common.h" +#include "protocols.h" + +/* Updated only after ALL the mandatory features for that version are merged */ +#define SCMI_PROTOCOL_SUPPORTED_VERSION 0x10000 + +#define GET_GROUPS_NR(x) le32_get_bits((x), GENMASK(31, 16)) +#define GET_PINS_NR(x) le32_get_bits((x), GENMASK(15, 0)) +#define GET_FUNCTIONS_NR(x) le32_get_bits((x), GENMASK(15, 0)) + +#define EXT_NAME_FLAG(x) le32_get_bits((x), BIT(31)) +#define NUM_ELEMS(x) le32_get_bits((x), GENMASK(15, 0)) + +#define REMAINING(x) le32_get_bits((x), GENMASK(31, 16)) +#define RETURNED(x) le32_get_bits((x), GENMASK(11, 0)) + +#define CONFIG_FLAG_MASK GENMASK(19, 18) +#define SELECTOR_MASK GENMASK(17, 16) +#define SKIP_CONFIGS_MASK GENMASK(15, 8) +#define CONFIG_TYPE_MASK GENMASK(7, 0) + +enum scmi_pinctrl_protocol_cmd { + PINCTRL_ATTRIBUTES = 0x3, + PINCTRL_LIST_ASSOCIATIONS = 0x4, + PINCTRL_SETTINGS_GET = 0x5, + PINCTRL_SETTINGS_CONFIGURE = 0x6, + PINCTRL_REQUEST = 0x7, + PINCTRL_RELEASE = 0x8, + PINCTRL_NAME_GET = 
0x9, + PINCTRL_SET_PERMISSIONS = 0xa, +}; + +struct scmi_msg_settings_conf { + __le32 identifier; + __le32 function_id; + __le32 attributes; + __le32 configs[]; +}; + +struct scmi_msg_settings_get { + __le32 identifier; + __le32 attributes; +}; + +struct scmi_resp_settings_get { + __le32 function_selected; + __le32 num_configs; + __le32 configs[]; +}; + +struct scmi_msg_pinctrl_protocol_attributes { + __le32 attributes_low; + __le32 attributes_high; +}; + +struct scmi_msg_pinctrl_attributes { + __le32 identifier; + __le32 flags; +}; + +struct scmi_resp_pinctrl_attributes { + __le32 attributes; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; +}; + +struct scmi_msg_pinctrl_list_assoc { + __le32 identifier; + __le32 flags; + __le32 index; +}; + +struct scmi_resp_pinctrl_list_assoc { + __le32 flags; + __le16 array[]; +}; + +struct scmi_msg_request { + __le32 identifier; + __le32 flags; +}; + +struct scmi_group_info { + char name[SCMI_MAX_STR_SIZE]; + bool present; + u32 *group_pins; + u32 nr_pins; +}; + +struct scmi_function_info { + char name[SCMI_MAX_STR_SIZE]; + bool present; + u32 *groups; + u32 nr_groups; +}; + +struct scmi_pin_info { + char name[SCMI_MAX_STR_SIZE]; + bool present; +}; + +struct scmi_pinctrl_info { + u32 version; + int nr_groups; + int nr_functions; + int nr_pins; + struct scmi_group_info *groups; + struct scmi_function_info *functions; + struct scmi_pin_info *pins; +}; + +static int scmi_pinctrl_attributes_get(const struct scmi_protocol_handle *ph, + struct scmi_pinctrl_info *pi) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_pinctrl_protocol_attributes *attr; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, sizeof(*attr), &t); + if (ret) + return ret; + + attr = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + pi->nr_functions = GET_FUNCTIONS_NR(attr->attributes_high); + pi->nr_groups = GET_GROUPS_NR(attr->attributes_low); + pi->nr_pins = GET_PINS_NR(attr->attributes_low); + if (pi->nr_pins == 0) { + dev_warn(ph->dev, "returned zero pins\n"); + ret = -EINVAL; + } + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_pinctrl_count_get(const struct scmi_protocol_handle *ph, + enum scmi_pinctrl_selector_type type) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + switch (type) { + case PIN_TYPE: + return pi->nr_pins; + case GROUP_TYPE: + return pi->nr_groups; + case FUNCTION_TYPE: + return pi->nr_functions; + default: + return -EINVAL; + } +} + +static int scmi_pinctrl_validate_id(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type) +{ + int value; + + value = scmi_pinctrl_count_get(ph, type); + if (value < 0) + return value; + + if (selector >= value || value == 0) + return -EINVAL; + + return 0; +} + +static int scmi_pinctrl_attributes(const struct scmi_protocol_handle *ph, + enum scmi_pinctrl_selector_type type, + u32 selector, char *name, + u32 *n_elems) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_pinctrl_attributes *tx; + struct scmi_resp_pinctrl_attributes *rx; + bool ext_name_flag; + + if (!name) + return -EINVAL; + + ret = scmi_pinctrl_validate_id(ph, selector, type); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, PINCTRL_ATTRIBUTES, sizeof(*tx), + sizeof(*rx), &t); + if (ret) + return ret; + + tx = t->tx.buf; + rx = t->rx.buf; + tx->identifier = cpu_to_le32(selector); + tx->flags = cpu_to_le32(type); + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + if (n_elems) + *n_elems = NUM_ELEMS(rx->attributes); + + strscpy(name, rx->name, 
SCMI_SHORT_NAME_MAX_SIZE); + + ext_name_flag = !!EXT_NAME_FLAG(rx->attributes); + } + + ph->xops->xfer_put(ph, t); + + if (ret) + return ret; + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. + */ + if (ext_name_flag) + ret = ph->hops->extended_name_get(ph, PINCTRL_NAME_GET, + selector, (u32 *)&type, name, + SCMI_MAX_STR_SIZE); + return ret; +} + +struct scmi_pinctrl_ipriv { + u32 selector; + enum scmi_pinctrl_selector_type type; + u32 *array; +}; + +static void iter_pinctrl_assoc_prepare_message(void *message, + u32 desc_index, + const void *priv) +{ + struct scmi_msg_pinctrl_list_assoc *msg = message; + const struct scmi_pinctrl_ipriv *p = priv; + + msg->identifier = cpu_to_le32(p->selector); + msg->flags = cpu_to_le32(p->type); + msg->index = cpu_to_le32(desc_index); +} + +static int iter_pinctrl_assoc_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + const struct scmi_resp_pinctrl_list_assoc *r = response; + + st->num_returned = RETURNED(r->flags); + st->num_remaining = REMAINING(r->flags); + + return 0; +} + +static int +iter_pinctrl_assoc_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, void *priv) +{ + const struct scmi_resp_pinctrl_list_assoc *r = response; + struct scmi_pinctrl_ipriv *p = priv; + + p->array[st->desc_index + st->loop_idx] = + le16_to_cpu(r->array[st->loop_idx]); + + return 0; +} + +static int scmi_pinctrl_list_associations(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + u16 size, u32 *array) +{ + int ret; + void *iter; + struct scmi_iterator_ops ops = { + .prepare_message = iter_pinctrl_assoc_prepare_message, + .update_state = iter_pinctrl_assoc_update_state, + .process_response = iter_pinctrl_assoc_process_response, + }; + struct scmi_pinctrl_ipriv ipriv = { + .selector = selector, + .type = type, + .array = array, + }; + + if (!array || !size || type == PIN_TYPE) + return -EINVAL; + + ret = scmi_pinctrl_validate_id(ph, selector, type); + if (ret) + return ret; + + iter = ph->hops->iter_response_init(ph, &ops, size, + PINCTRL_LIST_ASSOCIATIONS, + sizeof(struct scmi_msg_pinctrl_list_assoc), + &ipriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + return ph->hops->iter_response_run(iter); +} + +struct scmi_settings_get_ipriv { + u32 selector; + enum scmi_pinctrl_selector_type type; + bool get_all; + unsigned int *nr_configs; + enum scmi_pinctrl_conf_type *config_types; + u32 *config_values; +}; + +static void +iter_pinctrl_settings_get_prepare_message(void *message, u32 desc_index, + const void *priv) +{ + struct scmi_msg_settings_get *msg = message; + const struct scmi_settings_get_ipriv *p = priv; + u32 attributes; + + attributes = FIELD_PREP(SELECTOR_MASK, p->type); + + if (p->get_all) { + attributes |= FIELD_PREP(CONFIG_FLAG_MASK, 1) | + FIELD_PREP(SKIP_CONFIGS_MASK, desc_index); + } else { + attributes |= FIELD_PREP(CONFIG_TYPE_MASK, p->config_types[0]); + } + + msg->attributes = cpu_to_le32(attributes); + msg->identifier = cpu_to_le32(p->selector); +} + +static int +iter_pinctrl_settings_get_update_state(struct scmi_iterator_state *st, + const void *response, void *priv) +{ + const struct scmi_resp_settings_get *r = response; + struct scmi_settings_get_ipriv *p = priv; + + if (p->get_all) { + st->num_returned = le32_get_bits(r->num_configs, GENMASK(7, 0)); + st->num_remaining = le32_get_bits(r->num_configs, GENMASK(31, 24)); + } 
else { + st->num_returned = 1; + st->num_remaining = 0; + } + + return 0; +} + +static int +iter_pinctrl_settings_get_process_response(const struct scmi_protocol_handle *ph, + const void *response, + struct scmi_iterator_state *st, + void *priv) +{ + const struct scmi_resp_settings_get *r = response; + struct scmi_settings_get_ipriv *p = priv; + u32 type = le32_get_bits(r->configs[st->loop_idx * 2], GENMASK(7, 0)); + u32 val = le32_to_cpu(r->configs[st->loop_idx * 2 + 1]); + + if (p->get_all) { + p->config_types[st->desc_index + st->loop_idx] = type; + } else { + if (p->config_types[0] != type) + return -EINVAL; + } + + p->config_values[st->desc_index + st->loop_idx] = val; + ++*p->nr_configs; + + return 0; +} + +static int +scmi_pinctrl_settings_get(const struct scmi_protocol_handle *ph, u32 selector, + enum scmi_pinctrl_selector_type type, + unsigned int *nr_configs, + enum scmi_pinctrl_conf_type *config_types, + u32 *config_values) +{ + int ret; + void *iter; + unsigned int max_configs = *nr_configs; + struct scmi_iterator_ops ops = { + .prepare_message = iter_pinctrl_settings_get_prepare_message, + .update_state = iter_pinctrl_settings_get_update_state, + .process_response = iter_pinctrl_settings_get_process_response, + }; + struct scmi_settings_get_ipriv ipriv = { + .selector = selector, + .type = type, + .get_all = (max_configs > 1), + .nr_configs = nr_configs, + .config_types = config_types, + .config_values = config_values, + }; + + if (!config_types || !config_values || type == FUNCTION_TYPE) + return -EINVAL; + + ret = scmi_pinctrl_validate_id(ph, selector, type); + if (ret) + return ret; + + /* Prepare to count returned configs */ + *nr_configs = 0; + iter = ph->hops->iter_response_init(ph, &ops, max_configs, + PINCTRL_SETTINGS_GET, + sizeof(struct scmi_msg_settings_get), + &ipriv); + if (IS_ERR(iter)) + return PTR_ERR(iter); + + return ph->hops->iter_response_run(iter); +} + +static int scmi_pinctrl_settings_get_one(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + enum scmi_pinctrl_conf_type config_type, + u32 *config_value) +{ + unsigned int nr_configs = 1; + + return scmi_pinctrl_settings_get(ph, selector, type, &nr_configs, + &config_type, config_value); +} + +static int scmi_pinctrl_settings_get_all(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + unsigned int *nr_configs, + enum scmi_pinctrl_conf_type *config_types, + u32 *config_values) +{ + if (!nr_configs || *nr_configs == 0) + return -EINVAL; + + return scmi_pinctrl_settings_get(ph, selector, type, nr_configs, + config_types, config_values); +} + +static int +scmi_pinctrl_settings_conf(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + u32 nr_configs, + enum scmi_pinctrl_conf_type *config_type, + u32 *config_value) +{ + struct scmi_xfer *t; + struct scmi_msg_settings_conf *tx; + u32 attributes; + int ret, i; + u32 configs_in_chunk, conf_num = 0; + u32 chunk; + int max_msg_size = ph->hops->get_max_msg_size(ph); + + if (!config_type || !config_value || type == FUNCTION_TYPE) + return -EINVAL; + + ret = scmi_pinctrl_validate_id(ph, selector, type); + if (ret) + return ret; + + configs_in_chunk = (max_msg_size - sizeof(*tx)) / (sizeof(__le32) * 2); + while (conf_num < nr_configs) { + chunk = (nr_configs - conf_num > configs_in_chunk) ? 
+ configs_in_chunk : nr_configs - conf_num; + + ret = ph->xops->xfer_get_init(ph, PINCTRL_SETTINGS_CONFIGURE, + sizeof(*tx) + + chunk * 2 * sizeof(__le32), 0, &t); + if (ret) + break; + + tx = t->tx.buf; + tx->identifier = cpu_to_le32(selector); + tx->function_id = cpu_to_le32(0xFFFFFFFF); + attributes = FIELD_PREP(GENMASK(1, 0), type) | + FIELD_PREP(GENMASK(9, 2), chunk); + tx->attributes = cpu_to_le32(attributes); + + for (i = 0; i < chunk; i++) { + tx->configs[i * 2] = + cpu_to_le32(config_type[conf_num + i]); + tx->configs[i * 2 + 1] = + cpu_to_le32(config_value[conf_num + i]); + } + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + + if (ret) + break; + + conf_num += chunk; + } + + return ret; +} + +static int scmi_pinctrl_function_select(const struct scmi_protocol_handle *ph, + u32 group, + enum scmi_pinctrl_selector_type type, + u32 function_id) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_settings_conf *tx; + u32 attributes; + + ret = scmi_pinctrl_validate_id(ph, group, type); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, PINCTRL_SETTINGS_CONFIGURE, + sizeof(*tx), 0, &t); + if (ret) + return ret; + + tx = t->tx.buf; + tx->identifier = cpu_to_le32(group); + tx->function_id = cpu_to_le32(function_id); + attributes = FIELD_PREP(GENMASK(1, 0), type) | BIT(10); + tx->attributes = cpu_to_le32(attributes); + + ret = ph->xops->do_xfer(ph, t); + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_pinctrl_request_free(const struct scmi_protocol_handle *ph, + u32 identifier, + enum scmi_pinctrl_selector_type type, + enum scmi_pinctrl_protocol_cmd cmd) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_request *tx; + + if (type == FUNCTION_TYPE) + return -EINVAL; + + if (cmd != PINCTRL_REQUEST && cmd != PINCTRL_RELEASE) + return -EINVAL; + + ret = scmi_pinctrl_validate_id(ph, identifier, type); + if (ret) + return ret; + + ret = ph->xops->xfer_get_init(ph, cmd, sizeof(*tx), 0, &t); + if (ret) + return ret; + + tx = t->tx.buf; + tx->identifier = cpu_to_le32(identifier); + tx->flags = cpu_to_le32(type); + + ret = ph->xops->do_xfer(ph, t); + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_pinctrl_pin_request(const struct scmi_protocol_handle *ph, + u32 pin) +{ + return scmi_pinctrl_request_free(ph, pin, PIN_TYPE, PINCTRL_REQUEST); +} + +static int scmi_pinctrl_pin_free(const struct scmi_protocol_handle *ph, u32 pin) +{ + return scmi_pinctrl_request_free(ph, pin, PIN_TYPE, PINCTRL_RELEASE); +} + +static int scmi_pinctrl_get_group_info(const struct scmi_protocol_handle *ph, + u32 selector, + struct scmi_group_info *group) +{ + int ret; + + ret = scmi_pinctrl_attributes(ph, GROUP_TYPE, selector, group->name, + &group->nr_pins); + if (ret) + return ret; + + if (!group->nr_pins) { + dev_err(ph->dev, "Group %d has 0 elements", selector); + return -ENODATA; + } + + group->group_pins = kmalloc_array(group->nr_pins, + sizeof(*group->group_pins), + GFP_KERNEL); + if (!group->group_pins) + return -ENOMEM; + + ret = scmi_pinctrl_list_associations(ph, selector, GROUP_TYPE, + group->nr_pins, group->group_pins); + if (ret) { + kfree(group->group_pins); + return ret; + } + + group->present = true; + return 0; +} + +static int scmi_pinctrl_get_group_name(const struct scmi_protocol_handle *ph, + u32 selector, const char **name) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + if (!name) + return -EINVAL; + + if (selector >= pi->nr_groups || pi->nr_groups == 0) + return -EINVAL; + + if (!pi->groups[selector].present) { + int ret; 
+ + ret = scmi_pinctrl_get_group_info(ph, selector, + &pi->groups[selector]); + if (ret) + return ret; + } + + *name = pi->groups[selector].name; + + return 0; +} + +static int scmi_pinctrl_group_pins_get(const struct scmi_protocol_handle *ph, + u32 selector, const u32 **pins, + u32 *nr_pins) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + if (!pins || !nr_pins) + return -EINVAL; + + if (selector >= pi->nr_groups || pi->nr_groups == 0) + return -EINVAL; + + if (!pi->groups[selector].present) { + int ret; + + ret = scmi_pinctrl_get_group_info(ph, selector, + &pi->groups[selector]); + if (ret) + return ret; + } + + *pins = pi->groups[selector].group_pins; + *nr_pins = pi->groups[selector].nr_pins; + + return 0; +} + +static int scmi_pinctrl_get_function_info(const struct scmi_protocol_handle *ph, + u32 selector, + struct scmi_function_info *func) +{ + int ret; + + ret = scmi_pinctrl_attributes(ph, FUNCTION_TYPE, selector, func->name, + &func->nr_groups); + if (ret) + return ret; + + if (!func->nr_groups) { + dev_err(ph->dev, "Function %d has 0 elements", selector); + return -ENODATA; + } + + func->groups = kmalloc_array(func->nr_groups, sizeof(*func->groups), + GFP_KERNEL); + if (!func->groups) + return -ENOMEM; + + ret = scmi_pinctrl_list_associations(ph, selector, FUNCTION_TYPE, + func->nr_groups, func->groups); + if (ret) { + kfree(func->groups); + return ret; + } + + func->present = true; + return 0; +} + +static int scmi_pinctrl_get_function_name(const struct scmi_protocol_handle *ph, + u32 selector, const char **name) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + if (!name) + return -EINVAL; + + if (selector >= pi->nr_functions || pi->nr_functions == 0) + return -EINVAL; + + if (!pi->functions[selector].present) { + int ret; + + ret = scmi_pinctrl_get_function_info(ph, selector, + &pi->functions[selector]); + if (ret) + return ret; + } + + *name = pi->functions[selector].name; + return 0; +} + +static int +scmi_pinctrl_function_groups_get(const struct scmi_protocol_handle *ph, + u32 selector, u32 *nr_groups, + const u32 **groups) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + if (!groups || !nr_groups) + return -EINVAL; + + if (selector >= pi->nr_functions || pi->nr_functions == 0) + return -EINVAL; + + if (!pi->functions[selector].present) { + int ret; + + ret = scmi_pinctrl_get_function_info(ph, selector, + &pi->functions[selector]); + if (ret) + return ret; + } + + *groups = pi->functions[selector].groups; + *nr_groups = pi->functions[selector].nr_groups; + + return 0; +} + +static int scmi_pinctrl_mux_set(const struct scmi_protocol_handle *ph, + u32 selector, u32 group) +{ + return scmi_pinctrl_function_select(ph, group, GROUP_TYPE, selector); +} + +static int scmi_pinctrl_get_pin_info(const struct scmi_protocol_handle *ph, + u32 selector, struct scmi_pin_info *pin) +{ + int ret; + + if (!pin) + return -EINVAL; + + ret = scmi_pinctrl_attributes(ph, PIN_TYPE, selector, pin->name, NULL); + if (ret) + return ret; + + pin->present = true; + return 0; +} + +static int scmi_pinctrl_get_pin_name(const struct scmi_protocol_handle *ph, + u32 selector, const char **name) +{ + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + if (!name) + return -EINVAL; + + if (selector >= pi->nr_pins) + return -EINVAL; + + if (!pi->pins[selector].present) { + int ret; + + ret = scmi_pinctrl_get_pin_info(ph, selector, &pi->pins[selector]); + if (ret) + return ret; + } + + *name = pi->pins[selector].name; + + return 0; +} + +static int scmi_pinctrl_name_get(const struct 
scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + const char **name) +{ + switch (type) { + case PIN_TYPE: + return scmi_pinctrl_get_pin_name(ph, selector, name); + case GROUP_TYPE: + return scmi_pinctrl_get_group_name(ph, selector, name); + case FUNCTION_TYPE: + return scmi_pinctrl_get_function_name(ph, selector, name); + default: + return -EINVAL; + } +} + +static const struct scmi_pinctrl_proto_ops pinctrl_proto_ops = { + .count_get = scmi_pinctrl_count_get, + .name_get = scmi_pinctrl_name_get, + .group_pins_get = scmi_pinctrl_group_pins_get, + .function_groups_get = scmi_pinctrl_function_groups_get, + .mux_set = scmi_pinctrl_mux_set, + .settings_get_one = scmi_pinctrl_settings_get_one, + .settings_get_all = scmi_pinctrl_settings_get_all, + .settings_conf = scmi_pinctrl_settings_conf, + .pin_request = scmi_pinctrl_pin_request, + .pin_free = scmi_pinctrl_pin_free, +}; + +static int scmi_pinctrl_protocol_init(const struct scmi_protocol_handle *ph) +{ + int ret; + u32 version; + struct scmi_pinctrl_info *pinfo; + + ret = ph->xops->version_get(ph, &version); + if (ret) + return ret; + + dev_dbg(ph->dev, "Pinctrl Version %d.%d\n", + PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version)); + + pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL); + if (!pinfo) + return -ENOMEM; + + ret = scmi_pinctrl_attributes_get(ph, pinfo); + if (ret) + return ret; + + pinfo->pins = devm_kcalloc(ph->dev, pinfo->nr_pins, + sizeof(*pinfo->pins), GFP_KERNEL); + if (!pinfo->pins) + return -ENOMEM; + + pinfo->groups = devm_kcalloc(ph->dev, pinfo->nr_groups, + sizeof(*pinfo->groups), GFP_KERNEL); + if (!pinfo->groups) + return -ENOMEM; + + pinfo->functions = devm_kcalloc(ph->dev, pinfo->nr_functions, + sizeof(*pinfo->functions), GFP_KERNEL); + if (!pinfo->functions) + return -ENOMEM; + + pinfo->version = version; + + return ph->set_priv(ph, pinfo, version); +} + +static int scmi_pinctrl_protocol_deinit(const struct scmi_protocol_handle *ph) +{ + int i; + struct scmi_pinctrl_info *pi = ph->get_priv(ph); + + /* Free groups_pins allocated in scmi_pinctrl_get_group_info */ + for (i = 0; i < pi->nr_groups; i++) { + if (pi->groups[i].present) { + kfree(pi->groups[i].group_pins); + pi->groups[i].present = false; + } + } + + /* Free groups allocated in scmi_pinctrl_get_function_info */ + for (i = 0; i < pi->nr_functions; i++) { + if (pi->functions[i].present) { + kfree(pi->functions[i].groups); + pi->functions[i].present = false; + } + } + + return 0; +} + +static const struct scmi_protocol scmi_pinctrl = { + .id = SCMI_PROTOCOL_PINCTRL, + .owner = THIS_MODULE, + .instance_init = &scmi_pinctrl_protocol_init, + .instance_deinit = &scmi_pinctrl_protocol_deinit, + .ops = &pinctrl_proto_ops, + .supported_version = SCMI_PROTOCOL_SUPPORTED_VERSION, +}; +DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(pinctrl, scmi_pinctrl) diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h index 314371dc0f65..8e95f53bd7b7 100644 --- a/drivers/firmware/arm_scmi/protocols.h +++ b/drivers/firmware/arm_scmi/protocols.h @@ -370,6 +370,7 @@ void __exit scmi_##name##_unregister(void) \ DECLARE_SCMI_REGISTER_UNREGISTER(base); DECLARE_SCMI_REGISTER_UNREGISTER(clock); DECLARE_SCMI_REGISTER_UNREGISTER(perf); +DECLARE_SCMI_REGISTER_UNREGISTER(pinctrl); DECLARE_SCMI_REGISTER_UNREGISTER(power); DECLARE_SCMI_REGISTER_UNREGISTER(reset); DECLARE_SCMI_REGISTER_UNREGISTER(sensors); diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h index a3addb07e00a..3a9bb5b9a9e8 
100644 --- a/include/linux/scmi_protocol.h +++ b/include/linux/scmi_protocol.h @@ -737,6 +737,89 @@ struct scmi_powercap_proto_ops { u32 *power_thresh_high); }; +enum scmi_pinctrl_selector_type { + PIN_TYPE = 0, + GROUP_TYPE, + FUNCTION_TYPE, +}; + +enum scmi_pinctrl_conf_type { + SCMI_PIN_DEFAULT = 0, + SCMI_PIN_BIAS_BUS_HOLD = 1, + SCMI_PIN_BIAS_DISABLE = 2, + SCMI_PIN_BIAS_HIGH_IMPEDANCE = 3, + SCMI_PIN_BIAS_PULL_UP = 4, + SCMI_PIN_BIAS_PULL_DEFAULT = 5, + SCMI_PIN_BIAS_PULL_DOWN = 6, + SCMI_PIN_DRIVE_OPEN_DRAIN = 7, + SCMI_PIN_DRIVE_OPEN_SOURCE = 8, + SCMI_PIN_DRIVE_PUSH_PULL = 9, + SCMI_PIN_DRIVE_STRENGTH = 10, + SCMI_PIN_INPUT_DEBOUNCE = 11, + SCMI_PIN_INPUT_MODE = 12, + SCMI_PIN_PULL_MODE = 13, + SCMI_PIN_INPUT_VALUE = 14, + SCMI_PIN_INPUT_SCHMITT = 15, + SCMI_PIN_LOW_POWER_MODE = 16, + SCMI_PIN_OUTPUT_MODE = 17, + SCMI_PIN_OUTPUT_VALUE = 18, + SCMI_PIN_POWER_SOURCE = 19, + SCMI_PIN_SLEW_RATE = 20, + SCMI_PIN_OEM_START = 192, + SCMI_PIN_OEM_END = 255, +}; + +/** + * struct scmi_pinctrl_proto_ops - represents the various operations provided + * by SCMI Pinctrl Protocol + * + * @count_get: returns count of the registered elements in given type + * @name_get: returns name by index of given type + * @group_pins_get: returns the set of pins, assigned to the specified group + * @function_groups_get: returns the set of groups, assigned to the specified + * function + * @mux_set: set muxing function for groups of pins + * @settings_get_one: returns one configuration parameter for pin or group + * specified by config_type + * @settings_get_all: returns all configuration parameters for pin or group + * @settings_conf: sets the configuration parameter for pin or group + * @pin_request: aquire pin before selecting mux setting + * @pin_free: frees pin, acquired by request_pin call + */ +struct scmi_pinctrl_proto_ops { + int (*count_get)(const struct scmi_protocol_handle *ph, + enum scmi_pinctrl_selector_type type); + int (*name_get)(const struct scmi_protocol_handle *ph, u32 selector, + enum scmi_pinctrl_selector_type type, + const char **name); + int (*group_pins_get)(const struct scmi_protocol_handle *ph, + u32 selector, const unsigned int **pins, + unsigned int *nr_pins); + int (*function_groups_get)(const struct scmi_protocol_handle *ph, + u32 selector, unsigned int *nr_groups, + const unsigned int **groups); + int (*mux_set)(const struct scmi_protocol_handle *ph, u32 selector, + u32 group); + int (*settings_get_one)(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + enum scmi_pinctrl_conf_type config_type, + u32 *config_value); + int (*settings_get_all)(const struct scmi_protocol_handle *ph, + u32 selector, + enum scmi_pinctrl_selector_type type, + unsigned int *nr_configs, + enum scmi_pinctrl_conf_type *config_types, + u32 *config_values); + int (*settings_conf)(const struct scmi_protocol_handle *ph, + u32 selector, enum scmi_pinctrl_selector_type type, + unsigned int nr_configs, + enum scmi_pinctrl_conf_type *config_type, + u32 *config_value); + int (*pin_request)(const struct scmi_protocol_handle *ph, u32 pin); + int (*pin_free)(const struct scmi_protocol_handle *ph, u32 pin); +}; + /** * struct scmi_notify_ops - represents notifications' operations provided by * SCMI core @@ -842,6 +925,7 @@ enum scmi_std_protocol { SCMI_PROTOCOL_RESET = 0x16, SCMI_PROTOCOL_VOLTAGE = 0x17, SCMI_PROTOCOL_POWERCAP = 0x18, + SCMI_PROTOCOL_PINCTRL = 0x19, }; enum scmi_system_events { -- cgit From c23e2b7103090b05e4d567d8976f99926ea855e9 Mon Sep 17 00:00:00 2001 
From: Sean Christopherson Date: Mon, 22 Jan 2024 15:53:11 -0800 Subject: KVM: Allow page-sized MMU caches to be initialized with custom 64-bit values Add support to MMU caches for initializing a page with a custom 64-bit value, e.g. to pre-fill an entire page table with non-zero PTE values. The functionality will be used by x86 to support Intel's TDX, which needs to set bit 63 in all non-present PTEs in order to prevent !PRESENT page faults from getting reflected into the guest (Intel's EPT Violation #VE architecture made the less than brilliant decision of having the per-PTE behavior be opt-out instead of opt-in). Signed-off-by: Sean Christopherson Signed-off-by: Isaku Yamahata Message-Id: <5919f685f109a1b0ebc6bd8fc4536ee94bcc172d.1705965635.git.isaku.yamahata@intel.com> Reviewed-by: Xiaoyao Li Reviewed-by: Binbin Wu Signed-off-by: Paolo Bonzini --- include/linux/kvm_types.h | 1 + virt/kvm/kvm_main.c | 16 ++++++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index d93f6522b2c3..827ecc0b7e10 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -86,6 +86,7 @@ struct gfn_to_pfn_cache { struct kvm_mmu_memory_cache { gfp_t gfp_zero; gfp_t gfp_custom; + u64 init_value; struct kmem_cache *kmem_cache; int capacity; int nobjs; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 658581d4ad68..38b498669ef9 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -401,12 +401,17 @@ static void kvm_flush_shadow_all(struct kvm *kvm) static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc, gfp_t gfp_flags) { + void *page; + gfp_flags |= mc->gfp_zero; if (mc->kmem_cache) return kmem_cache_alloc(mc->kmem_cache, gfp_flags); - else - return (void *)__get_free_page(gfp_flags); + + page = (void *)__get_free_page(gfp_flags); + if (page && mc->init_value) + memset64(page, mc->init_value, PAGE_SIZE / sizeof(u64)); + return page; } int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min) @@ -421,6 +426,13 @@ int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, if (WARN_ON_ONCE(!capacity)) return -EIO; + /* + * Custom init values can be used only for page allocations, + * and obviously conflict with __GFP_ZERO. + */ + if (WARN_ON_ONCE(mc->init_value && (mc->kmem_cache || mc->gfp_zero))) + return -EIO; + mc->objects = kvmalloc_array(capacity, sizeof(void *), gfp); if (!mc->objects) return -ENOMEM; -- cgit From a094de22e2efc2ec7f540d10d1edb7038f863925 Mon Sep 17 00:00:00 2001 From: Nuno Sa Date: Fri, 19 Apr 2024 10:25:34 +0200 Subject: iio: buffer-dma: add iio_dmaengine_buffer_setup() This brings the DMA buffer API more in line with what we have in the triggered buffer. There's no need of having both devm_iio_dmaengine_buffer_setup() and devm_iio_dmaengine_buffer_alloc(). Hence we introduce the new iio_dmaengine_buffer_setup() that together with devm_iio_dmaengine_buffer_setup() should be all we need. Note that as part of this change iio_dmaengine_buffer_alloc() is again static and the axi-adc was updated accordingly. 
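For reference, the non-devm usage pattern this enables (mirroring the updated axi-adc request_buffer/free_buffer callbacks) looks roughly like the sketch below; the example_* names are hypothetical, the two exported helpers are the ones touched by this patch.

#include <linux/iio/buffer-dmaengine.h>

/* Hypothetical request path: allocate, flag INDIO_BUFFER_HARDWARE and attach. */
static struct iio_buffer *example_request_buffer(struct device *dev,
                                                 struct iio_dev *indio_dev)
{
        return iio_dmaengine_buffer_setup(dev, indio_dev, "rx");
}

/* Hypothetical release path, e.g. from a backend's free_buffer callback. */
static void example_free_buffer(struct iio_buffer *buffer)
{
        iio_dmaengine_buffer_free(buffer);
}
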
Signed-off-by: Nuno Sa Link: https://lore.kernel.org/r/20240419-iio-backend-axi-dac-v4-1-5ca45b4de294@analog.com Signed-off-by: Jonathan Cameron --- drivers/iio/adc/adi-axi-adc.c | 16 +------- drivers/iio/buffer/industrialio-buffer-dmaengine.c | 48 +++++++++------------- include/linux/iio/buffer-dmaengine.h | 5 ++- 3 files changed, 24 insertions(+), 45 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c index 4156639b3c8b..184b36dca6d0 100644 --- a/drivers/iio/adc/adi-axi-adc.c +++ b/drivers/iio/adc/adi-axi-adc.c @@ -124,26 +124,12 @@ static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back, struct iio_dev *indio_dev) { struct adi_axi_adc_state *st = iio_backend_get_priv(back); - struct iio_buffer *buffer; const char *dma_name; - int ret; if (device_property_read_string(st->dev, "dma-names", &dma_name)) dma_name = "rx"; - buffer = iio_dmaengine_buffer_alloc(st->dev, dma_name); - if (IS_ERR(buffer)) { - dev_err(st->dev, "Could not get DMA buffer, %ld\n", - PTR_ERR(buffer)); - return ERR_CAST(buffer); - } - - indio_dev->modes |= INDIO_BUFFER_HARDWARE; - ret = iio_device_attach_buffer(indio_dev, buffer); - if (ret) - return ERR_PTR(ret); - - return buffer; + return iio_dmaengine_buffer_setup(st->dev, indio_dev, dma_name); } static void axi_adc_free_buffer(struct iio_backend *back, diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c index a18c1da292af..97f3116566f5 100644 --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c @@ -159,7 +159,7 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = { * Once done using the buffer iio_dmaengine_buffer_free() should be used to * release it. */ -struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, +static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, const char *channel) { struct dmaengine_buffer *dmaengine_buffer; @@ -210,7 +210,6 @@ err_free: kfree(dmaengine_buffer); return ERR_PTR(ret); } -EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_alloc, IIO_DMAENGINE_BUFFER); /** * iio_dmaengine_buffer_free() - Free dmaengine buffer @@ -230,39 +229,33 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer) } EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER); -static void __devm_iio_dmaengine_buffer_free(void *buffer) -{ - iio_dmaengine_buffer_free(buffer); -} - -/** - * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc() - * @dev: Parent device for the buffer - * @channel: DMA channel name, typically "rx". - * - * This allocates a new IIO buffer which internally uses the DMAengine framework - * to perform its transfers. The parent device will be used to request the DMA - * channel. - * - * The buffer will be automatically de-allocated once the device gets destroyed. 
- */ -static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev, - const char *channel) +struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev, + struct iio_dev *indio_dev, + const char *channel) { struct iio_buffer *buffer; int ret; buffer = iio_dmaengine_buffer_alloc(dev, channel); if (IS_ERR(buffer)) - return buffer; + return ERR_CAST(buffer); + + indio_dev->modes |= INDIO_BUFFER_HARDWARE; - ret = devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free, - buffer); - if (ret) + ret = iio_device_attach_buffer(indio_dev, buffer); + if (ret) { + iio_dmaengine_buffer_free(buffer); return ERR_PTR(ret); + } return buffer; } +EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER); + +static void __devm_iio_dmaengine_buffer_free(void *buffer) +{ + iio_dmaengine_buffer_free(buffer); +} /** * devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device @@ -281,13 +274,12 @@ int devm_iio_dmaengine_buffer_setup(struct device *dev, { struct iio_buffer *buffer; - buffer = devm_iio_dmaengine_buffer_alloc(dev, channel); + buffer = iio_dmaengine_buffer_setup(dev, indio_dev, channel); if (IS_ERR(buffer)) return PTR_ERR(buffer); - indio_dev->modes |= INDIO_BUFFER_HARDWARE; - - return iio_device_attach_buffer(indio_dev, buffer); + return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free, + buffer); } EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER); diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h index cbb8ba957fad..acb60f9a3fff 100644 --- a/include/linux/iio/buffer-dmaengine.h +++ b/include/linux/iio/buffer-dmaengine.h @@ -10,9 +10,10 @@ struct iio_dev; struct device; -struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev, - const char *channel); void iio_dmaengine_buffer_free(struct iio_buffer *buffer); +struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev, + struct iio_dev *indio_dev, + const char *channel); int devm_iio_dmaengine_buffer_setup(struct device *dev, struct iio_dev *indio_dev, const char *channel); -- cgit From 04ae3b1a76b77f98a4a0c8ed2c544007334fc680 Mon Sep 17 00:00:00 2001 From: Paul Cercueil Date: Fri, 19 Apr 2024 10:25:35 +0200 Subject: iio: buffer-dma: Rename iio_dma_buffer_data_available() Change its name to iio_dma_buffer_usage(), as this function can be used both for the .data_available and the .space_available callbacks. Signed-off-by: Paul Cercueil Signed-off-by: Nuno Sa Link: https://lore.kernel.org/r/20240419-iio-backend-axi-dac-v4-2-5ca45b4de294@analog.com Signed-off-by: Jonathan Cameron --- drivers/iio/buffer/industrialio-buffer-dma.c | 11 ++++++----- drivers/iio/buffer/industrialio-buffer-dmaengine.c | 2 +- include/linux/iio/buffer-dma.h | 2 +- 3 files changed, 8 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c index 5610ba67925e..404f9867bdc5 100644 --- a/drivers/iio/buffer/industrialio-buffer-dma.c +++ b/drivers/iio/buffer/industrialio-buffer-dma.c @@ -547,13 +547,14 @@ out_unlock: EXPORT_SYMBOL_GPL(iio_dma_buffer_read); /** - * iio_dma_buffer_data_available() - DMA buffer data_available callback + * iio_dma_buffer_usage() - DMA buffer data_available and + * space_available callback * @buf: Buffer to check for data availability * - * Should be used as the data_available callback for iio_buffer_access_ops - * struct for DMA buffers. 
+ * Should be used as the data_available and space_available callbacks for + * iio_buffer_access_ops struct for DMA buffers. */ -size_t iio_dma_buffer_data_available(struct iio_buffer *buf) +size_t iio_dma_buffer_usage(struct iio_buffer *buf) { struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf); struct iio_dma_buffer_block *block; @@ -586,7 +587,7 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf) return data_available; } -EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available); +EXPORT_SYMBOL_GPL(iio_dma_buffer_usage); /** * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c index 97f3116566f5..df05d66afff9 100644 --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c @@ -117,7 +117,7 @@ static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = { .request_update = iio_dma_buffer_request_update, .enable = iio_dma_buffer_enable, .disable = iio_dma_buffer_disable, - .data_available = iio_dma_buffer_data_available, + .data_available = iio_dma_buffer_usage, .release = iio_dmaengine_buffer_release, .modes = INDIO_BUFFER_HARDWARE, diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h index 18d3702fa95d..52a838ec0e57 100644 --- a/include/linux/iio/buffer-dma.h +++ b/include/linux/iio/buffer-dma.h @@ -132,7 +132,7 @@ int iio_dma_buffer_disable(struct iio_buffer *buffer, struct iio_dev *indio_dev); int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, char __user *user_buffer); -size_t iio_dma_buffer_data_available(struct iio_buffer *buffer); +size_t iio_dma_buffer_usage(struct iio_buffer *buffer); int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd); int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length); int iio_dma_buffer_request_update(struct iio_buffer *buffer); -- cgit From fb09febafd160b7aefd9e61f710a0c50f0472403 Mon Sep 17 00:00:00 2001 From: Paul Cercueil Date: Fri, 19 Apr 2024 10:25:36 +0200 Subject: iio: buffer-dma: Enable buffer write support Adding write support to the buffer-dma code is easy - the write() function basically needs to do the exact same thing as the read() function: dequeue a block, read or write the data, enqueue the block when entirely processed. Therefore, the iio_buffer_dma_read() and the new iio_buffer_dma_write() now both call a function iio_buffer_dma_io(), which will perform this task. Note that we preemptively reset block->bytes_used to the buffer's size in iio_dma_buffer_request_update(), as in the future the iio_dma_buffer_enqueue() function won't reset it. 
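To show where the new entry point plugs in, an output-capable DMA buffer implementation would wire it up roughly as below. This is a sketch, not code from the patch: the ops-table name is hypothetical, and a real implementation also needs a .release callback plus the usual block management around it.

#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/iio.h>

static const struct iio_buffer_access_funcs example_dma_buffer_ops = {
        .read = iio_dma_buffer_read,
        .write = iio_dma_buffer_write,           /* new: userspace -> DMA block */
        .data_available = iio_dma_buffer_usage,
        .space_available = iio_dma_buffer_usage, /* same helper serves both */
        .request_update = iio_dma_buffer_request_update,
        .set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
        .set_length = iio_dma_buffer_set_length,
        .enable = iio_dma_buffer_enable,
        .disable = iio_dma_buffer_disable,
        .modes = INDIO_BUFFER_HARDWARE,
};
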
Signed-off-by: Paul Cercueil Reviewed-by: Alexandru Ardelean Signed-off-by: Nuno Sa Link: https://lore.kernel.org/r/20240419-iio-backend-axi-dac-v4-3-5ca45b4de294@analog.com Signed-off-by: Jonathan Cameron --- drivers/iio/buffer/industrialio-buffer-dma.c | 89 +++++++++++++++++++++++----- include/linux/iio/buffer-dma.h | 2 + 2 files changed, 75 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c index 404f9867bdc5..13b1a858969e 100644 --- a/drivers/iio/buffer/industrialio-buffer-dma.c +++ b/drivers/iio/buffer/industrialio-buffer-dma.c @@ -195,6 +195,18 @@ static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) block->state = IIO_BLOCK_STATE_DONE; } +static void iio_dma_buffer_queue_wake(struct iio_dma_buffer_queue *queue) +{ + __poll_t flags; + + if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) + flags = EPOLLIN | EPOLLRDNORM; + else + flags = EPOLLOUT | EPOLLWRNORM; + + wake_up_interruptible_poll(&queue->buffer.pollq, flags); +} + /** * iio_dma_buffer_block_done() - Indicate that a block has been completed * @block: The completed block @@ -212,7 +224,7 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) spin_unlock_irqrestore(&queue->list_lock, flags); iio_buffer_block_put_atomic(block); - wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); + iio_dma_buffer_queue_wake(queue); } EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done); @@ -241,7 +253,7 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, } spin_unlock_irqrestore(&queue->list_lock, flags); - wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); + iio_dma_buffer_queue_wake(queue); } EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort); @@ -335,8 +347,24 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer) queue->fileio.blocks[i] = block; } - block->state = IIO_BLOCK_STATE_QUEUED; - list_add_tail(&block->head, &queue->incoming); + /* + * block->bytes_used may have been modified previously, e.g. by + * iio_dma_buffer_block_list_abort(). Reset it here to the + * block's so that iio_dma_buffer_io() will work. + */ + block->bytes_used = block->size; + + /* + * If it's an input buffer, mark the block as queued, and + * iio_dma_buffer_enable() will submit it. Otherwise mark it as + * done, which means it's ready to be dequeued. + */ + if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) { + block->state = IIO_BLOCK_STATE_QUEUED; + list_add_tail(&block->head, &queue->incoming); + } else { + block->state = IIO_BLOCK_STATE_DONE; + } } out_unlock: @@ -488,20 +516,12 @@ static struct iio_dma_buffer_block *iio_dma_buffer_dequeue( return block; } -/** - * iio_dma_buffer_read() - DMA buffer read callback - * @buffer: Buffer to read form - * @n: Number of bytes to read - * @user_buffer: Userspace buffer to copy the data to - * - * Should be used as the read callback for iio_buffer_access_ops - * struct for DMA buffers. 
- */ -int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, - char __user *user_buffer) +static int iio_dma_buffer_io(struct iio_buffer *buffer, size_t n, + char __user *user_buffer, bool is_from_user) { struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); struct iio_dma_buffer_block *block; + void *addr; int ret; if (n < buffer->bytes_per_datum) @@ -524,8 +544,13 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, n = rounddown(n, buffer->bytes_per_datum); if (n > block->bytes_used - queue->fileio.pos) n = block->bytes_used - queue->fileio.pos; + addr = block->vaddr + queue->fileio.pos; - if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) { + if (is_from_user) + ret = copy_from_user(addr, user_buffer, n); + else + ret = copy_to_user(user_buffer, addr, n); + if (ret) { ret = -EFAULT; goto out_unlock; } @@ -544,8 +569,40 @@ out_unlock: return ret; } + +/** + * iio_dma_buffer_read() - DMA buffer read callback + * @buffer: Buffer to read form + * @n: Number of bytes to read + * @user_buffer: Userspace buffer to copy the data to + * + * Should be used as the read callback for iio_buffer_access_ops + * struct for DMA buffers. + */ +int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, + char __user *user_buffer) +{ + return iio_dma_buffer_io(buffer, n, user_buffer, false); +} EXPORT_SYMBOL_GPL(iio_dma_buffer_read); +/** + * iio_dma_buffer_write() - DMA buffer write callback + * @buffer: Buffer to read form + * @n: Number of bytes to read + * @user_buffer: Userspace buffer to copy the data from + * + * Should be used as the write callback for iio_buffer_access_ops + * struct for DMA buffers. + */ +int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n, + const char __user *user_buffer) +{ + return iio_dma_buffer_io(buffer, n, + (__force __user char *)user_buffer, true); +} +EXPORT_SYMBOL_GPL(iio_dma_buffer_write); + /** * iio_dma_buffer_usage() - DMA buffer data_available and * space_available callback diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h index 52a838ec0e57..6e27e47077d5 100644 --- a/include/linux/iio/buffer-dma.h +++ b/include/linux/iio/buffer-dma.h @@ -132,6 +132,8 @@ int iio_dma_buffer_disable(struct iio_buffer *buffer, struct iio_dev *indio_dev); int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n, char __user *user_buffer); +int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n, + const char __user *user_buffer); size_t iio_dma_buffer_usage(struct iio_buffer *buffer); int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd); int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length); -- cgit From c1b91566580c245cf1147745d174be5e059ace6b Mon Sep 17 00:00:00 2001 From: Paul Cercueil Date: Fri, 19 Apr 2024 10:25:37 +0200 Subject: iio: buffer-dmaengine: Support specifying buffer direction Update the devm_iio_dmaengine_buffer_setup() function to support specifying the buffer direction. Update the iio_dmaengine_buffer_submit() function to handle input buffers as well as output buffers. 
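Taken together with the previous patches, an output (e.g. DAC) driver can now request a memory-to-device buffer in one call; a hypothetical probe-time snippet (the "tx" channel name is an assumption) would be:

#include <linux/iio/buffer-dmaengine.h>

static int example_setup_tx_buffer(struct device *dev,
                                   struct iio_dev *indio_dev)
{
        /* Attaches a DMA buffer marked IIO_BUFFER_DIRECTION_OUT to the device. */
        return devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, "tx",
                                                   IIO_BUFFER_DIRECTION_OUT);
}
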
Signed-off-by: Paul Cercueil Reviewed-by: Alexandru Ardelean Signed-off-by: Nuno Sa Link: https://lore.kernel.org/r/20240419-iio-backend-axi-dac-v4-4-5ca45b4de294@analog.com Signed-off-by: Jonathan Cameron --- drivers/iio/buffer/industrialio-buffer-dmaengine.c | 44 +++++++++++++++------- include/linux/iio/buffer-dmaengine.h | 25 +++++++++--- 2 files changed, 49 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c index df05d66afff9..951012651018 100644 --- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c +++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c @@ -64,14 +64,25 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue, struct dmaengine_buffer *dmaengine_buffer = iio_buffer_to_dmaengine_buffer(&queue->buffer); struct dma_async_tx_descriptor *desc; + enum dma_transfer_direction dma_dir; + size_t max_size; dma_cookie_t cookie; - block->bytes_used = min(block->size, dmaengine_buffer->max_size); - block->bytes_used = round_down(block->bytes_used, - dmaengine_buffer->align); + max_size = min(block->size, dmaengine_buffer->max_size); + max_size = round_down(max_size, dmaengine_buffer->align); + + if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) { + block->bytes_used = max_size; + dma_dir = DMA_DEV_TO_MEM; + } else { + dma_dir = DMA_MEM_TO_DEV; + } + + if (!block->bytes_used || block->bytes_used > max_size) + return -EINVAL; desc = dmaengine_prep_slave_single(dmaengine_buffer->chan, - block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM, + block->phys_addr, block->bytes_used, dma_dir, DMA_PREP_INTERRUPT); if (!desc) return -ENOMEM; @@ -229,9 +240,10 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer) } EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER); -struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev, - struct iio_dev *indio_dev, - const char *channel) +struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev, + struct iio_dev *indio_dev, + const char *channel, + enum iio_buffer_direction dir) { struct iio_buffer *buffer; int ret; @@ -242,6 +254,8 @@ struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev, indio_dev->modes |= INDIO_BUFFER_HARDWARE; + buffer->direction = dir; + ret = iio_device_attach_buffer(indio_dev, buffer); if (ret) { iio_dmaengine_buffer_free(buffer); @@ -250,7 +264,7 @@ struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev, return buffer; } -EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER); +EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER); static void __devm_iio_dmaengine_buffer_free(void *buffer) { @@ -258,30 +272,32 @@ static void __devm_iio_dmaengine_buffer_free(void *buffer) } /** - * devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device + * devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device * @dev: Parent device for the buffer * @indio_dev: IIO device to which to attach this buffer. * @channel: DMA channel name, typically "rx". + * @dir: Direction of buffer (in or out) * * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc() * and attaches it to an IIO device with iio_device_attach_buffer(). * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the * IIO device. 
*/ -int devm_iio_dmaengine_buffer_setup(struct device *dev, - struct iio_dev *indio_dev, - const char *channel) +int devm_iio_dmaengine_buffer_setup_ext(struct device *dev, + struct iio_dev *indio_dev, + const char *channel, + enum iio_buffer_direction dir) { struct iio_buffer *buffer; - buffer = iio_dmaengine_buffer_setup(dev, indio_dev, channel); + buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, dir); if (IS_ERR(buffer)) return PTR_ERR(buffer); return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free, buffer); } -EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER); +EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER); MODULE_AUTHOR("Lars-Peter Clausen "); MODULE_DESCRIPTION("DMA buffer for the IIO framework"); diff --git a/include/linux/iio/buffer-dmaengine.h b/include/linux/iio/buffer-dmaengine.h index acb60f9a3fff..81d9a19aeb91 100644 --- a/include/linux/iio/buffer-dmaengine.h +++ b/include/linux/iio/buffer-dmaengine.h @@ -7,15 +7,28 @@ #ifndef __IIO_DMAENGINE_H__ #define __IIO_DMAENGINE_H__ +#include + struct iio_dev; struct device; void iio_dmaengine_buffer_free(struct iio_buffer *buffer); -struct iio_buffer *iio_dmaengine_buffer_setup(struct device *dev, - struct iio_dev *indio_dev, - const char *channel); -int devm_iio_dmaengine_buffer_setup(struct device *dev, - struct iio_dev *indio_dev, - const char *channel); +struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev, + struct iio_dev *indio_dev, + const char *channel, + enum iio_buffer_direction dir); + +#define iio_dmaengine_buffer_setup(dev, indio_dev, channel) \ + iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, \ + IIO_BUFFER_DIRECTION_IN) + +int devm_iio_dmaengine_buffer_setup_ext(struct device *dev, + struct iio_dev *indio_dev, + const char *channel, + enum iio_buffer_direction dir); + +#define devm_iio_dmaengine_buffer_setup(dev, indio_dev, channel) \ + devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, \ + IIO_BUFFER_DIRECTION_IN) #endif -- cgit From 87800c4342a29d4e1c378ce72d27e3976d094ffa Mon Sep 17 00:00:00 2001 From: Nuno Sa Date: Fri, 19 Apr 2024 10:25:41 +0200 Subject: iio: backend: add new functionality This adds the needed backend ops for supporting a backend inerfacing with an high speed dac. The new ops are: * data_source_set(); * set_sampling_freq(); * extend_chan_spec(); * ext_info_set(); * ext_info_get(). Also to note the new helpers that are meant to be used by the backends when extending an IIO channel (adding extended info): * iio_backend_ext_info_set(); * iio_backend_ext_info_get(). 
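The intended wiring looks roughly like the sketch below (all example_* names and the "frequency" attribute are hypothetical; the macro, helpers and extend call are the ones introduced here). A backend builds its extended channel info with IIO_BACKEND_EX_INFO(), which forces the generic iio_backend_ext_info_get()/iio_backend_ext_info_set() accessors so the core can route sysfs accesses back to the backend, and the frontend lets the backend extend its channels through iio_backend_extend_chan_spec().

#include <linux/iio/backend.h>
#include <linux/iio/iio.h>

/* Backend side: extended attributes attached from the extend_chan_spec op. */
static const struct iio_chan_spec_ext_info example_backend_ext_info[] = {
        IIO_BACKEND_EX_INFO("frequency", IIO_SEPARATE, 0),
        { }
};

static int example_extend_chan_spec(struct iio_backend *back,
                                    struct iio_chan_spec *chan)
{
        chan->ext_info = example_backend_ext_info;
        return 0;
}

/* Frontend side: let the backend append its attributes to a channel. */
static int example_frontend_setup_channel(struct iio_dev *indio_dev,
                                          struct iio_backend *back,
                                          struct iio_chan_spec *chan)
{
        return iio_backend_extend_chan_spec(indio_dev, back, chan);
}
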
Signed-off-by: Nuno Sa Link: https://lore.kernel.org/r/20240419-iio-backend-axi-dac-v4-8-5ca45b4de294@analog.com Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-backend.c | 179 +++++++++++++++++++++++++++++++++++++ include/linux/iio/backend.h | 49 ++++++++++ 2 files changed, 228 insertions(+) (limited to 'include/linux') diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c index 2fea2bbbe47f..f08ed6d70ae5 100644 --- a/drivers/iio/industrialio-backend.c +++ b/drivers/iio/industrialio-backend.c @@ -43,10 +43,12 @@ #include #include +#include struct iio_backend { struct list_head entry; const struct iio_backend_ops *ops; + struct device *frontend_dev; struct device *dev; struct module *owner; void *priv; @@ -186,6 +188,44 @@ int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan, } EXPORT_SYMBOL_NS_GPL(iio_backend_data_format_set, IIO_BACKEND); +/** + * iio_backend_data_source_set - Select data source + * @back: Backend device + * @chan: Channel number + * @data: Data source + * + * A given backend may have different sources to stream/sync data. This allows + * to choose that source. + * + * RETURNS: + * 0 on success, negative error number on failure. + */ +int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan, + enum iio_backend_data_source data) +{ + if (data >= IIO_BACKEND_DATA_SOURCE_MAX) + return -EINVAL; + + return iio_backend_op_call(back, data_source_set, chan, data); +} +EXPORT_SYMBOL_NS_GPL(iio_backend_data_source_set, IIO_BACKEND); + +/** + * iio_backend_set_sampling_freq - Set channel sampling rate + * @back: Backend device + * @chan: Channel number + * @sample_rate_hz: Sample rate + * + * RETURNS: + * 0 on success, negative error number on failure. + */ +int iio_backend_set_sampling_freq(struct iio_backend *back, unsigned int chan, + u64 sample_rate_hz) +{ + return iio_backend_op_call(back, set_sample_rate, chan, sample_rate_hz); +} +EXPORT_SYMBOL_NS_GPL(iio_backend_set_sampling_freq, IIO_BACKEND); + static void iio_backend_free_buffer(void *arg) { struct iio_backend_buffer_pair *pair = arg; @@ -231,6 +271,143 @@ int devm_iio_backend_request_buffer(struct device *dev, } EXPORT_SYMBOL_NS_GPL(devm_iio_backend_request_buffer, IIO_BACKEND); +static struct iio_backend *iio_backend_from_indio_dev_parent(const struct device *dev) +{ + struct iio_backend *back = ERR_PTR(-ENODEV), *iter; + + /* + * We deliberately go through all backends even after finding a match. + * The reason is that we want to catch frontend devices which have more + * than one backend in which case returning the first we find is bogus. + * For those cases, frontends need to explicitly define + * get_iio_backend() in struct iio_info. + */ + guard(mutex)(&iio_back_lock); + list_for_each_entry(iter, &iio_back_list, entry) { + if (dev == iter->frontend_dev) { + if (!IS_ERR(back)) { + dev_warn(dev, + "Multiple backends! get_iio_backend() needs to be implemented"); + return ERR_PTR(-ENODEV); + } + + back = iter; + } + } + + return back; +} + +/** + * iio_backend_ext_info_get - IIO ext_info read callback + * @indio_dev: IIO device + * @private: Data private to the driver + * @chan: IIO channel + * @buf: Buffer where to place the attribute data + * + * This helper is intended to be used by backends that extend an IIO channel + * (through iio_backend_extend_chan_spec()) with extended info. In that case, + * backends are not supposed to give their own callbacks (as they would not have + * a way to get the backend from indio_dev). 
This is the getter. + * + * RETURNS: + * Number of bytes written to buf, negative error number on failure. + */ +ssize_t iio_backend_ext_info_get(struct iio_dev *indio_dev, uintptr_t private, + const struct iio_chan_spec *chan, char *buf) +{ + struct iio_backend *back; + + /* + * The below should work for the majority of the cases. It will not work + * when one frontend has multiple backends in which case we'll need a + * new callback in struct iio_info so we can directly request the proper + * backend from the frontend. Anyways, let's only introduce new options + * when really needed... + */ + back = iio_backend_from_indio_dev_parent(indio_dev->dev.parent); + if (IS_ERR(back)) + return PTR_ERR(back); + + return iio_backend_op_call(back, ext_info_get, private, chan, buf); +} +EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_get, IIO_BACKEND); + +/** + * iio_backend_ext_info_set - IIO ext_info write callback + * @indio_dev: IIO device + * @private: Data private to the driver + * @chan: IIO channel + * @buf: Buffer holding the sysfs attribute + * @len: Buffer length + * + * This helper is intended to be used by backends that extend an IIO channel + * (trough iio_backend_extend_chan_spec()) with extended info. In that case, + * backends are not supposed to give their own callbacks (as they would not have + * a way to get the backend from indio_dev). This is the setter. + * + * RETURNS: + * Buffer length on success, negative error number on failure. + */ +ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private, + const struct iio_chan_spec *chan, + const char *buf, size_t len) +{ + struct iio_backend *back; + + back = iio_backend_from_indio_dev_parent(indio_dev->dev.parent); + if (IS_ERR(back)) + return PTR_ERR(back); + + return iio_backend_op_call(back, ext_info_set, private, chan, buf, len); +} +EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_set, IIO_BACKEND); + +/** + * iio_backend_extend_chan_spec - Extend an IIO channel + * @indio_dev: IIO device + * @back: Backend device + * @chan: IIO channel + * + * Some backends may have their own functionalities and hence capable of + * extending a frontend's channel. + * + * RETURNS: + * 0 on success, negative error number on failure. + */ +int iio_backend_extend_chan_spec(struct iio_dev *indio_dev, + struct iio_backend *back, + struct iio_chan_spec *chan) +{ + const struct iio_chan_spec_ext_info *frontend_ext_info = chan->ext_info; + const struct iio_chan_spec_ext_info *back_ext_info; + int ret; + + ret = iio_backend_op_call(back, extend_chan_spec, chan); + if (ret) + return ret; + /* + * Let's keep things simple for now. Don't allow to overwrite the + * frontend's extended info. If ever needed, we can support appending + * it. 
+ */ + if (frontend_ext_info && chan->ext_info != frontend_ext_info) + return -EOPNOTSUPP; + if (!chan->ext_info) + return 0; + + /* Don't allow backends to get creative and force their own handlers */ + for (back_ext_info = chan->ext_info; back_ext_info->name; back_ext_info++) { + if (back_ext_info->read != iio_backend_ext_info_get) + return -EINVAL; + if (back_ext_info->write != iio_backend_ext_info_set) + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_NS_GPL(iio_backend_extend_chan_spec, IIO_BACKEND); + static void iio_backend_release(void *arg) { struct iio_backend *back = arg; @@ -263,6 +440,8 @@ static int __devm_iio_backend_get(struct device *dev, struct iio_backend *back) "Could not link to supplier(%s)\n", dev_name(back->dev)); + back->frontend_dev = dev; + dev_dbg(dev, "Found backend(%s) device\n", dev_name(back->dev)); return 0; diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h index a6d79381866e..9d144631134d 100644 --- a/include/linux/iio/backend.h +++ b/include/linux/iio/backend.h @@ -4,6 +4,7 @@ #include +struct iio_chan_spec; struct fwnode_handle; struct iio_backend; struct device; @@ -15,6 +16,26 @@ enum iio_backend_data_type { IIO_BACKEND_DATA_TYPE_MAX }; +enum iio_backend_data_source { + IIO_BACKEND_INTERNAL_CONTINUOS_WAVE, + IIO_BACKEND_EXTERNAL, + IIO_BACKEND_DATA_SOURCE_MAX +}; + +/** + * IIO_BACKEND_EX_INFO - Helper for an IIO extended channel attribute + * @_name: Attribute name + * @_shared: Whether the attribute is shared between all channels + * @_what: Data private to the driver + */ +#define IIO_BACKEND_EX_INFO(_name, _shared, _what) { \ + .name = (_name), \ + .shared = (_shared), \ + .read = iio_backend_ext_info_get, \ + .write = iio_backend_ext_info_set, \ + .private = (_what), \ +} + /** * struct iio_backend_data_fmt - Backend data format * @type: Data type. @@ -35,8 +56,13 @@ struct iio_backend_data_fmt { * @chan_enable: Enable one channel. * @chan_disable: Disable one channel. * @data_format_set: Configure the data format for a specific channel. + * @data_source_set: Configure the data source for a specific channel. + * @set_sample_rate: Configure the sampling rate for a specific channel. * @request_buffer: Request an IIO buffer. * @free_buffer: Free an IIO buffer. + * @extend_chan_spec: Extend an IIO channel. + * @ext_info_set: Extended info setter. + * @ext_info_get: Extended info getter. 
**/ struct iio_backend_ops { int (*enable)(struct iio_backend *back); @@ -45,10 +71,21 @@ struct iio_backend_ops { int (*chan_disable)(struct iio_backend *back, unsigned int chan); int (*data_format_set)(struct iio_backend *back, unsigned int chan, const struct iio_backend_data_fmt *data); + int (*data_source_set)(struct iio_backend *back, unsigned int chan, + enum iio_backend_data_source data); + int (*set_sample_rate)(struct iio_backend *back, unsigned int chan, + u64 sample_rate_hz); struct iio_buffer *(*request_buffer)(struct iio_backend *back, struct iio_dev *indio_dev); void (*free_buffer)(struct iio_backend *back, struct iio_buffer *buffer); + int (*extend_chan_spec)(struct iio_backend *back, + struct iio_chan_spec *chan); + int (*ext_info_set)(struct iio_backend *back, uintptr_t private, + const struct iio_chan_spec *chan, + const char *buf, size_t len); + int (*ext_info_get)(struct iio_backend *back, uintptr_t private, + const struct iio_chan_spec *chan, char *buf); }; int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan); @@ -56,10 +93,22 @@ int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan); int devm_iio_backend_enable(struct device *dev, struct iio_backend *back); int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan, const struct iio_backend_data_fmt *data); +int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan, + enum iio_backend_data_source data); +int iio_backend_set_sampling_freq(struct iio_backend *back, unsigned int chan, + u64 sample_rate_hz); int devm_iio_backend_request_buffer(struct device *dev, struct iio_backend *back, struct iio_dev *indio_dev); +ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private, + const struct iio_chan_spec *chan, + const char *buf, size_t len); +ssize_t iio_backend_ext_info_get(struct iio_dev *indio_dev, uintptr_t private, + const struct iio_chan_spec *chan, char *buf); +int iio_backend_extend_chan_spec(struct iio_dev *indio_dev, + struct iio_backend *back, + struct iio_chan_spec *chan); void *iio_backend_get_priv(const struct iio_backend *conv); struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name); struct iio_backend * -- cgit From bfa858f220ab8c950dd3e1310fee61950d0ecdae Mon Sep 17 00:00:00 2001 From: Thomas Weißschuh Date: Thu, 18 Apr 2024 11:40:08 +0200 Subject: sysctl: treewide: constify ctl_table_header::ctl_table_arg MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To be able to constify instances of struct ctl_tables it is necessary to remove ways through which non-const versions are exposed from the sysctl core. One of these is the ctl_table_arg member of struct ctl_table_header. Constify this reference as a prerequisite for the full constification of struct ctl_table instances. No functional change. Signed-off-by: Thomas Weißschuh Reviewed-by: Kees Cook Signed-off-by: David S. 
Miller --- drivers/net/vrf.c | 2 +- include/linux/sysctl.h | 2 +- ipc/ipc_sysctl.c | 2 +- ipc/mq_sysctl.c | 2 +- kernel/ucount.c | 2 +- net/ax25/sysctl_net_ax25.c | 2 +- net/bridge/br_netfilter_hooks.c | 2 +- net/core/sysctl_net_core.c | 2 +- net/ieee802154/6lowpan/reassembly.c | 2 +- net/ipv4/devinet.c | 2 +- net/ipv4/ip_fragment.c | 2 +- net/ipv4/route.c | 2 +- net/ipv4/sysctl_net_ipv4.c | 2 +- net/ipv4/xfrm4_policy.c | 2 +- net/ipv6/addrconf.c | 2 +- net/ipv6/netfilter/nf_conntrack_reasm.c | 2 +- net/ipv6/reassembly.c | 2 +- net/ipv6/sysctl_net_ipv6.c | 6 +++--- net/ipv6/xfrm6_policy.c | 2 +- net/mpls/af_mpls.c | 4 ++-- net/mptcp/ctrl.c | 2 +- net/netfilter/nf_conntrack_standalone.c | 2 +- net/netfilter/nf_log.c | 2 +- net/sctp/sysctl.c | 2 +- net/smc/smc_sysctl.c | 2 +- net/unix/sysctl_net_unix.c | 2 +- net/xfrm/xfrm_sysctl.c | 2 +- 27 files changed, 30 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index bb95ce43cd97..66f8542f3b18 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -1971,7 +1971,7 @@ static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf) static void vrf_netns_exit_sysctl(struct net *net) { struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id); - struct ctl_table *table; + const struct ctl_table *table; table = nn_vrf->ctl_hdr->ctl_table_arg; unregister_net_sysctl_table(nn_vrf->ctl_hdr); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index ee7d33b89e9e..9413241df962 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -182,7 +182,7 @@ struct ctl_table_header { struct rcu_head rcu; }; struct completion *unregistering; - struct ctl_table *ctl_table_arg; + const struct ctl_table *ctl_table_arg; struct ctl_table_root *root; struct ctl_table_set *set; struct ctl_dir *parent; diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c index 45cb1dabce29..3c3755918d34 100644 --- a/ipc/ipc_sysctl.c +++ b/ipc/ipc_sysctl.c @@ -306,7 +306,7 @@ bool setup_ipc_sysctls(struct ipc_namespace *ns) void retire_ipc_sysctls(struct ipc_namespace *ns) { - struct ctl_table *tbl; + const struct ctl_table *tbl; tbl = ns->ipc_sysctls->ctl_table_arg; unregister_sysctl_table(ns->ipc_sysctls); diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c index 21fba3a6edaf..69c709262f5a 100644 --- a/ipc/mq_sysctl.c +++ b/ipc/mq_sysctl.c @@ -160,7 +160,7 @@ bool setup_mq_sysctls(struct ipc_namespace *ns) void retire_mq_sysctls(struct ipc_namespace *ns) { - struct ctl_table *tbl; + const struct ctl_table *tbl; tbl = ns->mq_sysctls->ctl_table_arg; unregister_sysctl_table(ns->mq_sysctls); diff --git a/kernel/ucount.c b/kernel/ucount.c index 4aa6166cb856..d9e283600f5c 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c @@ -119,7 +119,7 @@ bool setup_userns_sysctls(struct user_namespace *ns) void retire_userns_sysctls(struct user_namespace *ns) { #ifdef CONFIG_SYSCTL - struct ctl_table *tbl; + const struct ctl_table *tbl; tbl = ns->sysctls->ctl_table_arg; unregister_sysctl_table(ns->sysctls); diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c index db66e11e7fe8..e0128dc9def3 100644 --- a/net/ax25/sysctl_net_ax25.c +++ b/net/ax25/sysctl_net_ax25.c @@ -171,7 +171,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev) void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) { struct ctl_table_header *header = ax25_dev->sysheader; - struct ctl_table *table; + const struct ctl_table *table; if (header) { ax25_dev->sysheader = NULL; diff --git a/net/bridge/br_netfilter_hooks.c 
b/net/bridge/br_netfilter_hooks.c index c683d8377862..7948a9e7542c 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -1275,7 +1275,7 @@ static int br_netfilter_sysctl_init_net(struct net *net) static void br_netfilter_sysctl_exit_net(struct net *net, struct brnf_net *brnet) { - struct ctl_table *table = brnet->ctl_hdr->ctl_table_arg; + const struct ctl_table *table = brnet->ctl_hdr->ctl_table_arg; unregister_net_sysctl_table(brnet->ctl_hdr); if (!net_eq(net, &init_net)) diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 6973dda3abda..903ab4a51c17 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -743,7 +743,7 @@ err_dup: static __net_exit void sysctl_core_net_exit(struct net *net) { - struct ctl_table *tbl; + const struct ctl_table *tbl; tbl = net->core.sysctl_hdr->ctl_table_arg; unregister_net_sysctl_table(net->core.sysctl_hdr); diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index 6dd960ec558c..2a983cf450da 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ b/net/ieee802154/6lowpan/reassembly.c @@ -399,7 +399,7 @@ err_alloc: static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net) { - struct ctl_table *table; + const struct ctl_table *table; struct netns_ieee802154_lowpan *ieee802154_lowpan = net_ieee802154_lowpan(net); diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 7a437f0d4190..7592f242336b 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -2749,7 +2749,7 @@ err_alloc_all: static __net_exit void devinet_exit_net(struct net *net) { #ifdef CONFIG_SYSCTL - struct ctl_table *tbl; + const struct ctl_table *tbl; tbl = net->ipv4.forw_hdr->ctl_table_arg; unregister_net_sysctl_table(net->ipv4.forw_hdr); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index fb947d1613fe..534b98a0744a 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -632,7 +632,7 @@ err_alloc: static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net) { - struct ctl_table *table; + const struct ctl_table *table; table = net->ipv4.frags_hdr->ctl_table_arg; unregister_net_sysctl_table(net->ipv4.frags_hdr); diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 6133f1fd848e..2e29ab819967 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -3576,7 +3576,7 @@ err_dup: static __net_exit void sysctl_route_net_exit(struct net *net) { - struct ctl_table *tbl; + const struct ctl_table *tbl; tbl = net->ipv4.route_hdr->ctl_table_arg; unregister_net_sysctl_table(net->ipv4.route_hdr); diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 7e4f16a7dcc1..ce5d19978a26 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -1554,7 +1554,7 @@ err_alloc: static __net_exit void ipv4_sysctl_exit_net(struct net *net) { - struct ctl_table *table; + const struct ctl_table *table; kfree(net->ipv4.sysctl_local_reserved_ports); table = net->ipv4.ipv4_hdr->ctl_table_arg; diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index c33bca2c3841..1dda59e0aeab 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -186,7 +186,7 @@ err_alloc: static __net_exit void xfrm4_net_sysctl_exit(struct net *net) { - struct ctl_table *table; + const struct ctl_table *table; if (!net->ipv4.xfrm4_hdr) return; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 3a92d002abe4..9aa0900abfa1 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -7238,7 +7238,7 @@ out: static void 
__addrconf_sysctl_unregister(struct net *net, struct ipv6_devconf *p, int ifindex) { - struct ctl_table *table; + const struct ctl_table *table; if (!p->sysctl_header) return; diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index d0dcbaca1994..ce8c14d8aff5 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -105,7 +105,7 @@ err_alloc: static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net) { struct nft_ct_frag6_pernet *nf_frag = nf_frag_pernet(net); - struct ctl_table *table; + const struct ctl_table *table; table = nf_frag->nf_frag_frags_hdr->ctl_table_arg; unregister_net_sysctl_table(nf_frag->nf_frag_frags_hdr); diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index acb4f119e11f..ee95cdcc8747 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -487,7 +487,7 @@ err_alloc: static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net) { - struct ctl_table *table; + const struct ctl_table *table; table = net->ipv6.sysctl.frags_hdr->ctl_table_arg; unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr); diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c index 888676163e90..75de55f907b0 100644 --- a/net/ipv6/sysctl_net_ipv6.c +++ b/net/ipv6/sysctl_net_ipv6.c @@ -313,9 +313,9 @@ out_ipv6_table: static void __net_exit ipv6_sysctl_net_exit(struct net *net) { - struct ctl_table *ipv6_table; - struct ctl_table *ipv6_route_table; - struct ctl_table *ipv6_icmp_table; + const struct ctl_table *ipv6_table; + const struct ctl_table *ipv6_route_table; + const struct ctl_table *ipv6_icmp_table; ipv6_table = net->ipv6.sysctl.hdr->ctl_table_arg; ipv6_route_table = net->ipv6.sysctl.route_hdr->ctl_table_arg; diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 42fb6996b077..4891012b692f 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -218,7 +218,7 @@ err_alloc: static void __net_exit xfrm6_net_sysctl_exit(struct net *net) { - struct ctl_table *table; + const struct ctl_table *table; if (!net->ipv6.sysctl.xfrm6_hdr) return; diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 0315b8deed3f..5d2012d1cf4a 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -1422,7 +1422,7 @@ static void mpls_dev_sysctl_unregister(struct net_device *dev, struct mpls_dev *mdev) { struct net *net = dev_net(dev); - struct ctl_table *table; + const struct ctl_table *table; if (!mdev->sysctl) return; @@ -2690,7 +2690,7 @@ static void mpls_net_exit(struct net *net) { struct mpls_route __rcu **platform_label; size_t platform_labels; - struct ctl_table *table; + const struct ctl_table *table; unsigned int index; table = net->mpls.ctl->ctl_table_arg; diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c index 13fe0748dde8..8d661156ab8c 100644 --- a/net/mptcp/ctrl.c +++ b/net/mptcp/ctrl.c @@ -198,7 +198,7 @@ err_alloc: static void mptcp_pernet_del_table(struct mptcp_pernet *pernet) { - struct ctl_table *table = pernet->ctl_table_hdr->ctl_table_arg; + const struct ctl_table *table = pernet->ctl_table_hdr->ctl_table_arg; unregister_net_sysctl_table(pernet->ctl_table_hdr); diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 0ee98ce5b816..bb9dea676ec1 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -1122,7 +1122,7 @@ out_unregister_netfilter: static void nf_conntrack_standalone_fini_sysctl(struct net *net) { struct nf_conntrack_net *cnet 
= nf_ct_pernet(net); - struct ctl_table *table; + const struct ctl_table *table; table = cnet->sysctl_header->ctl_table_arg; unregister_net_sysctl_table(cnet->sysctl_header); diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index 370f8231385c..efedd2f13ac7 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -514,7 +514,7 @@ err_alloc: static void netfilter_log_sysctl_exit(struct net *net) { - struct ctl_table *table; + const struct ctl_table *table; table = net->nf.nf_log_dir_header->ctl_table_arg; unregister_net_sysctl_table(net->nf.nf_log_dir_header); diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index f65d6f92afcb..25bdf17c7262 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -624,7 +624,7 @@ int sctp_sysctl_net_register(struct net *net) void sctp_sysctl_net_unregister(struct net *net) { - struct ctl_table *table; + const struct ctl_table *table; table = net->sctp.sysctl_header->ctl_table_arg; unregister_net_sysctl_table(net->sctp.sysctl_header); diff --git a/net/smc/smc_sysctl.c b/net/smc/smc_sysctl.c index a5946d1b9d60..4e8baa2e7ea4 100644 --- a/net/smc/smc_sysctl.c +++ b/net/smc/smc_sysctl.c @@ -133,7 +133,7 @@ err_alloc: void __net_exit smc_sysctl_net_exit(struct net *net) { - struct ctl_table *table; + const struct ctl_table *table; table = net->smc.smc_hdr->ctl_table_arg; unregister_net_sysctl_table(net->smc.smc_hdr); diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c index 3e84b31c355a..44996af61999 100644 --- a/net/unix/sysctl_net_unix.c +++ b/net/unix/sysctl_net_unix.c @@ -52,7 +52,7 @@ err_alloc: void unix_sysctl_unregister(struct net *net) { - struct ctl_table *table; + const struct ctl_table *table; table = net->unx.ctl->ctl_table_arg; unregister_net_sysctl_table(net->unx.ctl); diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c index 7fdeafc838a7..e972930c292b 100644 --- a/net/xfrm/xfrm_sysctl.c +++ b/net/xfrm/xfrm_sysctl.c @@ -76,7 +76,7 @@ out_kmemdup: void __net_exit xfrm_sysctl_fini(struct net *net) { - struct ctl_table *table; + const struct ctl_table *table; table = net->xfrm.sysctl_hdr->ctl_table_arg; unregister_net_sysctl_table(net->xfrm.sysctl_hdr); -- cgit From e84c60032a39e3267f0b46175d5368da33e214a6 Mon Sep 17 00:00:00 2001 From: Anna-Maria Behnsen Date: Wed, 17 Apr 2024 16:02:29 +0200 Subject: timerqueue: Remove never used function timerqueue_node_expires() This function was introduced with commit 60bda037f1dd ("posix-cpu-timers: Utilize timerqueue for storage") but never used. Remove it. 
Signed-off-by: Anna-Maria Behnsen Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240417140229.19633-1-anna-maria@linutronix.de --- include/linux/timerqueue.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include/linux')
diff --git a/include/linux/timerqueue.h b/include/linux/timerqueue.h index 62973f7d4610..d306d9dd2207 100644 --- a/include/linux/timerqueue.h +++ b/include/linux/timerqueue.h @@ -37,11 +37,6 @@ static inline bool timerqueue_node_queued(struct timerqueue_node *node) return !RB_EMPTY_NODE(&node->node); } -static inline bool timerqueue_node_expires(struct timerqueue_node *node) -{ - return node->expires; -} - static inline void timerqueue_init_head(struct timerqueue_head *head) { head->rb_root = RB_ROOT_CACHED;
-- cgit From 35c8711c8fc4c16ad2749b8314da5829a493e28e Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 5 Mar 2024 07:31:52 -0700 Subject: io_uring/kbuf: add helpers for getting/peeking multiple buffers
Our provided buffer interface only allows selection of a single buffer. Add an API that allows getting/peeking multiple buffers at the same time. This is only implemented for the ring provided buffers. It could be added for the legacy provided buffers as well, but since it's strongly encouraged to use the new interface, let's keep it simpler and just provide it for the new API. The legacy interface will always just select a single buffer.
There are two new main functions: io_buffers_select(), which selects as many buffers as it can. The caller supplies the iovec array, and io_buffers_select() may allocate a bigger array if the 'out_len' being passed in is non-zero and bigger than what fits in the provided iovec. Buffers grabbed with this helper are permanently assigned. io_buffers_peek(), which works like io_buffers_select(), except the buffers can be recycled, if needed. Callers using either of these functions should call io_put_kbufs() rather than io_put_kbuf() at completion time. The peek interface must be called with the ctx locked from peek to completion.
This adds a bit of state for the request: - REQ_F_BUFFERS_COMMIT, which means that the buffers have been peeked and should be committed to the buffer ring head when they are put as part of completion. Prior to this, req->buf_list was cleared to NULL when committed.
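[Editor's note] To make the intended calling convention easier to picture, here is a hypothetical caller sketch. It is not part of the patch: example_issue(), the on-stack iovec size and the 64 KiB cap are made up for illustration, and the snippet assumes the io_uring-internal header ("kbuf.h") introduced below. Only the helpers and flags added by this commit (struct buf_sel_arg, KBUF_MODE_EXPAND, io_buffers_select(), io_put_kbufs()) are used.

/* Hypothetical sketch only -- example_issue() is not part of the patch. */
#include "kbuf.h"

static int example_issue(struct io_kiocb *req, unsigned int issue_flags)
{
	struct iovec fast_iov[8];
	struct buf_sel_arg arg = {
		.iovs		= fast_iov,
		.nr_iovs	= ARRAY_SIZE(fast_iov),
		.max_len	= 64 * 1024,		/* cap on total bytes mapped */
		.mode		= KBUF_MODE_EXPAND,	/* helper may kmalloc a bigger array */
	};
	unsigned int cflags;
	int nr;

	/* Select as many ring provided buffers as are available, up to max_len. */
	nr = io_buffers_select(req, &arg, issue_flags);
	if (nr < 0)
		return nr;

	/*
	 * ... build an iov_iter over arg.iovs[0..nr-1] (arg.out_len bytes in
	 * total) and perform the transfer ...
	 */

	/* Put all selected buffers in one go; the returned flags go into the CQE. */
	cflags = io_put_kbufs(req, nr, issue_flags);
	(void)cflags;

	/* Without KBUF_MODE_FREE, an expanded array is owned by the caller. */
	if (arg.iovs != fast_iov)
		kfree(arg.iovs);
	return 0;
}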
Signed-off-by: Jens Axboe --- include/linux/io_uring_types.h | 3 + io_uring/kbuf.c | 157 ++++++++++++++++++++++++++++++++++++++++- io_uring/kbuf.h | 53 +++++++++++--- 3 files changed, 201 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index c47f412cf18e..7a6b190c7da7 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -472,6 +472,7 @@ enum { REQ_F_CAN_POLL_BIT, REQ_F_BL_EMPTY_BIT, REQ_F_BL_NO_RECYCLE_BIT, + REQ_F_BUFFERS_COMMIT_BIT, /* not a real bit, just to check we're not overflowing the space */ __REQ_F_LAST_BIT, @@ -550,6 +551,8 @@ enum { REQ_F_BL_EMPTY = IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT), /* don't recycle provided buffers for this request */ REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT), + /* buffer ring head needs incrementing on put */ + REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT), }; typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts); diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c index 3846a055df44..d2945c9c812b 100644 --- a/io_uring/kbuf.c +++ b/io_uring/kbuf.c @@ -117,6 +117,27 @@ static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len, return NULL; } +static int io_provided_buffers_select(struct io_kiocb *req, size_t *len, + struct io_buffer_list *bl, + struct iovec *iov) +{ + void __user *buf; + + buf = io_provided_buffer_select(req, len, bl); + if (unlikely(!buf)) + return -ENOBUFS; + + iov[0].iov_base = buf; + iov[0].iov_len = *len; + return 0; +} + +static struct io_uring_buf *io_ring_head_to_buf(struct io_uring_buf_ring *br, + __u16 head, __u16 mask) +{ + return &br->bufs[head & mask]; +} + static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len, struct io_buffer_list *bl, unsigned int issue_flags) @@ -132,11 +153,10 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len, if (head + 1 == tail) req->flags |= REQ_F_BL_EMPTY; - head &= bl->mask; - buf = &br->bufs[head]; + buf = io_ring_head_to_buf(br, head, bl->mask); if (*len == 0 || *len > buf->len) *len = buf->len; - req->flags |= REQ_F_BUFFER_RING; + req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT; req->buf_list = bl; req->buf_index = buf->bid; @@ -151,6 +171,7 @@ static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len, * the transfer completes (or if we get -EAGAIN and must poll of * retry). */ + req->flags &= ~REQ_F_BUFFERS_COMMIT; req->buf_list = NULL; bl->head++; } @@ -177,6 +198,136 @@ void __user *io_buffer_select(struct io_kiocb *req, size_t *len, return ret; } +/* cap it at a reasonable 256, will be one page even for 4K */ +#define PEEK_MAX_IMPORT 256 + +static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg, + struct io_buffer_list *bl) +{ + struct io_uring_buf_ring *br = bl->buf_ring; + struct iovec *iov = arg->iovs; + int nr_iovs = arg->nr_iovs; + __u16 nr_avail, tail, head; + struct io_uring_buf *buf; + + tail = smp_load_acquire(&br->tail); + head = bl->head; + nr_avail = min_t(__u16, tail - head, UIO_MAXIOV); + if (unlikely(!nr_avail)) + return -ENOBUFS; + + buf = io_ring_head_to_buf(br, head, bl->mask); + if (arg->max_len) { + int needed; + + needed = (arg->max_len + buf->len - 1) / buf->len; + needed = min(needed, PEEK_MAX_IMPORT); + if (nr_avail > needed) + nr_avail = needed; + } + + /* + * only alloc a bigger array if we know we have data to map, eg not + * a speculative peek operation. 
+ */ + if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) { + iov = kmalloc_array(nr_avail, sizeof(struct iovec), GFP_KERNEL); + if (unlikely(!iov)) + return -ENOMEM; + if (arg->mode & KBUF_MODE_FREE) + kfree(arg->iovs); + arg->iovs = iov; + nr_iovs = nr_avail; + } else if (nr_avail < nr_iovs) { + nr_iovs = nr_avail; + } + + /* set it to max, if not set, so we can use it unconditionally */ + if (!arg->max_len) + arg->max_len = INT_MAX; + + req->buf_index = buf->bid; + do { + /* truncate end piece, if needed */ + if (buf->len > arg->max_len) + buf->len = arg->max_len; + + iov->iov_base = u64_to_user_ptr(buf->addr); + iov->iov_len = buf->len; + iov++; + + arg->out_len += buf->len; + arg->max_len -= buf->len; + if (!arg->max_len) + break; + + buf = io_ring_head_to_buf(br, ++head, bl->mask); + } while (--nr_iovs); + + if (head == tail) + req->flags |= REQ_F_BL_EMPTY; + + req->flags |= REQ_F_BUFFER_RING; + req->buf_list = bl; + return iov - arg->iovs; +} + +int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg, + unsigned int issue_flags) +{ + struct io_ring_ctx *ctx = req->ctx; + struct io_buffer_list *bl; + int ret = -ENOENT; + + io_ring_submit_lock(ctx, issue_flags); + bl = io_buffer_get_list(ctx, req->buf_index); + if (unlikely(!bl)) + goto out_unlock; + + if (bl->is_buf_ring) { + ret = io_ring_buffers_peek(req, arg, bl); + /* + * Don't recycle these buffers if we need to go through poll. + * Nobody else can use them anyway, and holding on to provided + * buffers for a send/write operation would happen on the app + * side anyway with normal buffers. Besides, we already + * committed them, they cannot be put back in the queue. + */ + if (ret > 0) { + req->flags |= REQ_F_BL_NO_RECYCLE; + req->buf_list->head += ret; + } + } else { + ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs); + } +out_unlock: + io_ring_submit_unlock(ctx, issue_flags); + return ret; +} + +int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg) +{ + struct io_ring_ctx *ctx = req->ctx; + struct io_buffer_list *bl; + int ret; + + lockdep_assert_held(&ctx->uring_lock); + + bl = io_buffer_get_list(ctx, req->buf_index); + if (unlikely(!bl)) + return -ENOENT; + + if (bl->is_buf_ring) { + ret = io_ring_buffers_peek(req, arg, bl); + if (ret > 0) + req->flags |= REQ_F_BUFFERS_COMMIT; + return ret; + } + + /* don't support multiple buffer selections for legacy */ + return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs); +} + static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer_list *bl, unsigned nbufs) { diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h index 5a9635ee0217..b90aca3a57fa 100644 --- a/io_uring/kbuf.h +++ b/io_uring/kbuf.h @@ -41,8 +41,26 @@ struct io_buffer { __u16 bgid; }; +enum { + /* can alloc a bigger vec */ + KBUF_MODE_EXPAND = 1, + /* if bigger vec allocated, free old one */ + KBUF_MODE_FREE = 2, +}; + +struct buf_sel_arg { + struct iovec *iovs; + size_t out_len; + size_t max_len; + int nr_iovs; + int mode; +}; + void __user *io_buffer_select(struct io_kiocb *req, size_t *len, unsigned int issue_flags); +int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg, + unsigned int issue_flags); +int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg); void io_destroy_buffers(struct io_ring_ctx *ctx); int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); @@ -75,7 +93,7 @@ static inline bool io_kbuf_recycle_ring(struct io_kiocb *req) */ if (req->buf_list) { req->buf_index = 
req->buf_list->bgid; - req->flags &= ~REQ_F_BUFFER_RING; + req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT); return true; } return false; @@ -99,11 +117,16 @@ static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags) return false; } -static inline void __io_put_kbuf_ring(struct io_kiocb *req) +static inline void __io_put_kbuf_ring(struct io_kiocb *req, int nr) { - if (req->buf_list) { - req->buf_index = req->buf_list->bgid; - req->buf_list->head++; + struct io_buffer_list *bl = req->buf_list; + + if (bl) { + if (req->flags & REQ_F_BUFFERS_COMMIT) { + bl->head += nr; + req->flags &= ~REQ_F_BUFFERS_COMMIT; + } + req->buf_index = bl->bgid; } req->flags &= ~REQ_F_BUFFER_RING; } @@ -112,7 +135,7 @@ static inline void __io_put_kbuf_list(struct io_kiocb *req, struct list_head *list) { if (req->flags & REQ_F_BUFFER_RING) { - __io_put_kbuf_ring(req); + __io_put_kbuf_ring(req, 1); } else { req->buf_index = req->kbuf->bgid; list_add(&req->kbuf->list, list); @@ -130,8 +153,8 @@ static inline void io_kbuf_drop(struct io_kiocb *req) __io_put_kbuf_list(req, &req->ctx->io_buffers_comp); } -static inline unsigned int io_put_kbuf(struct io_kiocb *req, - unsigned issue_flags) +static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int nbufs, + unsigned issue_flags) { unsigned int ret; @@ -140,9 +163,21 @@ static inline unsigned int io_put_kbuf(struct io_kiocb *req, ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT); if (req->flags & REQ_F_BUFFER_RING) - __io_put_kbuf_ring(req); + __io_put_kbuf_ring(req, nbufs); else __io_put_kbuf(req, issue_flags); return ret; } + +static inline unsigned int io_put_kbuf(struct io_kiocb *req, + unsigned issue_flags) +{ + return __io_put_kbufs(req, 1, issue_flags); +} + +static inline unsigned int io_put_kbufs(struct io_kiocb *req, int nbufs, + unsigned issue_flags) +{ + return __io_put_kbufs(req, nbufs, issue_flags); +} #endif -- cgit From 51da7f68edae38e81543d57fd71811f7481c0472 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 22 Apr 2024 10:03:13 -1000 Subject: workqueue: Use "@..." in function comment to describe variable length argument Previously, it was using "remaining args" without leading "@" which isn't valid. Let's follow snprintf()'s example and use "@...". Signed-off-by: Tejun Heo Reported-by: Stephen Rothwell --- include/linux/workqueue.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 2df1188c0f96..fb3993894536 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -473,7 +473,7 @@ void workqueue_softirq_dead(unsigned int cpu); * @fmt: printf format for the name of the workqueue * @flags: WQ_* flags * @max_active: max in-flight work items, 0 for default - * remaining args: args for @fmt + * @...: args for @fmt * * For a per-cpu workqueue, @max_active limits the number of in-flight work * items for each CPU. e.g. @max_active of 1 indicates that each CPU can be -- cgit From 20516d6e51dd9994afda8d556507cfbe7853384b Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 11 Apr 2024 13:46:14 -0300 Subject: x86: Stop using weak symbols for __iowrite32_copy() Start switching iomap_copy routines over to use #define and arch provided inline/macro functions instead of weak symbols. Inline functions allow more compiler optimization and this is often a driver hot path. 
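[Editor's note] Condensed, the pattern this series adopts looks roughly like the sketch below. It is a simplified composite of the hunks that follow (arch/x86/include/asm/io.h providing the override, lib/iomap_copy.c keeping the generic fallback), not the literal patch:

/* arch header (e.g. asm/io.h): provide an inline copy and claim the name */
static inline void __iowrite32_copy(void __iomem *to, const void *from,
				    size_t count)
{
	asm volatile("rep ; movsl"
		     : "=&c"(count), "=&D"(to), "=&S"(from)
		     : "0"(count), "1"(to), "2"(from)
		     : "memory");
}
#define __iowrite32_copy __iowrite32_copy

/* generic lib/iomap_copy.c: compiled only when no arch override exists */
#ifndef __iowrite32_copy
void __iowrite32_copy(void __iomem *to, const void *from, size_t count)
{
	u32 __iomem *dst = to;
	const u32 *src = from;
	const u32 *end = src + count;

	while (src < end)
		__raw_writel(*src++, dst++);
}
EXPORT_SYMBOL_GPL(__iowrite32_copy);
#endif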
x86 has the only weak implementation for __iowrite32_copy(), so replace it with a static inline containing the same single instruction inline assembly. The compiler will generate the "mov edx,ecx" in a more optimal way. Remove iomap_copy_64.S Link: https://lore.kernel.org/r/1-v3-1893cd8b9369+1925-mlx5_arm_wc_jgg@nvidia.com Acked-by: Arnd Bergmann Signed-off-by: Jason Gunthorpe --- arch/x86/include/asm/io.h | 17 +++++++++++++++++ arch/x86/lib/Makefile | 1 - arch/x86/lib/iomap_copy_64.S | 15 --------------- include/linux/io.h | 5 ++++- lib/iomap_copy.c | 6 +++--- 5 files changed, 24 insertions(+), 20 deletions(-) delete mode 100644 arch/x86/lib/iomap_copy_64.S (limited to 'include/linux') diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 294cd2a40818..4b99ed326b17 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -209,6 +209,23 @@ void memset_io(volatile void __iomem *, int, size_t); #define memcpy_toio memcpy_toio #define memset_io memset_io +#ifdef CONFIG_X86_64 +/* + * Commit 0f07496144c2 ("[PATCH] Add faster __iowrite32_copy routine for + * x86_64") says that circa 2006 rep movsl is noticeably faster than a copy + * loop. + */ +static inline void __iowrite32_copy(void __iomem *to, const void *from, + size_t count) +{ + asm volatile("rep ; movsl" + : "=&c"(count), "=&D"(to), "=&S"(from) + : "0"(count), "1"(to), "2"(from) + : "memory"); +} +#define __iowrite32_copy __iowrite32_copy +#endif + /* * ISA space is 'always mapped' on a typical x86 system, no need to * explicitly ioremap() it. The fact that the ISA IO space is mapped diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile index 6da73513f026..98583a9dbab3 100644 --- a/arch/x86/lib/Makefile +++ b/arch/x86/lib/Makefile @@ -53,7 +53,6 @@ ifneq ($(CONFIG_X86_CMPXCHG64),y) lib-y += atomic64_386_32.o endif else - obj-y += iomap_copy_64.o ifneq ($(CONFIG_GENERIC_CSUM),y) lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o endif diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S deleted file mode 100644 index 6ff2f56cb0f7..000000000000 --- a/arch/x86/lib/iomap_copy_64.S +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright 2006 PathScale, Inc. All Rights Reserved. - */ - -#include - -/* - * override generic version in lib/iomap_copy.c - */ -SYM_FUNC_START(__iowrite32_copy) - movl %edx,%ecx - rep movsl - RET -SYM_FUNC_END(__iowrite32_copy) diff --git a/include/linux/io.h b/include/linux/io.h index 235ba7d80a8f..ce86120ce9d5 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -16,7 +16,10 @@ struct device; struct resource; -__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count); +#ifndef __iowrite32_copy +void __iowrite32_copy(void __iomem *to, const void *from, size_t count); +#endif + void __ioread32_copy(void *to, const void __iomem *from, size_t count); void __iowrite64_copy(void __iomem *to, const void *from, size_t count); diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c index 5de7c04e05ef..8ddcbb53507d 100644 --- a/lib/iomap_copy.c +++ b/lib/iomap_copy.c @@ -16,9 +16,8 @@ * time. Order of access is not guaranteed, nor is a memory barrier * performed afterwards. 
*/ -void __attribute__((weak)) __iowrite32_copy(void __iomem *to, - const void *from, - size_t count) +#ifndef __iowrite32_copy +void __iowrite32_copy(void __iomem *to, const void *from, size_t count) { u32 __iomem *dst = to; const u32 *src = from; @@ -28,6 +27,7 @@ void __attribute__((weak)) __iowrite32_copy(void __iomem *to, __raw_writel(*src++, dst++); } EXPORT_SYMBOL_GPL(__iowrite32_copy); +#endif /** * __ioread32_copy - copy data from MMIO space, in 32-bit units -- cgit From e7bc47b16622d1016b3b77bbdb20fb9e213045f2 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 11 Apr 2024 13:46:16 -0300 Subject: s390: Stop using weak symbols for __iowrite64_copy() Complete switching the __iowriteXX_copy() routines over to use #define and arch provided inline/macro functions instead of weak symbols. S390 has an implementation that simply calls another memcpy function. Inline this so the callers don't have to do two jumps. Link: https://lore.kernel.org/r/3-v3-1893cd8b9369+1925-mlx5_arm_wc_jgg@nvidia.com Acked-by: Niklas Schnelle Acked-by: Arnd Bergmann Signed-off-by: Jason Gunthorpe --- arch/s390/include/asm/io.h | 7 +++++++ arch/s390/pci/pci.c | 6 ------ include/linux/io.h | 3 +++ lib/iomap_copy.c | 7 +++---- 4 files changed, 13 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 00704fc8a54b..0fbc992d7a5e 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h @@ -81,6 +81,13 @@ static inline void __iowrite32_copy(void __iomem *to, const void *from, } #define __iowrite32_copy __iowrite32_copy +static inline void __iowrite64_copy(void __iomem *to, const void *from, + size_t count) +{ + zpci_memcpy_toio(to, from, count * 8); +} +#define __iowrite64_copy __iowrite64_copy + #endif /* CONFIG_PCI */ #include diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 26afde0d1ed3..0de0f6e405b5 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -250,12 +250,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, return 0; } -/* combine single writes by using store-block insn */ -void __iowrite64_copy(void __iomem *to, const void *from, size_t count) -{ - zpci_memcpy_toio(to, from, count * 8); -} - void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size, unsigned long prot) { diff --git a/include/linux/io.h b/include/linux/io.h index ce86120ce9d5..42e132808f00 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -21,7 +21,10 @@ void __iowrite32_copy(void __iomem *to, const void *from, size_t count); #endif void __ioread32_copy(void *to, const void __iomem *from, size_t count); + +#ifndef __iowrite64_copy void __iowrite64_copy(void __iomem *to, const void *from, size_t count); +#endif #ifdef CONFIG_MMU int ioremap_page_range(unsigned long addr, unsigned long end, diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c index 8ddcbb53507d..2fd5712fb7c0 100644 --- a/lib/iomap_copy.c +++ b/lib/iomap_copy.c @@ -60,9 +60,8 @@ EXPORT_SYMBOL_GPL(__ioread32_copy); * time. Order of access is not guaranteed, nor is a memory barrier * performed afterwards. 
*/ -void __attribute__((weak)) __iowrite64_copy(void __iomem *to, - const void *from, - size_t count) +#ifndef __iowrite64_copy +void __iowrite64_copy(void __iomem *to, const void *from, size_t count) { #ifdef CONFIG_64BIT u64 __iomem *dst = to; @@ -75,5 +74,5 @@ void __attribute__((weak)) __iowrite64_copy(void __iomem *to, __iowrite32_copy(to, from, count * 2); #endif } - EXPORT_SYMBOL_GPL(__iowrite64_copy); +#endif -- cgit From eca1e8a62888a31c8dcfad79457232594be40c1a Mon Sep 17 00:00:00 2001 From: Rahul Rameshbabu Date: Fri, 19 Apr 2024 11:04:42 +0300 Subject: net/mlx5e: Use DIM constants for CQ period mode parameter Use core DIM CQ period mode enum values for the CQ parameter for the period mode. Translate the value to the specific mlx5 device constant for the selected period mode when creating a CQ. Avoid needing to translate mlx5 device constants to DIM constants for core DIM functionality. Co-developed-by: Nabil S. Alramli Signed-off-by: Nabil S. Alramli Co-developed-by: Joe Damato Signed-off-by: Joe Damato Signed-off-by: Rahul Rameshbabu Signed-off-by: Tariq Toukan Link: https://lore.kernel.org/r/20240419080445.417574-3-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en/dim.h | 26 +++++++++++++++++ .../net/ethernet/mellanox/mlx5/core/en/params.c | 34 +++++++--------------- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 16 +++------- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 20 ++++--------- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 7 +++-- include/linux/mlx5/mlx5_ifc.h | 4 +-- 6 files changed, 53 insertions(+), 54 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h index cd2cf647c85a..6411ae4c6b94 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h @@ -4,11 +4,37 @@ #ifndef __MLX5_EN_DIM_H__ #define __MLX5_EN_DIM_H__ +#include #include +#include /* Forward declarations */ struct work_struct; +/* convert a boolean value for cqe mode to appropriate dim constant + * true : DIM_CQ_PERIOD_MODE_START_FROM_CQE + * false : DIM_CQ_PERIOD_MODE_START_FROM_EQE + */ +static inline int mlx5e_dim_cq_period_mode(bool start_from_cqe) +{ + return start_from_cqe ? 
DIM_CQ_PERIOD_MODE_START_FROM_CQE : + DIM_CQ_PERIOD_MODE_START_FROM_EQE; +} + +static inline enum mlx5_cq_period_mode +mlx5e_cq_period_mode(enum dim_cq_period_mode cq_period_mode) +{ + switch (cq_period_mode) { + case DIM_CQ_PERIOD_MODE_START_FROM_EQE: + return MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + case DIM_CQ_PERIOD_MODE_START_FROM_CQE: + return MLX5_CQ_PERIOD_MODE_START_FROM_CQE; + default: + WARN_ON_ONCE(true); + return MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + } +} + void mlx5e_rx_dim_work(struct work_struct *work); void mlx5e_tx_dim_work(struct work_struct *work); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index a3f31d9d527e..0424628405e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -6,6 +6,7 @@ #include "en/port.h" #include "en_accel/en_accel.h" #include "en_accel/ipsec.h" +#include #include #include @@ -520,7 +521,7 @@ static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode) moder.cq_period_mode = cq_period_mode; moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; - if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + if (cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE; return moder; @@ -533,39 +534,26 @@ static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode) moder.cq_period_mode = cq_period_mode; moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; - if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE) + if (cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; return moder; } -static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode) -{ - return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ? 
- DIM_CQ_PERIOD_MODE_START_FROM_CQE : - DIM_CQ_PERIOD_MODE_START_FROM_EQE; -} - void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode) { - if (params->tx_dim_enabled) { - u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); - - params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode); - } else { + if (params->tx_dim_enabled) + params->tx_cq_moderation = net_dim_get_def_tx_moderation(cq_period_mode); + else params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); - } } void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode) { - if (params->rx_dim_enabled) { - u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); - - params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode); - } else { + if (params->rx_dim_enabled) + params->rx_cq_moderation = net_dim_get_def_rx_moderation(cq_period_mode); + else params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); - } } void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) @@ -573,7 +561,7 @@ void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) mlx5e_reset_tx_moderation(params, cq_period_mode); MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, params->tx_cq_moderation.cq_period_mode == - MLX5_CQ_PERIOD_MODE_START_FROM_CQE); + DIM_CQ_PERIOD_MODE_START_FROM_CQE); } void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) @@ -581,7 +569,7 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) mlx5e_reset_rx_moderation(params, cq_period_mode); MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, params->rx_cq_moderation.cq_period_mode == - MLX5_CQ_PERIOD_MODE_START_FROM_CQE); + DIM_CQ_PERIOD_MODE_START_FROM_CQE); } bool slow_pci_heuristic(struct mlx5_core_dev *mdev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 75bf7f3d9f25..c07785e675bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -30,6 +30,7 @@ * SOFTWARE. */ +#include #include #include "en.h" @@ -627,15 +628,6 @@ mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coal } } -/* convert a boolean value of cq_mode to mlx5 period mode - * true : MLX5_CQ_PERIOD_MODE_START_FROM_CQE - * false : MLX5_CQ_PERIOD_MODE_START_FROM_EQE - */ -static int cqe_mode_to_period_mode(bool val) -{ - return val ? 
MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; -} - int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal, struct kernel_ethtool_coalesce *kernel_coal, @@ -688,13 +680,13 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled; - cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_rx); + cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_rx); if (cq_period_mode != rx_moder->cq_period_mode) { mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode); reset_rx = true; } - cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_tx); + cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_tx); if (cq_period_mode != tx_moder->cq_period_mode) { mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode); reset_tx = true; @@ -1915,7 +1907,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, if (enable && !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe)) return -EOPNOTSUPP; - cq_period_mode = cqe_mode_to_period_mode(enable); + cq_period_mode = mlx5e_dim_cq_period_mode(enable); current_cq_period_mode = is_rx_cq ? priv->channels.params.rx_cq_moderation.cq_period_mode : diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index cf529f07faf1..12d1f4548343 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -30,6 +30,7 @@ * SOFTWARE. */ +#include #include #include #include @@ -962,15 +963,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, } INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work); - - switch (params->rx_cq_moderation.cq_period_mode) { - case MLX5_CQ_PERIOD_MODE_START_FROM_CQE: - rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE; - break; - case MLX5_CQ_PERIOD_MODE_START_FROM_EQE: - default: - rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; - } + rq->dim.mode = params->rx_cq_moderation.cq_period_mode; return 0; @@ -2090,7 +2083,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); - MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); + MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(param->cq_period_mode)); MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - @@ -5059,13 +5052,12 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); /* CQ moderation params */ - rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? 
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE : - MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + rx_cq_period_mode = + mlx5e_dim_cq_period_mode(MLX5_CAP_GEN(mdev, cq_period_start_from_cqe)); params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode); - mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); + mlx5e_set_tx_cq_mode_params(params, DIM_CQ_PERIOD_MODE_START_FROM_EQE); /* TX inline */ mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 6acecf2e7cf6..309771300581 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -30,6 +30,7 @@ * SOFTWARE. */ +#include #include #include #include @@ -40,6 +41,7 @@ #include "eswitch.h" #include "en.h" +#include "en/dim.h" #include "en_rep.h" #include "en/params.h" #include "en/txrx.h" @@ -836,9 +838,8 @@ static void mlx5e_build_rep_params(struct net_device *netdev) struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_params *params; - u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? - MLX5_CQ_PERIOD_MODE_START_FROM_CQE : - MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + u8 cq_period_mode = + mlx5e_dim_cq_period_mode(MLX5_CAP_GEN(mdev, cq_period_start_from_cqe)); params = &priv->channels.params;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 35ffc9b9f241..8c7ddb22bf20 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -4385,10 +4385,10 @@ enum { MLX5_CQC_ST_FIRED = 0xa, }; -enum { +enum mlx5_cq_period_mode { MLX5_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, MLX5_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, - MLX5_CQ_PERIOD_NUM_MODES + MLX5_CQ_PERIOD_NUM_MODES, }; struct mlx5_ifc_cqc_bits {
-- cgit From 445a25f6e1a2f6a132b06af6ede4f3c9b5f9af68 Mon Sep 17 00:00:00 2001 From: Rahul Rameshbabu Date: Fri, 19 Apr 2024 11:04:44 +0300 Subject: net/mlx5e: Support updating coalescing configuration without resetting channels
When CQE mode or DIM state is changed, gracefully reconfigure channels to handle the new configuration. Previously, the driver would create new channels reflecting the changes rather than update the original channels. Co-developed-by: Nabil S. Alramli Signed-off-by: Nabil S.
Alramli Co-developed-by: Joe Damato Signed-off-by: Joe Damato Signed-off-by: Rahul Rameshbabu Signed-off-by: Tariq Toukan Link: https://lore.kernel.org/r/20240419080445.417574-5-tariqt@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 20 ++ .../net/ethernet/mellanox/mlx5/core/en/channels.c | 83 ++++++++ .../net/ethernet/mellanox/mlx5/core/en/channels.h | 4 + drivers/net/ethernet/mellanox/mlx5/core/en/dim.h | 4 + .../net/ethernet/mellanox/mlx5/core/en/params.c | 58 ------ .../net/ethernet/mellanox/mlx5/core/en/params.h | 5 - drivers/net/ethernet/mellanox/mlx5/core/en_dim.c | 89 ++++++++- .../net/ethernet/mellanox/mlx5/core/en_ethtool.c | 141 ++++++++------ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 210 +++++++++++++++++---- drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 5 +- include/linux/mlx5/cq.h | 7 +- include/linux/mlx5/mlx5_ifc.h | 3 +- 12 files changed, 460 insertions(+), 169 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index c8c0a0614e7b..eb09778327cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -320,6 +320,8 @@ struct mlx5e_params { bool scatter_fcs_en; bool rx_dim_enabled; bool tx_dim_enabled; + bool rx_moder_use_cqe_mode; + bool tx_moder_use_cqe_mode; u32 pflags; struct bpf_prog *xdp_prog; struct mlx5e_xsk *xsk; @@ -797,6 +799,10 @@ struct mlx5e_channel { int cpu; /* Sync between icosq recovery and XSK enable/disable. */ struct mutex icosq_recovery_lock; + + /* coalescing configuration */ + struct dim_cq_moder rx_cq_moder; + struct dim_cq_moder tx_cq_moder; }; struct mlx5e_ptp; @@ -1040,6 +1046,11 @@ void mlx5e_close_rq(struct mlx5e_rq *rq); int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter); void mlx5e_destroy_rq(struct mlx5e_rq *rq); +bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode, + bool dim_enabled); +bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode, + bool dim_enabled, bool keep_dim_state); + struct mlx5e_sq_param; int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool, @@ -1060,6 +1071,10 @@ int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder, struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp, struct mlx5e_cq *cq); void mlx5e_close_cq(struct mlx5e_cq *cq); +int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u8 cq_period_mode); +int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u16 cq_period, u16 cq_max_count, u8 cq_period_mode); int mlx5e_open_locked(struct net_device *netdev); int mlx5e_close_locked(struct net_device *netdev); @@ -1118,6 +1133,11 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev, void mlx5e_tx_err_cqe_work(struct work_struct *recover_work); void mlx5e_close_txqsq(struct mlx5e_txqsq *sq); +bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode, + bool dim_enabled); +bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode, + bool dim_enabled, bool keep_dim_state); + static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev) { return MLX5_CAP_ETH(mdev, swp) && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c index 
874a1016623c..66e719e88503 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c @@ -3,6 +3,7 @@ #include "channels.h" #include "en.h" +#include "en/dim.h" #include "en/ptp.h" unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs) @@ -55,3 +56,85 @@ bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn) *rqn = c->rq.rqn; return true; } + +int mlx5e_channels_rx_change_dim(struct mlx5e_channels *chs, bool enable) +{ + int i; + + for (i = 0; i < chs->num; i++) { + int err = mlx5e_dim_rx_change(&chs->c[i]->rq, enable); + + if (err) + return err; + } + + return 0; +} + +int mlx5e_channels_tx_change_dim(struct mlx5e_channels *chs, bool enable) +{ + int i, tc; + + for (i = 0; i < chs->num; i++) { + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) { + int err = mlx5e_dim_tx_change(&chs->c[i]->sq[tc], enable); + + if (err) + return err; + } + } + + return 0; +} + +int mlx5e_channels_rx_toggle_dim(struct mlx5e_channels *chs) +{ + int i; + + for (i = 0; i < chs->num; i++) { + /* If dim is enabled for the channel, reset the dim state so the + * collected statistics will be reset. This is useful for + * supporting legacy interfaces that allow things like changing + * the CQ period mode for all channels without disturbing + * individual channel configurations. + */ + if (chs->c[i]->rq.dim) { + int err; + + mlx5e_dim_rx_change(&chs->c[i]->rq, false); + err = mlx5e_dim_rx_change(&chs->c[i]->rq, true); + if (err) + return err; + } + } + + return 0; +} + +int mlx5e_channels_tx_toggle_dim(struct mlx5e_channels *chs) +{ + int i, tc; + + for (i = 0; i < chs->num; i++) { + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) { + int err; + + /* If dim is enabled for the channel, reset the dim + * state so the collected statistics will be reset. This + * is useful for supporting legacy interfaces that allow + * things like changing the CQ period mode for all + * channels without disturbing individual channel + * configurations. 
+ */ + if (!chs->c[i]->sq[tc].dim) + continue; + + mlx5e_dim_tx_change(&chs->c[i]->sq[tc], false); + err = mlx5e_dim_tx_change(&chs->c[i]->sq[tc], true); + if (err) + return err; + } + } + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h index 6715aa9383b9..eda80f8c6c02 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h @@ -15,5 +15,9 @@ void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn, u32 *vhca_id); bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn); +int mlx5e_channels_rx_change_dim(struct mlx5e_channels *chs, bool enabled); +int mlx5e_channels_tx_change_dim(struct mlx5e_channels *chs, bool enabled); +int mlx5e_channels_rx_toggle_dim(struct mlx5e_channels *chs); +int mlx5e_channels_tx_toggle_dim(struct mlx5e_channels *chs); #endif /* __MLX5_EN_CHANNELS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h index 6411ae4c6b94..110e2c6b7e51 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h @@ -9,6 +9,8 @@ #include /* Forward declarations */ +struct mlx5e_rq; +struct mlx5e_txqsq; struct work_struct; /* convert a boolean value for cqe mode to appropriate dim constant @@ -37,5 +39,7 @@ mlx5e_cq_period_mode(enum dim_cq_period_mode cq_period_mode) void mlx5e_rx_dim_work(struct work_struct *work); void mlx5e_tx_dim_work(struct work_struct *work); +int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enabled); +int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enabled); #endif /* __MLX5_EN_DIM_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 0424628405e0..ec819dfc98be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -514,64 +514,6 @@ int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *param return 0; } -static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode) -{ - struct dim_cq_moder moder = {}; - - moder.cq_period_mode = cq_period_mode; - moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS; - moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC; - if (cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) - moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE; - - return moder; -} - -static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode) -{ - struct dim_cq_moder moder = {}; - - moder.cq_period_mode = cq_period_mode; - moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS; - moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC; - if (cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) - moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE; - - return moder; -} - -void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode) -{ - if (params->tx_dim_enabled) - params->tx_cq_moderation = net_dim_get_def_tx_moderation(cq_period_mode); - else - params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); -} - -void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode) -{ - if (params->rx_dim_enabled) - params->rx_cq_moderation = net_dim_get_def_rx_moderation(cq_period_mode); - 
else - params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); -} - -void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) -{ - mlx5e_reset_tx_moderation(params, cq_period_mode); - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, - params->tx_cq_moderation.cq_period_mode == - DIM_CQ_PERIOD_MODE_START_FROM_CQE); -} - -void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) -{ - mlx5e_reset_rx_moderation(params, cq_period_mode); - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, - params->rx_cq_moderation.cq_period_mode == - DIM_CQ_PERIOD_MODE_START_FROM_CQE); -} - bool slow_pci_heuristic(struct mlx5_core_dev *mdev) { u32 link_speed = 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index 9a781f18b57f..749b2ec0436e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -77,11 +77,6 @@ u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift, /* Parameter calculations */ -void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode); -void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode); -void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); -void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); - bool slow_pci_heuristic(struct mlx5_core_dev *mdev); int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params); int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c index 106a1f70dd9a..298bb74ec5e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c @@ -37,7 +37,8 @@ static void mlx5e_complete_dim_work(struct dim *dim, struct dim_cq_moder moder, struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq) { - mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts); + mlx5e_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts, + mlx5e_cq_period_mode(moder.cq_period_mode)); dim->state = DIM_START_MEASURE; } @@ -60,3 +61,89 @@ void mlx5e_tx_dim_work(struct work_struct *work) mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq); } + +static struct dim *mlx5e_dim_enable(struct mlx5_core_dev *mdev, + void (*work_fun)(struct work_struct *), int cpu, + u8 cq_period_mode, struct mlx5_core_cq *mcq, + void *queue) +{ + struct dim *dim; + int err; + + dim = kvzalloc_node(sizeof(*dim), GFP_KERNEL, cpu_to_node(cpu)); + if (!dim) + return ERR_PTR(-ENOMEM); + + INIT_WORK(&dim->work, work_fun); + + dim->mode = cq_period_mode; + dim->priv = queue; + + err = mlx5e_modify_cq_period_mode(mdev, mcq, dim->mode); + if (err) { + kvfree(dim); + return ERR_PTR(err); + } + + return dim; +} + +static void mlx5e_dim_disable(struct dim *dim) +{ + cancel_work_sync(&dim->work); + kvfree(dim); +} + +int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enable) +{ + if (enable == !!rq->dim) + return 0; + + if (enable) { + struct mlx5e_channel *c = rq->channel; + struct dim *dim; + + dim = mlx5e_dim_enable(rq->mdev, mlx5e_rx_dim_work, c->cpu, + c->rx_cq_moder.cq_period_mode, &rq->cq.mcq, rq); + if (IS_ERR(dim)) + return PTR_ERR(dim); + + rq->dim = dim; + + __set_bit(MLX5E_RQ_STATE_DIM, &rq->state); + } else { + __clear_bit(MLX5E_RQ_STATE_DIM, 
&rq->state); + + mlx5e_dim_disable(rq->dim); + rq->dim = NULL; + } + + return 0; +} + +int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enable) +{ + if (enable == !!sq->dim) + return 0; + + if (enable) { + struct mlx5e_channel *c = sq->channel; + struct dim *dim; + + dim = mlx5e_dim_enable(sq->mdev, mlx5e_tx_dim_work, c->cpu, + c->tx_cq_moder.cq_period_mode, &sq->cq.mcq, sq); + if (IS_ERR(dim)) + return PTR_ERR(dim); + + sq->dim = dim; + + __set_bit(MLX5E_SQ_STATE_DIM, &sq->state); + } else { + __clear_bit(MLX5E_SQ_STATE_DIM, &sq->state); + + mlx5e_dim_disable(sq->dim); + sq->dim = NULL; + } + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index c07785e675bc..c968874569cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -34,6 +34,7 @@ #include #include "en.h" +#include "en/channels.h" #include "en/dim.h" #include "en/port.h" #include "en/params.h" @@ -567,16 +568,13 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, coal->rx_coalesce_usecs = rx_moder->usec; coal->rx_max_coalesced_frames = rx_moder->pkts; coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled; + kernel_coal->use_cqe_mode_rx = priv->channels.params.rx_moder_use_cqe_mode; tx_moder = &priv->channels.params.tx_cq_moderation; coal->tx_coalesce_usecs = tx_moder->usec; coal->tx_max_coalesced_frames = tx_moder->pkts; coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled; - - kernel_coal->use_cqe_mode_rx = - MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_BASED_MODER); - kernel_coal->use_cqe_mode_tx = - MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_CQE_BASED_MODER); + kernel_coal->use_cqe_mode_tx = priv->channels.params.tx_moder_use_cqe_mode; return 0; } @@ -595,7 +593,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev, #define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT static void -mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) +mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct dim_cq_moder *moder) { int tc; int i; @@ -603,28 +601,34 @@ mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coal for (i = 0; i < priv->channels.num; ++i) { struct mlx5e_channel *c = priv->channels.c[i]; struct mlx5_core_dev *mdev = c->mdev; + enum mlx5_cq_period_mode mode; + + mode = mlx5e_cq_period_mode(moder->cq_period_mode); + c->tx_cq_moder = *moder; for (tc = 0; tc < c->num_tc; tc++) { - mlx5_core_modify_cq_moderation(mdev, - &c->sq[tc].cq.mcq, - coal->tx_coalesce_usecs, - coal->tx_max_coalesced_frames); + mlx5e_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq, + moder->usec, moder->pkts, + mode); } } } static void -mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal) +mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct dim_cq_moder *moder) { int i; for (i = 0; i < priv->channels.num; ++i) { struct mlx5e_channel *c = priv->channels.c[i]; struct mlx5_core_dev *mdev = c->mdev; + enum mlx5_cq_period_mode mode; + + mode = mlx5e_cq_period_mode(moder->cq_period_mode); + c->rx_cq_moder = *moder; - mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq, - coal->rx_coalesce_usecs, - coal->rx_max_coalesced_frames); + mlx5e_modify_cq_moderation(mdev, &c->rq.cq.mcq, moder->usec, moder->pkts, + mode); } } @@ -635,13 +639,14 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, { struct 
dim_cq_moder *rx_moder, *tx_moder; struct mlx5_core_dev *mdev = priv->mdev; + bool rx_dim_enabled, tx_dim_enabled; struct mlx5e_params new_params; bool reset_rx, reset_tx; - bool reset = true; u8 cq_period_mode; int err = 0; - if (!MLX5_CAP_GEN(mdev, cq_moderation)) + if (!MLX5_CAP_GEN(mdev, cq_moderation) || + !MLX5_CAP_GEN(mdev, cq_period_mode_modify)) return -EOPNOTSUPP; if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME || @@ -664,60 +669,70 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, return -EOPNOTSUPP; } + rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; + tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; + mutex_lock(&priv->state_lock); new_params = priv->channels.params; - rx_moder = &new_params.rx_cq_moderation; - rx_moder->usec = coal->rx_coalesce_usecs; - rx_moder->pkts = coal->rx_max_coalesced_frames; - new_params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce; + cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_rx); + reset_rx = mlx5e_reset_rx_channels_moderation(&priv->channels, cq_period_mode, + rx_dim_enabled, false); + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER, cq_period_mode); - tx_moder = &new_params.tx_cq_moderation; - tx_moder->usec = coal->tx_coalesce_usecs; - tx_moder->pkts = coal->tx_max_coalesced_frames; - new_params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce; + cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_tx); + reset_tx = mlx5e_reset_tx_channels_moderation(&priv->channels, cq_period_mode, + tx_dim_enabled, false); + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_CQE_BASED_MODER, cq_period_mode); - reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; - reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled; + reset_rx |= rx_dim_enabled != new_params.rx_dim_enabled; + reset_tx |= tx_dim_enabled != new_params.tx_dim_enabled; - cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_rx); - if (cq_period_mode != rx_moder->cq_period_mode) { - mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode); - reset_rx = true; - } + /* Solely used for global ethtool get coalesce */ + rx_moder = &new_params.rx_cq_moderation; + new_params.rx_dim_enabled = rx_dim_enabled; + new_params.rx_moder_use_cqe_mode = kernel_coal->use_cqe_mode_rx; - cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_tx); - if (cq_period_mode != tx_moder->cq_period_mode) { - mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode); - reset_tx = true; - } + tx_moder = &new_params.tx_cq_moderation; + new_params.tx_dim_enabled = tx_dim_enabled; + new_params.tx_moder_use_cqe_mode = kernel_coal->use_cqe_mode_tx; if (reset_rx) { - u8 mode = MLX5E_GET_PFLAG(&new_params, - MLX5E_PFLAG_RX_CQE_BASED_MODER); + mlx5e_channels_rx_change_dim(&priv->channels, false); + mlx5e_reset_rx_moderation(rx_moder, new_params.rx_moder_use_cqe_mode, + rx_dim_enabled); - mlx5e_reset_rx_moderation(&new_params, mode); + mlx5e_set_priv_channels_rx_coalesce(priv, rx_moder); + } else if (!rx_dim_enabled) { + rx_moder->usec = coal->rx_coalesce_usecs; + rx_moder->pkts = coal->rx_max_coalesced_frames; + + mlx5e_set_priv_channels_rx_coalesce(priv, rx_moder); } + if (reset_tx) { - u8 mode = MLX5E_GET_PFLAG(&new_params, - MLX5E_PFLAG_TX_CQE_BASED_MODER); + mlx5e_channels_tx_change_dim(&priv->channels, false); + mlx5e_reset_tx_moderation(tx_moder, new_params.tx_moder_use_cqe_mode, + tx_dim_enabled); - mlx5e_reset_tx_moderation(&new_params, mode); - } + 
mlx5e_set_priv_channels_tx_coalesce(priv, tx_moder); + } else if (!tx_dim_enabled) { + tx_moder->usec = coal->tx_coalesce_usecs; + tx_moder->pkts = coal->tx_max_coalesced_frames; - /* If DIM state hasn't changed, it's possible to modify interrupt - * moderation parameters on the fly, even if the channels are open. - */ - if (!reset_rx && !reset_tx && test_bit(MLX5E_STATE_OPENED, &priv->state)) { - if (!coal->use_adaptive_rx_coalesce) - mlx5e_set_priv_channels_rx_coalesce(priv, coal); - if (!coal->use_adaptive_tx_coalesce) - mlx5e_set_priv_channels_tx_coalesce(priv, coal); - reset = false; + mlx5e_set_priv_channels_tx_coalesce(priv, tx_moder); } - err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset); + /* DIM enable/disable Rx and Tx channels */ + err = mlx5e_channels_rx_change_dim(&priv->channels, rx_dim_enabled); + if (err) + goto state_unlock; + err = mlx5e_channels_tx_change_dim(&priv->channels, tx_dim_enabled); + if (err) + goto state_unlock; + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, false); +state_unlock: mutex_unlock(&priv->state_lock); return err; } @@ -1917,12 +1932,22 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, return 0; new_params = priv->channels.params; - if (is_rx_cq) - mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode); - else - mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode); + if (is_rx_cq) { + mlx5e_reset_rx_channels_moderation(&priv->channels, cq_period_mode, + false, true); + mlx5e_channels_rx_toggle_dim(&priv->channels); + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER, + cq_period_mode); + } else { + mlx5e_reset_tx_channels_moderation(&priv->channels, cq_period_mode, + false, true); + mlx5e_channels_tx_toggle_dim(&priv->channels); + MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_CQE_BASED_MODER, + cq_period_mode); + } - return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); + /* Update pflags of existing channels without resetting them */ + return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, false); } static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 8b4ecae0fd9f..3bd0695845c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -962,20 +962,8 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params, } } - rq->dim = kvzalloc_node(sizeof(*rq->dim), GFP_KERNEL, node); - if (!rq->dim) { - err = -ENOMEM; - goto err_unreg_xdp_rxq_info; - } - - rq->dim->priv = rq; - INIT_WORK(&rq->dim->work, mlx5e_rx_dim_work); - rq->dim->mode = params->rx_cq_moderation.cq_period_mode; - return 0; -err_unreg_xdp_rxq_info: - xdp_rxq_info_unreg(&rq->xdp_rxq); err_destroy_page_pool: page_pool_destroy(rq->page_pool); err_free_by_rq_type: @@ -1304,8 +1292,21 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param, if (MLX5_CAP_ETH(mdev, cqe_checksum_full)) __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state); - if (params->rx_dim_enabled) - __set_bit(MLX5E_RQ_STATE_DIM, &rq->state); + if (rq->channel && !params->rx_dim_enabled) { + rq->channel->rx_cq_moder = params->rx_cq_moderation; + } else if (rq->channel) { + u8 cq_period_mode; + + cq_period_mode = params->rx_moder_use_cqe_mode ? 
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE : + DIM_CQ_PERIOD_MODE_START_FROM_EQE; + mlx5e_reset_rx_moderation(&rq->channel->rx_cq_moder, cq_period_mode, + params->rx_dim_enabled); + + err = mlx5e_dim_rx_change(rq, params->rx_dim_enabled); + if (err) + goto err_destroy_rq; + } /* We disable csum_complete when XDP is enabled since * XDP programs might manipulate packets which will render @@ -1351,7 +1352,8 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq) void mlx5e_close_rq(struct mlx5e_rq *rq) { - cancel_work_sync(&rq->dim->work); + if (rq->dim) + cancel_work_sync(&rq->dim->work); cancel_work_sync(&rq->recover_work); mlx5e_destroy_rq(rq); mlx5e_free_rx_descs(rq); @@ -1626,20 +1628,9 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c, err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu)); if (err) goto err_sq_wq_destroy; - sq->dim = kvzalloc_node(sizeof(*sq->dim), GFP_KERNEL, cpu_to_node(c->cpu)); - if (!sq->dim) { - err = -ENOMEM; - goto err_free_txqsq_db; - } - - sq->dim->priv = sq; - INIT_WORK(&sq->dim->work, mlx5e_tx_dim_work); - sq->dim->mode = params->tx_cq_moderation.cq_period_mode; return 0; -err_free_txqsq_db: - mlx5e_free_txqsq_db(sq); err_sq_wq_destroy: mlx5_wq_destroy(&sq->wq_ctrl); @@ -1804,11 +1795,27 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix, if (tx_rate) mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate); - if (params->tx_dim_enabled) - sq->state |= BIT(MLX5E_SQ_STATE_DIM); + if (sq->channel && !params->tx_dim_enabled) { + sq->channel->tx_cq_moder = params->tx_cq_moderation; + } else if (sq->channel) { + u8 cq_period_mode; + + cq_period_mode = params->tx_moder_use_cqe_mode ? + DIM_CQ_PERIOD_MODE_START_FROM_CQE : + DIM_CQ_PERIOD_MODE_START_FROM_EQE; + mlx5e_reset_tx_moderation(&sq->channel->tx_cq_moder, + cq_period_mode, + params->tx_dim_enabled); + + err = mlx5e_dim_tx_change(sq, params->tx_dim_enabled); + if (err) + goto err_destroy_sq; + } return 0; +err_destroy_sq: + mlx5e_destroy_sq(c->mdev, sq->sqn); err_free_txqsq: mlx5e_free_txqsq(sq); @@ -1860,7 +1867,8 @@ void mlx5e_close_txqsq(struct mlx5e_txqsq *sq) struct mlx5_core_dev *mdev = sq->mdev; struct mlx5_rate_limit rl = {0}; - cancel_work_sync(&sq->dim->work); + if (sq->dim) + cancel_work_sync(&sq->dim->work); cancel_work_sync(&sq->recover_work); mlx5e_destroy_sq(mdev, sq->sqn); if (sq->rate_limit) { @@ -1879,6 +1887,49 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work) mlx5e_reporter_tx_err_cqe(sq); } +static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode) +{ + return (struct dim_cq_moder) { + .cq_period_mode = cq_period_mode, + .pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS, + .usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ? 
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE : + MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC, + }; +} + +bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode, + bool dim_enabled) +{ + bool reset_needed = cq_moder->cq_period_mode != cq_period_mode; + + if (dim_enabled) + *cq_moder = net_dim_get_def_tx_moderation(cq_period_mode); + else + *cq_moder = mlx5e_get_def_tx_moderation(cq_period_mode); + + return reset_needed; +} + +bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode, + bool dim_enabled, bool keep_dim_state) +{ + bool reset = false; + int i, tc; + + for (i = 0; i < chs->num; i++) { + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) { + if (keep_dim_state) + dim_enabled = !!chs->c[i]->sq[tc].dim; + + reset |= mlx5e_reset_tx_moderation(&chs->c[i]->tx_cq_moder, + cq_period_mode, dim_enabled); + } + } + + return reset; +} + static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_sq_param *param, struct mlx5e_icosq *sq, work_func_t recover_work_func) @@ -2102,7 +2153,8 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); - MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(param->cq_period_mode)); + MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(param->cq_period_mode)); + MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - @@ -2140,8 +2192,10 @@ int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder, if (err) goto err_free_cq; - if (MLX5_CAP_GEN(mdev, cq_moderation)) - mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts); + if (MLX5_CAP_GEN(mdev, cq_moderation) && + MLX5_CAP_GEN(mdev, cq_period_mode_modify)) + mlx5e_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts, + mlx5e_cq_period_mode(moder.cq_period_mode)); return 0; err_free_cq: @@ -2156,6 +2210,40 @@ void mlx5e_close_cq(struct mlx5e_cq *cq) mlx5e_free_cq(cq); } +int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u8 cq_period_mode) +{ + u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {}; + void *cqc; + + MLX5_SET(modify_cq_in, in, cqn, cq->cqn); + cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context); + MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(cq_period_mode)); + MLX5_SET(modify_cq_in, in, + modify_field_select_resize_field_select.modify_field_select.modify_field_select, + MLX5_CQ_MODIFY_PERIOD_MODE); + + return mlx5_core_modify_cq(dev, cq, in, sizeof(in)); +} + +int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, + u16 cq_period, u16 cq_max_count, u8 cq_period_mode) +{ + u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {}; + void *cqc; + + MLX5_SET(modify_cq_in, in, cqn, cq->cqn); + cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context); + MLX5_SET(cqc, cqc, cq_period, cq_period); + MLX5_SET(cqc, cqc, cq_max_count, cq_max_count); + MLX5_SET(cqc, cqc, cq_period_mode, cq_period_mode); + MLX5_SET(modify_cq_in, in, + modify_field_select_resize_field_select.modify_field_select.modify_field_select, + MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT | MLX5_CQ_MODIFY_PERIOD_MODE); + + return mlx5_core_modify_cq(dev, cq, in, sizeof(in)); +} + static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_create_cq_param *ccp, @@ -3973,6 +4061,47 @@ static int 
set_feature_rx_all(struct net_device *netdev, bool enable) return mlx5_set_port_fcs(mdev, !enable); } +static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode) +{ + return (struct dim_cq_moder) { + .cq_period_mode = cq_period_mode, + .pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS, + .usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ? + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE : + MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC, + }; +} + +bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode, + bool dim_enabled) +{ + bool reset_needed = cq_moder->cq_period_mode != cq_period_mode; + + if (dim_enabled) + *cq_moder = net_dim_get_def_rx_moderation(cq_period_mode); + else + *cq_moder = mlx5e_get_def_rx_moderation(cq_period_mode); + + return reset_needed; +} + +bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode, + bool dim_enabled, bool keep_dim_state) +{ + bool reset = false; + int i; + + for (i = 0; i < chs->num; i++) { + if (keep_dim_state) + dim_enabled = !!chs->c[i]->rq.dim; + + reset |= mlx5e_reset_rx_moderation(&chs->c[i]->rx_cq_moder, + cq_period_mode, dim_enabled); + } + + return reset; +} + static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable) { u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {}; @@ -5037,7 +5166,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 { struct mlx5e_params *params = &priv->channels.params; struct mlx5_core_dev *mdev = priv->mdev; - u8 rx_cq_period_mode; params->sw_mtu = mtu; params->hard_mtu = MLX5E_ETH_HARD_MTU; @@ -5071,12 +5199,16 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT); /* CQ moderation params */ - rx_cq_period_mode = - mlx5e_dim_cq_period_mode(MLX5_CAP_GEN(mdev, cq_period_start_from_cqe)); - params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); - params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); - mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode); - mlx5e_set_tx_cq_mode_params(params, DIM_CQ_PERIOD_MODE_START_FROM_EQE); + params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) && + MLX5_CAP_GEN(mdev, cq_period_mode_modify); + params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) && + MLX5_CAP_GEN(mdev, cq_period_mode_modify); + params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe); + params->tx_moder_use_cqe_mode = false; + mlx5e_reset_rx_moderation(¶ms->rx_cq_moderation, params->rx_moder_use_cqe_mode, + params->rx_dim_enabled); + mlx5e_reset_tx_moderation(¶ms->tx_cq_moderation, params->tx_moder_use_cqe_mode, + params->tx_dim_enabled); /* TX inline */ mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 309771300581..6477b91ff512 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -838,9 +838,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev) struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_params *params; - u8 cq_period_mode = - mlx5e_dim_cq_period_mode(MLX5_CAP_GEN(mdev, cq_period_start_from_cqe)); - params = &priv->channels.params; params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS; @@ -868,7 +865,7 @@ static void mlx5e_build_rep_params(struct net_device *netdev) /* CQ moderation params */ params->rx_dim_enabled = 
MLX5_CAP_GEN(mdev, cq_moderation); - mlx5e_set_rx_cq_mode_params(params, cq_period_mode); + params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe); params->mqprio.num_tc = 1; if (rep->vport != MLX5_VPORT_UPLINK) diff --git a/include/linux/mlx5/cq.h b/include/linux/mlx5/cq.h index cb15308b5cb0..991526039ccb 100644 --- a/include/linux/mlx5/cq.h +++ b/include/linux/mlx5/cq.h @@ -95,9 +95,10 @@ enum { }; enum { - MLX5_CQ_MODIFY_PERIOD = 1 << 0, - MLX5_CQ_MODIFY_COUNT = 1 << 1, - MLX5_CQ_MODIFY_OVERRUN = 1 << 2, + MLX5_CQ_MODIFY_PERIOD = BIT(0), + MLX5_CQ_MODIFY_COUNT = BIT(1), + MLX5_CQ_MODIFY_OVERRUN = BIT(2), + MLX5_CQ_MODIFY_PERIOD_MODE = BIT(4), }; enum { diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 8c7ddb22bf20..f468763478ae 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1686,7 +1686,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 cq_oi[0x1]; u8 cq_resize[0x1]; u8 cq_moderation[0x1]; - u8 reserved_at_223[0x3]; + u8 cq_period_mode_modify[0x1]; + u8 reserved_at_224[0x2]; u8 cq_eq_remap[0x1]; u8 pg[0x1]; u8 block_lb_mc[0x1]; -- cgit From 7ab4f16f9e2440e797eae88812f800458e5879d2 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 19 Apr 2024 12:08:39 +0100 Subject: net: extend ubuf_info callback to ops structure We'll need to associate additional callbacks with ubuf_info, introduce a structure holding ubuf_info callbacks. Apart from a more smarter io_uring notification management introduced in next patches, it can be used to generalise msg_zerocopy_put_abort() and also store ->sg_from_iter, which is currently passed in struct msghdr. Reviewed-by: Jens Axboe Reviewed-by: David Ahern Signed-off-by: Pavel Begunkov Reviewed-by: Willem de Bruijn Link: https://lore.kernel.org/all/a62015541de49c0e2a8a0377a1d5d0a5aeb07016.1713369317.git.asml.silence@gmail.com Signed-off-by: Jakub Kicinski --- drivers/net/tap.c | 2 +- drivers/net/tun.c | 2 +- drivers/net/xen-netback/common.h | 5 ++--- drivers/net/xen-netback/interface.c | 2 +- drivers/net/xen-netback/netback.c | 11 ++++++++--- drivers/vhost/net.c | 8 ++++++-- include/linux/skbuff.h | 19 +++++++++++-------- io_uring/notif.c | 18 +++++++++++++----- net/core/skbuff.c | 16 ++++++++++------ 9 files changed, 53 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 9f0495e8df4d..bfdd3875fe86 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -754,7 +754,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control, skb_zcopy_init(skb, msg_control); } else if (msg_control) { struct ubuf_info *uarg = msg_control; - uarg->callback(NULL, uarg, false); + uarg->ops->complete(NULL, uarg, false); } dev_queue_xmit(skb); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 0b3f21cba552..b7401d990680 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1906,7 +1906,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, skb_zcopy_init(skb, msg_control); } else if (msg_control) { struct ubuf_info *uarg = msg_control; - uarg->callback(NULL, uarg, false); + uarg->ops->complete(NULL, uarg, false); } skb_reset_network_header(skb); diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 1fcbd83f7ff2..17421da139f2 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -390,9 +390,8 @@ bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb); void xenvif_carrier_on(struct xenvif *vif); -/* 
Callback from stack when TX packet can be released */ -void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf, - bool zerocopy_success); +/* Callbacks from stack when TX packet can be released */ +extern const struct ubuf_info_ops xenvif_ubuf_ops; static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue) { diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 7cff90aa8d24..65db5f14465f 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -593,7 +593,7 @@ int xenvif_init_queue(struct xenvif_queue *queue) for (i = 0; i < MAX_PENDING_REQS; i++) { queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc) - { { .callback = xenvif_zerocopy_callback }, + { { .ops = &xenvif_ubuf_ops }, { { .ctx = NULL, .desc = i } } }; queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index ef76850d9bcd..bab7e43ea05b 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1156,7 +1156,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s uarg = skb_shinfo(skb)->destructor_arg; /* increase inflight counter to offset decrement in callback */ atomic_inc(&queue->inflight_packets); - uarg->callback(NULL, uarg, true); + uarg->ops->complete(NULL, uarg, true); skb_shinfo(skb)->destructor_arg = NULL; /* Fill the skb with the new (local) frags. */ @@ -1278,8 +1278,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue) return work_done; } -void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base, - bool zerocopy_success) +static void xenvif_zerocopy_callback(struct sk_buff *skb, + struct ubuf_info *ubuf_base, + bool zerocopy_success) { unsigned long flags; pending_ring_idx_t index; @@ -1312,6 +1313,10 @@ void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base, xenvif_skb_zerocopy_complete(queue); } +const struct ubuf_info_ops xenvif_ubuf_ops = { + .complete = xenvif_zerocopy_callback, +}; + static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) { struct gnttab_unmap_grant_ref *gop; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index c64ded183f8d..f16279351db5 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -380,7 +380,7 @@ static void vhost_zerocopy_signal_used(struct vhost_net *net, } } -static void vhost_zerocopy_callback(struct sk_buff *skb, +static void vhost_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *ubuf_base, bool success) { struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base); @@ -408,6 +408,10 @@ static void vhost_zerocopy_callback(struct sk_buff *skb, rcu_read_unlock_bh(); } +static const struct ubuf_info_ops vhost_ubuf_ops = { + .complete = vhost_zerocopy_complete, +}; + static inline unsigned long busy_clock(void) { return local_clock() >> 10; @@ -879,7 +883,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock) vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS; ubuf->ctx = nvq->ubufs; ubuf->desc = nvq->upend_idx; - ubuf->ubuf.callback = vhost_zerocopy_callback; + ubuf->ubuf.ops = &vhost_ubuf_ops; ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG; refcount_set(&ubuf->ubuf.refcnt, 1); msg.msg_control = &ctl; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 9d24aec064e8..a110e97e074a 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -527,6 +527,11 @@ enum { #define 
SKBFL_ALL_ZEROCOPY (SKBFL_ZEROCOPY_FRAG | SKBFL_PURE_ZEROCOPY | \ SKBFL_DONT_ORPHAN | SKBFL_MANAGED_FRAG_REFS) +struct ubuf_info_ops { + void (*complete)(struct sk_buff *, struct ubuf_info *, + bool zerocopy_success); +}; + /* * The callback notifies userspace to release buffers when skb DMA is done in * lower device, the skb last reference should be 0 when calling this. @@ -536,8 +541,7 @@ enum { * The desc field is used to track userspace buffer index. */ struct ubuf_info { - void (*callback)(struct sk_buff *, struct ubuf_info *, - bool zerocopy_success); + const struct ubuf_info_ops *ops; refcount_t refcnt; u8 flags; }; @@ -1662,14 +1666,13 @@ static inline void skb_set_end_offset(struct sk_buff *skb, unsigned int offset) } #endif +extern const struct ubuf_info_ops msg_zerocopy_ubuf_ops; + struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, struct ubuf_info *uarg); void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref); -void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, - bool success); - int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk, struct sk_buff *skb, struct iov_iter *from, size_t length); @@ -1757,13 +1760,13 @@ static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb) static inline void net_zcopy_put(struct ubuf_info *uarg) { if (uarg) - uarg->callback(NULL, uarg, true); + uarg->ops->complete(NULL, uarg, true); } static inline void net_zcopy_put_abort(struct ubuf_info *uarg, bool have_uref) { if (uarg) { - if (uarg->callback == msg_zerocopy_callback) + if (uarg->ops == &msg_zerocopy_ubuf_ops) msg_zerocopy_put_abort(uarg, have_uref); else if (have_uref) net_zcopy_put(uarg); @@ -1777,7 +1780,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy_success) if (uarg) { if (!skb_zcopy_is_nouarg(skb)) - uarg->callback(skb, uarg, zerocopy_success); + uarg->ops->complete(skb, uarg, zerocopy_success); skb_shinfo(skb)->flags &= ~SKBFL_ALL_ZEROCOPY; } diff --git a/io_uring/notif.c b/io_uring/notif.c index d3e703c37aba..204f0fc7d966 100644 --- a/io_uring/notif.c +++ b/io_uring/notif.c @@ -24,7 +24,7 @@ static void io_notif_complete_tw_ext(struct io_kiocb *notif, struct io_tw_state io_req_task_complete(notif, ts); } -static void io_tx_ubuf_callback(struct sk_buff *skb, struct ubuf_info *uarg, +static void io_tx_ubuf_complete(struct sk_buff *skb, struct ubuf_info *uarg, bool success) { struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg); @@ -45,19 +45,27 @@ static void io_tx_ubuf_callback_ext(struct sk_buff *skb, struct ubuf_info *uarg, else if (!success && !nd->zc_copied) WRITE_ONCE(nd->zc_copied, true); } - io_tx_ubuf_callback(skb, uarg, success); + io_tx_ubuf_complete(skb, uarg, success); } +static const struct ubuf_info_ops io_ubuf_ops = { + .complete = io_tx_ubuf_complete, +}; + +static const struct ubuf_info_ops io_ubuf_ops_ext = { + .complete = io_tx_ubuf_callback_ext, +}; + void io_notif_set_extended(struct io_kiocb *notif) { struct io_notif_data *nd = io_notif_to_data(notif); - if (nd->uarg.callback != io_tx_ubuf_callback_ext) { + if (nd->uarg.ops != &io_ubuf_ops_ext) { nd->account_pages = 0; nd->zc_report = false; nd->zc_used = false; nd->zc_copied = false; - nd->uarg.callback = io_tx_ubuf_callback_ext; + nd->uarg.ops = &io_ubuf_ops_ext; notif->io_task_work.func = io_notif_complete_tw_ext; } } @@ -80,7 +88,7 @@ struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx) nd = io_notif_to_data(notif); nd->uarg.flags = IO_NOTIF_UBUF_FLAGS; - nd->uarg.callback = 
io_tx_ubuf_callback; + nd->uarg.ops = &io_ubuf_ops; refcount_set(&nd->uarg.refcnt, 1); return notif; } diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b99127712e67..9d36750459f3 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1708,7 +1708,7 @@ static struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size) return NULL; } - uarg->ubuf.callback = msg_zerocopy_callback; + uarg->ubuf.ops = &msg_zerocopy_ubuf_ops; uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1; uarg->len = 1; uarg->bytelen = size; @@ -1734,7 +1734,7 @@ struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size, u32 bytelen, next; /* there might be non MSG_ZEROCOPY users */ - if (uarg->callback != msg_zerocopy_callback) + if (uarg->ops != &msg_zerocopy_ubuf_ops) return NULL; /* realloc only when socket is locked (TCP, UDP cork), @@ -1845,8 +1845,8 @@ release: sock_put(sk); } -void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, - bool success) +static void msg_zerocopy_complete(struct sk_buff *skb, struct ubuf_info *uarg, + bool success) { struct ubuf_info_msgzc *uarg_zc = uarg_to_msgzc(uarg); @@ -1855,7 +1855,6 @@ void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, if (refcount_dec_and_test(&uarg->refcnt)) __msg_zerocopy_callback(uarg_zc); } -EXPORT_SYMBOL_GPL(msg_zerocopy_callback); void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref) { @@ -1865,10 +1864,15 @@ void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref) uarg_to_msgzc(uarg)->len--; if (have_uref) - msg_zerocopy_callback(NULL, uarg, true); + msg_zerocopy_complete(NULL, uarg, true); } EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort); +const struct ubuf_info_ops msg_zerocopy_ubuf_ops = { + .complete = msg_zerocopy_complete, +}; +EXPORT_SYMBOL_GPL(msg_zerocopy_ubuf_ops); + int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, struct msghdr *msg, int len, struct ubuf_info *uarg) -- cgit From 65bada80dec1f2108a751644773b2120bd789934 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Fri, 19 Apr 2024 12:08:40 +0100 Subject: net: add callback for setting a ubuf_info to skb At the moment an skb can only have one ubuf_info associated with it, which might be a performance problem for zerocopy sends in cases like TCP via io_uring. Add a callback for assigning ubuf_info to skb, this way we will implement smarter assignment later like linking ubuf_info together. Note, it's an optional callback, which should be compatible with skb_zcopy_set(), that's because the net stack might potentially decide to clone an skb and take another reference to ubuf_info whenever it wishes. Also, a correct implementation should always be able to bind to an skb without prior ubuf_info, otherwise we could end up in a situation when the send would not be able to progress. 
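For illustration, a minimal consumer of the two callbacks might look like the sketch below. This is not code from the series; the my_*() names are placeholders, and only the ubuf_info_ops layout and the uarg->ops assignment are taken from the patches above. A provider that does not need smarter assignment can leave .link_skb unset, in which case skb_zerocopy_iter_stream() keeps the existing one-uarg-per-skb rule and calls skb_zcopy_set() as before.

#include <linux/skbuff.h>

/* Hypothetical zerocopy provider adopting the ops-based interface. */
static void my_zc_complete(struct sk_buff *skb, struct ubuf_info *uarg,
			   bool zerocopy_success)
{
	/* release the pinned user pages / post the completion event here */
}

/* Optional hook: must stay compatible with skb_zcopy_set() and must be able
 * to bind to an skb that has no ubuf_info attached yet.
 */
static int my_zc_link_skb(struct sk_buff *skb, struct ubuf_info *uarg)
{
	/* take a reference on uarg and attach (or chain) it to skb */
	return 0;
}

static const struct ubuf_info_ops my_zc_ubuf_ops = {
	.complete = my_zc_complete,
	.link_skb = my_zc_link_skb,
};

/* at allocation time, where uarg->callback used to be set: */
/*	uarg->ops = &my_zc_ubuf_ops;	*/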
Reviewed-by: Jens Axboe Reviewed-by: David Ahern Signed-off-by: Pavel Begunkov Reviewed-by: Willem de Bruijn Link: https://lore.kernel.org/all/b7918aadffeb787c84c9e72e34c729dc04f3a45d.1713369317.git.asml.silence@gmail.com Signed-off-by: Jakub Kicinski --- include/linux/skbuff.h | 2 ++ net/core/skbuff.c | 20 ++++++++++++++------ 2 files changed, 16 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a110e97e074a..ced69f37977f 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -530,6 +530,8 @@ enum { struct ubuf_info_ops { void (*complete)(struct sk_buff *, struct ubuf_info *, bool zerocopy_success); + /* has to be compatible with skb_zcopy_set() */ + int (*link_skb)(struct sk_buff *skb, struct ubuf_info *uarg); }; /* diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 9d36750459f3..4a0c10685cd2 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1880,11 +1880,18 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, struct ubuf_info *orig_uarg = skb_zcopy(skb); int err, orig_len = skb->len; - /* An skb can only point to one uarg. This edge case happens when - * TCP appends to an skb, but zerocopy_realloc triggered a new alloc. - */ - if (orig_uarg && uarg != orig_uarg) - return -EEXIST; + if (uarg->ops->link_skb) { + err = uarg->ops->link_skb(skb, uarg); + if (err) + return err; + } else { + /* An skb can only point to one uarg. This edge case happens + * when TCP appends to an skb, but zerocopy_realloc triggered + * a new alloc. + */ + if (orig_uarg && uarg != orig_uarg) + return -EEXIST; + } err = __zerocopy_sg_from_iter(msg, sk, skb, &msg->msg_iter, len); if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { @@ -1898,7 +1905,8 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, return err; } - skb_zcopy_set(skb, uarg, NULL); + if (!uarg->ops->link_skb) + skb_zcopy_set(skb, uarg, NULL); return skb->len - orig_len; } EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); -- cgit From 89d5d9e9500826cbd3b15ea7b6e8d9fae966f073 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 22 Apr 2024 17:48:50 +0300 Subject: counter: Don't use "proxy" headers Update header inclusions to follow IWYU (Include What You Use) principle. Signed-off-by: Andy Shevchenko Link: https://lore.kernel.org/r/20240422144850.2031076-1-andriy.shevchenko@linux.intel.com Signed-off-by: William Breathitt Gray --- include/linux/counter.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/counter.h b/include/linux/counter.h index cd35d8574ee2..426b7d58a438 100644 --- a/include/linux/counter.h +++ b/include/linux/counter.h @@ -6,14 +6,15 @@ #ifndef _COUNTER_H_ #define _COUNTER_H_ +#include #include #include -#include #include #include #include #include #include + #include struct counter_device; -- cgit From 96e20adc43c4f81e9163a5188cee75a6dd393e09 Mon Sep 17 00:00:00 2001 From: Matti Vaittinen Date: Mon, 22 Apr 2024 09:38:33 +0300 Subject: regulator: change stubbed devm_regulator_get_enable to return Ok The devm_regulator_get_enable() should be a 'call and forget' API, meaning, when it is used to enable the regulators, the API does not provide a handle to do any further control of the regulators. It gives no real benefit to return an error from the stub if CONFIG_REGULATOR is not set. On the contrary, returning and error is causing problems to drivers when hardware is such it works out just fine with no regulator control. 
Returning an error forces drivers to specifically handle the case where CONFIG_REGULATOR is not set, making the mere existence of the stub questionable. Furthermore, the stub of the regulator_enable() seems to be returning Ok. Change the stub implementation for the devm_regulator_get_enable() to return Ok so drivers do not separately handle the case where the CONFIG_REGULATOR is not set. Signed-off-by: Matti Vaittinen Reported-by: Aleksander Mazur Suggested-by: Guenter Roeck Fixes: da279e6965b3 ("regulator: Add devm helpers for get and enable") Reviewed-by: Guenter Roeck Link: https://lore.kernel.org/r/ZiYF6d1V1vSPcsJS@drtxq0yyyyyyyyyyyyyby-3.rev.dnainternet.fi Signed-off-by: Mark Brown --- include/linux/regulator/consumer.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 4660582a3302..71232fb7dda3 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -320,7 +320,7 @@ devm_regulator_get_exclusive(struct device *dev, const char *id) static inline int devm_regulator_get_enable(struct device *dev, const char *id) { - return -ENODEV; + return 0; } static inline int devm_regulator_get_enable_optional(struct device *dev, -- cgit From 118005713e35a1893c6ee47ab2926cca277737de Mon Sep 17 00:00:00 2001 From: Sourabh Jain Date: Tue, 26 Mar 2024 11:24:08 +0530 Subject: crash: forward memory_notify arg to arch crash hotplug handler In the event of memory hotplug or online/offline events, the crash memory hotplug notifier `crash_memhp_notifier()` receives a `memory_notify` object but doesn't forward that object to the generic and architecture-specific crash hotplug handler. The `memory_notify` object contains the starting PFN (Page Frame Number) and the number of pages in the hot-removed memory. This information is necessary for architectures like PowerPC to update/recreate the kdump image, specifically `elfcorehdr`. So update the function signature of `crash_handle_hotplug_event()` and `arch_crash_handle_hotplug_event()` to accept the `memory_notify` object as an argument from crash memory hotplug notifier. Since no such object is available in the case of a CPU hotplug event, the crash CPU hotplug notifier `crash_cpuhp_online()` passes NULL to the crash hotplug handler. 
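As a purely illustrative sketch (not code from this series) of how an architecture-specific handler can consume the forwarded argument: the cast below follows the notifier convention described above, start_pfn/nr_pages are fields of the existing struct memory_notify, and the actual elfcorehdr regeneration is architecture specific and only hinted at in a comment.

#include <linux/kexec.h>
#include <linux/memory.h>

void arch_crash_handle_hotplug_event(struct kimage *image, void *arg)
{
	/* arg is a struct memory_notify * for memory hot(un)plug events and
	 * NULL when invoked from the CPU hotplug notifier.
	 */
	struct memory_notify *mhp = arg;

	if (!mhp)
		return;	/* CPU add/remove: no memory range to consider */

	/*
	 * Memory add/remove: mhp->start_pfn and mhp->nr_pages describe the
	 * affected range; regenerate the elfcorehdr (or any other
	 * arch-specific kexec segment) of the loaded kdump image here.
	 */
}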
Signed-off-by: Sourabh Jain Acked-by: Baoquan He Acked-by: Hari Bathini Signed-off-by: Michael Ellerman Link: https://msgid.link/20240326055413.186534-2-sourabhjain@linux.ibm.com --- arch/x86/include/asm/kexec.h | 2 +- arch/x86/kernel/crash.c | 4 +++- include/linux/crash_core.h | 2 +- kernel/crash_core.c | 14 +++++++------- 4 files changed, 12 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h index 91ca9a9ee3a2..cb1320ebbc23 100644 --- a/arch/x86/include/asm/kexec.h +++ b/arch/x86/include/asm/kexec.h @@ -207,7 +207,7 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image); extern void kdump_nmi_shootdown_cpus(void); #ifdef CONFIG_CRASH_HOTPLUG -void arch_crash_handle_hotplug_event(struct kimage *image); +void arch_crash_handle_hotplug_event(struct kimage *image, void *arg); #define arch_crash_handle_hotplug_event arch_crash_handle_hotplug_event #ifdef CONFIG_HOTPLUG_CPU diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index e74d0c4286c1..2a682fe86352 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -432,10 +432,12 @@ unsigned int arch_crash_get_elfcorehdr_size(void) /** * arch_crash_handle_hotplug_event() - Handle hotplug elfcorehdr changes * @image: a pointer to kexec_crash_image + * @arg: struct memory_notify handler for memory hotplug case and + * NULL for CPU hotplug case. * * Prepare the new elfcorehdr and replace the existing elfcorehdr. */ -void arch_crash_handle_hotplug_event(struct kimage *image) +void arch_crash_handle_hotplug_event(struct kimage *image, void *arg) { void *elfbuf = NULL, *old_elfcorehdr; unsigned long nr_mem_ranges; diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h index d33352c2e386..647e928efee8 100644 --- a/include/linux/crash_core.h +++ b/include/linux/crash_core.h @@ -37,7 +37,7 @@ static inline void arch_kexec_unprotect_crashkres(void) { } #ifndef arch_crash_handle_hotplug_event -static inline void arch_crash_handle_hotplug_event(struct kimage *image) { } +static inline void arch_crash_handle_hotplug_event(struct kimage *image, void *arg) { } #endif int crash_check_update_elfcorehdr(void); diff --git a/kernel/crash_core.c b/kernel/crash_core.c index 78b5dc7cee3a..70fa8111a9d6 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -534,7 +534,7 @@ int crash_check_update_elfcorehdr(void) * list of segments it checks (since the elfcorehdr changes and thus * would require an update to purgatory itself to update the digest). 
*/ -static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu) +static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu, void *arg) { struct kimage *image; @@ -596,7 +596,7 @@ static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu) image->hp_action = hp_action; /* Now invoke arch-specific update handler */ - arch_crash_handle_hotplug_event(image); + arch_crash_handle_hotplug_event(image, arg); /* No longer handling a hotplug event */ image->hp_action = KEXEC_CRASH_HP_NONE; @@ -612,17 +612,17 @@ out: crash_hotplug_unlock(); } -static int crash_memhp_notifier(struct notifier_block *nb, unsigned long val, void *v) +static int crash_memhp_notifier(struct notifier_block *nb, unsigned long val, void *arg) { switch (val) { case MEM_ONLINE: crash_handle_hotplug_event(KEXEC_CRASH_HP_ADD_MEMORY, - KEXEC_CRASH_HP_INVALID_CPU); + KEXEC_CRASH_HP_INVALID_CPU, arg); break; case MEM_OFFLINE: crash_handle_hotplug_event(KEXEC_CRASH_HP_REMOVE_MEMORY, - KEXEC_CRASH_HP_INVALID_CPU); + KEXEC_CRASH_HP_INVALID_CPU, arg); break; } return NOTIFY_OK; @@ -635,13 +635,13 @@ static struct notifier_block crash_memhp_nb = { static int crash_cpuhp_online(unsigned int cpu) { - crash_handle_hotplug_event(KEXEC_CRASH_HP_ADD_CPU, cpu); + crash_handle_hotplug_event(KEXEC_CRASH_HP_ADD_CPU, cpu, NULL); return 0; } static int crash_cpuhp_offline(unsigned int cpu) { - crash_handle_hotplug_event(KEXEC_CRASH_HP_REMOVE_CPU, cpu); + crash_handle_hotplug_event(KEXEC_CRASH_HP_REMOVE_CPU, cpu, NULL); return 0; } -- cgit From 79365026f86948b52c3cb7bf099dded92c559b4c Mon Sep 17 00:00:00 2001 From: Sourabh Jain Date: Tue, 26 Mar 2024 11:24:09 +0530 Subject: crash: add a new kexec flag for hotplug support Commit a72bbec70da2 ("crash: hotplug support for kexec_load()") introduced a new kexec flag, `KEXEC_UPDATE_ELFCOREHDR`. Kexec tool uses this flag to indicate to the kernel that it is safe to modify the elfcorehdr of the kdump image loaded using the kexec_load system call. However, it is possible that architectures may need to update kexec segments other then elfcorehdr. For example, FDT (Flatten Device Tree) on PowerPC. Introducing a new kexec flag for every new kexec segment may not be a good solution. Hence, a generic kexec flag bit, `KEXEC_CRASH_HOTPLUG_SUPPORT`, is introduced to share the CPU/Memory hotplug support intent between the kexec tool and the kernel for the kexec_load system call. Now we have two kexec flags that enables crash hotplug support for kexec_load system call. First is KEXEC_UPDATE_ELFCOREHDR (only used in x86), and second is KEXEC_CRASH_HOTPLUG_SUPPORT (for all architectures). To simplify the process of finding and reporting the crash hotplug support the following changes are introduced. 1. Define arch specific function to process the kexec flags and determine crash hotplug support 2. Rename the @update_elfcorehdr member of struct kimage to @hotplug_support and populate it for both kexec_load and kexec_file_load syscalls, because architecture can update more than one kexec segment 3. 
Let generic function crash_check_hotplug_support report hotplug support for loaded kdump image based on value of @hotplug_support To bring the x86 crash hotplug support in line with the above points, the following changes have been made: - Introduce the arch_crash_hotplug_support function to process kexec flags and determine crash hotplug support - Remove the arch_crash_hotplug_[cpu|memory]_support functions Signed-off-by: Sourabh Jain Acked-by: Baoquan He Acked-by: Hari Bathini Signed-off-by: Michael Ellerman Link: https://msgid.link/20240326055413.186534-3-sourabhjain@linux.ibm.com --- arch/x86/include/asm/kexec.h | 11 ++--------- arch/x86/kernel/crash.c | 28 +++++++++++++++++----------- drivers/base/cpu.c | 2 +- drivers/base/memory.c | 2 +- include/linux/crash_core.h | 13 ++++++------- include/linux/kexec.h | 11 +++++++---- include/uapi/linux/kexec.h | 1 + kernel/crash_core.c | 15 ++++++--------- kernel/kexec.c | 4 ++-- kernel/kexec_file.c | 5 +++++ 10 files changed, 48 insertions(+), 44 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/include/asm/kexec.h b/arch/x86/include/asm/kexec.h index cb1320ebbc23..ae5482a2f0ca 100644 --- a/arch/x86/include/asm/kexec.h +++ b/arch/x86/include/asm/kexec.h @@ -210,15 +210,8 @@ extern void kdump_nmi_shootdown_cpus(void); void arch_crash_handle_hotplug_event(struct kimage *image, void *arg); #define arch_crash_handle_hotplug_event arch_crash_handle_hotplug_event -#ifdef CONFIG_HOTPLUG_CPU -int arch_crash_hotplug_cpu_support(void); -#define crash_hotplug_cpu_support arch_crash_hotplug_cpu_support -#endif - -#ifdef CONFIG_MEMORY_HOTPLUG -int arch_crash_hotplug_memory_support(void); -#define crash_hotplug_memory_support arch_crash_hotplug_memory_support -#endif +int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags); +#define arch_crash_hotplug_support arch_crash_hotplug_support unsigned int arch_crash_get_elfcorehdr_size(void); #define crash_get_elfcorehdr_size arch_crash_get_elfcorehdr_size diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 2a682fe86352..f06501445cd9 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -402,20 +402,26 @@ int crash_load_segments(struct kimage *image) #undef pr_fmt #define pr_fmt(fmt) "crash hp: " fmt -/* These functions provide the value for the sysfs crash_hotplug nodes */ -#ifdef CONFIG_HOTPLUG_CPU -int arch_crash_hotplug_cpu_support(void) +int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags) { - return crash_check_update_elfcorehdr(); -} -#endif -#ifdef CONFIG_MEMORY_HOTPLUG -int arch_crash_hotplug_memory_support(void) -{ - return crash_check_update_elfcorehdr(); -} +#ifdef CONFIG_KEXEC_FILE + if (image->file_mode) + return 1; #endif + /* + * Initially, crash hotplug support for kexec_load was added + * with the KEXEC_UPDATE_ELFCOREHDR flag. Later, this + * functionality was expanded to accommodate multiple kexec + * segment updates, leading to the introduction of the + * KEXEC_CRASH_HOTPLUG_SUPPORT kexec flag bit. Consequently, + * when the kexec tool sends either of these flags, it indicates + * that the required kexec segment (elfcorehdr) is excluded from + * the SHA calculation. 
+ */ + return (kexec_flags & KEXEC_UPDATE_ELFCOREHDR || + kexec_flags & KEXEC_CRASH_HOTPLUG_SUPPORT); +} unsigned int arch_crash_get_elfcorehdr_size(void) { diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 56fba44ba391..c61ecb0c2ae2 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -306,7 +306,7 @@ static ssize_t crash_hotplug_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sysfs_emit(buf, "%d\n", crash_hotplug_cpu_support()); + return sysfs_emit(buf, "%d\n", crash_check_hotplug_support()); } static DEVICE_ATTR_ADMIN_RO(crash_hotplug); #endif diff --git a/drivers/base/memory.c b/drivers/base/memory.c index c0436f46cfb7..67858eeb92ed 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -535,7 +535,7 @@ static DEVICE_ATTR_RW(auto_online_blocks); static ssize_t crash_hotplug_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sysfs_emit(buf, "%d\n", crash_hotplug_memory_support()); + return sysfs_emit(buf, "%d\n", crash_check_hotplug_support()); } static DEVICE_ATTR_RO(crash_hotplug); #endif diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h index 647e928efee8..44305336314e 100644 --- a/include/linux/crash_core.h +++ b/include/linux/crash_core.h @@ -40,14 +40,13 @@ static inline void arch_kexec_unprotect_crashkres(void) { } static inline void arch_crash_handle_hotplug_event(struct kimage *image, void *arg) { } #endif -int crash_check_update_elfcorehdr(void); +int crash_check_hotplug_support(void); -#ifndef crash_hotplug_cpu_support -static inline int crash_hotplug_cpu_support(void) { return 0; } -#endif - -#ifndef crash_hotplug_memory_support -static inline int crash_hotplug_memory_support(void) { return 0; } +#ifndef arch_crash_hotplug_support +static inline int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags) +{ + return 0; +} #endif #ifndef crash_get_elfcorehdr_size diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 060835bb82d5..5b93a5767413 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -319,8 +319,10 @@ struct kimage { /* If set, we are using file mode kexec syscall */ unsigned int file_mode:1; #ifdef CONFIG_CRASH_HOTPLUG - /* If set, allow changes to elfcorehdr of kexec_load'd image */ - unsigned int update_elfcorehdr:1; + /* If set, it is safe to update kexec segments that are + * excluded from SHA calculation. 
+ */ + unsigned int hotplug_support:1; #endif #ifdef ARCH_HAS_KIMAGE_ARCH @@ -391,9 +393,10 @@ bool kexec_load_permitted(int kexec_image_type); /* List of defined/legal kexec flags */ #ifndef CONFIG_KEXEC_JUMP -#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_UPDATE_ELFCOREHDR) +#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_UPDATE_ELFCOREHDR | KEXEC_CRASH_HOTPLUG_SUPPORT) #else -#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT | KEXEC_UPDATE_ELFCOREHDR) +#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT | KEXEC_UPDATE_ELFCOREHDR | \ + KEXEC_CRASH_HOTPLUG_SUPPORT) #endif /* List of defined/legal kexec file flags */ diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index c17bb096ea68..5ae1741ea8ea 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -13,6 +13,7 @@ #define KEXEC_ON_CRASH 0x00000001 #define KEXEC_PRESERVE_CONTEXT 0x00000002 #define KEXEC_UPDATE_ELFCOREHDR 0x00000004 +#define KEXEC_CRASH_HOTPLUG_SUPPORT 0x00000008 #define KEXEC_ARCH_MASK 0xffff0000 /* diff --git a/kernel/crash_core.c b/kernel/crash_core.c index 70fa8111a9d6..394db3ebe835 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -493,10 +493,10 @@ static DEFINE_MUTEX(__crash_hotplug_lock); /* * This routine utilized when the crash_hotplug sysfs node is read. - * It reflects the kernel's ability/permission to update the crash - * elfcorehdr directly. + * It reflects the kernel's ability/permission to update the kdump + * image directly. */ -int crash_check_update_elfcorehdr(void) +int crash_check_hotplug_support(void) { int rc = 0; @@ -508,10 +508,7 @@ int crash_check_update_elfcorehdr(void) return 0; } if (kexec_crash_image) { - if (kexec_crash_image->file_mode) - rc = 1; - else - rc = kexec_crash_image->update_elfcorehdr; + rc = kexec_crash_image->hotplug_support; } /* Release lock now that update complete */ kexec_unlock(); @@ -552,8 +549,8 @@ static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu, image = kexec_crash_image; - /* Check that updating elfcorehdr is permitted */ - if (!(image->file_mode || image->update_elfcorehdr)) + /* Check that kexec segments update is permitted */ + if (!image->hotplug_support) goto out; if (hp_action == KEXEC_CRASH_HP_ADD_CPU || diff --git a/kernel/kexec.c b/kernel/kexec.c index bab542fc1463..a6b3f96bb50c 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -135,8 +135,8 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments, image->preserve_context = 1; #ifdef CONFIG_CRASH_HOTPLUG - if (flags & KEXEC_UPDATE_ELFCOREHDR) - image->update_elfcorehdr = 1; + if ((flags & KEXEC_ON_CRASH) && arch_crash_hotplug_support(image, flags)) + image->hotplug_support = 1; #endif ret = machine_kexec_prepare(image); diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index 2d1db05fbf04..3d64290d24c9 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -376,6 +376,11 @@ SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd, if (ret) goto out; +#ifdef CONFIG_CRASH_HOTPLUG + if ((flags & KEXEC_FILE_ON_CRASH) && arch_crash_hotplug_support(image, flags)) + image->hotplug_support = 1; +#endif + ret = machine_kexec_prepare(image); if (ret) goto out; -- cgit From b7c0e1ecee403a43abc89eb3e75672b01ff2ece9 Mon Sep 17 00:00:00 2001 From: Marco Pagani Date: Fri, 19 Apr 2024 10:35:59 +0200 Subject: fpga: region: add owner module and take its refcount The current implementation of the fpga region assumes that the low-level module registers a driver for the parent device and uses 
its owner pointer to take the module's refcount. This approach is problematic since it can lead to a null pointer dereference while attempting to get the region during programming if the parent device does not have a driver. To address this problem, add a module owner pointer to the fpga_region struct and use it to take the module's refcount. Modify the functions for registering a region to take an additional owner module parameter and rename them to avoid conflicts. Use the old function names for helper macros that automatically set the module that registers the region as the owner. This ensures compatibility with existing low-level control modules and reduces the chances of registering a region without setting the owner. Also, update the documentation to keep it consistent with the new interface for registering an fpga region. Fixes: 0fa20cdfcc1f ("fpga: fpga-region: device tree control for FPGA") Suggested-by: Greg Kroah-Hartman Suggested-by: Xu Yilun Reviewed-by: Russ Weight Signed-off-by: Marco Pagani Acked-by: Xu Yilun Link: https://lore.kernel.org/r/20240419083601.77403-1-marpagan@redhat.com Signed-off-by: Xu Yilun --- Documentation/driver-api/fpga/fpga-region.rst | 13 ++++++++----- drivers/fpga/fpga-region.c | 24 ++++++++++++++---------- include/linux/fpga/fpga-region.h | 13 ++++++++++--- 3 files changed, 32 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/Documentation/driver-api/fpga/fpga-region.rst b/Documentation/driver-api/fpga/fpga-region.rst index dc55d60a0b4a..2d03b5fb7657 100644 --- a/Documentation/driver-api/fpga/fpga-region.rst +++ b/Documentation/driver-api/fpga/fpga-region.rst @@ -46,13 +46,16 @@ API to add a new FPGA region ---------------------------- * struct fpga_region - The FPGA region struct -* struct fpga_region_info - Parameter structure for fpga_region_register_full() -* fpga_region_register_full() - Create and register an FPGA region using the +* struct fpga_region_info - Parameter structure for __fpga_region_register_full() +* __fpga_region_register_full() - Create and register an FPGA region using the fpga_region_info structure to provide the full flexibility of options -* fpga_region_register() - Create and register an FPGA region using standard +* __fpga_region_register() - Create and register an FPGA region using standard arguments * fpga_region_unregister() - Unregister an FPGA region +Helper macros ``fpga_region_register()`` and ``fpga_region_register_full()`` +automatically set the module that registers the FPGA region as the owner. + The FPGA region's probe function will need to get a reference to the FPGA Manager it will be using to do the programming. This usually would happen during the region's probe function. @@ -82,10 +85,10 @@ following APIs to handle building or tearing down that list. :functions: fpga_region_info .. kernel-doc:: drivers/fpga/fpga-region.c - :functions: fpga_region_register_full + :functions: __fpga_region_register_full .. kernel-doc:: drivers/fpga/fpga-region.c - :functions: fpga_region_register + :functions: __fpga_region_register .. 
kernel-doc:: drivers/fpga/fpga-region.c :functions: fpga_region_unregister diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c index b364a929425c..753cd142503e 100644 --- a/drivers/fpga/fpga-region.c +++ b/drivers/fpga/fpga-region.c @@ -53,7 +53,7 @@ static struct fpga_region *fpga_region_get(struct fpga_region *region) } get_device(dev); - if (!try_module_get(dev->parent->driver->owner)) { + if (!try_module_get(region->ops_owner)) { put_device(dev); mutex_unlock(®ion->mutex); return ERR_PTR(-ENODEV); @@ -75,7 +75,7 @@ static void fpga_region_put(struct fpga_region *region) dev_dbg(dev, "put\n"); - module_put(dev->parent->driver->owner); + module_put(region->ops_owner); put_device(dev); mutex_unlock(®ion->mutex); } @@ -181,14 +181,16 @@ static struct attribute *fpga_region_attrs[] = { ATTRIBUTE_GROUPS(fpga_region); /** - * fpga_region_register_full - create and register an FPGA Region device + * __fpga_region_register_full - create and register an FPGA Region device * @parent: device parent * @info: parameters for FPGA Region + * @owner: module containing the get_bridges function * * Return: struct fpga_region or ERR_PTR() */ struct fpga_region * -fpga_region_register_full(struct device *parent, const struct fpga_region_info *info) +__fpga_region_register_full(struct device *parent, const struct fpga_region_info *info, + struct module *owner) { struct fpga_region *region; int id, ret = 0; @@ -213,6 +215,7 @@ fpga_region_register_full(struct device *parent, const struct fpga_region_info * region->compat_id = info->compat_id; region->priv = info->priv; region->get_bridges = info->get_bridges; + region->ops_owner = owner; mutex_init(®ion->mutex); INIT_LIST_HEAD(®ion->bridge_list); @@ -241,13 +244,14 @@ err_free: return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(fpga_region_register_full); +EXPORT_SYMBOL_GPL(__fpga_region_register_full); /** - * fpga_region_register - create and register an FPGA Region device + * __fpga_region_register - create and register an FPGA Region device * @parent: device parent * @mgr: manager that programs this region * @get_bridges: optional function to get bridges to a list + * @owner: module containing the get_bridges function * * This simple version of the register function should be sufficient for most users. * The fpga_region_register_full() function is available for users that need to @@ -256,17 +260,17 @@ EXPORT_SYMBOL_GPL(fpga_region_register_full); * Return: struct fpga_region or ERR_PTR() */ struct fpga_region * -fpga_region_register(struct device *parent, struct fpga_manager *mgr, - int (*get_bridges)(struct fpga_region *)) +__fpga_region_register(struct device *parent, struct fpga_manager *mgr, + int (*get_bridges)(struct fpga_region *), struct module *owner) { struct fpga_region_info info = { 0 }; info.mgr = mgr; info.get_bridges = get_bridges; - return fpga_region_register_full(parent, &info); + return __fpga_region_register_full(parent, &info, owner); } -EXPORT_SYMBOL_GPL(fpga_region_register); +EXPORT_SYMBOL_GPL(__fpga_region_register); /** * fpga_region_unregister - unregister an FPGA region diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h index 9d4d32909340..5fbc05fe70a6 100644 --- a/include/linux/fpga/fpga-region.h +++ b/include/linux/fpga/fpga-region.h @@ -36,6 +36,7 @@ struct fpga_region_info { * @mgr: FPGA manager * @info: FPGA image info * @compat_id: FPGA region id for compatibility check. 
+ * @ops_owner: module containing the get_bridges function * @priv: private data * @get_bridges: optional function to get bridges to a list */ @@ -46,6 +47,7 @@ struct fpga_region { struct fpga_manager *mgr; struct fpga_image_info *info; struct fpga_compat_id *compat_id; + struct module *ops_owner; void *priv; int (*get_bridges)(struct fpga_region *region); }; @@ -58,12 +60,17 @@ fpga_region_class_find(struct device *start, const void *data, int fpga_region_program_fpga(struct fpga_region *region); +#define fpga_region_register_full(parent, info) \ + __fpga_region_register_full(parent, info, THIS_MODULE) struct fpga_region * -fpga_region_register_full(struct device *parent, const struct fpga_region_info *info); +__fpga_region_register_full(struct device *parent, const struct fpga_region_info *info, + struct module *owner); +#define fpga_region_register(parent, mgr, get_bridges) \ + __fpga_region_register(parent, mgr, get_bridges, THIS_MODULE) struct fpga_region * -fpga_region_register(struct device *parent, struct fpga_manager *mgr, - int (*get_bridges)(struct fpga_region *)); +__fpga_region_register(struct device *parent, struct fpga_manager *mgr, + int (*get_bridges)(struct fpga_region *), struct module *owner); void fpga_region_unregister(struct fpga_region *region); #endif /* _FPGA_REGION_H */ -- cgit From ade17653411284e8f7d07279097039f8b84f85d3 Mon Sep 17 00:00:00 2001 From: Chun-Kuang Hu Date: Thu, 22 Feb 2024 15:41:10 +0000 Subject: soc: mediatek: cmdq: Add parameter shift_pa to cmdq_pkt_jump() In original design, cmdq_pkt_jump() call cmdq_get_shift_pa() every time to get shift_pa. But the shift_pa is constant value for each SoC, so client driver just need to call cmdq_get_shift_pa() once and pass shift_pa to cmdq_pkt_jump() to prevent frequent function call. Signed-off-by: Chun-Kuang Hu Link: https://lore.kernel.org/r/20240222154120.16959-3-chunkuang.hu@kernel.org Signed-off-by: AngeloGioacchino Del Regno --- drivers/soc/mediatek/mtk-cmdq-helper.c | 5 ++--- include/linux/soc/mediatek/mtk-cmdq.h | 6 ++++-- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 0b2e5690dacf..3380e56dd69b 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -393,14 +393,13 @@ int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value) } EXPORT_SYMBOL(cmdq_pkt_assign); -int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr) +int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa) { struct cmdq_instruction inst = {}; inst.op = CMDQ_CODE_JUMP; inst.offset = CMDQ_JUMP_ABSOLUTE; - inst.value = addr >> - cmdq_get_shift_pa(((struct cmdq_client *)pkt->cl)->chan); + inst.value = addr >> shift_pa; return cmdq_pkt_append_command(pkt, inst); } EXPORT_SYMBOL(cmdq_pkt_jump); diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 649955d2cf5c..72adfd867cd9 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -253,10 +253,12 @@ int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value); * a physical address which should contains more instruction. * @pkt: the CMDQ packet * @addr: physical address of target instruction buffer + * @shift_pa: shift bits of physical address in CMDQ instruction. This value + * is got by cmdq_get_shift_pa(). 
* * Return: 0 for success; else the error code is returned */ -int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr); +int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa); /** * cmdq_pkt_finalize() - Append EOC and jump command to pkt. @@ -374,7 +376,7 @@ static inline int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value) return -EINVAL; } -static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr) +static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa) { return -EINVAL; } -- cgit From 7218be3b6fcf00d1a3a6867aa1be6b9ab91cce45 Mon Sep 17 00:00:00 2001 From: Chun-Kuang Hu Date: Thu, 22 Feb 2024 15:41:11 +0000 Subject: soc: mediatek: cmdq: Rename cmdq_pkt_jump() to cmdq_pkt_jump_abs() In order to distinguish absolute jump and relative jump, cmdq_pkt_jump() append absolute jump command, so rename it to cmdq_pkt_jump_abs(). Signed-off-by: Chun-Kuang Hu Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20240222154120.16959-4-chunkuang.hu@kernel.org [Angelo: Added temporary wrapper to avoid build breakage] Signed-off-by: AngeloGioacchino Del Regno --- drivers/soc/mediatek/mtk-cmdq-helper.c | 4 ++-- include/linux/soc/mediatek/mtk-cmdq.h | 22 +++++++++++++++++----- 2 files changed, 19 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 3380e56dd69b..38d9077725d2 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -393,7 +393,7 @@ int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value) } EXPORT_SYMBOL(cmdq_pkt_assign); -int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa) +int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa) { struct cmdq_instruction inst = {}; @@ -402,7 +402,7 @@ int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa) inst.value = addr >> shift_pa; return cmdq_pkt_append_command(pkt, inst); } -EXPORT_SYMBOL(cmdq_pkt_jump); +EXPORT_SYMBOL(cmdq_pkt_jump_abs); int cmdq_pkt_finalize(struct cmdq_pkt *pkt) { diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 72adfd867cd9..961f4c521067 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -248,17 +248,24 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value); /** - * cmdq_pkt_jump() - Append jump command to the CMDQ packet, ask GCE - * to execute an instruction that change current thread PC to - * a physical address which should contains more instruction. + * cmdq_pkt_jump_abs() - Append jump command to the CMDQ packet, ask GCE + * to execute an instruction that change current thread + * PC to a absolute physical address which should + * contains more instruction. * @pkt: the CMDQ packet - * @addr: physical address of target instruction buffer + * @addr: absolute physical address of target instruction buffer * @shift_pa: shift bits of physical address in CMDQ instruction. This value * is got by cmdq_get_shift_pa(). 
* * Return: 0 for success; else the error code is returned */ -int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa); +int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa); + +/* This wrapper has to be removed after all users migrated to jump_abs */ +static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa) +{ + return cmdq_pkt_jump_abs(pkt, addr, shift_pa); +} /** * cmdq_pkt_finalize() - Append EOC and jump command to pkt. @@ -376,6 +383,11 @@ static inline int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value) return -EINVAL; } +static inline int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa) +{ + return -EINVAL; +} + static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa) { return -EINVAL; -- cgit From 698cdcb19579f9de005698f9dfb660ab6ec65d32 Mon Sep 17 00:00:00 2001 From: Chun-Kuang Hu Date: Thu, 22 Feb 2024 15:41:12 +0000 Subject: soc: mediatek: cmdq: Add cmdq_pkt_jump_rel() helper function cmdq_pkt_jump_rel() append relative jump command to the packet. Relative jump change PC to the target address with offset from current PC. Signed-off-by: Chun-Kuang Hu Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20240222154120.16959-5-chunkuang.hu@kernel.org Signed-off-by: AngeloGioacchino Del Regno --- drivers/soc/mediatek/mtk-cmdq-helper.c | 10 ++++++++++ include/linux/soc/mediatek/mtk-cmdq.h | 19 +++++++++++++++++++ 2 files changed, 29 insertions(+) (limited to 'include/linux') diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 38d9077725d2..678db09983d4 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -404,6 +404,16 @@ int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa) } EXPORT_SYMBOL(cmdq_pkt_jump_abs); +int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa) +{ + struct cmdq_instruction inst = { {0} }; + + inst.op = CMDQ_CODE_JUMP; + inst.value = (u32)offset >> shift_pa; + return cmdq_pkt_append_command(pkt, inst); +} +EXPORT_SYMBOL(cmdq_pkt_jump_rel); + int cmdq_pkt_finalize(struct cmdq_pkt *pkt) { struct cmdq_instruction inst = { {0} }; diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 961f4c521067..cadb6a157607 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -267,6 +267,20 @@ static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_ return cmdq_pkt_jump_abs(pkt, addr, shift_pa); } +/** + * cmdq_pkt_jump_rel() - Append jump command to the CMDQ packet, ask GCE + * to execute an instruction that change current thread + * PC to a physical address with relative offset. The + * target address should contains more instruction. + * @pkt: the CMDQ packet + * @offset: relative offset of target instruction buffer from current PC. + * @shift_pa: shift bits of physical address in CMDQ instruction. This value + * is got by cmdq_get_shift_pa(). + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa); + /** * cmdq_pkt_finalize() - Append EOC and jump command to pkt. 
* @pkt: the CMDQ packet @@ -393,6 +407,11 @@ static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_ return -EINVAL; } +static inline int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa) +{ + return -EINVAL; +} + static inline int cmdq_pkt_finalize(struct cmdq_pkt *pkt) { return -EINVAL; -- cgit From 3d86ced95d8c09d11c172f542edd40f58a4098fa Mon Sep 17 00:00:00 2001 From: Chun-Kuang Hu Date: Thu, 22 Feb 2024 15:41:13 +0000 Subject: soc: mediatek: cmdq: Add cmdq_pkt_eoc() helper function cmdq_pkt_eoc() append eoc command to CMDQ packet. eoc command would ask GCE to generate IRQ. It's usually appended to the end of packet to notify all command in the packet is done. Reviewed-by: AngeloGioacchino Del Regno Signed-off-by: Chun-Kuang Hu Link: https://lore.kernel.org/r/20240222154120.16959-6-chunkuang.hu@kernel.org Signed-off-by: AngeloGioacchino Del Regno --- drivers/soc/mediatek/mtk-cmdq-helper.c | 10 ++++++++++ include/linux/soc/mediatek/mtk-cmdq.h | 19 +++++++++++++++++++ 2 files changed, 29 insertions(+) (limited to 'include/linux') diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 678db09983d4..766dbafaef62 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -414,6 +414,16 @@ int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa) } EXPORT_SYMBOL(cmdq_pkt_jump_rel); +int cmdq_pkt_eoc(struct cmdq_pkt *pkt) +{ + struct cmdq_instruction inst = { {0} }; + + inst.op = CMDQ_CODE_EOC; + inst.value = CMDQ_EOC_IRQ_EN; + return cmdq_pkt_append_command(pkt, inst); +} +EXPORT_SYMBOL(cmdq_pkt_eoc); + int cmdq_pkt_finalize(struct cmdq_pkt *pkt) { struct cmdq_instruction inst = { {0} }; diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index cadb6a157607..921d631361d2 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -281,6 +281,20 @@ static inline int cmdq_pkt_jump(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_ */ int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_pa); +/** + * cmdq_pkt_eoc() - Append EOC and ask GCE to generate an IRQ at end of execution + * @pkt: The CMDQ packet + * + * Appends an End Of Code (EOC) command to the CMDQ packet and asks the GCE + * to generate an interrupt at the end of the execution of all commands in + * the pipeline. + * The EOC command is usually appended to the end of the pipeline to notify + * that all commands are done. + * + * Return: 0 for success or negative error number + */ +int cmdq_pkt_eoc(struct cmdq_pkt *pkt); + /** * cmdq_pkt_finalize() - Append EOC and jump command to pkt. * @pkt: the CMDQ packet @@ -412,6 +426,11 @@ static inline int cmdq_pkt_jump_rel(struct cmdq_pkt *pkt, s32 offset, u8 shift_p return -EINVAL; } +static inline int cmdq_pkt_eoc(struct cmdq_pkt *pkt) +{ + return -EINVAL; +} + static inline int cmdq_pkt_finalize(struct cmdq_pkt *pkt) { return -EINVAL; -- cgit From 9935af96a72735e8e86aaaf06b031b014109ebc1 Mon Sep 17 00:00:00 2001 From: Chun-Kuang Hu Date: Thu, 22 Feb 2024 15:41:14 +0000 Subject: soc: mediatek: cmdq: Remove cmdq_pkt_flush_async() helper function cmdq_pkt_flush_async() is not used by all client drivers (MediaTek drm driver and MediaTek mdp3 driver), so remove it. 
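For reference, a client that still needs this behaviour can open-code it against the mailbox API. This is a minimal sketch only, not part of the patch; the function name is a placeholder and it simply mirrors the helper body removed in the hunk below, assuming the caller kept the struct cmdq_client it obtained from cmdq_mbox_create():

#include <linux/mailbox_client.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

/* Hedged sketch: open-coded replacement for the removed cmdq_pkt_flush_async(). */
static int example_pkt_flush_async(struct cmdq_client *client, struct cmdq_pkt *pkt)
{
        int err;

        /* Hand the packet to the GCE mailbox channel. */
        err = mbox_send_message(client->chan, pkt);
        if (err < 0)
                return err;

        /* The next packet can be sent immediately, so report txdone right away. */
        mbox_client_txdone(client->chan, 0);

        return 0;
}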
Signed-off-by: Chun-Kuang Hu Link: https://lore.kernel.org/r/20240222154120.16959-7-chunkuang.hu@kernel.org Signed-off-by: AngeloGioacchino Del Regno --- drivers/soc/mediatek/mtk-cmdq-helper.c | 15 --------------- include/linux/soc/mediatek/mtk-cmdq.h | 18 ------------------ 2 files changed, 33 deletions(-) (limited to 'include/linux') diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 766dbafaef62..bbe41302210a 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -446,19 +446,4 @@ int cmdq_pkt_finalize(struct cmdq_pkt *pkt) } EXPORT_SYMBOL(cmdq_pkt_finalize); -int cmdq_pkt_flush_async(struct cmdq_pkt *pkt) -{ - int err; - struct cmdq_client *client = (struct cmdq_client *)pkt->cl; - - err = mbox_send_message(client->chan, pkt); - if (err < 0) - return err; - /* We can send next packet immediately, so just call txdone. */ - mbox_client_txdone(client->chan, 0); - - return 0; -} -EXPORT_SYMBOL(cmdq_pkt_flush_async); - MODULE_LICENSE("GPL v2"); diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 921d631361d2..e79dc8c4ad41 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -303,19 +303,6 @@ int cmdq_pkt_eoc(struct cmdq_pkt *pkt); */ int cmdq_pkt_finalize(struct cmdq_pkt *pkt); -/** - * cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ - * packet and call back at the end of done packet - * @pkt: the CMDQ packet - * - * Return: 0 for success; else the error code is returned - * - * Trigger CMDQ to asynchronously execute the CMDQ packet and call back - * at the end of done packet. Note that this is an ASYNC function. When the - * function returned, it may or may not be finished. - */ -int cmdq_pkt_flush_async(struct cmdq_pkt *pkt); - #else /* IS_ENABLED(CONFIG_MTK_CMDQ) */ static inline int cmdq_dev_get_client_reg(struct device *dev, @@ -436,11 +423,6 @@ static inline int cmdq_pkt_finalize(struct cmdq_pkt *pkt) { return -EINVAL; } -static inline int cmdq_pkt_flush_async(struct cmdq_pkt *pkt) -{ - return -EINVAL; -} - #endif /* IS_ENABLED(CONFIG_MTK_CMDQ) */ #endif /* __MTK_CMDQ_H__ */ -- cgit From b81b2d5534fc72f70d808c72b19f0e3f9df32a90 Mon Sep 17 00:00:00 2001 From: Chun-Kuang Hu Date: Thu, 22 Feb 2024 15:41:15 +0000 Subject: soc: mediatek: cmdq: Refine cmdq_pkt_create() and cmdq_pkt_destroy() cmdq_pkt_create() and cmdq_pkt_destroy() are not suitable for client drivers, so each client driver has had to implement its own version. This refinement passes a struct cmdq_pkt pointer into cmdq_pkt_create(). In addition, the client driver has the struct cmdq_client information, so it's not necessary to store this information in struct cmdq_pkt. After this refinement, client drivers can use these helper functions instead of implementing their own versions.
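With the new prototypes (see the hunks below), the packet lifetime is owned by the caller. A minimal usage sketch follows; it is not taken from the patch, and the function name and buffer size are placeholders:

/*
 * Hedged sketch: a client driver builds a packet in a caller-owned
 * struct cmdq_pkt and releases it with the matching destroy call.
 */
static int example_build_pkt(struct cmdq_client *client)
{
        struct cmdq_pkt pkt;
        int ret;

        ret = cmdq_pkt_create(client, &pkt, PAGE_SIZE);
        if (ret)
                return ret;

        /* ... append commands to &pkt and hand it to the mailbox here ... */

        cmdq_pkt_destroy(client, &pkt);
        return 0;
}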
Signed-off-by: Chun-Kuang Hu Link: https://lore.kernel.org/r/20240222154120.16959-8-chunkuang.hu@kernel.org Signed-off-by: AngeloGioacchino Del Regno --- drivers/soc/mediatek/mtk-cmdq-helper.c | 24 +++++++----------------- include/linux/soc/mediatek/mtk-cmdq.h | 14 ++++++++------ 2 files changed, 15 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index bbe41302210a..1a1f1b260a06 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -106,22 +106,16 @@ void cmdq_mbox_destroy(struct cmdq_client *client) } EXPORT_SYMBOL(cmdq_mbox_destroy); -struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size) +int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size) { - struct cmdq_pkt *pkt; struct device *dev; dma_addr_t dma_addr; - pkt = kzalloc(sizeof(*pkt), GFP_KERNEL); - if (!pkt) - return ERR_PTR(-ENOMEM); pkt->va_base = kzalloc(size, GFP_KERNEL); - if (!pkt->va_base) { - kfree(pkt); - return ERR_PTR(-ENOMEM); - } + if (!pkt->va_base) + return -ENOMEM; + pkt->buf_size = size; - pkt->cl = (void *)client; dev = client->chan->mbox->dev; dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size, @@ -129,24 +123,20 @@ struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size) if (dma_mapping_error(dev, dma_addr)) { dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size); kfree(pkt->va_base); - kfree(pkt); - return ERR_PTR(-ENOMEM); + return -ENOMEM; } pkt->pa_base = dma_addr; - return pkt; + return 0; } EXPORT_SYMBOL(cmdq_pkt_create); -void cmdq_pkt_destroy(struct cmdq_pkt *pkt) +void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt) { - struct cmdq_client *client = (struct cmdq_client *)pkt->cl; - dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size, DMA_TO_DEVICE); kfree(pkt->va_base); - kfree(pkt); } EXPORT_SYMBOL(cmdq_pkt_destroy); diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index e79dc8c4ad41..e5335fb22a3b 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -62,17 +62,19 @@ void cmdq_mbox_destroy(struct cmdq_client *client); /** * cmdq_pkt_create() - create a CMDQ packet * @client: the CMDQ mailbox client + * @pkt: the CMDQ packet * @size: required CMDQ buffer size * - * Return: CMDQ packet pointer + * Return: 0 for success; else the error code is returned */ -struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size); +int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size); /** * cmdq_pkt_destroy() - destroy the CMDQ packet + * @client: the CMDQ mailbox client * @pkt: the CMDQ packet */ -void cmdq_pkt_destroy(struct cmdq_pkt *pkt); +void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt); /** * cmdq_pkt_write() - append write command to the CMDQ packet @@ -318,12 +320,12 @@ static inline struct cmdq_client *cmdq_mbox_create(struct device *dev, int index static inline void cmdq_mbox_destroy(struct cmdq_client *client) { } -static inline struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size) +static inline int cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt, size_t size) { - return ERR_PTR(-EINVAL); + return -EINVAL; } -static inline void cmdq_pkt_destroy(struct cmdq_pkt *pkt) { } +static inline void cmdq_pkt_destroy(struct cmdq_client *client, struct cmdq_pkt *pkt) { } 
static inline int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value) { -- cgit From 49ddaa495f8b60c8c4022ebe540024dd870e4dd1 Mon Sep 17 00:00:00 2001 From: "Jason-JH.Lin" Date: Thu, 7 Mar 2024 09:34:55 +0800 Subject: soc: mediatek: mtk-cmdq: Add specific purpose register definitions for GCE Add specific purpose register definitions for GCE, so CMDQ users can use them as a buffer to store data. Signed-off-by: Jason-JH.Lin Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20240307013458.23550-2-jason-jh.lin@mediatek.com Signed-off-by: AngeloGioacchino Del Regno --- include/linux/soc/mediatek/mtk-cmdq.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include/linux') diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index e5335fb22a3b..042c61f29dc1 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -14,6 +14,15 @@ #define CMDQ_ADDR_HIGH(addr) ((u32)(((addr) >> 16) & GENMASK(31, 0))) #define CMDQ_ADDR_LOW(addr) ((u16)(addr) | BIT(1)) +/* + * Every cmdq thread has its own SPRs (Specific Purpose Registers), + * so there are 4 * N (threads) SPRs in GCE that shares the same indexes below. + */ +#define CMDQ_THR_SPR_IDX0 (0) +#define CMDQ_THR_SPR_IDX1 (1) +#define CMDQ_THR_SPR_IDX2 (2) +#define CMDQ_THR_SPR_IDX3 (3) + struct cmdq_pkt; struct cmdq_client_reg { -- cgit From 263801f8e5a159cff2dfbc2a1d9cf9f1afecfa81 Mon Sep 17 00:00:00 2001 From: "Jason-JH.Lin" Date: Thu, 7 Mar 2024 09:34:56 +0800 Subject: soc: mediatek: mtk-cmdq: Add cmdq_pkt_mem_move() function Add cmdq_pkt_mem_move() function to support CMDQ user making an instruction for moving a value from a source address to a destination address. Signed-off-by: Jason-JH.Lin Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20240307013458.23550-3-jason-jh.lin@mediatek.com Signed-off-by: AngeloGioacchino Del Regno --- drivers/soc/mediatek/mtk-cmdq-helper.c | 26 ++++++++++++++++++++++++++ include/linux/soc/mediatek/mtk-cmdq.h | 12 ++++++++++++ 2 files changed, 38 insertions(+) (limited to 'include/linux') diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 1a1f1b260a06..4cd1a25866a9 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -290,6 +290,32 @@ int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx, } EXPORT_SYMBOL(cmdq_pkt_write_s_mask_value); +int cmdq_pkt_mem_move(struct cmdq_pkt *pkt, dma_addr_t src_addr, dma_addr_t dst_addr) +{ + const u16 high_addr_reg_idx = CMDQ_THR_SPR_IDX0; + const u16 value_reg_idx = CMDQ_THR_SPR_IDX1; + int ret; + + /* read the value of src_addr into high_addr_reg_idx */ + ret = cmdq_pkt_assign(pkt, high_addr_reg_idx, CMDQ_ADDR_HIGH(src_addr)); + if (ret < 0) + return ret; + ret = cmdq_pkt_read_s(pkt, high_addr_reg_idx, CMDQ_ADDR_LOW(src_addr), value_reg_idx); + if (ret < 0) + return ret; + + /* write the value of value_reg_idx into dst_addr */ + ret = cmdq_pkt_assign(pkt, high_addr_reg_idx, CMDQ_ADDR_HIGH(dst_addr)); + if (ret < 0) + return ret; + ret = cmdq_pkt_write_s(pkt, high_addr_reg_idx, CMDQ_ADDR_LOW(dst_addr), value_reg_idx); + if (ret < 0) + return ret; + + return 0; +} +EXPORT_SYMBOL(cmdq_pkt_mem_move); + int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear) { struct cmdq_instruction inst = { {0} }; diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 042c61f29dc1..167c8fecac30 
100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -184,6 +184,18 @@ int cmdq_pkt_write_s_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx, int cmdq_pkt_write_s_mask_value(struct cmdq_pkt *pkt, u8 high_addr_reg_idx, u16 addr_low, u32 value, u32 mask); +/** + * cmdq_pkt_mem_move() - append memory move command to the CMDQ packet + * @pkt: the CMDQ packet + * @src_addr: source address + * @dst_addr: destination address + * + * Appends a CMDQ command to copy the value found in `src_addr` to `dst_addr`. + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_mem_move(struct cmdq_pkt *pkt, dma_addr_t src_addr, dma_addr_t dst_addr); + /** * cmdq_pkt_wfe() - append wait for event command to the CMDQ packet * @pkt: the CMDQ packet -- cgit From 400e2fa8058e7032974a892c7336b3ea20e24cfd Mon Sep 17 00:00:00 2001 From: "Jason-JH.Lin" Date: Thu, 7 Mar 2024 09:34:57 +0800 Subject: soc: mediatek: mtk-cmdq: Add cmdq_pkt_poll_addr() function Add cmdq_pkt_poll_addr() function to support CMDQ users making an instruction for polling a specific hardware register address to check the value with or without a mask. POLL is a legacy operation in GCE, so it does not support SPR and CMDQ_CODE_LOGIC. To support polling a register address which doesn't have a subsys id, CMDQ users need to make an instruction with GPR and CMDQ_CODE_MASK operation to move the register address to be polled into the GPR. Then users can make a POLL instruction with the GPR to poll the register address assigned in the previous instruction. Signed-off-by: Jason-JH.Lin Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20240307013458.23550-4-jason-jh.lin@mediatek.com Signed-off-by: AngeloGioacchino Del Regno --- drivers/soc/mediatek/mtk-cmdq-helper.c | 49 ++++++++++++++++++++++++++++++++++ include/linux/soc/mediatek/mtk-cmdq.h | 21 +++++++++++++++ 2 files changed, 70 insertions(+) (limited to 'include/linux') diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index 4cd1a25866a9..ee55992f6491 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -12,6 +12,8 @@ #define CMDQ_WRITE_ENABLE_MASK BIT(0) #define CMDQ_POLL_ENABLE_MASK BIT(0) +/* dedicate the last GPR_R15 to assign the register address to be poll */ +#define CMDQ_POLL_ADDR_GPR (15) #define CMDQ_EOC_IRQ_EN BIT(0) #define CMDQ_REG_TYPE 1 #define CMDQ_JUMP_RELATIVE 0 @@ -397,6 +399,53 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, } EXPORT_SYMBOL(cmdq_pkt_poll_mask); +int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask) +{ + struct cmdq_instruction inst = { {0} }; + u8 use_mask = 0; + int ret; + + /* + * Append an MASK instruction to set the mask for following POLL instruction + * which enables use_mask bit. + */ + if (mask != GENMASK(31, 0)) { + inst.op = CMDQ_CODE_MASK; + inst.mask = ~mask; + ret = cmdq_pkt_append_command(pkt, inst); + if (ret < 0) + return ret; + use_mask = CMDQ_POLL_ENABLE_MASK; + } + + /* + * POLL is an legacy operation in GCE and it does not support SPR and CMDQ_CODE_LOGIC, + * so it can not use cmdq_pkt_assign to keep polling register address to SPR. + * If user wants to poll a register address which doesn't have a subsys id, + * user needs to use GPR and CMDQ_CODE_MASK to move polling register address to GPR.
+ */ + inst.op = CMDQ_CODE_MASK; + inst.dst_t = CMDQ_REG_TYPE; + inst.sop = CMDQ_POLL_ADDR_GPR; + inst.value = addr; + ret = cmdq_pkt_append_command(pkt, inst); + if (ret < 0) + return ret; + + /* Append POLL instruction to poll the register address assign to GPR previously. */ + inst.op = CMDQ_CODE_POLL; + inst.dst_t = CMDQ_REG_TYPE; + inst.sop = CMDQ_POLL_ADDR_GPR; + inst.offset = use_mask; + inst.value = value; + ret = cmdq_pkt_append_command(pkt, inst); + if (ret < 0) + return ret; + + return 0; +} +EXPORT_SYMBOL(cmdq_pkt_poll_addr); + int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value) { struct cmdq_instruction inst = {}; diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index 167c8fecac30..f59f42e46ef1 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -270,6 +270,22 @@ int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys, */ int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value); +/** + * cmdq_pkt_poll_addr() - Append blocking POLL command to CMDQ packet + * @pkt: the CMDQ packet + * @addr: the hardware register address + * @value: the specified target register value + * @mask: the specified target register mask + * + * Appends a polling (POLL) command to the CMDQ packet and asks the GCE + * to execute an instruction that checks for the specified `value` (with + * or without `mask`) to appear in the specified hardware register `addr`. + * All GCE threads will be blocked by this instruction. + * + * Return: 0 for success or negative error code + */ +int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask); + /** * cmdq_pkt_jump_abs() - Append jump command to the CMDQ packet, ask GCE * to execute an instruction that change current thread @@ -421,6 +437,11 @@ static inline int cmdq_pkt_assign(struct cmdq_pkt *pkt, u16 reg_idx, u32 value) return -EINVAL; } +static inline int cmdq_pkt_poll_addr(struct cmdq_pkt *pkt, dma_addr_t addr, u32 value, u32 mask) +{ + return -EINVAL; +} + static inline int cmdq_pkt_jump_abs(struct cmdq_pkt *pkt, dma_addr_t addr, u8 shift_pa) { return -EINVAL; -- cgit From 69ff68332dc5005539ac37c5c85444aaaec7c914 Mon Sep 17 00:00:00 2001 From: "Jason-JH.Lin" Date: Thu, 7 Mar 2024 09:34:58 +0800 Subject: soc: mediatek: mtk-cmdq: Add cmdq_pkt_acquire_event() function Add cmdq_pkt_acquire_event() function to support CMDQ user making an instruction for acquiring event. CMDQ users can use cmdq_pkt_acquire_event() as `mutex_lock` and cmdq_pkt_clear_event() as `mutex_unlock` to protect the global resource modified instructions between them. cmdq_pkt_acquire_event() would wait for event to be cleared. After event is cleared by cmdq_pkt_clear_event() in other GCE threads, cmdq_pkt_acquire_event() would set event and keep executing next instruction. So the mutex would work like this: cmdq_pkt_acquire_event() /* mutex lock */ /* critical secton instructions that modified global resource */ cmdq_pkt_clear_event() /* mutex unlock */ Prevent the critical section instructions from being affected by other GCE threads. 
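A minimal sketch of the pairing described above, not taken from the patch; the event number and the register triple passed to cmdq_pkt_write() are placeholders chosen by the client driver:

/*
 * Hedged sketch: serialize updates of a shared resource across GCE
 * threads by bracketing them with acquire/clear of a software event.
 */
static int example_locked_write(struct cmdq_pkt *pkt, u16 lock_event,
                                u8 subsys, u16 offset, u32 value)
{
        int ret;

        /* mutex lock: waits until the event is clear, then sets it */
        ret = cmdq_pkt_acquire_event(pkt, lock_event);
        if (ret)
                return ret;

        /* critical section: instructions that modify the shared resource */
        ret = cmdq_pkt_write(pkt, subsys, offset, value);
        if (ret)
                return ret;

        /* mutex unlock */
        return cmdq_pkt_clear_event(pkt, lock_event);
}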
Signed-off-by: Jason-JH.Lin Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20240307013458.23550-5-jason-jh.lin@mediatek.com Signed-off-by: AngeloGioacchino Del Regno --- drivers/soc/mediatek/mtk-cmdq-helper.c | 15 +++++++++++++++ include/linux/soc/mediatek/mtk-cmdq.h | 15 +++++++++++++++ 2 files changed, 30 insertions(+) (limited to 'include/linux') diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c index ee55992f6491..d1ca4b21587e 100644 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c @@ -334,6 +334,21 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear) } EXPORT_SYMBOL(cmdq_pkt_wfe); +int cmdq_pkt_acquire_event(struct cmdq_pkt *pkt, u16 event) +{ + struct cmdq_instruction inst = {}; + + if (event >= CMDQ_MAX_EVENT) + return -EINVAL; + + inst.op = CMDQ_CODE_WFE; + inst.value = CMDQ_WFE_UPDATE | CMDQ_WFE_UPDATE_VALUE | CMDQ_WFE_WAIT; + inst.event = event; + + return cmdq_pkt_append_command(pkt, inst); +} +EXPORT_SYMBOL(cmdq_pkt_acquire_event); + int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event) { struct cmdq_instruction inst = { {0} }; diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h index f59f42e46ef1..d4a8e34505e6 100644 --- a/include/linux/soc/mediatek/mtk-cmdq.h +++ b/include/linux/soc/mediatek/mtk-cmdq.h @@ -206,6 +206,21 @@ int cmdq_pkt_mem_move(struct cmdq_pkt *pkt, dma_addr_t src_addr, dma_addr_t dst_ */ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event, bool clear); +/** + * cmdq_pkt_acquire_event() - append acquire event command to the CMDQ packet + * @pkt: the CMDQ packet + * @event: the desired event to be acquired + * + * User can use cmdq_pkt_acquire_event() as `mutex_lock` and cmdq_pkt_clear_event() + * as `mutex_unlock` to protect some `critical section` instructions between them. + * cmdq_pkt_acquire_event() would wait for event to be cleared. + * After event is cleared by cmdq_pkt_clear_event in other GCE threads, + * cmdq_pkt_acquire_event() would set event and keep executing next instruction. + * + * Return: 0 for success; else the error code is returned + */ +int cmdq_pkt_acquire_event(struct cmdq_pkt *pkt, u16 event); + /** * cmdq_pkt_clear_event() - append clear event command to the CMDQ packet * @pkt: the CMDQ packet -- cgit From a8f59e5a5deaf3e99a8b7252e96cee9af67858a9 Mon Sep 17 00:00:00 2001 From: Damien Le Moal Date: Sat, 20 Apr 2024 16:58:11 +0900 Subject: block: use a per disk workqueue for zone write plugging A zone write plug BIO work function blk_zone_wplug_bio_work() calls submit_bio_noacct_nocheck() to execute the next unplugged BIO. This function may block. So executing zone plug BIO work items using the block layer global kblockd workqueue can potentially lead to performance or latency issues as the number of concurrent work items for a workqueue is limited to WQ_DFL_ACTIVE (256). 1) For a system with a large number of zoned disks, issuing write requests to otherwise unused zones may be delayed waiting for a work thread to become available. 2) Requeue operations which use kblockd but are independent of zone write plugging may also end up being delayed. To avoid these potential performance issues, create a workqueue per zoned device to execute zone plug BIO work. The workqueue max active parameter is set to the maximum number of zone write plugs allocated with the zone write plug mempool.
This limit is equal to the maximum number of open zones of the disk and defaults to 128 for disks that do not have a limit on the number of open zones. Fixes: dd291d77cc90 ("block: Introduce zone write plugging") Signed-off-by: Damien Le Moal Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20240420075811.1276893-3-dlemoal@kernel.org Signed-off-by: Jens Axboe --- block/blk-zoned.c | 32 ++++++++++++++++++++++++-------- include/linux/blkdev.h | 1 + 2 files changed, 25 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 3befebe6b319..3a796420f240 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -1129,7 +1129,7 @@ static void disk_zone_wplug_unplug_bio(struct gendisk *disk, /* Schedule submission of the next plugged BIO if we have one. */ if (!bio_list_empty(&zwplug->bio_list)) { spin_unlock_irqrestore(&zwplug->lock, flags); - kblockd_schedule_work(&zwplug->bio_work); + queue_work(disk->zone_wplugs_wq, &zwplug->bio_work); return; } @@ -1332,7 +1332,7 @@ static void disk_zone_wplug_handle_error(struct gendisk *disk, /* Restart BIO submission if we still have any BIO left. */ if (!bio_list_empty(&zwplug->bio_list)) { WARN_ON_ONCE(!(zwplug->flags & BLK_ZONE_WPLUG_PLUGGED)); - kblockd_schedule_work(&zwplug->bio_work); + queue_work(disk->zone_wplugs_wq, &zwplug->bio_work); goto unlock; } @@ -1409,14 +1409,25 @@ static int disk_alloc_zone_resources(struct gendisk *disk, disk->zone_wplugs_pool = mempool_create_kmalloc_pool(pool_size, sizeof(struct blk_zone_wplug)); - if (!disk->zone_wplugs_pool) { - kfree(disk->zone_wplugs_hash); - disk->zone_wplugs_hash = NULL; - disk->zone_wplugs_hash_bits = 0; - return -ENOMEM; - } + if (!disk->zone_wplugs_pool) + goto free_hash; + + disk->zone_wplugs_wq = + alloc_workqueue("%s_zwplugs", WQ_MEM_RECLAIM | WQ_HIGHPRI, + pool_size, disk->disk_name); + if (!disk->zone_wplugs_wq) + goto destroy_pool; return 0; + +destroy_pool: + mempool_destroy(disk->zone_wplugs_pool); + disk->zone_wplugs_pool = NULL; +free_hash: + kfree(disk->zone_wplugs_hash); + disk->zone_wplugs_hash = NULL; + disk->zone_wplugs_hash_bits = 0; + return -ENOMEM; } static void disk_destroy_zone_wplugs_hash_table(struct gendisk *disk) @@ -1447,6 +1458,11 @@ void disk_free_zone_resources(struct gendisk *disk) { cancel_work_sync(&disk->zone_wplugs_work); + if (disk->zone_wplugs_wq) { + destroy_workqueue(disk->zone_wplugs_wq); + disk->zone_wplugs_wq = NULL; + } + disk_destroy_zone_wplugs_hash_table(disk); /* diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1ce8ba08e318..d22eb41a05b8 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -191,6 +191,7 @@ struct gendisk { struct hlist_head *zone_wplugs_hash; struct list_head zone_wplugs_err_list; struct work_struct zone_wplugs_work; + struct workqueue_struct *zone_wplugs_wq; #endif /* CONFIG_BLK_DEV_ZONED */ #if IS_ENABLED(CONFIG_CDROM) -- cgit From 06fe8fd6808562971637c6b133c806bcf49097ad Mon Sep 17 00:00:00 2001 From: Nipun Gupta Date: Tue, 23 Apr 2024 16:40:20 +0530 Subject: genirq/msi: Add MSI allocation helper and export MSI functions MSI functions for allocation and free can be directly used by the device drivers without any wrapper provided by bus drivers. So export these MSI functions. Also, add a wrapper API to allocate MSIs providing only the number of interrupts rather than range for simpler driver usage. 
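A minimal sketch of the new wrapper in use, not taken from the patch; the device pointer and vector count come from the caller, and use of the default per-device MSI domain id is an assumption for the example:

/*
 * Hedged sketch: request 'nvec' MSIs without spelling out the
 * 0..nvec-1 index range that msi_domain_alloc_irqs_range() expects.
 */
static int example_setup_msis(struct device *dev, int nvec)
{
        int ret;

        ret = msi_domain_alloc_irqs(dev, MSI_DEFAULT_DOMAIN, nvec);
        if (ret)
                return ret;

        /* ... request_irq() on the allocated vectors here ... */
        return 0;
}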
Signed-off-by: Nipun Gupta Reviewed-by: Thomas Gleixner Link: https://lore.kernel.org/r/20240423111021.1686144-1-nipun.gupta@amd.com Signed-off-by: Alex Williamson --- include/linux/msi.h | 6 ++++++ kernel/irq/msi.c | 2 ++ 2 files changed, 8 insertions(+) (limited to 'include/linux') diff --git a/include/linux/msi.h b/include/linux/msi.h index 26d07e23052e..765a65581a66 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -676,6 +676,12 @@ int platform_device_msi_init_and_alloc_irqs(struct device *dev, unsigned int nve void platform_device_msi_free_irqs_all(struct device *dev); bool msi_device_has_isolated_msi(struct device *dev); + +static inline int msi_domain_alloc_irqs(struct device *dev, unsigned int domid, int nirqs) +{ + return msi_domain_alloc_irqs_range(dev, domid, 0, nirqs - 1); +} + #else /* CONFIG_GENERIC_MSI_IRQ */ static inline bool msi_device_has_isolated_msi(struct device *dev) { diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c index f90952ebc494..2024f89baea4 100644 --- a/kernel/irq/msi.c +++ b/kernel/irq/msi.c @@ -1434,6 +1434,7 @@ int msi_domain_alloc_irqs_range(struct device *dev, unsigned int domid, msi_unlock_descs(dev); return ret; } +EXPORT_SYMBOL_GPL(msi_domain_alloc_irqs_range); /** * msi_domain_alloc_irqs_all_locked - Allocate all interrupts from a MSI interrupt domain @@ -1680,6 +1681,7 @@ void msi_domain_free_irqs_range(struct device *dev, unsigned int domid, msi_domain_free_irqs_range_locked(dev, domid, first, last); msi_unlock_descs(dev); } +EXPORT_SYMBOL_GPL(msi_domain_free_irqs_all); /** * msi_domain_free_irqs_all_locked - Free all interrupts from a MSI interrupt domain -- cgit From ff33132605c1a0acea59e4c523cb7c6fabe856b2 Mon Sep 17 00:00:00 2001 From: Matti Vaittinen Date: Tue, 23 Apr 2024 14:38:28 +0300 Subject: regulator: change devm_regulator_get_enable_optional() stub to return Ok The devm_regulator_get_enable_optional() should be a 'call and forget' API, meaning, when it is used to enable the regulators, the API does not provide a handle to do any further control of the regulators. It gives no real benefit to return an error from the stub if CONFIG_REGULATOR is not set. On the contrary, returning an error is causing problems to drivers when hardware is such it works out just fine with no regulator control. Returning an error forces drivers to specifically handle the case where CONFIG_REGULATOR is not set, making the mere existence of the stub questionalble. Change the stub implementation for the devm_regulator_get_enable_optional() to return Ok so drivers do not separately handle the case where the CONFIG_REGULATOR is not set. 
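A minimal probe-path sketch, not taken from the patch; the supply name "vdd" and the surrounding driver are placeholders. With the stub now returning 0, the same code behaves identically whether or not CONFIG_REGULATOR is enabled:

/*
 * Hedged sketch: enable an optional supply and forget about it;
 * -ENODEV (supply not described) is still treated as "no regulator, fine".
 */
static int example_probe(struct device *dev)
{
        int ret;

        ret = devm_regulator_get_enable_optional(dev, "vdd");
        if (ret < 0 && ret != -ENODEV)
                return dev_err_probe(dev, ret, "failed to enable vdd supply\n");

        return 0;
}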
Signed-off-by: Matti Vaittinen Fixes: da279e6965b3 ("regulator: Add devm helpers for get and enable") Reviewed-by: Guenter Roeck Link: https://lore.kernel.org/r/ZiedtOE00Zozd3XO@fedora Signed-off-by: Mark Brown --- include/linux/regulator/consumer.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 71232fb7dda3..ed180ca419da 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -326,7 +326,7 @@ static inline int devm_regulator_get_enable(struct device *dev, const char *id) static inline int devm_regulator_get_enable_optional(struct device *dev, const char *id) { - return -ENODEV; + return 0; } static inline struct regulator *__must_check -- cgit From d56b63cf0c0f71e1b2e04dd8220b408f049e67ff Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Sat, 20 Apr 2024 11:09:05 +0200 Subject: bpf: add support for bpf_wq user type Mostly a copy/paste from the bpf_timer API, without the initialization and free, as they will be done in a separate patch. Signed-off-by: Benjamin Tissoires Link: https://lore.kernel.org/r/20240420-bpf_wq-v2-5-6c986a5a741f@kernel.org Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 11 ++++++++++- include/uapi/linux/bpf.h | 4 ++++ kernel/bpf/btf.c | 17 +++++++++++++++++ kernel/bpf/syscall.c | 6 +++++- kernel/bpf/verifier.c | 9 +++++++++ 5 files changed, 45 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 5034c1b4ded7..c7dcfd395555 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -185,7 +185,7 @@ struct bpf_map_ops { enum { /* Support at most 10 fields in a BTF type */ - BTF_FIELDS_MAX = 10, + BTF_FIELDS_MAX = 11, }; enum btf_field_type { @@ -202,6 +202,7 @@ enum btf_field_type { BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE, BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD, BPF_REFCOUNT = (1 << 9), + BPF_WORKQUEUE = (1 << 10), }; typedef void (*btf_dtor_kfunc_t)(void *); @@ -238,6 +239,7 @@ struct btf_record { u32 field_mask; int spin_lock_off; int timer_off; + int wq_off; int refcount_off; struct btf_field fields[]; }; @@ -312,6 +314,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type) return "bpf_spin_lock"; case BPF_TIMER: return "bpf_timer"; + case BPF_WORKQUEUE: + return "bpf_wq"; case BPF_KPTR_UNREF: case BPF_KPTR_REF: return "kptr"; @@ -340,6 +344,8 @@ static inline u32 btf_field_type_size(enum btf_field_type type) return sizeof(struct bpf_spin_lock); case BPF_TIMER: return sizeof(struct bpf_timer); + case BPF_WORKQUEUE: + return sizeof(struct bpf_wq); case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: @@ -367,6 +373,8 @@ static inline u32 btf_field_type_align(enum btf_field_type type) return __alignof__(struct bpf_spin_lock); case BPF_TIMER: return __alignof__(struct bpf_timer); + case BPF_WORKQUEUE: + return __alignof__(struct bpf_wq); case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: @@ -406,6 +414,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr) /* RB_ROOT_CACHED 0-inits, no need to do anything after memset */ case BPF_SPIN_LOCK: case BPF_TIMER: + case BPF_WORKQUEUE: case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index cee0a7915c08..e4ae83550fb3 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -7306,6 +7306,10 @@ struct bpf_timer { 
__u64 __opaque[2]; } __attribute__((aligned(8))); +struct bpf_wq { + __u64 __opaque[2]; +} __attribute__((aligned(8))); + struct bpf_dynptr { __u64 __opaque[2]; } __attribute__((aligned(8))); diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 6d46cee47ae3..8291fbfd27b1 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -3464,6 +3464,15 @@ static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask, goto end; } } + if (field_mask & BPF_WORKQUEUE) { + if (!strcmp(name, "bpf_wq")) { + if (*seen_mask & BPF_WORKQUEUE) + return -E2BIG; + *seen_mask |= BPF_WORKQUEUE; + type = BPF_WORKQUEUE; + goto end; + } + } field_mask_test_name(BPF_LIST_HEAD, "bpf_list_head"); field_mask_test_name(BPF_LIST_NODE, "bpf_list_node"); field_mask_test_name(BPF_RB_ROOT, "bpf_rb_root"); @@ -3515,6 +3524,7 @@ static int btf_find_struct_field(const struct btf *btf, switch (field_type) { case BPF_SPIN_LOCK: case BPF_TIMER: + case BPF_WORKQUEUE: case BPF_LIST_NODE: case BPF_RB_NODE: case BPF_REFCOUNT: @@ -3582,6 +3592,7 @@ static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t, switch (field_type) { case BPF_SPIN_LOCK: case BPF_TIMER: + case BPF_WORKQUEUE: case BPF_LIST_NODE: case BPF_RB_NODE: case BPF_REFCOUNT: @@ -3816,6 +3827,7 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type rec->spin_lock_off = -EINVAL; rec->timer_off = -EINVAL; + rec->wq_off = -EINVAL; rec->refcount_off = -EINVAL; for (i = 0; i < cnt; i++) { field_type_size = btf_field_type_size(info_arr[i].type); @@ -3846,6 +3858,11 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type /* Cache offset for faster lookup at runtime */ rec->timer_off = rec->fields[i].offset; break; + case BPF_WORKQUEUE: + WARN_ON_ONCE(rec->wq_off >= 0); + /* Cache offset for faster lookup at runtime */ + rec->wq_off = rec->fields[i].offset; + break; case BPF_REFCOUNT: WARN_ON_ONCE(rec->refcount_off >= 0); /* Cache offset for faster lookup at runtime */ diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 7d392ec83655..0848e4141b00 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -559,6 +559,7 @@ void btf_record_free(struct btf_record *rec) case BPF_SPIN_LOCK: case BPF_TIMER: case BPF_REFCOUNT: + case BPF_WORKQUEUE: /* Nothing to release */ break; default: @@ -608,6 +609,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec) case BPF_SPIN_LOCK: case BPF_TIMER: case BPF_REFCOUNT: + case BPF_WORKQUEUE: /* Nothing to acquire */ break; default: @@ -679,6 +681,8 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj) case BPF_TIMER: bpf_timer_cancel_and_free(field_ptr); break; + case BPF_WORKQUEUE: + break; case BPF_KPTR_UNREF: WRITE_ONCE(*(u64 *)field_ptr, 0); break; @@ -1085,7 +1089,7 @@ static int map_check_btf(struct bpf_map *map, struct bpf_token *token, map->record = btf_parse_fields(btf, value_type, BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD | - BPF_RB_ROOT | BPF_REFCOUNT, + BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE, map->value_size); if (!IS_ERR_OR_NULL(map->record)) { int i; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 5a7e34e83a5b..89490a95b120 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1838,6 +1838,8 @@ static void mark_ptr_not_null_reg(struct bpf_reg_state *reg) */ if (btf_record_has_field(map->inner_map_meta->record, BPF_TIMER)) reg->map_uid = reg->id; + if (btf_record_has_field(map->inner_map_meta->record, BPF_WORKQUEUE)) + reg->map_uid = reg->id; } else 
if (map->map_type == BPF_MAP_TYPE_XSKMAP) { reg->type = PTR_TO_XDP_SOCK; } else if (map->map_type == BPF_MAP_TYPE_SOCKMAP || @@ -18141,6 +18143,13 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env, } } + if (btf_record_has_field(map->record, BPF_WORKQUEUE)) { + if (is_tracing_prog_type(prog_type)) { + verbose(env, "tracing progs cannot use bpf_wq yet\n"); + return -EINVAL; + } + } + if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) && !bpf_offload_prog_map_match(prog, map)) { verbose(env, "offload device mismatch between prog and map\n"); -- cgit From 246331e3f1eac905170a923f0ec76725c2558232 Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Sat, 20 Apr 2024 11:09:09 +0200 Subject: bpf: allow struct bpf_wq to be embedded in arraymaps and hashmaps Currently bpf_wq_cancel_and_free() is just a placeholder as there is no memory allocation for bpf_wq just yet. Again, duplication of the bpf_timer approach Signed-off-by: Benjamin Tissoires Link: https://lore.kernel.org/r/20240420-bpf_wq-v2-9-6c986a5a741f@kernel.org Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 2 ++ kernel/bpf/arraymap.c | 18 ++++++++++------- kernel/bpf/hashtab.c | 55 ++++++++++++++++++++++++++++++++++++++++----------- kernel/bpf/helpers.c | 8 ++++++++ kernel/bpf/syscall.c | 9 +++++++++ 5 files changed, 73 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c7dcfd395555..64bf0cf3ee95 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -534,6 +534,7 @@ static inline void zero_map_value(struct bpf_map *map, void *dst) void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, bool lock_src); void bpf_timer_cancel_and_free(void *timer); +void bpf_wq_cancel_and_free(void *timer); void bpf_list_head_free(const struct btf_field *field, void *list_head, struct bpf_spin_lock *spin_lock); void bpf_rb_root_free(const struct btf_field *field, void *rb_root, @@ -2204,6 +2205,7 @@ void bpf_map_free_record(struct bpf_map *map); struct btf_record *btf_record_dup(const struct btf_record *rec); bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b); void bpf_obj_free_timer(const struct btf_record *rec, void *obj); +void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj); void bpf_obj_free_fields(const struct btf_record *rec, void *obj); void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu); diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 8c1e6d7654bb..580d07b15471 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -428,17 +428,21 @@ static void *array_map_vmalloc_addr(struct bpf_array *array) return (void *)round_down((unsigned long)array, PAGE_SIZE); } -static void array_map_free_timers(struct bpf_map *map) +static void array_map_free_timers_wq(struct bpf_map *map) { struct bpf_array *array = container_of(map, struct bpf_array, map); int i; - /* We don't reset or free fields other than timer on uref dropping to zero. */ - if (!btf_record_has_field(map->record, BPF_TIMER)) - return; + /* We don't reset or free fields other than timer and workqueue + * on uref dropping to zero. 
+ */ + if (btf_record_has_field(map->record, BPF_TIMER)) + for (i = 0; i < array->map.max_entries; i++) + bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i)); - for (i = 0; i < array->map.max_entries; i++) - bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i)); + if (btf_record_has_field(map->record, BPF_WORKQUEUE)) + for (i = 0; i < array->map.max_entries; i++) + bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i)); } /* Called when map->refcnt goes to zero, either from workqueue or from syscall */ @@ -782,7 +786,7 @@ const struct bpf_map_ops array_map_ops = { .map_alloc = array_map_alloc, .map_free = array_map_free, .map_get_next_key = array_map_get_next_key, - .map_release_uref = array_map_free_timers, + .map_release_uref = array_map_free_timers_wq, .map_lookup_elem = array_map_lookup_elem, .map_update_elem = array_map_update_elem, .map_delete_elem = array_map_delete_elem, diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index c3e79a0b9361..0179183c543a 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -240,6 +240,26 @@ static void htab_free_prealloced_timers(struct bpf_htab *htab) } } +static void htab_free_prealloced_wq(struct bpf_htab *htab) +{ + u32 num_entries = htab->map.max_entries; + int i; + + if (!btf_record_has_field(htab->map.record, BPF_WORKQUEUE)) + return; + if (htab_has_extra_elems(htab)) + num_entries += num_possible_cpus(); + + for (i = 0; i < num_entries; i++) { + struct htab_elem *elem; + + elem = get_htab_elem(htab, i); + bpf_obj_free_workqueue(htab->map.record, + elem->key + round_up(htab->map.key_size, 8)); + cond_resched(); + } +} + static void htab_free_prealloced_fields(struct bpf_htab *htab) { u32 num_entries = htab->map.max_entries; @@ -1495,7 +1515,7 @@ static void delete_all_elements(struct bpf_htab *htab) migrate_enable(); } -static void htab_free_malloced_timers(struct bpf_htab *htab) +static void htab_free_malloced_timers_or_wq(struct bpf_htab *htab, bool is_timer) { int i; @@ -1507,24 +1527,35 @@ static void htab_free_malloced_timers(struct bpf_htab *htab) hlist_nulls_for_each_entry(l, n, head, hash_node) { /* We only free timer on uref dropping to zero */ - bpf_obj_free_timer(htab->map.record, l->key + round_up(htab->map.key_size, 8)); + if (is_timer) + bpf_obj_free_timer(htab->map.record, + l->key + round_up(htab->map.key_size, 8)); + else + bpf_obj_free_workqueue(htab->map.record, + l->key + round_up(htab->map.key_size, 8)); } cond_resched_rcu(); } rcu_read_unlock(); } -static void htab_map_free_timers(struct bpf_map *map) +static void htab_map_free_timers_and_wq(struct bpf_map *map) { struct bpf_htab *htab = container_of(map, struct bpf_htab, map); - /* We only free timer on uref dropping to zero */ - if (!btf_record_has_field(htab->map.record, BPF_TIMER)) - return; - if (!htab_is_prealloc(htab)) - htab_free_malloced_timers(htab); - else - htab_free_prealloced_timers(htab); + /* We only free timer and workqueue on uref dropping to zero */ + if (btf_record_has_field(htab->map.record, BPF_TIMER)) { + if (!htab_is_prealloc(htab)) + htab_free_malloced_timers_or_wq(htab, true); + else + htab_free_prealloced_timers(htab); + } + if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE)) { + if (!htab_is_prealloc(htab)) + htab_free_malloced_timers_or_wq(htab, false); + else + htab_free_prealloced_wq(htab); + } } /* Called when map->refcnt goes to zero, either from workqueue or from syscall */ @@ -2260,7 +2291,7 @@ const struct bpf_map_ops htab_map_ops = { .map_alloc = htab_map_alloc, .map_free = 
htab_map_free, .map_get_next_key = htab_map_get_next_key, - .map_release_uref = htab_map_free_timers, + .map_release_uref = htab_map_free_timers_and_wq, .map_lookup_elem = htab_map_lookup_elem, .map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem, .map_update_elem = htab_map_update_elem, @@ -2281,7 +2312,7 @@ const struct bpf_map_ops htab_lru_map_ops = { .map_alloc = htab_map_alloc, .map_free = htab_map_free, .map_get_next_key = htab_map_get_next_key, - .map_release_uref = htab_map_free_timers, + .map_release_uref = htab_map_free_timers_and_wq, .map_lookup_elem = htab_lru_map_lookup_elem, .map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem, .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys, diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 0b8ba6c81994..fe827f6e5234 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1468,6 +1468,14 @@ void bpf_timer_cancel_and_free(void *val) kfree_rcu(t, cb.rcu); } +/* This function is called by map_delete/update_elem for individual element and + * by ops->map_release_uref when the user space reference to a map reaches zero. + */ +void bpf_wq_cancel_and_free(void *val) +{ + BTF_TYPE_EMIT(struct bpf_wq); +} + BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr) { unsigned long *kptr = map_value; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 0848e4141b00..63e368337483 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -661,6 +661,13 @@ void bpf_obj_free_timer(const struct btf_record *rec, void *obj) bpf_timer_cancel_and_free(obj + rec->timer_off); } +void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj) +{ + if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_WORKQUEUE))) + return; + bpf_wq_cancel_and_free(obj + rec->wq_off); +} + void bpf_obj_free_fields(const struct btf_record *rec, void *obj) { const struct btf_field *fields; @@ -682,6 +689,7 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj) bpf_timer_cancel_and_free(field_ptr); break; case BPF_WORKQUEUE: + bpf_wq_cancel_and_free(field_ptr); break; case BPF_KPTR_UNREF: WRITE_ONCE(*(u64 *)field_ptr, 0); @@ -1119,6 +1127,7 @@ static int map_check_btf(struct bpf_map *map, struct bpf_token *token, } break; case BPF_TIMER: + case BPF_WORKQUEUE: if (map->map_type != BPF_MAP_TYPE_HASH && map->map_type != BPF_MAP_TYPE_LRU_HASH && map->map_type != BPF_MAP_TYPE_ARRAY) { -- cgit From 81f1d7a583fa1fa14f0c4e6140d34b5e3d08d227 Mon Sep 17 00:00:00 2001 From: Benjamin Tissoires Date: Sat, 20 Apr 2024 11:09:13 +0200 Subject: bpf: wq: add bpf_wq_set_callback_impl To support sleepable async callbacks, we need to tell push_async_cb() whether the cb is sleepable or not. The verifier now detects that we are in bpf_wq_set_callback_impl and can allow a sleepable callback to happen. Signed-off-by: Benjamin Tissoires Link: https://lore.kernel.org/r/20240420-bpf_wq-v2-13-6c986a5a741f@kernel.org Signed-off-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 1 + kernel/bpf/helpers.c | 15 +++++++++++ kernel/bpf/verifier.c | 60 +++++++++++++++++++++++++++++++++++++++----- 3 files changed, 70 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 36d19cd32eb5..9db35530c878 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -426,6 +426,7 @@ struct bpf_verifier_state { * while they are still in use. 
*/ bool used_as_loop_entry; + bool in_sleepable; /* first and last insn idx of this verifier state */ u32 first_insn_idx; diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 1fc002cfe2b1..dd2eaaf77c93 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2711,6 +2711,20 @@ __bpf_kfunc int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) return __bpf_async_init(async, map, flags, BPF_ASYNC_TYPE_WQ); } +__bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq, + int (callback_fn)(void *map, int *key, struct bpf_wq *wq), + unsigned int flags, + void *aux__ign) +{ + struct bpf_prog_aux *aux = (struct bpf_prog_aux *)aux__ign; + struct bpf_async_kern *async = (struct bpf_async_kern *)wq; + + if (flags) + return -EINVAL; + + return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ); +} + __bpf_kfunc_end_defs(); BTF_KFUNCS_START(generic_btf_ids) @@ -2789,6 +2803,7 @@ BTF_ID_FLAGS(func, bpf_dynptr_size) BTF_ID_FLAGS(func, bpf_dynptr_clone) BTF_ID_FLAGS(func, bpf_modify_return_test_tp) BTF_ID_FLAGS(func, bpf_wq_init) +BTF_ID_FLAGS(func, bpf_wq_set_callback_impl) BTF_KFUNCS_END(common_btf_ids) static const struct btf_kfunc_id_set common_kfunc_set = { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index cc20fda907fb..9715c88cc025 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -501,8 +501,12 @@ static bool is_dynptr_ref_function(enum bpf_func_id func_id) } static bool is_sync_callback_calling_kfunc(u32 btf_id); +static bool is_async_callback_calling_kfunc(u32 btf_id); +static bool is_callback_calling_kfunc(u32 btf_id); static bool is_bpf_throw_kfunc(struct bpf_insn *insn); +static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id); + static bool is_sync_callback_calling_function(enum bpf_func_id func_id) { return func_id == BPF_FUNC_for_each_map_elem || @@ -530,7 +534,8 @@ static bool is_sync_callback_calling_insn(struct bpf_insn *insn) static bool is_async_callback_calling_insn(struct bpf_insn *insn) { - return bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm); + return (bpf_helper_call(insn) && is_async_callback_calling_function(insn->imm)) || + (bpf_pseudo_kfunc_call(insn) && is_async_callback_calling_kfunc(insn->imm)); } static bool is_may_goto_insn(struct bpf_insn *insn) @@ -1429,6 +1434,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, } dst_state->speculative = src->speculative; dst_state->active_rcu_lock = src->active_rcu_lock; + dst_state->in_sleepable = src->in_sleepable; dst_state->curframe = src->curframe; dst_state->active_lock.ptr = src->active_lock.ptr; dst_state->active_lock.id = src->active_lock.id; @@ -2404,7 +2410,7 @@ static void init_func_state(struct bpf_verifier_env *env, /* Similar to push_stack(), but for async callbacks */ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx, - int subprog) + int subprog, bool is_sleepable) { struct bpf_verifier_stack_elem *elem; struct bpf_func_state *frame; @@ -2431,6 +2437,7 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env, * Initialize it similar to do_check_common(). 
*/ elem->st.branches = 1; + elem->st.in_sleepable = is_sleepable; frame = kzalloc(sizeof(*frame), GFP_KERNEL); if (!frame) goto err; @@ -5278,7 +5285,8 @@ bad_type: static bool in_sleepable(struct bpf_verifier_env *env) { - return env->prog->sleepable; + return env->prog->sleepable || + (env->cur_state && env->cur_state->in_sleepable); } /* The non-sleepable programs and sleepable programs with explicit bpf_rcu_read_lock() @@ -9513,7 +9521,7 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins */ env->subprog_info[subprog].is_cb = true; if (bpf_pseudo_kfunc_call(insn) && - !is_sync_callback_calling_kfunc(insn->imm)) { + !is_callback_calling_kfunc(insn->imm)) { verbose(env, "verifier bug: kfunc %s#%d not marked as callback-calling\n", func_id_name(insn->imm), insn->imm); return -EFAULT; @@ -9527,10 +9535,11 @@ static int push_callback_call(struct bpf_verifier_env *env, struct bpf_insn *ins if (is_async_callback_calling_insn(insn)) { struct bpf_verifier_state *async_cb; - /* there is no real recursion here. timer callbacks are async */ + /* there is no real recursion here. timer and workqueue callbacks are async */ env->subprog_info[subprog].is_async_cb = true; async_cb = push_async_cb(env, env->subprog_info[subprog].start, - insn_idx, subprog); + insn_idx, subprog, + is_bpf_wq_set_callback_impl_kfunc(insn->imm)); if (!async_cb) return -EFAULT; callee = async_cb->frame[0]; @@ -11017,6 +11026,7 @@ enum special_kfunc_type { KF_bpf_percpu_obj_new_impl, KF_bpf_percpu_obj_drop_impl, KF_bpf_throw, + KF_bpf_wq_set_callback_impl, KF_bpf_iter_css_task_new, }; @@ -11041,6 +11051,7 @@ BTF_ID(func, bpf_dynptr_clone) BTF_ID(func, bpf_percpu_obj_new_impl) BTF_ID(func, bpf_percpu_obj_drop_impl) BTF_ID(func, bpf_throw) +BTF_ID(func, bpf_wq_set_callback_impl) #ifdef CONFIG_CGROUPS BTF_ID(func, bpf_iter_css_task_new) #endif @@ -11069,6 +11080,7 @@ BTF_ID(func, bpf_dynptr_clone) BTF_ID(func, bpf_percpu_obj_new_impl) BTF_ID(func, bpf_percpu_obj_drop_impl) BTF_ID(func, bpf_throw) +BTF_ID(func, bpf_wq_set_callback_impl) #ifdef CONFIG_CGROUPS BTF_ID(func, bpf_iter_css_task_new) #else @@ -11402,12 +11414,28 @@ static bool is_sync_callback_calling_kfunc(u32 btf_id) return btf_id == special_kfunc_list[KF_bpf_rbtree_add_impl]; } +static bool is_async_callback_calling_kfunc(u32 btf_id) +{ + return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl]; +} + static bool is_bpf_throw_kfunc(struct bpf_insn *insn) { return bpf_pseudo_kfunc_call(insn) && insn->off == 0 && insn->imm == special_kfunc_list[KF_bpf_throw]; } +static bool is_bpf_wq_set_callback_impl_kfunc(u32 btf_id) +{ + return btf_id == special_kfunc_list[KF_bpf_wq_set_callback_impl]; +} + +static bool is_callback_calling_kfunc(u32 btf_id) +{ + return is_sync_callback_calling_kfunc(btf_id) || + is_async_callback_calling_kfunc(btf_id); +} + static bool is_rbtree_lock_required_kfunc(u32 btf_id) { return is_bpf_rbtree_api_kfunc(btf_id); @@ -12219,6 +12247,16 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, } } + if (is_bpf_wq_set_callback_impl_kfunc(meta.func_id)) { + err = push_callback_call(env, insn, insn_idx, meta.subprogno, + set_timer_callback_state); + if (err) { + verbose(env, "kfunc %s#%d failed callback verification\n", + func_name, meta.func_id); + return err; + } + } + rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta); rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta); @@ -16968,6 +17006,9 @@ static bool states_equal(struct bpf_verifier_env *env, if (old->active_rcu_lock != 
cur->active_rcu_lock) return false; + if (old->in_sleepable != cur->in_sleepable) + return false; + /* for states to be equal callsites have to be the same * and all frame states need to be equivalent */ @@ -19639,6 +19680,13 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, desc->func_id == special_kfunc_list[KF_bpf_rdonly_cast]) { insn_buf[0] = BPF_MOV64_REG(BPF_REG_0, BPF_REG_1); *cnt = 1; + } else if (is_bpf_wq_set_callback_impl_kfunc(desc->func_id)) { + struct bpf_insn ld_addrs[2] = { BPF_LD_IMM64(BPF_REG_4, (long)env->prog->aux) }; + + insn_buf[0] = ld_addrs[0]; + insn_buf[1] = ld_addrs[1]; + insn_buf[2] = *insn; + *cnt = 3; } return 0; } -- cgit From 520713a93d550406dae14d49cdb8778d70cecdfd Mon Sep 17 00:00:00 2001 From: Thomas Weißschuh Date: Fri, 15 Mar 2024 19:11:30 +0100 Subject: sysctl: treewide: drop unused argument ctl_table_root::set_ownership(table) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove the 'table' argument from set_ownership as it is never used. This change is a step towards putting "struct ctl_table" into .rodata and eventually having sysctl core only use "const struct ctl_table". The patch was created with the following coccinelle script: @@ identifier func, head, table, uid, gid; @@ void func( struct ctl_table_header *head, - struct ctl_table *table, kuid_t *uid, kgid_t *gid) { ... } No additional occurrences of 'set_ownership' were found after doing a tree-wide search. Reviewed-by: Joel Granados Signed-off-by: Thomas Weißschuh Signed-off-by: Joel Granados --- fs/proc/proc_sysctl.c | 2 +- include/linux/sysctl.h | 1 - ipc/ipc_sysctl.c | 3 +-- ipc/mq_sysctl.c | 3 +-- net/sysctl_net.c | 1 - 5 files changed, 3 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 37cde0efee57..ed3a41ed9705 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -480,7 +480,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb, } if (root->set_ownership) - root->set_ownership(head, table, &inode->i_uid, &inode->i_gid); + root->set_ownership(head, &inode->i_uid, &inode->i_gid); else { inode->i_uid = GLOBAL_ROOT_UID; inode->i_gid = GLOBAL_ROOT_GID; diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index ee7d33b89e9e..60333a6b9370 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -205,7 +205,6 @@ struct ctl_table_root { struct ctl_table_set default_set; struct ctl_table_set *(*lookup)(struct ctl_table_root *root); void (*set_ownership)(struct ctl_table_header *head, - struct ctl_table *table, kuid_t *uid, kgid_t *gid); int (*permissions)(struct ctl_table_header *head, struct ctl_table *table); }; diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c index 45cb1dabce29..1a5085e5b178 100644 --- a/ipc/ipc_sysctl.c +++ b/ipc/ipc_sysctl.c @@ -192,7 +192,6 @@ static int set_is_seen(struct ctl_table_set *set) } static void ipc_set_ownership(struct ctl_table_header *head, - struct ctl_table *table, kuid_t *uid, kgid_t *gid) { struct ipc_namespace *ns = @@ -224,7 +223,7 @@ static int ipc_permissions(struct ctl_table_header *head, struct ctl_table *tabl kuid_t ns_root_uid; kgid_t ns_root_gid; - ipc_set_ownership(head, table, &ns_root_uid, &ns_root_gid); + ipc_set_ownership(head, &ns_root_uid, &ns_root_gid); if (uid_eq(current_euid(), ns_root_uid)) mode >>= 6; diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c index 21fba3a6edaf..6bb1c5397c69 100644 --- a/ipc/mq_sysctl.c +++ b/ipc/mq_sysctl.c @@ -78,7 
+78,6 @@ static int set_is_seen(struct ctl_table_set *set) } static void mq_set_ownership(struct ctl_table_header *head, - struct ctl_table *table, kuid_t *uid, kgid_t *gid) { struct ipc_namespace *ns = @@ -97,7 +96,7 @@ static int mq_permissions(struct ctl_table_header *head, struct ctl_table *table kuid_t ns_root_uid; kgid_t ns_root_gid; - mq_set_ownership(head, table, &ns_root_uid, &ns_root_gid); + mq_set_ownership(head, &ns_root_uid, &ns_root_gid); if (uid_eq(current_euid(), ns_root_uid)) mode >>= 6; diff --git a/net/sysctl_net.c b/net/sysctl_net.c index 051ed5f6fc93..a0a7a79991f9 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c @@ -54,7 +54,6 @@ static int net_ctl_permissions(struct ctl_table_header *head, } static void net_ctl_set_ownership(struct ctl_table_header *head, - struct ctl_table *table, kuid_t *uid, kgid_t *gid) { struct net *net = container_of(head->set, struct net, sysctls); -- cgit From 795f90c6f13c30484ff10355a6775979f57f78cb Mon Sep 17 00:00:00 2001 From: Thomas Weißschuh Date: Fri, 15 Mar 2024 19:11:31 +0100 Subject: sysctl: treewide: constify argument ctl_table_root::permissions(table) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The permissions callback should not modify the ctl_table. Enforce this expectation via the typesystem. This is a step to put "struct ctl_table" into .rodata throughout the kernel. The patch was created with the following coccinelle script: @@ identifier func, head, ctl; @@ int func( struct ctl_table_header *head, - struct ctl_table *ctl) + const struct ctl_table *ctl) { ... } (insert_entry() from fs/proc/proc_sysctl.c is a false-positive) No additional occurrences of '.permissions =' were found after a tree-wide search for places missed by the conccinelle script. Reviewed-by: Joel Granados Signed-off-by: Thomas Weißschuh Signed-off-by: Joel Granados --- include/linux/sysctl.h | 2 +- ipc/ipc_sysctl.c | 2 +- ipc/mq_sysctl.c | 2 +- kernel/ucount.c | 2 +- net/sysctl_net.c | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 60333a6b9370..f9214de0490c 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -206,7 +206,7 @@ struct ctl_table_root { struct ctl_table_set *(*lookup)(struct ctl_table_root *root); void (*set_ownership)(struct ctl_table_header *head, kuid_t *uid, kgid_t *gid); - int (*permissions)(struct ctl_table_header *head, struct ctl_table *table); + int (*permissions)(struct ctl_table_header *head, const struct ctl_table *table); }; #define register_sysctl(path, table) \ diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c index 1a5085e5b178..19b2a67aef40 100644 --- a/ipc/ipc_sysctl.c +++ b/ipc/ipc_sysctl.c @@ -204,7 +204,7 @@ static void ipc_set_ownership(struct ctl_table_header *head, *gid = gid_valid(ns_root_gid) ? ns_root_gid : GLOBAL_ROOT_GID; } -static int ipc_permissions(struct ctl_table_header *head, struct ctl_table *table) +static int ipc_permissions(struct ctl_table_header *head, const struct ctl_table *table) { int mode = table->mode; diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c index 6bb1c5397c69..43c0825da9e8 100644 --- a/ipc/mq_sysctl.c +++ b/ipc/mq_sysctl.c @@ -90,7 +90,7 @@ static void mq_set_ownership(struct ctl_table_header *head, *gid = gid_valid(ns_root_gid) ? 
ns_root_gid : GLOBAL_ROOT_GID; } -static int mq_permissions(struct ctl_table_header *head, struct ctl_table *table) +static int mq_permissions(struct ctl_table_header *head, const struct ctl_table *table) { int mode = table->mode; kuid_t ns_root_uid; diff --git a/kernel/ucount.c b/kernel/ucount.c index e196da0204dc..4d5b9c12c014 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c @@ -38,7 +38,7 @@ static int set_is_seen(struct ctl_table_set *set) } static int set_permissions(struct ctl_table_header *head, - struct ctl_table *table) + const struct ctl_table *table) { struct user_namespace *user_ns = container_of(head->set, struct user_namespace, set); diff --git a/net/sysctl_net.c b/net/sysctl_net.c index a0a7a79991f9..f5017012a049 100644 --- a/net/sysctl_net.c +++ b/net/sysctl_net.c @@ -40,7 +40,7 @@ static int is_seen(struct ctl_table_set *set) /* Return standard mode bits for table entry. */ static int net_ctl_permissions(struct ctl_table_header *head, - struct ctl_table *table) + const struct ctl_table *table) { struct net *net = container_of(head->set, struct net, sysctls); -- cgit From 4a7b29f6509470a7e63dd67e516268fbef226194 Mon Sep 17 00:00:00 2001 From: Thomas Weißschuh Date: Fri, 22 Mar 2024 18:05:57 +0100 Subject: sysctl: move sysctl type to ctl_table_header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move the SYSCTL_TABLE_TYPE_{DEFAULT,PERMANENTLY_EMPTY} enums from ctl_table to ctl_table_header. Removing the mutable member is necessary to constify static instances of struct ctl_table. Move the initialization of the sysctl_mount_point type into init_header() where all the other header fields are also initialized. As a side-effect the memory usage of the sysctl core is reduced. Each ctl_table_header instance can manage multiple ctl_table instances and is only allocated when the table is actually registered. This saves 8 bytes of memory per ctl_table on 64bit, 4 due to the enum field itself and 4 due to padding. 
Signed-off-by: Thomas Weißschuh Signed-off-by: Joel Granados --- fs/proc/proc_sysctl.c | 10 ++++++---- include/linux/sysctl.h | 22 +++++++++++----------- 2 files changed, 17 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 3f8abb6d2ad5..a847b0bc63c4 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -31,7 +31,7 @@ static const struct inode_operations proc_sys_dir_operations; /* Support for permanently empty directories */ static struct ctl_table sysctl_mount_point[] = { - {.type = SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY } + { } }; /** @@ -49,11 +49,11 @@ struct ctl_table_header *register_sysctl_mount_point(const char *path) EXPORT_SYMBOL(register_sysctl_mount_point); #define sysctl_is_perm_empty_ctl_header(hptr) \ - (hptr->ctl_table[0].type == SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY) + (hptr->type == SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY) #define sysctl_set_perm_empty_ctl_header(hptr) \ - (hptr->ctl_table[0].type = SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY) + (hptr->type = SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY) #define sysctl_clear_perm_empty_ctl_header(hptr) \ - (hptr->ctl_table[0].type = SYSCTL_TABLE_TYPE_DEFAULT) + (hptr->type = SYSCTL_TABLE_TYPE_DEFAULT) void proc_sys_poll_notify(struct ctl_table_poll *poll) { @@ -208,6 +208,8 @@ static void init_header(struct ctl_table_header *head, node++; } } + if (table == sysctl_mount_point) + sysctl_set_perm_empty_ctl_header(head); } static void erase_header(struct ctl_table_header *head) diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index f9214de0490c..47bd28ffa88f 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -137,17 +137,6 @@ struct ctl_table { void *data; int maxlen; umode_t mode; - /** - * enum type - Enumeration to differentiate between ctl target types - * @SYSCTL_TABLE_TYPE_DEFAULT: ctl target with no special considerations - * @SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY: Used to identify a permanently - * empty directory target to serve - * as mount point. - */ - enum { - SYSCTL_TABLE_TYPE_DEFAULT, - SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY - } type; proc_handler *proc_handler; /* Callback for text formatting */ struct ctl_table_poll *poll; void *extra1; @@ -188,6 +177,17 @@ struct ctl_table_header { struct ctl_dir *parent; struct ctl_node *node; struct hlist_head inodes; /* head for proc_inode->sysctl_inodes */ + /** + * enum type - Enumeration to differentiate between ctl target types + * @SYSCTL_TABLE_TYPE_DEFAULT: ctl target with no special considerations + * @SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY: Used to identify a permanently + * empty directory target to serve + * as mount point. + */ + enum { + SYSCTL_TABLE_TYPE_DEFAULT, + SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY, + } type; }; struct ctl_dir { -- cgit From 75d659317bb136d849a10a30ffe7e0462c982d29 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Tue, 26 Mar 2024 10:16:12 +0100 Subject: cpufreq: Add a cpufreq pressure feedback for the scheduler Provide to the scheduler a feedback about the temporary max available capacity. Unlike arch_update_thermal_pressure(), this doesn't need to be filtered as the pressure will happen for dozens of ms or more. Signed-off-by: Vincent Guittot Signed-off-by: Ingo Molnar Tested-by: Lukasz Luba Reviewed-by: Qais Yousef Reviewed-by: Lukasz Luba Reviewed-by: Dhruva Gole Acked-by: Rafael J. 
Wysocki Acked-by: Viresh Kumar Link: https://lore.kernel.org/r/20240326091616.3696851-2-vincent.guittot@linaro.org --- drivers/cpufreq/cpufreq.c | 36 ++++++++++++++++++++++++++++++++++++ include/linux/cpufreq.h | 10 ++++++++++ 2 files changed, 46 insertions(+) (limited to 'include/linux') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 66e10a19d76a..1de8bd105934 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2582,6 +2582,40 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu) } EXPORT_SYMBOL(cpufreq_get_policy); +DEFINE_PER_CPU(unsigned long, cpufreq_pressure); + +/** + * cpufreq_update_pressure() - Update cpufreq pressure for CPUs + * @policy: cpufreq policy of the CPUs. + * + * Update the value of cpufreq pressure for all @cpus in the policy. + */ +static void cpufreq_update_pressure(struct cpufreq_policy *policy) +{ + unsigned long max_capacity, capped_freq, pressure; + u32 max_freq; + int cpu; + + cpu = cpumask_first(policy->related_cpus); + max_freq = arch_scale_freq_ref(cpu); + capped_freq = policy->max; + + /* + * Handle properly the boost frequencies, which should simply clean + * the cpufreq pressure value. + */ + if (max_freq <= capped_freq) { + pressure = 0; + } else { + max_capacity = arch_scale_cpu_capacity(cpu); + pressure = max_capacity - + mult_frac(max_capacity, capped_freq, max_freq); + } + + for_each_cpu(cpu, policy->related_cpus) + WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure); +} + /** * cpufreq_set_policy - Modify cpufreq policy parameters. * @policy: Policy object to modify. @@ -2637,6 +2671,8 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H); trace_cpu_frequency_limits(policy); + cpufreq_update_pressure(policy); + policy->cached_target_freq = UINT_MAX; pr_debug("new min and max freqs are %u - %u kHz\n", diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 9956afb9acc2..20f7e98ee8af 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -241,6 +241,12 @@ struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); void cpufreq_enable_fast_switch(struct cpufreq_policy *policy); void cpufreq_disable_fast_switch(struct cpufreq_policy *policy); bool has_target_index(void); + +DECLARE_PER_CPU(unsigned long, cpufreq_pressure); +static inline unsigned long cpufreq_get_pressure(int cpu) +{ + return READ_ONCE(per_cpu(cpufreq_pressure, cpu)); +} #else static inline unsigned int cpufreq_get(unsigned int cpu) { @@ -264,6 +270,10 @@ static inline bool cpufreq_supports_freq_invariance(void) } static inline void disable_cpufreq(void) { } static inline void cpufreq_update_limits(unsigned int cpu) { } +static inline unsigned long cpufreq_get_pressure(int cpu) +{ + return 0; +} #endif #ifdef CONFIG_CPU_FREQ_STAT -- cgit From d4dbc991714eefcbd8d54a3204bd77a0a52bd32d Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Tue, 26 Mar 2024 10:16:15 +0100 Subject: sched/cpufreq: Rename arch_update_thermal_pressure() => arch_update_hw_pressure() Now that cpufreq provides a pressure value to the scheduler, rename arch_update_thermal_pressure into HW pressure to reflect that it returns a pressure applied by HW (i.e. with a high frequency change) and not always related to thermal mitigation but also generated by max current limitation as an example. 
Such high frequency signal needs filtering to be smoothed and provide an value that reflects the average available capacity into the scheduler time scale. Signed-off-by: Vincent Guittot Signed-off-by: Ingo Molnar Tested-by: Lukasz Luba Reviewed-by: Qais Yousef Reviewed-by: Lukasz Luba Link: https://lore.kernel.org/r/20240326091616.3696851-5-vincent.guittot@linaro.org --- arch/arm/include/asm/topology.h | 6 +++--- arch/arm64/include/asm/topology.h | 6 +++--- drivers/base/arch_topology.c | 26 +++++++++++++------------- drivers/cpufreq/qcom-cpufreq-hw.c | 4 ++-- include/linux/arch_topology.h | 8 ++++---- include/linux/sched/topology.h | 8 ++++---- include/trace/events/hw_pressure.h | 29 +++++++++++++++++++++++++++++ include/trace/events/sched.h | 2 +- include/trace/events/thermal_pressure.h | 29 ----------------------------- init/Kconfig | 12 ++++++------ kernel/sched/core.c | 8 ++++---- kernel/sched/fair.c | 16 ++++++++-------- kernel/sched/pelt.c | 18 +++++++++--------- kernel/sched/pelt.h | 16 ++++++++-------- kernel/sched/sched.h | 10 +++++----- 15 files changed, 99 insertions(+), 99 deletions(-) create mode 100644 include/trace/events/hw_pressure.h delete mode 100644 include/trace/events/thermal_pressure.h (limited to 'include/linux') diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index 853c4f81ba4a..ad36b6570067 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h @@ -22,9 +22,9 @@ /* Enable topology flag updates */ #define arch_update_cpu_topology topology_update_cpu_topology -/* Replace task scheduler's default thermal pressure API */ -#define arch_scale_thermal_pressure topology_get_thermal_pressure -#define arch_update_thermal_pressure topology_update_thermal_pressure +/* Replace task scheduler's default HW pressure API */ +#define arch_scale_hw_pressure topology_get_hw_pressure +#define arch_update_hw_pressure topology_update_hw_pressure #else diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index a323b109b9c4..0f6ef432fb84 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -35,9 +35,9 @@ void update_freq_counters_refs(void); /* Enable topology flag updates */ #define arch_update_cpu_topology topology_update_cpu_topology -/* Replace task scheduler's default thermal pressure API */ -#define arch_scale_thermal_pressure topology_get_thermal_pressure -#define arch_update_thermal_pressure topology_update_thermal_pressure +/* Replace task scheduler's default HW pressure API */ +#define arch_scale_hw_pressure topology_get_hw_pressure +#define arch_update_hw_pressure topology_update_hw_pressure #include diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 024b78a0cfc1..0248912ff687 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -22,7 +22,7 @@ #include #define CREATE_TRACE_POINTS -#include +#include static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data); static struct cpumask scale_freq_counters_mask; @@ -160,26 +160,26 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) per_cpu(cpu_scale, cpu) = capacity; } -DEFINE_PER_CPU(unsigned long, thermal_pressure); +DEFINE_PER_CPU(unsigned long, hw_pressure); /** - * topology_update_thermal_pressure() - Update thermal pressure for CPUs + * topology_update_hw_pressure() - Update HW pressure for CPUs * @cpus : The related CPUs for which capacity has been reduced * @capped_freq : The maximum allowed frequency that CPUs can run at * 
- * Update the value of thermal pressure for all @cpus in the mask. The + * Update the value of HW pressure for all @cpus in the mask. The * cpumask should include all (online+offline) affected CPUs, to avoid * operating on stale data when hot-plug is used for some CPUs. The * @capped_freq reflects the currently allowed max CPUs frequency due to - * thermal capping. It might be also a boost frequency value, which is bigger + * HW capping. It might be also a boost frequency value, which is bigger * than the internal 'capacity_freq_ref' max frequency. In such case the * pressure value should simply be removed, since this is an indication that - * there is no thermal throttling. The @capped_freq must be provided in kHz. + * there is no HW throttling. The @capped_freq must be provided in kHz. */ -void topology_update_thermal_pressure(const struct cpumask *cpus, +void topology_update_hw_pressure(const struct cpumask *cpus, unsigned long capped_freq) { - unsigned long max_capacity, capacity, th_pressure; + unsigned long max_capacity, capacity, hw_pressure; u32 max_freq; int cpu; @@ -189,21 +189,21 @@ void topology_update_thermal_pressure(const struct cpumask *cpus, /* * Handle properly the boost frequencies, which should simply clean - * the thermal pressure value. + * the HW pressure value. */ if (max_freq <= capped_freq) capacity = max_capacity; else capacity = mult_frac(max_capacity, capped_freq, max_freq); - th_pressure = max_capacity - capacity; + hw_pressure = max_capacity - capacity; - trace_thermal_pressure_update(cpu, th_pressure); + trace_hw_pressure_update(cpu, hw_pressure); for_each_cpu(cpu, cpus) - WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure); + WRITE_ONCE(per_cpu(hw_pressure, cpu), hw_pressure); } -EXPORT_SYMBOL_GPL(topology_update_thermal_pressure); +EXPORT_SYMBOL_GPL(topology_update_hw_pressure); static ssize_t cpu_capacity_show(struct device *dev, struct device_attribute *attr, diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c index 70b0f21968a0..ec8df5496a0c 100644 --- a/drivers/cpufreq/qcom-cpufreq-hw.c +++ b/drivers/cpufreq/qcom-cpufreq-hw.c @@ -347,8 +347,8 @@ static void qcom_lmh_dcvs_notify(struct qcom_cpufreq_data *data) throttled_freq = freq_hz / HZ_PER_KHZ; - /* Update thermal pressure (the boost frequencies are accepted) */ - arch_update_thermal_pressure(policy->related_cpus, throttled_freq); + /* Update HW pressure (the boost frequencies are accepted) */ + arch_update_hw_pressure(policy->related_cpus, throttled_freq); /* * In the unlikely case policy is unregistered do not enable diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index a63d61ca55af..b721f360d759 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -60,14 +60,14 @@ void topology_scale_freq_tick(void); void topology_set_scale_freq_source(struct scale_freq_data *data, const struct cpumask *cpus); void topology_clear_scale_freq_source(enum scale_freq_source source, const struct cpumask *cpus); -DECLARE_PER_CPU(unsigned long, thermal_pressure); +DECLARE_PER_CPU(unsigned long, hw_pressure); -static inline unsigned long topology_get_thermal_pressure(int cpu) +static inline unsigned long topology_get_hw_pressure(int cpu) { - return per_cpu(thermal_pressure, cpu); + return per_cpu(hw_pressure, cpu); } -void topology_update_thermal_pressure(const struct cpumask *cpus, +void topology_update_hw_pressure(const struct cpumask *cpus, unsigned long capped_freq); struct cpu_topology { diff --git 
a/include/linux/sched/topology.h b/include/linux/sched/topology.h index c8fe9bab981b..4237daa5ac7a 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h @@ -270,17 +270,17 @@ unsigned long arch_scale_cpu_capacity(int cpu) } #endif -#ifndef arch_scale_thermal_pressure +#ifndef arch_scale_hw_pressure static __always_inline -unsigned long arch_scale_thermal_pressure(int cpu) +unsigned long arch_scale_hw_pressure(int cpu) { return 0; } #endif -#ifndef arch_update_thermal_pressure +#ifndef arch_update_hw_pressure static __always_inline -void arch_update_thermal_pressure(const struct cpumask *cpus, +void arch_update_hw_pressure(const struct cpumask *cpus, unsigned long capped_frequency) { } #endif diff --git a/include/trace/events/hw_pressure.h b/include/trace/events/hw_pressure.h new file mode 100644 index 000000000000..b9cd68854128 --- /dev/null +++ b/include/trace/events/hw_pressure.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hw_pressure + +#if !defined(_TRACE_THERMAL_PRESSURE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_THERMAL_PRESSURE_H + +#include + +TRACE_EVENT(hw_pressure_update, + TP_PROTO(int cpu, unsigned long hw_pressure), + TP_ARGS(cpu, hw_pressure), + + TP_STRUCT__entry( + __field(unsigned long, hw_pressure) + __field(int, cpu) + ), + + TP_fast_assign( + __entry->hw_pressure = hw_pressure; + __entry->cpu = cpu; + ), + + TP_printk("cpu=%d hw_pressure=%lu", __entry->cpu, __entry->hw_pressure) +); +#endif /* _TRACE_THERMAL_PRESSURE_H */ + +/* This part must be outside protection */ +#include diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index dbb01b4b7451..d115d64c4011 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -752,7 +752,7 @@ DECLARE_TRACE(pelt_dl_tp, TP_PROTO(struct rq *rq), TP_ARGS(rq)); -DECLARE_TRACE(pelt_thermal_tp, +DECLARE_TRACE(pelt_hw_tp, TP_PROTO(struct rq *rq), TP_ARGS(rq)); diff --git a/include/trace/events/thermal_pressure.h b/include/trace/events/thermal_pressure.h deleted file mode 100644 index b68680201360..000000000000 --- a/include/trace/events/thermal_pressure.h +++ /dev/null @@ -1,29 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM thermal_pressure - -#if !defined(_TRACE_THERMAL_PRESSURE_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_THERMAL_PRESSURE_H - -#include - -TRACE_EVENT(thermal_pressure_update, - TP_PROTO(int cpu, unsigned long thermal_pressure), - TP_ARGS(cpu, thermal_pressure), - - TP_STRUCT__entry( - __field(unsigned long, thermal_pressure) - __field(int, cpu) - ), - - TP_fast_assign( - __entry->thermal_pressure = thermal_pressure; - __entry->cpu = cpu; - ), - - TP_printk("cpu=%d thermal_pressure=%lu", __entry->cpu, __entry->thermal_pressure) -); -#endif /* _TRACE_THERMAL_PRESSURE_H */ - -/* This part must be outside protection */ -#include diff --git a/init/Kconfig b/init/Kconfig index aa02aec6aa7d..f0c9117962ec 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -547,24 +547,24 @@ config HAVE_SCHED_AVG_IRQ depends on IRQ_TIME_ACCOUNTING || PARAVIRT_TIME_ACCOUNTING depends on SMP -config SCHED_THERMAL_PRESSURE +config SCHED_HW_PRESSURE bool default y if ARM && ARM_CPU_TOPOLOGY default y if ARM64 depends on SMP depends on CPU_FREQ_THERMAL help - Select this option to enable thermal pressure accounting in the - scheduler. Thermal pressure is the value conveyed to the scheduler + Select this option to enable HW pressure accounting in the + scheduler. 
HW pressure is the value conveyed to the scheduler that reflects the reduction in CPU compute capacity resulted from - thermal throttling. Thermal throttling occurs when the performance of - a CPU is capped due to high operating temperatures. + HW throttling. HW throttling occurs when the performance of + a CPU is capped due to high operating temperatures as an example. If selected, the scheduler will be able to balance tasks accordingly, i.e. put less load on throttled CPUs than on non/less throttled ones. This requires the architecture to implement - arch_update_thermal_pressure() and arch_scale_thermal_pressure(). + arch_update_hw_pressure() and arch_scale_thermal_pressure(). config BSD_PROCESS_ACCT bool "BSD Process Accounting" diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0621e4ee31de..67a8302c3131 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -108,7 +108,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_rt_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_dl_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp); -EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_thermal_tp); +EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_hw_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(sched_cpu_capacity_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp); EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp); @@ -5668,7 +5668,7 @@ void sched_tick(void) struct rq *rq = cpu_rq(cpu); struct task_struct *curr = rq->curr; struct rq_flags rf; - unsigned long thermal_pressure; + unsigned long hw_pressure; u64 resched_latency; if (housekeeping_cpu(cpu, HK_TYPE_TICK)) @@ -5679,8 +5679,8 @@ void sched_tick(void) rq_lock(rq, &rf); update_rq_clock(rq); - thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); - update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); + hw_pressure = arch_scale_hw_pressure(cpu_of(rq)); + update_hw_load_avg(rq_clock_hw(rq), rq, hw_pressure); curr->sched_class->task_tick(rq, curr, 0); if (sched_feat(LATENCY_WARN)) resched_latency = cpu_resched_latency(rq); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 19199c119829..eef39ae3efcf 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -78,7 +78,7 @@ static unsigned int normalized_sysctl_sched_base_slice = 750000ULL; const_debug unsigned int sysctl_sched_migration_cost = 500000UL; -int sched_thermal_decay_shift; +int sched_hw_decay_shift; static int __init setup_sched_thermal_decay_shift(char *str) { int _shift = 0; @@ -86,7 +86,7 @@ static int __init setup_sched_thermal_decay_shift(char *str) if (kstrtoint(str, 0, &_shift)) pr_warn("Unable to set scheduler thermal pressure decay shift parameter\n"); - sched_thermal_decay_shift = clamp(_shift, 0, 10); + sched_hw_decay_shift = clamp(_shift, 0, 10); return 1; } __setup("sched_thermal_decay_shift=", setup_sched_thermal_decay_shift); @@ -4969,7 +4969,7 @@ static inline unsigned long get_actual_cpu_capacity(int cpu) { unsigned long capacity = arch_scale_cpu_capacity(cpu); - capacity -= max(thermal_load_avg(cpu_rq(cpu)), cpufreq_get_pressure(cpu)); + capacity -= max(hw_load_avg(cpu_rq(cpu)), cpufreq_get_pressure(cpu)); return capacity; } @@ -5002,7 +5002,7 @@ static inline int util_fits_cpu(unsigned long util, * Similarly if a task is capped to arch_scale_cpu_capacity(little_cpu), it * should fit a little cpu even if there's some pressure. * - * Only exception is for thermal pressure since it has a direct impact + * Only exception is for HW or cpufreq pressure since it has a direct impact * on available OPP of the system. 
* * We honour it for uclamp_min only as a drop in performance level @@ -9324,7 +9324,7 @@ static inline bool others_have_blocked(struct rq *rq) if (cpu_util_dl(rq)) return true; - if (thermal_load_avg(rq)) + if (hw_load_avg(rq)) return true; if (cpu_util_irq(rq)) @@ -9354,7 +9354,7 @@ static bool __update_blocked_others(struct rq *rq, bool *done) { const struct sched_class *curr_class; u64 now = rq_clock_pelt(rq); - unsigned long thermal_pressure; + unsigned long hw_pressure; bool decayed; /* @@ -9363,11 +9363,11 @@ static bool __update_blocked_others(struct rq *rq, bool *done) */ curr_class = rq->curr->sched_class; - thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); + hw_pressure = arch_scale_hw_pressure(cpu_of(rq)); decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) | update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) | - update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure) | + update_hw_load_avg(rq_clock_hw(rq), rq, hw_pressure) | update_irq_load_avg(rq, 0); if (others_have_blocked(rq)) diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c index 3a96da25b67c..ef00382de595 100644 --- a/kernel/sched/pelt.c +++ b/kernel/sched/pelt.c @@ -384,30 +384,30 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) return 0; } -#ifdef CONFIG_SCHED_THERMAL_PRESSURE +#ifdef CONFIG_SCHED_HW_PRESSURE /* - * thermal: + * hardware: * * load_sum = \Sum se->avg.load_sum but se->avg.load_sum is not tracked * * util_avg and runnable_load_avg are not supported and meaningless. * * Unlike rt/dl utilization tracking that track time spent by a cpu - * running a rt/dl task through util_avg, the average thermal pressure is - * tracked through load_avg. This is because thermal pressure signal is + * running a rt/dl task through util_avg, the average HW pressure is + * tracked through load_avg. This is because HW pressure signal is * time weighted "delta" capacity unlike util_avg which is binary. * "delta capacity" = actual capacity - - * capped capacity a cpu due to a thermal event. + * capped capacity a cpu due to a HW event. 
*/ -int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) +int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity) { - if (___update_load_sum(now, &rq->avg_thermal, + if (___update_load_sum(now, &rq->avg_hw, capacity, capacity, capacity)) { - ___update_load_avg(&rq->avg_thermal, 1); - trace_pelt_thermal_tp(rq); + ___update_load_avg(&rq->avg_hw, 1); + trace_pelt_hw_tp(rq); return 1; } diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h index 9e1083465fbc..2150062949d4 100644 --- a/kernel/sched/pelt.h +++ b/kernel/sched/pelt.h @@ -7,21 +7,21 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq); int update_rt_rq_load_avg(u64 now, struct rq *rq, int running); int update_dl_rq_load_avg(u64 now, struct rq *rq, int running); -#ifdef CONFIG_SCHED_THERMAL_PRESSURE -int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity); +#ifdef CONFIG_SCHED_HW_PRESSURE +int update_hw_load_avg(u64 now, struct rq *rq, u64 capacity); -static inline u64 thermal_load_avg(struct rq *rq) +static inline u64 hw_load_avg(struct rq *rq) { - return READ_ONCE(rq->avg_thermal.load_avg); + return READ_ONCE(rq->avg_hw.load_avg); } #else static inline int -update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) +update_hw_load_avg(u64 now, struct rq *rq, u64 capacity) { return 0; } -static inline u64 thermal_load_avg(struct rq *rq) +static inline u64 hw_load_avg(struct rq *rq) { return 0; } @@ -202,12 +202,12 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running) } static inline int -update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) +update_hw_load_avg(u64 now, struct rq *rq, u64 capacity) { return 0; } -static inline u64 thermal_load_avg(struct rq *rq) +static inline u64 hw_load_avg(struct rq *rq) { return 0; } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 7c39dbf31f75..993edb02fb0d 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1108,8 +1108,8 @@ struct rq { #ifdef CONFIG_HAVE_SCHED_AVG_IRQ struct sched_avg avg_irq; #endif -#ifdef CONFIG_SCHED_THERMAL_PRESSURE - struct sched_avg avg_thermal; +#ifdef CONFIG_SCHED_HW_PRESSURE + struct sched_avg avg_hw; #endif u64 idle_stamp; u64 avg_idle; @@ -1561,11 +1561,11 @@ static inline u64 rq_clock_task(struct rq *rq) * 3 256 * 4 512 */ -extern int sched_thermal_decay_shift; +extern int sched_hw_decay_shift; -static inline u64 rq_clock_thermal(struct rq *rq) +static inline u64 rq_clock_hw(struct rq *rq) { - return rq_clock_task(rq) >> sched_thermal_decay_shift; + return rq_clock_task(rq) >> sched_hw_decay_shift; } static inline void rq_clock_skip_update(struct rq *rq) -- cgit From c661050f93d3fd37a33c06041bb18a89688de7d2 Mon Sep 17 00:00:00 2001 From: Breno Leitao Date: Mon, 22 Apr 2024 05:38:56 -0700 Subject: net: create a dummy net_device allocator It is impossible to use init_dummy_netdev together with alloc_netdev() as the 'setup' argument. This is because alloc_netdev() initializes some fields in the net_device structure, and later init_dummy_netdev() memzero them all. This causes some problems as reported here: https://lore.kernel.org/all/20240322082336.49f110cc@kernel.org/ Split the init_dummy_netdev() function in two. Create a new function called init_dummy_netdev_core() that does not memzero the net_device structure. Then have init_dummy_netdev() memzero-ing and calling init_dummy_netdev_core(), keeping the old behaviour. init_dummy_netdev_core() is the new function that could be called as an argument for alloc_netdev(). 
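For illustration, with this split the networking core can then create a dummy device as (sketch only, mirroring the helper added below; init_dummy_netdev_core() stays static to net/core/dev.c):

	dev = alloc_netdev(sizeof_priv, "dummy#", NET_NAME_UNKNOWN,
			   init_dummy_netdev_core);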
Also, create a helper to allocate and initialize dummy net devices, leveraging init_dummy_netdev_core() as the setup argument. This function basically simplify the allocation of dummy devices, by allocating and initializing it. Freeing the device continue to be done through free_netdev() Suggested-by: Jakub Kicinski Signed-off-by: Breno Leitao Reviewed-by: Ido Schimmel Signed-off-by: David S. Miller --- include/linux/netdevice.h | 3 +++ net/core/dev.c | 56 ++++++++++++++++++++++++++++++++--------------- 2 files changed, 41 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d45f330d083d..f849e7d110ed 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4519,6 +4519,9 @@ static inline void netif_addr_unlock_bh(struct net_device *dev) void ether_setup(struct net_device *dev); +/* Allocate dummy net_device */ +struct net_device *alloc_netdev_dummy(int sizeof_priv); + /* Support for loadable net-drivers */ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, unsigned char name_assign_type, diff --git a/net/core/dev.c b/net/core/dev.c index 62b39d6b1d8f..e09aa3785c15 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -10420,25 +10420,12 @@ err_free_name: } EXPORT_SYMBOL(register_netdevice); -/** - * init_dummy_netdev - init a dummy network device for NAPI - * @dev: device to init - * - * This takes a network device structure and initializes the minimum - * amount of fields so it can be used to schedule NAPI polls without - * registering a full blown interface. This is to be used by drivers - * that need to tie several hardware interfaces to a single NAPI - * poll scheduler due to HW limitations. +/* Initialize the core of a dummy net device. + * This is useful if you are calling this function after alloc_netdev(), + * since it does not memset the net_device fields. */ -void init_dummy_netdev(struct net_device *dev) +static void init_dummy_netdev_core(struct net_device *dev) { - /* Clear everything. Note we don't initialize spinlocks - * as they aren't supposed to be taken by any of the - * NAPI code and this dummy netdev is supposed to be - * only ever used for NAPI polls - */ - memset(dev, 0, sizeof(struct net_device)); - /* make sure we BUG if trying to hit standard * register/unregister code path */ @@ -10459,8 +10446,28 @@ void init_dummy_netdev(struct net_device *dev) * its refcount. */ } -EXPORT_SYMBOL_GPL(init_dummy_netdev); +/** + * init_dummy_netdev - init a dummy network device for NAPI + * @dev: device to init + * + * This takes a network device structure and initializes the minimum + * amount of fields so it can be used to schedule NAPI polls without + * registering a full blown interface. This is to be used by drivers + * that need to tie several hardware interfaces to a single NAPI + * poll scheduler due to HW limitations. + */ +void init_dummy_netdev(struct net_device *dev) +{ + /* Clear everything. Note we don't initialize spinlocks + * as they aren't supposed to be taken by any of the + * NAPI code and this dummy netdev is supposed to be + * only ever used for NAPI polls + */ + memset(dev, 0, sizeof(struct net_device)); + init_dummy_netdev_core(dev); +} +EXPORT_SYMBOL_GPL(init_dummy_netdev); /** * register_netdev - register a network device @@ -11080,6 +11087,19 @@ void free_netdev(struct net_device *dev) } EXPORT_SYMBOL(free_netdev); +/** + * alloc_netdev_dummy - Allocate and initialize a dummy net device. 
+ * @sizeof_priv: size of private data to allocate space for + * + * Return: the allocated net_device on success, NULL otherwise + */ +struct net_device *alloc_netdev_dummy(int sizeof_priv) +{ + return alloc_netdev(sizeof_priv, "dummy#", NET_NAME_UNKNOWN, + init_dummy_netdev_core); +} +EXPORT_SYMBOL_GPL(alloc_netdev_dummy); + /** * synchronize_net - Synchronize with packet receive processing * -- cgit From e8293395b9caa57e38cc05b9102d4ec7835b0a0f Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Wed, 24 Apr 2024 10:24:20 +0800 Subject: coresight: Remove duplicate linux/amba/bus.h header ./include/linux/coresight.h: linux/amba/bus.h is included more than once. Reported-by: Abaci Robot Closes: https://bugzilla.openanolis.cn/show_bug.cgi?id=8869 Signed-off-by: Jiapeng Chong Reviewed-by: James Clark Signed-off-by: Suzuki K Poulose Link: https://lore.kernel.org/r/20240424022420.58516-1-jiapeng.chong@linux.alibaba.com --- include/linux/coresight.h | 1 - 1 file changed, 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 653f1712eb77..f09ace92176e 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -12,7 +12,6 @@ #include #include #include -#include #include /* Peripheral id registers (0xFD0-0xFEC) */ -- cgit From 0efc5990bca540b8d438fda23db3a72efa733eb0 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 9 Apr 2024 19:31:50 -0700 Subject: string.h: Introduce memtostr() and memtostr_pad() Another ambiguous use of strncpy() is to copy from strings that may not be NUL-terminated. These cases depend on having the destination buffer be explicitly larger than the source buffer's maximum size, having the size of the copy exactly match the source buffer's maximum size, and for the destination buffer to get explicitly NUL terminated. This usually happens when parsing protocols or hardware character arrays that are not guaranteed to be NUL-terminated. The code pattern is effectively this: char dest[sizeof(src) + 1]; strncpy(dest, src, sizeof(src)); dest[sizeof(dest) - 1] = '\0'; In practice it usually looks like: struct from_hardware { ... char name[HW_NAME_SIZE] __nonstring; ... }; struct from_hardware *p = ...; char name[HW_NAME_SIZE + 1]; strncpy(name, p->name, HW_NAME_SIZE); name[NW_NAME_SIZE] = '\0'; This cannot be replaced with: strscpy(name, p->name, sizeof(name)); because p->name is smaller and not NUL-terminated, so FORTIFY will trigger when strnlen(p->name, sizeof(name)) is used. And it cannot be replaced with: strscpy(name, p->name, sizeof(p->name)); because then "name" may contain a 1 character early truncation of p->name. Provide an unambiguous interface for converting a maybe not-NUL-terminated string to a NUL-terminated string, with compile-time buffer size checking so that it can never fail at runtime: memtostr() and memtostr_pad(). Also add KUnit tests for both. 
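With these, the hardware-name example above can be written as (illustrative sketch reusing the same hypothetical struct from_hardware):

	struct from_hardware *p = ...;
	char name[HW_NAME_SIZE + 1];

	memtostr(name, p->name);

memtostr_pad() behaves the same but also zero-fills the remainder of the destination buffer.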
Link: https://lore.kernel.org/r/20240410023155.2100422-1-keescook@chromium.org Signed-off-by: Kees Cook --- include/linux/string.h | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ lib/string_kunit.c | 26 ++++++++++++++++++++++++++ 2 files changed, 75 insertions(+) (limited to 'include/linux') diff --git a/include/linux/string.h b/include/linux/string.h index 9ba8b4597009..86aa6cd35167 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -422,6 +422,55 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count, memcpy(dest, src, strnlen(src, min(_src_len, _dest_len))); \ } while (0) +/** + * memtostr - Copy a possibly non-NUL-term string to a NUL-term string + * @dest: Pointer to destination NUL-terminates string + * @src: Pointer to character array (likely marked as __nonstring) + * + * This is a replacement for strncpy() uses where the source is not + * a NUL-terminated string. + * + * Note that sizes of @dest and @src must be known at compile-time. + */ +#define memtostr(dest, src) do { \ + const size_t _dest_len = __builtin_object_size(dest, 1); \ + const size_t _src_len = __builtin_object_size(src, 1); \ + const size_t _src_chars = strnlen(src, _src_len); \ + const size_t _copy_len = min(_dest_len - 1, _src_chars); \ + \ + BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \ + !__builtin_constant_p(_src_len) || \ + _dest_len == 0 || _dest_len == (size_t)-1 || \ + _src_len == 0 || _src_len == (size_t)-1); \ + memcpy(dest, src, _copy_len); \ + dest[_copy_len] = '\0'; \ +} while (0) + +/** + * memtostr_pad - Copy a possibly non-NUL-term string to a NUL-term string + * with NUL padding in the destination + * @dest: Pointer to destination NUL-terminates string + * @src: Pointer to character array (likely marked as __nonstring) + * + * This is a replacement for strncpy() uses where the source is not + * a NUL-terminated string. + * + * Note that sizes of @dest and @src must be known at compile-time. + */ +#define memtostr_pad(dest, src) do { \ + const size_t _dest_len = __builtin_object_size(dest, 1); \ + const size_t _src_len = __builtin_object_size(src, 1); \ + const size_t _src_chars = strnlen(src, _src_len); \ + const size_t _copy_len = min(_dest_len - 1, _src_chars); \ + \ + BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \ + !__builtin_constant_p(_src_len) || \ + _dest_len == 0 || _dest_len == (size_t)-1 || \ + _src_len == 0 || _src_len == (size_t)-1); \ + memcpy(dest, src, _copy_len); \ + memset(&dest[_copy_len], 0, _dest_len - _copy_len); \ +} while (0) + /** * memset_after - Set a value after a struct member to the end of a struct * diff --git a/lib/string_kunit.c b/lib/string_kunit.c index de4eae91403f..fadad46c1b05 100644 --- a/lib/string_kunit.c +++ b/lib/string_kunit.c @@ -524,6 +524,31 @@ static void string_test_strlcat(struct kunit *test) KUNIT_EXPECT_STREQ(test, dest, "fourABE"); } +static void string_test_memtostr(struct kunit *test) +{ + char nonstring[7] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g' }; + char nonstring_small[3] = { 'a', 'b', 'c' }; + char dest[sizeof(nonstring) + 1]; + + /* Copy in a non-NUL-terminated string into exactly right-sized dest. 
*/ + KUNIT_EXPECT_EQ(test, sizeof(dest), sizeof(nonstring) + 1); + memset(dest, 'X', sizeof(dest)); + memtostr(dest, nonstring); + KUNIT_EXPECT_STREQ(test, dest, "abcdefg"); + memset(dest, 'X', sizeof(dest)); + memtostr(dest, nonstring_small); + KUNIT_EXPECT_STREQ(test, dest, "abc"); + KUNIT_EXPECT_EQ(test, dest[7], 'X'); + + memset(dest, 'X', sizeof(dest)); + memtostr_pad(dest, nonstring); + KUNIT_EXPECT_STREQ(test, dest, "abcdefg"); + memset(dest, 'X', sizeof(dest)); + memtostr_pad(dest, nonstring_small); + KUNIT_EXPECT_STREQ(test, dest, "abc"); + KUNIT_EXPECT_EQ(test, dest[7], '\0'); +} + static struct kunit_case string_test_cases[] = { KUNIT_CASE(string_test_memset16), KUNIT_CASE(string_test_memset32), @@ -543,6 +568,7 @@ static struct kunit_case string_test_cases[] = { KUNIT_CASE(string_test_strcat), KUNIT_CASE(string_test_strncat), KUNIT_CASE(string_test_strlcat), + KUNIT_CASE(string_test_memtostr), {} }; -- cgit From fc7566ad0a826cdc8886c5dbbb39ce72a0dc6333 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Wed, 24 Apr 2024 03:13:14 +0000 Subject: bpf: Introduce bpf_preempt_[disable,enable] kfuncs Introduce two new BPF kfuncs, bpf_preempt_disable and bpf_preempt_enable. These kfuncs allow disabling preemption in BPF programs. Nesting is allowed, since the intended use cases includes building native BPF spin locks without kernel helper involvement. Apart from that, this can be used to per-CPU data structures for cases where programs (or userspace) may preempt one or the other. Currently, while per-CPU access is stable, whether it will be consistent is not guaranteed, as only migration is disabled for BPF programs. Global functions are disallowed from being called, but support for them will be added as a follow up not just preempt kfuncs, but rcu_read_lock kfuncs as well. Static subprog calls are permitted. Sleepable helpers and kfuncs are disallowed in non-preemptible regions. Signed-off-by: Kumar Kartikeya Dwivedi Link: https://lore.kernel.org/r/20240424031315.2757363-2-memxor@gmail.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 1 + kernel/bpf/helpers.c | 12 ++++++++ kernel/bpf/verifier.c | 71 ++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 82 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 9db35530c878..50aa87f8d77f 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -421,6 +421,7 @@ struct bpf_verifier_state { struct bpf_active_lock active_lock; bool speculative; bool active_rcu_lock; + u32 active_preempt_lock; /* If this state was ever pointed-to by other state's loop_entry field * this flag would be set to true. Used to avoid freeing such states * while they are still in use. 
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 5f61ffa7505a..14c401b78e12 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2734,6 +2734,16 @@ __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq, return __bpf_async_set_callback(async, callback_fn, aux, flags, BPF_ASYNC_TYPE_WQ); } +__bpf_kfunc void bpf_preempt_disable(void) +{ + preempt_disable(); +} + +__bpf_kfunc void bpf_preempt_enable(void) +{ + preempt_enable(); +} + __bpf_kfunc_end_defs(); BTF_KFUNCS_START(generic_btf_ids) @@ -2814,6 +2824,8 @@ BTF_ID_FLAGS(func, bpf_modify_return_test_tp) BTF_ID_FLAGS(func, bpf_wq_init) BTF_ID_FLAGS(func, bpf_wq_set_callback_impl) BTF_ID_FLAGS(func, bpf_wq_start) +BTF_ID_FLAGS(func, bpf_preempt_disable) +BTF_ID_FLAGS(func, bpf_preempt_enable) BTF_KFUNCS_END(common_btf_ids) static const struct btf_kfunc_id_set common_kfunc_set = { diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 9715c88cc025..8312c7a0ee19 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1434,6 +1434,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, } dst_state->speculative = src->speculative; dst_state->active_rcu_lock = src->active_rcu_lock; + dst_state->active_preempt_lock = src->active_preempt_lock; dst_state->in_sleepable = src->in_sleepable; dst_state->curframe = src->curframe; dst_state->active_lock.ptr = src->active_lock.ptr; @@ -9599,6 +9600,13 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, return -EINVAL; } + /* Only global subprogs cannot be called with preemption disabled. */ + if (env->cur_state->active_preempt_lock) { + verbose(env, "global function calls are not allowed with preemption disabled,\n" + "use static function instead\n"); + return -EINVAL; + } + if (err) { verbose(env, "Caller passes invalid args into func#%d ('%s')\n", subprog, sub_name); @@ -10285,6 +10293,17 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn env->insn_aux_data[insn_idx].storage_get_func_atomic = true; } + if (env->cur_state->active_preempt_lock) { + if (fn->might_sleep) { + verbose(env, "sleepable helper %s#%d in non-preemptible region\n", + func_id_name(func_id), func_id); + return -EINVAL; + } + + if (in_sleepable(env) && is_storage_get_function(func_id)) + env->insn_aux_data[insn_idx].storage_get_func_atomic = true; + } + meta.func_id = func_id; /* check args */ for (i = 0; i < MAX_BPF_FUNC_REG_ARGS; i++) { @@ -11027,6 +11046,8 @@ enum special_kfunc_type { KF_bpf_percpu_obj_drop_impl, KF_bpf_throw, KF_bpf_wq_set_callback_impl, + KF_bpf_preempt_disable, + KF_bpf_preempt_enable, KF_bpf_iter_css_task_new, }; @@ -11081,6 +11102,8 @@ BTF_ID(func, bpf_percpu_obj_new_impl) BTF_ID(func, bpf_percpu_obj_drop_impl) BTF_ID(func, bpf_throw) BTF_ID(func, bpf_wq_set_callback_impl) +BTF_ID(func, bpf_preempt_disable) +BTF_ID(func, bpf_preempt_enable) #ifdef CONFIG_CGROUPS BTF_ID(func, bpf_iter_css_task_new) #else @@ -11107,6 +11130,16 @@ static bool is_kfunc_bpf_rcu_read_unlock(struct bpf_kfunc_call_arg_meta *meta) return meta->func_id == special_kfunc_list[KF_bpf_rcu_read_unlock]; } +static bool is_kfunc_bpf_preempt_disable(struct bpf_kfunc_call_arg_meta *meta) +{ + return meta->func_id == special_kfunc_list[KF_bpf_preempt_disable]; +} + +static bool is_kfunc_bpf_preempt_enable(struct bpf_kfunc_call_arg_meta *meta) +{ + return meta->func_id == special_kfunc_list[KF_bpf_preempt_enable]; +} + static enum kfunc_ptr_arg_type get_kfunc_ptr_arg_type(struct bpf_verifier_env *env, struct 
bpf_kfunc_call_arg_meta *meta, @@ -12195,11 +12228,11 @@ static int check_return_code(struct bpf_verifier_env *env, int regno, const char static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx_p) { - const struct btf_type *t, *ptr_type; + bool sleepable, rcu_lock, rcu_unlock, preempt_disable, preempt_enable; u32 i, nargs, ptr_type_id, release_ref_obj_id; struct bpf_reg_state *regs = cur_regs(env); const char *func_name, *ptr_type_name; - bool sleepable, rcu_lock, rcu_unlock; + const struct btf_type *t, *ptr_type; struct bpf_kfunc_call_arg_meta meta; struct bpf_insn_aux_data *insn_aux; int err, insn_idx = *insn_idx_p; @@ -12260,6 +12293,9 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, rcu_lock = is_kfunc_bpf_rcu_read_lock(&meta); rcu_unlock = is_kfunc_bpf_rcu_read_unlock(&meta); + preempt_disable = is_kfunc_bpf_preempt_disable(&meta); + preempt_enable = is_kfunc_bpf_preempt_enable(&meta); + if (env->cur_state->active_rcu_lock) { struct bpf_func_state *state; struct bpf_reg_state *reg; @@ -12292,6 +12328,22 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, return -EINVAL; } + if (env->cur_state->active_preempt_lock) { + if (preempt_disable) { + env->cur_state->active_preempt_lock++; + } else if (preempt_enable) { + env->cur_state->active_preempt_lock--; + } else if (sleepable) { + verbose(env, "kernel func %s is sleepable within non-preemptible region\n", func_name); + return -EACCES; + } + } else if (preempt_disable) { + env->cur_state->active_preempt_lock++; + } else if (preempt_enable) { + verbose(env, "unmatched attempt to enable preemption (kernel function %s)\n", func_name); + return -EINVAL; + } + /* In case of release function, we get register number of refcounted * PTR_TO_BTF_ID in bpf_kfunc_arg_meta, do the release now. */ @@ -15439,6 +15491,11 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) return -EINVAL; } + if (env->cur_state->active_preempt_lock) { + verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_preempt_disable-ed region\n"); + return -EINVAL; + } + if (regs[ctx_reg].type != PTR_TO_CTX) { verbose(env, "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); @@ -17006,6 +17063,9 @@ static bool states_equal(struct bpf_verifier_env *env, if (old->active_rcu_lock != cur->active_rcu_lock) return false; + if (old->active_preempt_lock != cur->active_preempt_lock) + return false; + if (old->in_sleepable != cur->in_sleepable) return false; @@ -17957,6 +18017,13 @@ process_bpf_exit_full: return -EINVAL; } + if (env->cur_state->active_preempt_lock && !env->cur_state->curframe) { + verbose(env, "%d bpf_preempt_enable%s missing\n", + env->cur_state->active_preempt_lock, + env->cur_state->active_preempt_lock == 1 ? " is" : "(s) are"); + return -EINVAL; + } + /* We must do check_reference_leak here before * prepare_func_exit to handle the case when * state->curframe > 0, it may be a callback -- cgit From 306ec721d043bbe5e818d59fbb37c28d999b5d8b Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 18 Apr 2024 13:36:07 +0200 Subject: net: intel: introduce {, Intel} Ethernet common library Not a secret there's a ton of code duplication between two and more Intel ethernet modules. Before introducing new changes, which would need to be copied over again, start decoupling the already existing duplicate functionality into a new module, which will be shared between several Intel Ethernet drivers. 
Add the lookup table which converts the 8/10-bit hardware packet type into a parsed bitfield structure for easily checking packet format parameters, such as payload level, IP version, etc. This is currently used by i40e, ice and iavf and it's all the same in all three drivers.

The only difference introduced in this implementation is that instead of defining a 256 (or 1024 in the case of ice) element array, an unlikely() condition limits the input to 154 (the current maximum non-reserved packet type). There's no reason to waste 600 (or even 3600) bytes just to avoid penalizing very unlikely exception packets.

The hash computation function now takes the payload level directly as a pkt_hash_type. There are a couple of cases where non-IP ptypes are marked as L3 payload, and in the previous versions their hash level would be 2, not 3. But skb_set_hash() only distinguishes between L4 and non-L4, so this won't change anything at all.

The module is behind a hidden Kconfig symbol, which the drivers select when needed. The exports are behind the 'LIBIE' namespace to limit the scope of the functions.

Note that non-HW-specific symbols will live in yet another module, libeth. This is done to clearly separate generic code, ready for reuse by any other vendor and/or for moving the layer up, from code useful only in Intel's 1-100G drivers.

Signed-off-by: Alexander Lobakin
Signed-off-by: Tony Nguyen
---
drivers/net/ethernet/intel/Kconfig | 7 +
drivers/net/ethernet/intel/Makefile | 3 +
drivers/net/ethernet/intel/i40e/i40e_common.c | 253 ------------------
drivers/net/ethernet/intel/i40e/i40e_main.c | 1 +
drivers/net/ethernet/intel/i40e/i40e_prototype.h | 7 -
drivers/net/ethernet/intel/i40e/i40e_txrx.c | 72 ++---
drivers/net/ethernet/intel/i40e/i40e_type.h | 88 -------
drivers/net/ethernet/intel/iavf/iavf_common.c | 253 ------------------
drivers/net/ethernet/intel/iavf/iavf_main.c | 1 +
drivers/net/ethernet/intel/iavf/iavf_prototype.h | 7 -
drivers/net/ethernet/intel/iavf/iavf_txrx.c | 70 ++---
drivers/net/ethernet/intel/iavf/iavf_type.h | 88 -------
drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 320 -----------------------
drivers/net/ethernet/intel/ice/ice_main.c | 1 +
drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 111 ++------
drivers/net/ethernet/intel/libeth/Kconfig | 8 +
drivers/net/ethernet/intel/libeth/Makefile | 6 +
drivers/net/ethernet/intel/libeth/rx.c | 52 ++++
drivers/net/ethernet/intel/libie/Kconfig | 10 +
drivers/net/ethernet/intel/libie/Makefile | 6 +
drivers/net/ethernet/intel/libie/rx.c | 124 +++++++++
include/linux/net/intel/libie/rx.h | 33 +++
include/net/libeth/rx.h | 125 +++++++++
23 files changed, 426 insertions(+), 1220 deletions(-)
create mode 100644 drivers/net/ethernet/intel/libeth/Kconfig
create mode 100644 drivers/net/ethernet/intel/libeth/Makefile
create mode 100644 drivers/net/ethernet/intel/libeth/rx.c
create mode 100644 drivers/net/ethernet/intel/libie/Kconfig
create mode 100644 drivers/net/ethernet/intel/libie/Makefile
create mode 100644 drivers/net/ethernet/intel/libie/rx.c
create mode 100644 include/linux/net/intel/libie/rx.h
create mode 100644 include/net/libeth/rx.h
(limited to 'include/linux')
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 6e7901e12699..e0287fbd501d 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -16,6 +16,9 @@ config NET_VENDOR_INTEL if NET_VENDOR_INTEL +source "drivers/net/ethernet/intel/libeth/Kconfig" +source "drivers/net/ethernet/intel/libie/Kconfig" + config E100
tristate "Intel(R) PRO/100+ support" depends on PCI @@ -225,6 +228,7 @@ config I40E depends on PTP_1588_CLOCK_OPTIONAL depends on PCI select AUXILIARY_BUS + select LIBIE select NET_DEVLINK help This driver supports Intel(R) Ethernet Controller XL710 Family of @@ -253,6 +257,8 @@ config I40E_DCB # so that CONFIG_IAVF symbol will always mirror the state of CONFIG_I40EVF config IAVF tristate + select LIBIE + config I40EVF tristate "Intel(R) Ethernet Adaptive Virtual Function support" select IAVF @@ -283,6 +289,7 @@ config ICE depends on GNSS || GNSS = n select AUXILIARY_BUS select DIMLIB + select LIBIE select NET_DEVLINK select PLDMFW select DPLL diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile index dacb481ee5b1..04c844ef4964 100644 --- a/drivers/net/ethernet/intel/Makefile +++ b/drivers/net/ethernet/intel/Makefile @@ -3,6 +3,9 @@ # Makefile for the Intel network device drivers. # +obj-$(CONFIG_LIBETH) += libeth/ +obj-$(CONFIG_LIBIE) += libie/ + obj-$(CONFIG_E100) += e100.o obj-$(CONFIG_E1000) += e1000/ obj-$(CONFIG_E1000E) += e1000e/ diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index de6ca6295742..e8031f1a9b4f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -381,259 +381,6 @@ int i40e_aq_set_rss_key(struct i40e_hw *hw, return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); } -/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the - * hardware to a bit-field that can be used by SW to more easily determine the - * packet type. - * - * Macros are used to shorten the table lines and make this table human - * readable. - * - * We store the PTYPE in the top byte of the bit field - this is just so that - * we can check that the table doesn't have a row missing, as the index into - * the table should be the PTYPE. 
- * - * Typical work flow: - * - * IF NOT i40e_ptype_lookup[ptype].known - * THEN - * Packet is unknown - * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP - * Use the rest of the fields to look at the tunnels, inner protocols, etc - * ELSE - * Use the enum i40e_rx_l2_ptype to decode the packet type - * ENDIF - */ - -/* macro to make the table lines short, use explicit indexing with [PTYPE] */ -#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - [PTYPE] = { \ - 1, \ - I40E_RX_PTYPE_OUTER_##OUTER_IP, \ - I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ - I40E_RX_PTYPE_##OUTER_FRAG, \ - I40E_RX_PTYPE_TUNNEL_##T, \ - I40E_RX_PTYPE_TUNNEL_END_##TE, \ - I40E_RX_PTYPE_##TEF, \ - I40E_RX_PTYPE_INNER_PROT_##I, \ - I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } - -#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } - -/* shorter macros makes the table fit but are terse */ -#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG -#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG -#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC - -/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */ -struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = { - /* L2 Packet types */ - I40E_PTT_UNUSED_ENTRY(0), - I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), - I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT_UNUSED_ENTRY(4), - I40E_PTT_UNUSED_ENTRY(5), - I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT_UNUSED_ENTRY(8), - I40E_PTT_UNUSED_ENTRY(9), - I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - - /* Non Tunneled IPv4 */ - I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(25), - I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), - I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), - I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv4 --> IPv4 */ - I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(32), - I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> IPv6 */ - I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(39), - I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - 
I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT */ - I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> IPv4 */ - I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(47), - I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> IPv6 */ - I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(54), - I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC */ - I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ - I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(62), - I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ - I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(69), - I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC/VLAN */ - I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ - I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(77), - I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ - I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(84), - I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* Non Tunneled IPv6 */ - I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), - 
I40E_PTT_UNUSED_ENTRY(91), - I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), - I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), - I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv6 --> IPv4 */ - I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(98), - I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> IPv6 */ - I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(105), - I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT */ - I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> IPv4 */ - I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(113), - I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> IPv6 */ - I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(120), - I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC */ - I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ - I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(128), - I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ - I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(135), - I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN */ - I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ - I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - 
I40E_PTT_UNUSED_ENTRY(143), - I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ - I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(150), - I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* unused entries */ - [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } -}; - /** * i40e_init_shared_code - Initialize the shared code * @hw: pointer to hardware structure diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 0bdcdea0be3e..164afd8ce5a0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -100,6 +100,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); +MODULE_IMPORT_NS(LIBIE); MODULE_LICENSE("GPL v2"); static struct workqueue_struct *i40e_wq; diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index ce1f11b8ad65..5a0699ca7ce5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -371,13 +371,6 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); int i40e_set_mac_type(struct i40e_hw *hw); -extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[]; - -static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) -{ - return i40e_ptype_lookup[ptype]; -} - /** * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition * @link_speed: the speed to convert diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 64d198ed166b..bc9e766d88cb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2,6 +2,7 @@ /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include +#include #include #include #include @@ -1741,38 +1742,30 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct sk_buff *skb, union i40e_rx_desc *rx_desc) { - struct i40e_rx_ptype_decoded decoded; + struct libeth_rx_pt decoded; u32 rx_error, rx_status; bool ipv4, ipv6; u8 ptype; u64 qword; - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword); - rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword); - rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword); - decoded = decode_rx_desc_ptype(ptype); - skb->ip_summed = CHECKSUM_NONE; - skb_checksum_none_assert(skb); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword); - /* Rx csum enabled and ip headers found? 
*/ - if (!(vsi->netdev->features & NETIF_F_RXCSUM)) + decoded = libie_rx_pt_parse(ptype); + if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded)) return; + rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword); + rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword); + /* did the hardware decode the packet and checksum? */ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) return; - /* both known and outer_ip must be set for the below code to work */ - if (!(decoded.known && decoded.outer_ip)) - return; - - ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); - ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; if (ipv4 && (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | @@ -1800,49 +1793,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, * we need to bump the checksum level by 1 to reflect the fact that * we are indicating we validated the inner checksum. */ - if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT) + if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT) skb->csum_level = 1; - /* Only report checksum unnecessary for TCP, UDP, or SCTP */ - switch (decoded.inner_prot) { - case I40E_RX_PTYPE_INNER_PROT_TCP: - case I40E_RX_PTYPE_INNER_PROT_UDP: - case I40E_RX_PTYPE_INNER_PROT_SCTP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - fallthrough; - default: - break; - } - + skb->ip_summed = CHECKSUM_UNNECESSARY; return; checksum_fail: vsi->back->hw_csum_rx_error++; } -/** - * i40e_ptype_to_htype - get a hash type - * @ptype: the ptype value from the descriptor - * - * Returns a hash type to be used by skb_set_hash - **/ -static inline int i40e_ptype_to_htype(u8 ptype) -{ - struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); - - if (!decoded.known) - return PKT_HASH_TYPE_NONE; - - if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4) - return PKT_HASH_TYPE_L4; - else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3) - return PKT_HASH_TYPE_L3; - else - return PKT_HASH_TYPE_L2; -} - /** * i40e_rx_hash - set the hash value in the skb * @ring: descriptor ring @@ -1855,17 +1815,19 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, struct sk_buff *skb, u8 rx_ptype) { + struct libeth_rx_pt decoded; u32 hash; const __le64 rss_mask = cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); - if (!(ring->netdev->features & NETIF_F_RXHASH)) + decoded = libie_rx_pt_parse(rx_ptype); + if (!libeth_rx_pt_has_hash(ring->netdev, decoded)) return; if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); - skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype)); + libeth_rx_pt_set_hash(skb, hash, decoded); } } diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index d9031499697e..28568e126850 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -745,94 +745,6 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks { #define I40E_RXD_QW1_PTYPE_SHIFT 30 #define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) -/* Packet type non-ip values */ -enum i40e_rx_l2_ptype { - I40E_RX_PTYPE_L2_RESERVED = 
0, - I40E_RX_PTYPE_L2_MAC_PAY2 = 1, - I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, - I40E_RX_PTYPE_L2_FIP_PAY2 = 3, - I40E_RX_PTYPE_L2_OUI_PAY2 = 4, - I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, - I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, - I40E_RX_PTYPE_L2_ECP_PAY2 = 7, - I40E_RX_PTYPE_L2_EVB_PAY2 = 8, - I40E_RX_PTYPE_L2_QCN_PAY2 = 9, - I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, - I40E_RX_PTYPE_L2_ARP = 11, - I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, - I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, - I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, - I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, - I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, - I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, - I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, - I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, - I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, - I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, - I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, - I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, - I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, - I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 -}; - -struct i40e_rx_ptype_decoded { - u32 known:1; - u32 outer_ip:1; - u32 outer_ip_ver:1; - u32 outer_frag:1; - u32 tunnel_type:3; - u32 tunnel_end_prot:2; - u32 tunnel_end_frag:1; - u32 inner_prot:4; - u32 payload_layer:3; -}; - -enum i40e_rx_ptype_outer_ip { - I40E_RX_PTYPE_OUTER_L2 = 0, - I40E_RX_PTYPE_OUTER_IP = 1 -}; - -enum i40e_rx_ptype_outer_ip_ver { - I40E_RX_PTYPE_OUTER_NONE = 0, - I40E_RX_PTYPE_OUTER_IPV4 = 0, - I40E_RX_PTYPE_OUTER_IPV6 = 1 -}; - -enum i40e_rx_ptype_outer_fragmented { - I40E_RX_PTYPE_NOT_FRAG = 0, - I40E_RX_PTYPE_FRAG = 1 -}; - -enum i40e_rx_ptype_tunnel_type { - I40E_RX_PTYPE_TUNNEL_NONE = 0, - I40E_RX_PTYPE_TUNNEL_IP_IP = 1, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, -}; - -enum i40e_rx_ptype_tunnel_end_prot { - I40E_RX_PTYPE_TUNNEL_END_NONE = 0, - I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, - I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, -}; - -enum i40e_rx_ptype_inner_prot { - I40E_RX_PTYPE_INNER_PROT_NONE = 0, - I40E_RX_PTYPE_INNER_PROT_UDP = 1, - I40E_RX_PTYPE_INNER_PROT_TCP = 2, - I40E_RX_PTYPE_INNER_PROT_SCTP = 3, - I40E_RX_PTYPE_INNER_PROT_ICMP = 4, - I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 -}; - -enum i40e_rx_ptype_payload_layer { - I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, -}; - #define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 #define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ I40E_RXD_QW1_LENGTH_PBUF_SHIFT) diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c index 5a25233a89d5..aa751ce3425b 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_common.c +++ b/drivers/net/ethernet/intel/iavf/iavf_common.c @@ -432,259 +432,6 @@ enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id, return iavf_aq_get_set_rss_key(hw, vsi_id, key, true); } -/* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the - * hardware to a bit-field that can be used by SW to more easily determine the - * packet type. - * - * Macros are used to shorten the table lines and make this table human - * readable. - * - * We store the PTYPE in the top byte of the bit field - this is just so that - * we can check that the table doesn't have a row missing, as the index into - * the table should be the PTYPE. 
- * - * Typical work flow: - * - * IF NOT iavf_ptype_lookup[ptype].known - * THEN - * Packet is unknown - * ELSE IF iavf_ptype_lookup[ptype].outer_ip == IAVF_RX_PTYPE_OUTER_IP - * Use the rest of the fields to look at the tunnels, inner protocols, etc - * ELSE - * Use the enum iavf_rx_l2_ptype to decode the packet type - * ENDIF - */ - -/* macro to make the table lines short, use explicit indexing with [PTYPE] */ -#define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - [PTYPE] = { \ - 1, \ - IAVF_RX_PTYPE_OUTER_##OUTER_IP, \ - IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \ - IAVF_RX_PTYPE_##OUTER_FRAG, \ - IAVF_RX_PTYPE_TUNNEL_##T, \ - IAVF_RX_PTYPE_TUNNEL_END_##TE, \ - IAVF_RX_PTYPE_##TEF, \ - IAVF_RX_PTYPE_INNER_PROT_##I, \ - IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL } - -#define IAVF_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } - -/* shorter macros makes the table fit but are terse */ -#define IAVF_RX_PTYPE_NOF IAVF_RX_PTYPE_NOT_FRAG -#define IAVF_RX_PTYPE_FRG IAVF_RX_PTYPE_FRAG -#define IAVF_RX_PTYPE_INNER_PROT_TS IAVF_RX_PTYPE_INNER_PROT_TIMESYNC - -/* Lookup table mapping the 8-bit HW PTYPE to the bit field for decoding */ -struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = { - /* L2 Packet types */ - IAVF_PTT_UNUSED_ENTRY(0), - IAVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - IAVF_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), - IAVF_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - IAVF_PTT_UNUSED_ENTRY(4), - IAVF_PTT_UNUSED_ENTRY(5), - IAVF_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - IAVF_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - IAVF_PTT_UNUSED_ENTRY(8), - IAVF_PTT_UNUSED_ENTRY(9), - IAVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - IAVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - IAVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - - /* Non Tunneled IPv4 */ - IAVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(25), - IAVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), - IAVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), - IAVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv4 --> IPv4 */ - IAVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - IAVF_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - IAVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(32), - IAVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - IAVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - IAVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> IPv6 */ - IAVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - IAVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - IAVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(39), - IAVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - IAVF_PTT(41, 
IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - IAVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT */ - IAVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> IPv4 */ - IAVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - IAVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - IAVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(47), - IAVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - IAVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - IAVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> IPv6 */ - IAVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - IAVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - IAVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(54), - IAVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - IAVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - IAVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC */ - IAVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ - IAVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - IAVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - IAVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(62), - IAVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - IAVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - IAVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ - IAVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - IAVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - IAVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(69), - IAVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - IAVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - IAVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC/VLAN */ - IAVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ - IAVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - IAVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - IAVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(77), - IAVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - IAVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - IAVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ - IAVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - IAVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - IAVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(84), - IAVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - IAVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - IAVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* Non Tunneled IPv6 */ - IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), - 
IAVF_PTT_UNUSED_ENTRY(91), - IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), - IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), - IAVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv6 --> IPv4 */ - IAVF_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - IAVF_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - IAVF_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(98), - IAVF_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - IAVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - IAVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> IPv6 */ - IAVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - IAVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - IAVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(105), - IAVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - IAVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - IAVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT */ - IAVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> IPv4 */ - IAVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - IAVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - IAVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(113), - IAVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - IAVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - IAVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> IPv6 */ - IAVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - IAVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - IAVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(120), - IAVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - IAVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - IAVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC */ - IAVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ - IAVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - IAVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - IAVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(128), - IAVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - IAVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - IAVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ - IAVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - IAVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - IAVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(135), - IAVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - IAVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - IAVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN */ - IAVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ - IAVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - IAVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - IAVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - 
IAVF_PTT_UNUSED_ENTRY(143), - IAVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - IAVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - IAVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ - IAVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - IAVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - IAVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - IAVF_PTT_UNUSED_ENTRY(150), - IAVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - IAVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* unused entries */ - [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } -}; - /** * iavf_aq_send_msg_to_pf * @hw: pointer to the hardware structure diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 13361a780ece..d6cbe5022815 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -45,6 +45,7 @@ MODULE_DEVICE_TABLE(pci, iavf_pci_tbl); MODULE_ALIAS("i40evf"); MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); +MODULE_IMPORT_NS(LIBIE); MODULE_LICENSE("GPL v2"); static const struct net_device_ops iavf_netdev_ops; diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h index 4a48e6171405..48c3901381b4 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h +++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h @@ -45,13 +45,6 @@ enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid, enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid, struct iavf_aqc_get_set_rss_key_data *key); -extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[]; - -static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) -{ - return iavf_ptype_lookup[ptype]; -} - void iavf_vf_parse_hw_config(struct iavf_hw *hw, struct virtchnl_vf_resource *msg); enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw, diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 32bb604a1382..33ec01d0ed67 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -2,6 +2,7 @@ /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include +#include #include #include "iavf.h" @@ -982,38 +983,30 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi, struct sk_buff *skb, union iavf_rx_desc *rx_desc) { - struct iavf_rx_ptype_decoded decoded; + struct libeth_rx_pt decoded; u32 rx_error, rx_status; bool ipv4, ipv6; u8 ptype; u64 qword; - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword); - rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword); - rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword); - decoded = decode_rx_desc_ptype(ptype); - skb->ip_summed = CHECKSUM_NONE; - skb_checksum_none_assert(skb); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword); - /* Rx csum enabled and ip headers found? 
*/ - if (!(vsi->netdev->features & NETIF_F_RXCSUM)) + decoded = libie_rx_pt_parse(ptype); + if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded)) return; + rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword); + rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword); + /* did the hardware decode the packet and checksum? */ if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT))) return; - /* both known and outer_ip must be set for the below code to work */ - if (!(decoded.known && decoded.outer_ip)) - return; - - ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4); - ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; if (ipv4 && (rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) | @@ -1037,46 +1030,13 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi, if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT)) return; - /* Only report checksum unnecessary for TCP, UDP, or SCTP */ - switch (decoded.inner_prot) { - case IAVF_RX_PTYPE_INNER_PROT_TCP: - case IAVF_RX_PTYPE_INNER_PROT_UDP: - case IAVF_RX_PTYPE_INNER_PROT_SCTP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - fallthrough; - default: - break; - } - + skb->ip_summed = CHECKSUM_UNNECESSARY; return; checksum_fail: vsi->back->hw_csum_rx_error++; } -/** - * iavf_ptype_to_htype - get a hash type - * @ptype: the ptype value from the descriptor - * - * Returns a hash type to be used by skb_set_hash - **/ -static int iavf_ptype_to_htype(u8 ptype) -{ - struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); - - if (!decoded.known) - return PKT_HASH_TYPE_NONE; - - if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP && - decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4) - return PKT_HASH_TYPE_L4; - else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP && - decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3) - return PKT_HASH_TYPE_L3; - else - return PKT_HASH_TYPE_L2; -} - /** * iavf_rx_hash - set the hash value in the skb * @ring: descriptor ring @@ -1089,17 +1049,19 @@ static void iavf_rx_hash(struct iavf_ring *ring, struct sk_buff *skb, u8 rx_ptype) { + struct libeth_rx_pt decoded; u32 hash; const __le64 rss_mask = cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH << IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT); - if (!(ring->netdev->features & NETIF_F_RXHASH)) + decoded = libie_rx_pt_parse(rx_ptype); + if (!libeth_rx_pt_has_hash(ring->netdev, decoded)) return; if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); - skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype)); + libeth_rx_pt_set_hash(skb, hash, decoded); } } diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h index 2b6a207fa441..23ded4fcd94f 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_type.h +++ b/drivers/net/ethernet/intel/iavf/iavf_type.h @@ -327,94 +327,6 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks { #define IAVF_RXD_QW1_PTYPE_SHIFT 30 #define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT) -/* Packet type non-ip values */ -enum iavf_rx_l2_ptype { - IAVF_RX_PTYPE_L2_RESERVED = 0, - IAVF_RX_PTYPE_L2_MAC_PAY2 = 1, - IAVF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, - IAVF_RX_PTYPE_L2_FIP_PAY2 = 3, - IAVF_RX_PTYPE_L2_OUI_PAY2 = 4, - IAVF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, - IAVF_RX_PTYPE_L2_LLDP_PAY2 = 6, - 
IAVF_RX_PTYPE_L2_ECP_PAY2 = 7, - IAVF_RX_PTYPE_L2_EVB_PAY2 = 8, - IAVF_RX_PTYPE_L2_QCN_PAY2 = 9, - IAVF_RX_PTYPE_L2_EAPOL_PAY2 = 10, - IAVF_RX_PTYPE_L2_ARP = 11, - IAVF_RX_PTYPE_L2_FCOE_PAY3 = 12, - IAVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, - IAVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, - IAVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, - IAVF_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, - IAVF_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, - IAVF_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, - IAVF_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, - IAVF_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, - IAVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, - IAVF_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, - IAVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, - IAVF_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, - IAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 -}; - -struct iavf_rx_ptype_decoded { - u32 known:1; - u32 outer_ip:1; - u32 outer_ip_ver:1; - u32 outer_frag:1; - u32 tunnel_type:3; - u32 tunnel_end_prot:2; - u32 tunnel_end_frag:1; - u32 inner_prot:4; - u32 payload_layer:3; -}; - -enum iavf_rx_ptype_outer_ip { - IAVF_RX_PTYPE_OUTER_L2 = 0, - IAVF_RX_PTYPE_OUTER_IP = 1 -}; - -enum iavf_rx_ptype_outer_ip_ver { - IAVF_RX_PTYPE_OUTER_NONE = 0, - IAVF_RX_PTYPE_OUTER_IPV4 = 0, - IAVF_RX_PTYPE_OUTER_IPV6 = 1 -}; - -enum iavf_rx_ptype_outer_fragmented { - IAVF_RX_PTYPE_NOT_FRAG = 0, - IAVF_RX_PTYPE_FRAG = 1 -}; - -enum iavf_rx_ptype_tunnel_type { - IAVF_RX_PTYPE_TUNNEL_NONE = 0, - IAVF_RX_PTYPE_TUNNEL_IP_IP = 1, - IAVF_RX_PTYPE_TUNNEL_IP_GRENAT = 2, - IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, - IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, -}; - -enum iavf_rx_ptype_tunnel_end_prot { - IAVF_RX_PTYPE_TUNNEL_END_NONE = 0, - IAVF_RX_PTYPE_TUNNEL_END_IPV4 = 1, - IAVF_RX_PTYPE_TUNNEL_END_IPV6 = 2, -}; - -enum iavf_rx_ptype_inner_prot { - IAVF_RX_PTYPE_INNER_PROT_NONE = 0, - IAVF_RX_PTYPE_INNER_PROT_UDP = 1, - IAVF_RX_PTYPE_INNER_PROT_TCP = 2, - IAVF_RX_PTYPE_INNER_PROT_SCTP = 3, - IAVF_RX_PTYPE_INNER_PROT_ICMP = 4, - IAVF_RX_PTYPE_INNER_PROT_TIMESYNC = 5 -}; - -enum iavf_rx_ptype_payload_layer { - IAVF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, - IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, - IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, - IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, -}; - #define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38 #define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ IAVF_RXD_QW1_LENGTH_PBUF_SHIFT) diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index d384ddfcb83e..611577ebc29d 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -160,64 +160,6 @@ struct ice_fltr_desc { (0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S) #define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL -struct ice_rx_ptype_decoded { - u32 known:1; - u32 outer_ip:1; - u32 outer_ip_ver:2; - u32 outer_frag:1; - u32 tunnel_type:3; - u32 tunnel_end_prot:2; - u32 tunnel_end_frag:1; - u32 inner_prot:4; - u32 payload_layer:3; -}; - -enum ice_rx_ptype_outer_ip { - ICE_RX_PTYPE_OUTER_L2 = 0, - ICE_RX_PTYPE_OUTER_IP = 1, -}; - -enum ice_rx_ptype_outer_ip_ver { - ICE_RX_PTYPE_OUTER_NONE = 0, - ICE_RX_PTYPE_OUTER_IPV4 = 1, - ICE_RX_PTYPE_OUTER_IPV6 = 2, -}; - -enum ice_rx_ptype_outer_fragmented { - ICE_RX_PTYPE_NOT_FRAG = 0, - ICE_RX_PTYPE_FRAG = 1, -}; - -enum ice_rx_ptype_tunnel_type { - ICE_RX_PTYPE_TUNNEL_NONE = 0, - ICE_RX_PTYPE_TUNNEL_IP_IP = 1, - ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2, - ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, - ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, -}; - -enum ice_rx_ptype_tunnel_end_prot { - ICE_RX_PTYPE_TUNNEL_END_NONE = 0, - 
ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1, - ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2, -}; - -enum ice_rx_ptype_inner_prot { - ICE_RX_PTYPE_INNER_PROT_NONE = 0, - ICE_RX_PTYPE_INNER_PROT_UDP = 1, - ICE_RX_PTYPE_INNER_PROT_TCP = 2, - ICE_RX_PTYPE_INNER_PROT_SCTP = 3, - ICE_RX_PTYPE_INNER_PROT_ICMP = 4, - ICE_RX_PTYPE_INNER_PROT_TIMESYNC = 5, -}; - -enum ice_rx_ptype_payload_layer { - ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, - ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, - ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, - ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, -}; - /* Rx Flex Descriptor * This descriptor is used instead of the legacy version descriptor when * ice_rlan_ctx.adv_desc is set @@ -651,266 +593,4 @@ struct ice_tlan_ctx { u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */ }; -/* The ice_ptype_lkup table is used to convert from the 10-bit ptype in the - * hardware to a bit-field that can be used by SW to more easily determine the - * packet type. - * - * Macros are used to shorten the table lines and make this table human - * readable. - * - * We store the PTYPE in the top byte of the bit field - this is just so that - * we can check that the table doesn't have a row missing, as the index into - * the table should be the PTYPE. - * - * Typical work flow: - * - * IF NOT ice_ptype_lkup[ptype].known - * THEN - * Packet is unknown - * ELSE IF ice_ptype_lkup[ptype].outer_ip == ICE_RX_PTYPE_OUTER_IP - * Use the rest of the fields to look at the tunnels, inner protocols, etc - * ELSE - * Use the enum ice_rx_l2_ptype to decode the packet type - * ENDIF - */ -#define ICE_PTYPES \ - /* L2 Packet types */ \ - ICE_PTT_UNUSED_ENTRY(0), \ - ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), \ - ICE_PTT_UNUSED_ENTRY(2), \ - ICE_PTT_UNUSED_ENTRY(3), \ - ICE_PTT_UNUSED_ENTRY(4), \ - ICE_PTT_UNUSED_ENTRY(5), \ - ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ - ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ - ICE_PTT_UNUSED_ENTRY(8), \ - ICE_PTT_UNUSED_ENTRY(9), \ - ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ - ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \ - ICE_PTT_UNUSED_ENTRY(12), \ - ICE_PTT_UNUSED_ENTRY(13), \ - ICE_PTT_UNUSED_ENTRY(14), \ - ICE_PTT_UNUSED_ENTRY(15), \ - ICE_PTT_UNUSED_ENTRY(16), \ - ICE_PTT_UNUSED_ENTRY(17), \ - ICE_PTT_UNUSED_ENTRY(18), \ - ICE_PTT_UNUSED_ENTRY(19), \ - ICE_PTT_UNUSED_ENTRY(20), \ - ICE_PTT_UNUSED_ENTRY(21), \ - \ - /* Non Tunneled IPv4 */ \ - ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), \ - ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), \ - ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(25), \ - ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), \ - ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), \ - ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> IPv4 */ \ - ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(32), \ - ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> IPv6 */ \ - ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(39), \ - ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, 
TCP, PAY4), \ - ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT */ \ - ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \ - \ - /* IPv4 --> GRE/NAT --> IPv4 */ \ - ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(47), \ - ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT --> IPv6 */ \ - ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(54), \ - ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT --> MAC */ \ - ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \ - \ - /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ \ - ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(62), \ - ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ \ - ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(69), \ - ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv4 --> GRE/NAT --> MAC/VLAN */ \ - ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \ - \ - /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ \ - ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(77), \ - ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ \ - ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(84), \ - ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), \ - \ - /* Non Tunneled IPv6 */ \ - ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), \ - ICE_PTT(89, IP, 
IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), \ - ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(91), \ - ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), \ - ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), \ - ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> IPv4 */ \ - ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(98), \ - ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> IPv6 */ \ - ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(105), \ - ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT */ \ - ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \ - \ - /* IPv6 --> GRE/NAT -> IPv4 */ \ - ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(113), \ - ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> IPv6 */ \ - ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(120), \ - ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> MAC */ \ - ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \ - \ - /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ \ - ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \ - ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(128), \ - ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ \ - ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(135), \ - ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> MAC/VLAN */ \ - ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \ - \ - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ \ - ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, 
IPV4, FRG, NONE, PAY3), \ - ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \ - ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(143), \ - ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \ - ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \ - ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \ - \ - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ \ - ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \ - ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \ - ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \ - ICE_PTT_UNUSED_ENTRY(150), \ - ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \ - ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \ - ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - -#define ICE_NUM_DEFINED_PTYPES 154 - -/* macro to make the table lines short, use explicit indexing with [PTYPE] */ -#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - [PTYPE] = { \ - 1, \ - ICE_RX_PTYPE_OUTER_##OUTER_IP, \ - ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \ - ICE_RX_PTYPE_##OUTER_FRAG, \ - ICE_RX_PTYPE_TUNNEL_##T, \ - ICE_RX_PTYPE_TUNNEL_END_##TE, \ - ICE_RX_PTYPE_##TEF, \ - ICE_RX_PTYPE_INNER_PROT_##I, \ - ICE_RX_PTYPE_PAYLOAD_LAYER_##PL } - -#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } - -/* shorter macros makes the table fit but are terse */ -#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG -#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG - -/* Lookup table mapping in the 10-bit HW PTYPE to the bit field for decoding */ -static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = { - ICE_PTYPES - - /* unused entries */ - [ICE_NUM_DEFINED_PTYPES ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } -}; - -static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype) -{ - return ice_ptype_lkup[ptype]; -} - - #endif /* _ICE_LAN_TX_RX_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 2b173638b877..f55b57daec83 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -37,6 +37,7 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_IMPORT_NS(LIBIE); MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(ICE_DDP_PKG_FILE); diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index df072ce767b1..2719f0e20933 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -2,6 +2,7 @@ /* Copyright (c) 2019, Intel Corporation. */ #include +#include #include "ice_txrx_lib.h" #include "ice_eswitch.h" @@ -38,30 +39,6 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val) } } -/** - * ice_ptype_to_htype - get a hash type - * @ptype: the ptype value from the descriptor - * - * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by - * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of - * Rx desc. 
- */ -static enum pkt_hash_types ice_ptype_to_htype(u16 ptype) -{ - struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype); - - if (!decoded.known) - return PKT_HASH_TYPE_NONE; - if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4) - return PKT_HASH_TYPE_L4; - if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3) - return PKT_HASH_TYPE_L3; - if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2) - return PKT_HASH_TYPE_L2; - - return PKT_HASH_TYPE_NONE; -} - /** * ice_get_rx_hash - get RX hash value from descriptor * @rx_desc: specific descriptor @@ -91,14 +68,16 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring, const union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb, u16 rx_ptype) { + struct libeth_rx_pt decoded; u32 hash; - if (!(rx_ring->netdev->features & NETIF_F_RXHASH)) + decoded = libie_rx_pt_parse(rx_ptype); + if (!libeth_rx_pt_has_hash(rx_ring->netdev, decoded)) return; hash = ice_get_rx_hash(rx_desc); if (likely(hash)) - skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype)); + libeth_rx_pt_set_hash(skb, hash, decoded); } /** @@ -114,34 +93,26 @@ static void ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb, union ice_32b_rx_flex_desc *rx_desc, u16 ptype) { - struct ice_rx_ptype_decoded decoded; + struct libeth_rx_pt decoded; u16 rx_status0, rx_status1; bool ipv4, ipv6; - rx_status0 = le16_to_cpu(rx_desc->wb.status_error0); - rx_status1 = le16_to_cpu(rx_desc->wb.status_error1); - - decoded = ice_decode_rx_desc_ptype(ptype); - /* Start with CHECKSUM_NONE and by default csum_level = 0 */ skb->ip_summed = CHECKSUM_NONE; - skb_checksum_none_assert(skb); - /* check if Rx checksum is enabled */ - if (!(ring->netdev->features & NETIF_F_RXCSUM)) + decoded = libie_rx_pt_parse(ptype); + if (!libeth_rx_pt_has_checksum(ring->netdev, decoded)) return; + rx_status0 = le16_to_cpu(rx_desc->wb.status_error0); + rx_status1 = le16_to_cpu(rx_desc->wb.status_error1); + /* check if HW has decoded the packet and checksum */ if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S))) return; - if (!(decoded.known && decoded.outer_ip)) - return; - - ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4); - ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) { ring->vsi->back->hw_rx_eipe_error++; @@ -169,19 +140,10 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb, * we need to bump the checksum level by 1 to reflect the fact that * we are indicating we validated the inner checksum. */ - if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT) + if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT) skb->csum_level = 1; - /* Only report checksum unnecessary for TCP, UDP, or SCTP */ - switch (decoded.inner_prot) { - case ICE_RX_PTYPE_INNER_PROT_TCP: - case ICE_RX_PTYPE_INNER_PROT_UDP: - case ICE_RX_PTYPE_INNER_PROT_SCTP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - default: - break; - } + skb->ip_summed = CHECKSUM_UNNECESSARY; return; checksum_fail: @@ -536,42 +498,6 @@ static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns) return 0; } -/* Define a ptype index -> XDP hash type lookup table. - * It uses the same ptype definitions as ice_decode_rx_desc_ptype[], - * avoiding possible copy-paste errors. 
- */ -#undef ICE_PTT -#undef ICE_PTT_UNUSED_ENTRY - -#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - [PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL - -#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0 - -/* A few supplementary definitions for when XDP hash types do not coincide - * with what can be generated from ptype definitions - * by means of preprocessor concatenation. - */ -#define XDP_RSS_L3_NONE XDP_RSS_TYPE_NONE -#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE -#define XDP_RSS_TYPE_PAY2 XDP_RSS_TYPE_L2 -#define XDP_RSS_TYPE_PAY3 XDP_RSS_TYPE_NONE -#define XDP_RSS_TYPE_PAY4 XDP_RSS_L4 - -static const enum xdp_rss_hash_type -ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = { - ICE_PTYPES -}; - -#undef XDP_RSS_L3_NONE -#undef XDP_RSS_L4_NONE -#undef XDP_RSS_TYPE_PAY2 -#undef XDP_RSS_TYPE_PAY3 -#undef XDP_RSS_TYPE_PAY4 - -#undef ICE_PTT -#undef ICE_PTT_UNUSED_ENTRY - /** * ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor * @eop_desc: End of Packet descriptor @@ -579,12 +505,7 @@ ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = { static enum xdp_rss_hash_type ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc) { - u16 ptype = ice_get_ptype(eop_desc); - - if (unlikely(ptype >= ICE_NUM_DEFINED_PTYPES)) - return 0; - - return ice_ptype_to_xdp_hash[ptype]; + return libie_rx_pt_parse(ice_get_ptype(eop_desc)).hash_type; } /** diff --git a/drivers/net/ethernet/intel/libeth/Kconfig b/drivers/net/ethernet/intel/libeth/Kconfig new file mode 100644 index 000000000000..af970a63c227 --- /dev/null +++ b/drivers/net/ethernet/intel/libeth/Kconfig @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2024 Intel Corporation + +config LIBETH + tristate + help + libeth is a common library containing routines shared between several + drivers, but not yet promoted to the generic kernel API. diff --git a/drivers/net/ethernet/intel/libeth/Makefile b/drivers/net/ethernet/intel/libeth/Makefile new file mode 100644 index 000000000000..cb99203d1dd2 --- /dev/null +++ b/drivers/net/ethernet/intel/libeth/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2024 Intel Corporation + +obj-$(CONFIG_LIBETH) += libeth.o + +libeth-objs += rx.o diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c new file mode 100644 index 000000000000..879c4dfd6a4e --- /dev/null +++ b/drivers/net/ethernet/intel/libeth/rx.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2024 Intel Corporation */ + +#include + +/* Converting abstract packet type numbers into a software structure with + * the packet parameters to do O(1) lookup on Rx. 
+ */ + +static const u16 libeth_rx_pt_xdp_oip[] = { + [LIBETH_RX_PT_OUTER_L2] = XDP_RSS_TYPE_NONE, + [LIBETH_RX_PT_OUTER_IPV4] = XDP_RSS_L3_IPV4, + [LIBETH_RX_PT_OUTER_IPV6] = XDP_RSS_L3_IPV6, +}; + +static const u16 libeth_rx_pt_xdp_iprot[] = { + [LIBETH_RX_PT_INNER_NONE] = XDP_RSS_TYPE_NONE, + [LIBETH_RX_PT_INNER_UDP] = XDP_RSS_L4_UDP, + [LIBETH_RX_PT_INNER_TCP] = XDP_RSS_L4_TCP, + [LIBETH_RX_PT_INNER_SCTP] = XDP_RSS_L4_SCTP, + [LIBETH_RX_PT_INNER_ICMP] = XDP_RSS_L4_ICMP, + [LIBETH_RX_PT_INNER_TIMESYNC] = XDP_RSS_TYPE_NONE, +}; + +static const u16 libeth_rx_pt_xdp_pl[] = { + [LIBETH_RX_PT_PAYLOAD_NONE] = XDP_RSS_TYPE_NONE, + [LIBETH_RX_PT_PAYLOAD_L2] = XDP_RSS_TYPE_NONE, + [LIBETH_RX_PT_PAYLOAD_L3] = XDP_RSS_TYPE_NONE, + [LIBETH_RX_PT_PAYLOAD_L4] = XDP_RSS_L4, +}; + +/** + * libeth_rx_pt_gen_hash_type - generate an XDP RSS hash type for a PT + * @pt: PT structure to evaluate + * + * Generates ```hash_type``` field with XDP RSS type values from the parsed + * packet parameters if they're obtained dynamically at runtime. + */ +void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt) +{ + pt->hash_type = 0; + pt->hash_type |= libeth_rx_pt_xdp_oip[pt->outer_ip]; + pt->hash_type |= libeth_rx_pt_xdp_iprot[pt->inner_prot]; + pt->hash_type |= libeth_rx_pt_xdp_pl[pt->payload_layer]; +} +EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, LIBETH); + +/* Module */ + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Common Ethernet library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/intel/libie/Kconfig b/drivers/net/ethernet/intel/libie/Kconfig new file mode 100644 index 000000000000..33aff6bc8f81 --- /dev/null +++ b/drivers/net/ethernet/intel/libie/Kconfig @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2024 Intel Corporation + +config LIBIE + tristate + select LIBETH + help + libie (Intel Ethernet library) is a common library built on top of + libeth and containing vendor-specific routines shared between several + Intel Ethernet drivers. diff --git a/drivers/net/ethernet/intel/libie/Makefile b/drivers/net/ethernet/intel/libie/Makefile new file mode 100644 index 000000000000..bf42c5aeeedd --- /dev/null +++ b/drivers/net/ethernet/intel/libie/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Copyright (C) 2024 Intel Corporation + +obj-$(CONFIG_LIBIE) += libie.o + +libie-objs += rx.o diff --git a/drivers/net/ethernet/intel/libie/rx.c b/drivers/net/ethernet/intel/libie/rx.c new file mode 100644 index 000000000000..38201ee1e891 --- /dev/null +++ b/drivers/net/ethernet/intel/libie/rx.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2024 Intel Corporation */ + +#include + +/* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed + * bitfield struct. + */ + +/* A few supplementary definitions for when XDP hash types do not coincide + * with what can be generated from ptype definitions by means of preprocessor + * concatenation. 
+ */ +#define XDP_RSS_L3_L2 XDP_RSS_TYPE_NONE +#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE +#define XDP_RSS_L4_TIMESYNC XDP_RSS_TYPE_NONE +#define XDP_RSS_TYPE_L3 XDP_RSS_TYPE_NONE +#define XDP_RSS_TYPE_L4 XDP_RSS_L4 + +#define LIBIE_RX_PT(oip, ofrag, tun, tp, tefr, iprot, pl) { \ + .outer_ip = LIBETH_RX_PT_OUTER_##oip, \ + .outer_frag = LIBETH_RX_PT_##ofrag, \ + .tunnel_type = LIBETH_RX_PT_TUNNEL_IP_##tun, \ + .tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_##tp, \ + .tunnel_end_frag = LIBETH_RX_PT_##tefr, \ + .inner_prot = LIBETH_RX_PT_INNER_##iprot, \ + .payload_layer = LIBETH_RX_PT_PAYLOAD_##pl, \ + .hash_type = XDP_RSS_L3_##oip | \ + XDP_RSS_L4_##iprot | \ + XDP_RSS_TYPE_##pl, \ + } + +#define LIBIE_RX_PT_UNUSED { } + +#define __LIBIE_RX_PT_L2(iprot, pl) \ + LIBIE_RX_PT(L2, NOT_FRAG, NONE, NONE, NOT_FRAG, iprot, pl) +#define LIBIE_RX_PT_L2 __LIBIE_RX_PT_L2(NONE, L2) +#define LIBIE_RX_PT_TS __LIBIE_RX_PT_L2(TIMESYNC, L2) +#define LIBIE_RX_PT_L3 __LIBIE_RX_PT_L2(NONE, L3) + +#define LIBIE_RX_PT_IP_FRAG(oip) \ + LIBIE_RX_PT(IPV##oip, FRAG, NONE, NONE, NOT_FRAG, NONE, L3) +#define LIBIE_RX_PT_IP_L3(oip, tun, teprot, tefr) \ + LIBIE_RX_PT(IPV##oip, NOT_FRAG, tun, teprot, tefr, NONE, L3) +#define LIBIE_RX_PT_IP_L4(oip, tun, teprot, iprot) \ + LIBIE_RX_PT(IPV##oip, NOT_FRAG, tun, teprot, NOT_FRAG, iprot, L4) + +#define LIBIE_RX_PT_IP_NOF(oip, tun, ver) \ + LIBIE_RX_PT_IP_L3(oip, tun, ver, NOT_FRAG), \ + LIBIE_RX_PT_IP_L4(oip, tun, ver, UDP), \ + LIBIE_RX_PT_UNUSED, \ + LIBIE_RX_PT_IP_L4(oip, tun, ver, TCP), \ + LIBIE_RX_PT_IP_L4(oip, tun, ver, SCTP), \ + LIBIE_RX_PT_IP_L4(oip, tun, ver, ICMP) + +/* IPv oip --> tun --> IPv ver */ +#define LIBIE_RX_PT_IP_TUN_VER(oip, tun, ver) \ + LIBIE_RX_PT_IP_L3(oip, tun, ver, FRAG), \ + LIBIE_RX_PT_IP_NOF(oip, tun, ver) + +/* Non Tunneled IPv oip */ +#define LIBIE_RX_PT_IP_RAW(oip) \ + LIBIE_RX_PT_IP_FRAG(oip), \ + LIBIE_RX_PT_IP_NOF(oip, NONE, NONE) + +/* IPv oip --> tun --> { IPv4, IPv6 } */ +#define LIBIE_RX_PT_IP_TUN(oip, tun) \ + LIBIE_RX_PT_IP_TUN_VER(oip, tun, IPV4), \ + LIBIE_RX_PT_IP_TUN_VER(oip, tun, IPV6) + +/* IPv oip --> GRE/NAT tun --> { x, IPv4, IPv6 } */ +#define LIBIE_RX_PT_IP_GRE(oip, tun) \ + LIBIE_RX_PT_IP_L3(oip, tun, NONE, NOT_FRAG), \ + LIBIE_RX_PT_IP_TUN(oip, tun) + +/* Non Tunneled IPv oip + * IPv oip --> { IPv4, IPv6 } + * IPv oip --> GRE/NAT --> { x, IPv4, IPv6 } + * IPv oip --> GRE/NAT --> MAC --> { x, IPv4, IPv6 } + * IPv oip --> GRE/NAT --> MAC/VLAN --> { x, IPv4, IPv6 } + */ +#define LIBIE_RX_PT_IP(oip) \ + LIBIE_RX_PT_IP_RAW(oip), \ + LIBIE_RX_PT_IP_TUN(oip, IP), \ + LIBIE_RX_PT_IP_GRE(oip, GRENAT), \ + LIBIE_RX_PT_IP_GRE(oip, GRENAT_MAC), \ + LIBIE_RX_PT_IP_GRE(oip, GRENAT_MAC_VLAN) + +/* Lookup table mapping for O(1) parsing */ +const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM] = { + /* L2 packet types */ + LIBIE_RX_PT_UNUSED, + LIBIE_RX_PT_L2, + LIBIE_RX_PT_TS, + LIBIE_RX_PT_L2, + LIBIE_RX_PT_UNUSED, + LIBIE_RX_PT_UNUSED, + LIBIE_RX_PT_L2, + LIBIE_RX_PT_L2, + LIBIE_RX_PT_UNUSED, + LIBIE_RX_PT_UNUSED, + LIBIE_RX_PT_L2, + LIBIE_RX_PT_UNUSED, + + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + LIBIE_RX_PT_L3, + + LIBIE_RX_PT_IP(4), + LIBIE_RX_PT_IP(6), +}; +EXPORT_SYMBOL_NS_GPL(libie_rx_pt_lut, LIBIE); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("Intel(R) Ethernet common library"); +MODULE_IMPORT_NS(LIBETH); +MODULE_LICENSE("GPL"); diff --git a/include/linux/net/intel/libie/rx.h 
b/include/linux/net/intel/libie/rx.h new file mode 100644 index 000000000000..37c5c8f26fb9 --- /dev/null +++ b/include/linux/net/intel/libie/rx.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2024 Intel Corporation */ + +#ifndef __LIBIE_RX_H +#define __LIBIE_RX_H + +#include + +/* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed + * bitfield struct. + */ + +#define LIBIE_RX_PT_NUM 154 + +extern const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM]; + +/** + * libie_rx_pt_parse - convert HW packet type to software bitfield structure + * @pt: 10-bit hardware packet type value from the descriptor + * + * ```libie_rx_pt_lut``` must be accessed only using this wrapper. + * + * Return: parsed bitfield struct corresponding to the provided ptype. + */ +static inline struct libeth_rx_pt libie_rx_pt_parse(u32 pt) +{ + if (unlikely(pt >= LIBIE_RX_PT_NUM)) + pt = 0; + + return libie_rx_pt_lut[pt]; +} + +#endif /* __LIBIE_RX_H */ diff --git a/include/net/libeth/rx.h b/include/net/libeth/rx.h new file mode 100644 index 000000000000..0807e19f44b3 --- /dev/null +++ b/include/net/libeth/rx.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2024 Intel Corporation */ + +#ifndef __LIBETH_RX_H +#define __LIBETH_RX_H + +#include + +/* Converting abstract packet type numbers into a software structure with + * the packet parameters to do O(1) lookup on Rx. + */ + +enum { + LIBETH_RX_PT_OUTER_L2 = 0U, + LIBETH_RX_PT_OUTER_IPV4, + LIBETH_RX_PT_OUTER_IPV6, +}; + +enum { + LIBETH_RX_PT_NOT_FRAG = 0U, + LIBETH_RX_PT_FRAG, +}; + +enum { + LIBETH_RX_PT_TUNNEL_IP_NONE = 0U, + LIBETH_RX_PT_TUNNEL_IP_IP, + LIBETH_RX_PT_TUNNEL_IP_GRENAT, + LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC, + LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC_VLAN, +}; + +enum { + LIBETH_RX_PT_TUNNEL_END_NONE = 0U, + LIBETH_RX_PT_TUNNEL_END_IPV4, + LIBETH_RX_PT_TUNNEL_END_IPV6, +}; + +enum { + LIBETH_RX_PT_INNER_NONE = 0U, + LIBETH_RX_PT_INNER_UDP, + LIBETH_RX_PT_INNER_TCP, + LIBETH_RX_PT_INNER_SCTP, + LIBETH_RX_PT_INNER_ICMP, + LIBETH_RX_PT_INNER_TIMESYNC, +}; + +#define LIBETH_RX_PT_PAYLOAD_NONE PKT_HASH_TYPE_NONE +#define LIBETH_RX_PT_PAYLOAD_L2 PKT_HASH_TYPE_L2 +#define LIBETH_RX_PT_PAYLOAD_L3 PKT_HASH_TYPE_L3 +#define LIBETH_RX_PT_PAYLOAD_L4 PKT_HASH_TYPE_L4 + +struct libeth_rx_pt { + u32 outer_ip:2; + u32 outer_frag:1; + u32 tunnel_type:3; + u32 tunnel_end_prot:2; + u32 tunnel_end_frag:1; + u32 inner_prot:3; + enum pkt_hash_types payload_layer:2; + + u32 pad:2; + enum xdp_rss_hash_type hash_type:16; +}; + +void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt); + +/** + * libeth_rx_pt_get_ip_ver - get IP version from a packet type structure + * @pt: packet type params + * + * Wrapper to compile out the IPv6 code from the drivers when not supported + * by the kernel. + * + * Return: @pt.outer_ip or stub for IPv6 when not compiled-in. + */ +static inline u32 libeth_rx_pt_get_ip_ver(struct libeth_rx_pt pt) +{ +#if !IS_ENABLED(CONFIG_IPV6) + switch (pt.outer_ip) { + case LIBETH_RX_PT_OUTER_IPV4: + return LIBETH_RX_PT_OUTER_IPV4; + default: + return LIBETH_RX_PT_OUTER_L2; + } +#else + return pt.outer_ip; +#endif +} + +/* libeth_has_*() can be used to quickly check whether the HW metadata is + * available to avoid further expensive processing such as descriptor reads. + * They already check for the corresponding netdev feature to be enabled, + * thus can be used as drop-in replacements. 
+ */ + +static inline bool libeth_rx_pt_has_checksum(const struct net_device *dev, + struct libeth_rx_pt pt) +{ + /* Non-zero _INNER* is only possible when _OUTER_IPV* is set, + * it is enough to check only for the L4 type. + */ + return likely(pt.inner_prot > LIBETH_RX_PT_INNER_NONE && + (dev->features & NETIF_F_RXCSUM)); +} + +static inline bool libeth_rx_pt_has_hash(const struct net_device *dev, + struct libeth_rx_pt pt) +{ + return likely(pt.payload_layer > LIBETH_RX_PT_PAYLOAD_NONE && + (dev->features & NETIF_F_RXHASH)); +} + +/** + * libeth_rx_pt_set_hash - fill in skb hash value basing on the PT + * @skb: skb to fill the hash in + * @hash: 32-bit hash value from the descriptor + * @pt: packet type + */ +static inline void libeth_rx_pt_set_hash(struct sk_buff *skb, u32 hash, + struct libeth_rx_pt pt) +{ + skb_set_hash(skb, hash, pt.payload_layer); +} + +#endif /* __LIBETH_RX_H */ -- cgit From a1d6063d9f2f4f4f4ed1733ed3f3f63244c4afb5 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 18 Apr 2024 13:36:10 +0200 Subject: slab: introduce kvmalloc_array_node() and kvcalloc_node() Add NUMA-aware counterparts for kvmalloc_array() and kvcalloc() to be able to flexibly allocate arrays for a particular node. Rewrite kvmalloc_array() to kvmalloc_array_node(NUMA_NO_NODE) call. Acked-by: Vlastimil Babka Reviewed-by: Przemek Kitszel Signed-off-by: Alexander Lobakin Signed-off-by: Tony Nguyen --- include/linux/slab.h | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slab.h b/include/linux/slab.h index e53cbfa18325..d1d1fa5e7983 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -774,14 +774,27 @@ static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags) return kvmalloc(size, flags | __GFP_ZERO); } -static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags) +static inline __alloc_size(1, 2) void * +kvmalloc_array_node(size_t n, size_t size, gfp_t flags, int node) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; - return kvmalloc(bytes, flags); + return kvmalloc_node(bytes, flags, node); +} + +static inline __alloc_size(1, 2) void * +kvmalloc_array(size_t n, size_t size, gfp_t flags) +{ + return kvmalloc_array_node(n, size, flags, NUMA_NO_NODE); +} + +static inline __alloc_size(1, 2) void * +kvcalloc_node(size_t n, size_t size, gfp_t flags, int node) +{ + return kvmalloc_array_node(n, size, flags | __GFP_ZERO, node); } static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags) -- cgit From 5fa4caff59f251bf9f766fc48c9f0a774a9216a0 Mon Sep 17 00:00:00 2001 From: Alexander Lobakin Date: Thu, 18 Apr 2024 13:36:15 +0200 Subject: iavf: switch to Page Pool Now that the IAVF driver simply uses dev_alloc_page() + free_page() with no custom recycling logics, it can easily be switched to using Page Pool / libeth API instead. This allows to removing the whole dancing around headroom, HW buffer size, and page order. All DMA-for-device is now done in the PP core, for-CPU -- in the libeth helper. Use skb_mark_for_recycle() to bring back the recycling and restore the performance. Speaking of performance: on par with the baseline and faster with the PP optimization series applied. But the memory usage for 1500b MTU is now almost 2x lower (x86_64) thanks to allocating a page every second descriptor. 
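For illustration, below is a condensed sketch of the fill-queue lifecycle this patch adopts. The libeth_fq / libeth_fq_fp fields and the libeth_rx_fq_create() / libeth_rx_alloc() / libeth_rx_fq_destroy() calls mirror their usage in the diff; the ring structure, NAPI wiring and error handling are simplified placeholders, not iavf code:

#include <linux/net/intel/libie/rx.h>	/* also pulls in <net/libeth/rx.h> */

struct demo_rx_ring {
	struct page_pool *pp;
	struct libeth_fqe *fqes;
	u32 truesize;
	u32 count;
};

/* One-shot creation of the page_pool plus the fill-queue element array */
static int demo_rx_ring_create(struct demo_rx_ring *ring,
			       struct napi_struct *napi, u32 count)
{
	struct libeth_fq fq = {
		.count	 = count,
		.buf_len = LIBIE_MAX_RX_BUF_LEN,
		.nid	 = NUMA_NO_NODE,
	};
	int err;

	err = libeth_rx_fq_create(&fq, napi);
	if (err)
		return err;

	ring->pp	= fq.pp;
	ring->fqes	= fq.fqes;
	ring->truesize	= fq.truesize;
	ring->count	= count;

	return 0;
}

/* Refill one descriptor slot; DMA-for-device sync happens in the PP core */
static dma_addr_t demo_rx_refill_one(const struct demo_rx_ring *ring, u32 idx)
{
	const struct libeth_fq_fp fq = {
		.pp		= ring->pp,
		.fqes		= ring->fqes,
		.truesize	= ring->truesize,
		.count		= ring->count,
	};

	return libeth_rx_alloc(&fq, idx);	/* DMA_MAPPING_ERROR on failure */
}

static void demo_rx_ring_destroy(struct demo_rx_ring *ring)
{
	struct libeth_fq fq = {
		.fqes	= ring->fqes,
		.pp	= ring->pp,
	};

	libeth_rx_fq_destroy(&fq);
	ring->fqes = NULL;
	ring->pp = NULL;
}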
Signed-off-by: Alexander Lobakin Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/iavf/iavf_main.c | 7 +- drivers/net/ethernet/intel/iavf/iavf_txrx.c | 257 +++++++----------------- drivers/net/ethernet/intel/iavf/iavf_txrx.h | 34 +--- drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 10 +- include/linux/net/intel/libie/rx.h | 17 ++ 5 files changed, 111 insertions(+), 214 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index ffb71a62b105..7e0de0a9b883 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include + #include "iavf.h" #include "iavf_prototype.h" /* All iavf tracepoints are defined by the include below, which must @@ -45,6 +47,7 @@ MODULE_DEVICE_TABLE(pci, iavf_pci_tbl); MODULE_ALIAS("i40evf"); MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); +MODULE_IMPORT_NS(LIBETH); MODULE_IMPORT_NS(LIBIE); MODULE_LICENSE("GPL v2"); @@ -1586,7 +1589,6 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter) rx_ring = &adapter->rx_rings[i]; rx_ring->queue_index = i; rx_ring->netdev = adapter->netdev; - rx_ring->dev = &adapter->pdev->dev; rx_ring->count = adapter->rx_desc_count; rx_ring->itr_setting = IAVF_ITR_RX_DEF; } @@ -2613,9 +2615,8 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter) iavf_set_ethtool_ops(netdev); netdev->watchdog_timeo = 5 * HZ; - /* MTU range: 68 - 9710 */ netdev->min_mtu = ETH_MIN_MTU; - netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD; + netdev->max_mtu = LIBIE_MAX_MTU; if (!is_valid_ether_addr(adapter->hw.mac.addr)) { dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n", diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 2ec68b51bebe..26b424fd6718 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -690,11 +690,8 @@ err: **/ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring) { - unsigned long bi_size; - u16 i; - /* ring already cleared, nothing to do */ - if (!rx_ring->rx_bi) + if (!rx_ring->rx_fqes) return; if (rx_ring->skb) { @@ -702,40 +699,16 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring) rx_ring->skb = NULL; } - /* Free all the Rx ring sk_buffs */ - for (i = 0; i < rx_ring->count; i++) { - struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; + /* Free all the Rx ring buffers */ + for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) { + const struct libeth_fqe *rx_fqes = &rx_ring->rx_fqes[i]; - if (!rx_bi->page) - continue; + page_pool_put_full_page(rx_ring->pp, rx_fqes->page, false); - /* Invalidate cache lines that may have been written to by - * device so that we avoid corrupting memory. 
- */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_bi->dma, - rx_bi->page_offset, - IAVF_RXBUFFER_3072, - DMA_FROM_DEVICE); - - /* free resources associated with mapping */ - dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, - iavf_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IAVF_RX_DMA_ATTR); - - __free_page(rx_bi->page); - - rx_bi->page = NULL; - rx_bi->page_offset = 0; + if (unlikely(++i == rx_ring->count)) + i = 0; } - bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; - memset(rx_ring->rx_bi, 0, bi_size); - - /* Zero out the descriptor ring */ - memset(rx_ring->desc, 0, rx_ring->size); - rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; } @@ -748,15 +721,22 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring) **/ void iavf_free_rx_resources(struct iavf_ring *rx_ring) { + struct libeth_fq fq = { + .fqes = rx_ring->rx_fqes, + .pp = rx_ring->pp, + }; + iavf_clean_rx_ring(rx_ring); - kfree(rx_ring->rx_bi); - rx_ring->rx_bi = NULL; if (rx_ring->desc) { - dma_free_coherent(rx_ring->dev, rx_ring->size, + dma_free_coherent(rx_ring->pp->p.dev, rx_ring->size, rx_ring->desc, rx_ring->dma); rx_ring->desc = NULL; } + + libeth_rx_fq_destroy(&fq); + rx_ring->rx_fqes = NULL; + rx_ring->pp = NULL; } /** @@ -767,26 +747,32 @@ void iavf_free_rx_resources(struct iavf_ring *rx_ring) **/ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring) { - struct device *dev = rx_ring->dev; - int bi_size; - - /* warn if we are about to overwrite the pointer */ - WARN_ON(rx_ring->rx_bi); - bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; - rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); - if (!rx_ring->rx_bi) - goto err; + struct libeth_fq fq = { + .count = rx_ring->count, + .buf_len = LIBIE_MAX_RX_BUF_LEN, + .nid = NUMA_NO_NODE, + }; + int ret; + + ret = libeth_rx_fq_create(&fq, &rx_ring->q_vector->napi); + if (ret) + return ret; + + rx_ring->pp = fq.pp; + rx_ring->rx_fqes = fq.fqes; + rx_ring->truesize = fq.truesize; + rx_ring->rx_buf_len = fq.buf_len; u64_stats_init(&rx_ring->syncp); /* Round up to nearest 4K */ rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc); rx_ring->size = ALIGN(rx_ring->size, 4096); - rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, + rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size, &rx_ring->dma, GFP_KERNEL); if (!rx_ring->desc) { - dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", + dev_info(fq.pp->p.dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n", rx_ring->size); goto err; } @@ -795,9 +781,12 @@ int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring) rx_ring->next_to_use = 0; return 0; + err: - kfree(rx_ring->rx_bi); - rx_ring->rx_bi = NULL; + libeth_rx_fq_destroy(&fq); + rx_ring->rx_fqes = NULL; + rx_ring->pp = NULL; + return -ENOMEM; } @@ -819,49 +808,6 @@ static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) writel(val, rx_ring->tail); } -/** - * iavf_alloc_mapped_page - recycle or make a new page - * @rx_ring: ring to use - * @bi: rx_buffer struct to modify - * - * Returns true if the page was successfully allocated or - * reused. 
- **/ -static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *bi) -{ - struct page *page = bi->page; - dma_addr_t dma; - - /* alloc new page for storage */ - page = dev_alloc_pages(iavf_rx_pg_order(rx_ring)); - if (unlikely(!page)) { - rx_ring->rx_stats.alloc_page_failed++; - return false; - } - - /* map page for use */ - dma = dma_map_page_attrs(rx_ring->dev, page, 0, - iavf_rx_pg_size(rx_ring), - DMA_FROM_DEVICE, - IAVF_RX_DMA_ATTR); - - /* if mapping failed free memory back to system since - * there isn't much point in holding memory we can't use - */ - if (dma_mapping_error(rx_ring->dev, dma)) { - __free_pages(page, iavf_rx_pg_order(rx_ring)); - rx_ring->rx_stats.alloc_page_failed++; - return false; - } - - bi->dma = dma; - bi->page = page; - bi->page_offset = IAVF_SKB_PAD; - - return true; -} - /** * iavf_receive_skb - Send a completed packet up the stack * @rx_ring: rx ring in play @@ -892,38 +838,37 @@ static void iavf_receive_skb(struct iavf_ring *rx_ring, **/ bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count) { + const struct libeth_fq_fp fq = { + .pp = rx_ring->pp, + .fqes = rx_ring->rx_fqes, + .truesize = rx_ring->truesize, + .count = rx_ring->count, + }; u16 ntu = rx_ring->next_to_use; union iavf_rx_desc *rx_desc; - struct iavf_rx_buffer *bi; /* do nothing if no valid netdev defined */ if (!rx_ring->netdev || !cleaned_count) return false; rx_desc = IAVF_RX_DESC(rx_ring, ntu); - bi = &rx_ring->rx_bi[ntu]; do { - if (!iavf_alloc_mapped_page(rx_ring, bi)) - goto no_buffers; + dma_addr_t addr; - /* sync the buffer for use by the device */ - dma_sync_single_range_for_device(rx_ring->dev, bi->dma, - bi->page_offset, - IAVF_RXBUFFER_3072, - DMA_FROM_DEVICE); + addr = libeth_rx_alloc(&fq, ntu); + if (addr == DMA_MAPPING_ERROR) + goto no_buffers; /* Refresh the desc even if buffer_addrs didn't change * because each write-back erases this info. */ - rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); + rx_desc->read.pkt_addr = cpu_to_le64(addr); rx_desc++; - bi++; ntu++; if (unlikely(ntu == rx_ring->count)) { rx_desc = IAVF_RX_DESC(rx_ring, 0); - bi = rx_ring->rx_bi; ntu = 0; } @@ -942,6 +887,8 @@ no_buffers: if (rx_ring->next_to_use != ntu) iavf_release_rx_desc(rx_ring, ntu); + rx_ring->rx_stats.alloc_page_failed++; + /* make sure to come back via polling to try again after * allocation failure */ @@ -1090,9 +1037,8 @@ static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb) /** * iavf_add_rx_frag - Add contents of Rx buffer to sk_buff - * @rx_ring: rx descriptor ring to transact packets on - * @rx_buffer: buffer containing page to add * @skb: sk_buff to place the data into + * @rx_buffer: buffer containing page to add * @size: packet length from rx_desc * * This function will add the data contained in rx_buffer->page to the skb. @@ -1100,105 +1046,49 @@ static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb) * * The function will then update the page offset. 
**/ -static void iavf_add_rx_frag(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *rx_buffer, - struct sk_buff *skb, +static void iavf_add_rx_frag(struct sk_buff *skb, + const struct libeth_fqe *rx_buffer, unsigned int size) { - unsigned int truesize = SKB_DATA_ALIGN(size + IAVF_SKB_PAD); - - if (!size) - return; + u32 hr = rx_buffer->page->pp->p.offset; skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, - rx_buffer->page_offset, size, truesize); -} - -/** - * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use - * @rx_ring: rx descriptor ring to transact packets on - * @size: size of buffer to add to skb - * - * This function will pull an Rx buffer from the ring and synchronize it - * for use by the CPU. - */ -static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, - const unsigned int size) -{ - struct iavf_rx_buffer *rx_buffer; - - rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; - prefetchw(rx_buffer->page); - if (!size) - return rx_buffer; - - /* we are reusing so sync this buffer for CPU use */ - dma_sync_single_range_for_cpu(rx_ring->dev, - rx_buffer->dma, - rx_buffer->page_offset, - size, - DMA_FROM_DEVICE); - - return rx_buffer; + rx_buffer->offset + hr, size, rx_buffer->truesize); } /** * iavf_build_skb - Build skb around an existing buffer - * @rx_ring: Rx descriptor ring to transact packets on * @rx_buffer: Rx buffer to pull data from * @size: size of buffer to add to skb * * This function builds an skb around an existing Rx buffer, taking care * to set up the skb correctly and avoid any memcpy overhead. */ -static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *rx_buffer, +static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer, unsigned int size) { - void *va; - unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + - SKB_DATA_ALIGN(IAVF_SKB_PAD + size); + u32 hr = rx_buffer->page->pp->p.offset; struct sk_buff *skb; + void *va; - if (!rx_buffer || !size) - return NULL; /* prefetch first cache line of first page */ - va = page_address(rx_buffer->page) + rx_buffer->page_offset; - net_prefetch(va); + va = page_address(rx_buffer->page) + rx_buffer->offset; + net_prefetch(va + hr); /* build an skb around the page buffer */ - skb = napi_build_skb(va - IAVF_SKB_PAD, truesize); + skb = napi_build_skb(va, rx_buffer->truesize); if (unlikely(!skb)) return NULL; + skb_mark_for_recycle(skb); + /* update pointers within the skb to store the data */ - skb_reserve(skb, IAVF_SKB_PAD); + skb_reserve(skb, hr); __skb_put(skb, size); return skb; } -/** - * iavf_put_rx_buffer - Unmap used buffer - * @rx_ring: rx descriptor ring to transact packets on - * @rx_buffer: rx buffer to pull data from - * - * This function will unmap the buffer after it's written by HW. 
- */ -static void iavf_put_rx_buffer(struct iavf_ring *rx_ring, - struct iavf_rx_buffer *rx_buffer) -{ - if (!rx_buffer) - return; - - /* we are not reusing the buffer so unmap it */ - dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, PAGE_SIZE, - DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR); - - /* clear contents of buffer_info */ - rx_buffer->page = NULL; -} - /** * iavf_is_non_eop - process handling of non-EOP buffers * @rx_ring: Rx ring being processed @@ -1252,7 +1142,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) bool failure = false; while (likely(total_rx_packets < (unsigned int)budget)) { - struct iavf_rx_buffer *rx_buffer; + struct libeth_fqe *rx_buffer; union iavf_rx_desc *rx_desc; unsigned int size; u16 vlan_tag = 0; @@ -1287,13 +1177,16 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword); iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb); - rx_buffer = iavf_get_rx_buffer(rx_ring, size); + + rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean]; + if (!libeth_rx_sync_for_cpu(rx_buffer, size)) + goto skip_data; /* retrieve a buffer from the ring */ if (skb) - iavf_add_rx_frag(rx_ring, rx_buffer, skb, size); + iavf_add_rx_frag(skb, rx_buffer, size); else - skb = iavf_build_skb(rx_ring, rx_buffer, size); + skb = iavf_build_skb(rx_buffer, size); /* exit if we failed to retrieve a buffer */ if (!skb) { @@ -1301,10 +1194,10 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) break; } - iavf_put_rx_buffer(rx_ring, rx_buffer); +skip_data: cleaned_count++; - if (iavf_is_non_eop(rx_ring, rx_desc, skb)) + if (iavf_is_non_eop(rx_ring, rx_desc, skb) || unlikely(!skb)) continue; /* ERR_MASK will only have valid bits if EOP set, and diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h index ed559fa6f214..d7b5587aeb8e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h @@ -80,18 +80,8 @@ enum iavf_dyn_idx_t { BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \ BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP)) -/* Supported Rx Buffer Sizes (a multiple of 128) */ -#define IAVF_RXBUFFER_3072 3072 /* Used for large frames w/ padding */ -#define IAVF_MAX_RXBUFFER 9728 /* largest size for single descriptor */ - -#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2)) #define iavf_rx_desc iavf_32byte_rx_desc -#define IAVF_RX_DMA_ATTR \ - (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) - -#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) - /** * iavf_test_staterr - tests bits in Rx descriptor status and error fields * @rx_desc: pointer to receive descriptor (in le64 format) @@ -210,12 +200,6 @@ struct iavf_tx_buffer { u32 tx_flags; }; -struct iavf_rx_buffer { - dma_addr_t dma; - struct page *page; - __u32 page_offset; -}; - struct iavf_queue_stats { u64 packets; u64 bytes; @@ -251,13 +235,18 @@ struct iavf_rx_queue_stats { struct iavf_ring { struct iavf_ring *next; /* pointer to next ring in q_vector */ void *desc; /* Descriptor ring memory */ - struct device *dev; /* Used for DMA mapping */ + union { + struct page_pool *pp; /* Used on Rx for buffer management */ + struct device *dev; /* Used on Tx for DMA mapping */ + }; struct net_device *netdev; /* netdev ring maps to */ union { + struct libeth_fqe *rx_fqes; struct iavf_tx_buffer *tx_bi; - struct iavf_rx_buffer *rx_bi; }; u8 __iomem *tail; + u32 truesize; + u16 queue_index; /* Queue number of ring */ /* high bit set 
means dynamic, use accessors routines to read/write. @@ -305,6 +294,8 @@ struct iavf_ring { * iavf_clean_rx_ring_irq() is called * for this ring. */ + + u32 rx_buf_len; } ____cacheline_internodealigned_in_smp; #define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002 @@ -328,13 +319,6 @@ struct iavf_ring_container { #define iavf_for_each_ring(pos, head) \ for (pos = (head).ring; pos != NULL; pos = pos->next) -static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring) -{ - return 0; -} - -#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring)) - bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count); netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev); int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring); diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index f8e9f859a4f1..1e543f6a7c30 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2013 - 2018 Intel Corporation. */ +#include + #include "iavf.h" #include "iavf_prototype.h" @@ -268,13 +270,13 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter) void iavf_configure_queues(struct iavf_adapter *adapter) { struct virtchnl_vsi_queue_config_info *vqci; - int i, max_frame = adapter->vf_res->max_mtu; int pairs = adapter->num_active_queues; struct virtchnl_queue_pair_info *vqpi; + u32 i, max_frame; size_t len; - if (max_frame > IAVF_MAX_RXBUFFER || !max_frame) - max_frame = IAVF_MAX_RXBUFFER; + max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset); + max_frame = min_not_zero(adapter->vf_res->max_mtu, max_frame); if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) { /* bail because we already have a command pending */ @@ -304,7 +306,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter) vqpi->rxq.ring_len = adapter->rx_rings[i].count; vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma; vqpi->rxq.max_pkt_size = max_frame; - vqpi->rxq.databuffer_size = IAVF_RXBUFFER_3072; + vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len; if (CRC_OFFLOAD_ALLOWED(adapter)) vqpi->rxq.crc_disable = !!(adapter->netdev->features & NETIF_F_RXFCS); diff --git a/include/linux/net/intel/libie/rx.h b/include/linux/net/intel/libie/rx.h index 37c5c8f26fb9..8e97775f1d66 100644 --- a/include/linux/net/intel/libie/rx.h +++ b/include/linux/net/intel/libie/rx.h @@ -6,6 +6,23 @@ #include +/* Rx buffer management */ + +/* The largest size for a single descriptor as per HW */ +#define LIBIE_MAX_RX_BUF_LEN 9728U +/* "True" HW-writeable space: minimum from SW and HW values */ +#define LIBIE_RX_BUF_LEN(hr) min_t(u32, LIBETH_RX_PAGE_LEN(hr), \ + LIBIE_MAX_RX_BUF_LEN) + +/* The maximum frame size as per HW (S/G) */ +#define __LIBIE_MAX_RX_FRM_LEN 16382U +/* ATST, HW can chain up to 5 Rx descriptors */ +#define LIBIE_MAX_RX_FRM_LEN(hr) \ + min_t(u32, __LIBIE_MAX_RX_FRM_LEN, LIBIE_RX_BUF_LEN(hr) * 5) +/* Maximum frame size minus LL overhead */ +#define LIBIE_MAX_MTU \ + (LIBIE_MAX_RX_FRM_LEN(LIBETH_MAX_HEADROOM) - LIBETH_RX_LL_LEN) + /* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed * bitfield struct. 
*/ -- cgit From cdc66553c4130735f0a2db943a5259e54ff1597a Mon Sep 17 00:00:00 2001 From: Dawei Li Date: Tue, 16 Apr 2024 16:54:48 +0800 Subject: cpumask: Introduce cpumask_first_and_and() Introduce cpumask_first_and_and() to get intersection between 3 cpumasks, free of any intermediate cpumask variable. Instead, cpumask_first_and_and() works in-place with all inputs and produces desired output directly. Signed-off-by: Dawei Li Signed-off-by: Thomas Gleixner Acked-by: Yury Norov Link: https://lore.kernel.org/r/20240416085454.3547175-2-dawei.li@shingroup.cn --- include/linux/cpumask.h | 17 +++++++++++++++++ include/linux/find.h | 27 +++++++++++++++++++++++++++ lib/find_bit.c | 12 ++++++++++++ 3 files changed, 56 insertions(+) (limited to 'include/linux') diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 1c29947db848..c46f9e9e1d66 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -187,6 +187,23 @@ unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), small_cpumask_bits); } +/** + * cpumask_first_and_and - return the first cpu from *srcp1 & *srcp2 & *srcp3 + * @srcp1: the first input + * @srcp2: the second input + * @srcp3: the third input + * + * Return: >= nr_cpu_ids if no cpus set in all. + */ +static inline +unsigned int cpumask_first_and_and(const struct cpumask *srcp1, + const struct cpumask *srcp2, + const struct cpumask *srcp3) +{ + return find_first_and_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), + cpumask_bits(srcp3), small_cpumask_bits); +} + /** * cpumask_last - get the last CPU in a cpumask * @srcp: - the cpumask pointer diff --git a/include/linux/find.h b/include/linux/find.h index c69598e383c1..28ec5a03393a 100644 --- a/include/linux/find.h +++ b/include/linux/find.h @@ -29,6 +29,8 @@ unsigned long __find_nth_and_andnot_bit(const unsigned long *addr1, const unsign unsigned long n); extern unsigned long _find_first_and_bit(const unsigned long *addr1, const unsigned long *addr2, unsigned long size); +unsigned long _find_first_and_and_bit(const unsigned long *addr1, const unsigned long *addr2, + const unsigned long *addr3, unsigned long size); extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size); extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size); @@ -345,6 +347,31 @@ unsigned long find_first_and_bit(const unsigned long *addr1, } #endif +/** + * find_first_and_and_bit - find the first set bit in 3 memory regions + * @addr1: The first address to base the search on + * @addr2: The second address to base the search on + * @addr3: The third address to base the search on + * @size: The bitmap size in bits + * + * Returns the bit number for the first set bit + * If no bits are set, returns @size. + */ +static inline +unsigned long find_first_and_and_bit(const unsigned long *addr1, + const unsigned long *addr2, + const unsigned long *addr3, + unsigned long size) +{ + if (small_const_nbits(size)) { + unsigned long val = *addr1 & *addr2 & *addr3 & GENMASK(size - 1, 0); + + return val ? 
__ffs(val) : size; + } + + return _find_first_and_and_bit(addr1, addr2, addr3, size); +} + #ifndef find_first_zero_bit /** * find_first_zero_bit - find the first cleared bit in a memory region diff --git a/lib/find_bit.c b/lib/find_bit.c index 32f99e9a670e..dacadd904250 100644 --- a/lib/find_bit.c +++ b/lib/find_bit.c @@ -116,6 +116,18 @@ unsigned long _find_first_and_bit(const unsigned long *addr1, EXPORT_SYMBOL(_find_first_and_bit); #endif +/* + * Find the first set bit in three memory regions. + */ +unsigned long _find_first_and_and_bit(const unsigned long *addr1, + const unsigned long *addr2, + const unsigned long *addr3, + unsigned long size) +{ + return FIND_FIRST_BIT(addr1[idx] & addr2[idx] & addr3[idx], /* nop */, size); +} +EXPORT_SYMBOL(_find_first_and_and_bit); + #ifndef find_first_zero_bit /* * Find the first cleared bit in a memory region. -- cgit From 95c07d58250ca3ed01855c20be568cf04e15382f Mon Sep 17 00:00:00 2001 From: Haiyue Wang Date: Wed, 24 Apr 2024 13:45:07 +0800 Subject: bpf: update the comment for BTF_FIELDS_MAX The commit d56b63cf0c0f ("bpf: add support for bpf_wq user type") changes the fields support number to 11, just sync the comment. Signed-off-by: Haiyue Wang Acked-by: Yonghong Song Link: https://lore.kernel.org/r/20240424054526.8031-1-haiyue.wang@intel.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 64bf0cf3ee95..978200f6d925 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -184,7 +184,7 @@ struct bpf_map_ops { }; enum { - /* Support at most 10 fields in a BTF type */ + /* Support at most 11 fields in a BTF type */ BTF_FIELDS_MAX = 11, }; -- cgit From 3e1c6f35409f9e447bf37f64840f5b65576bfb78 Mon Sep 17 00:00:00 2001 From: Vadim Fedorenko Date: Mon, 22 Apr 2024 15:50:21 -0700 Subject: bpf: make common crypto API for TC/XDP programs Add crypto API support to BPF to be able to decrypt or encrypt packets in TC/XDP BPF programs. Special care should be taken for initialization part of crypto algo because crypto alloc) doesn't work with preemtion disabled, it can be run only in sleepable BPF program. Also async crypto is not supported because of the very same issue - TC/XDP BPF programs are not sleepable. 
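A rough BPF-side sketch of the intended setup flow follows (not part of this patch): the kfunc prototypes mirror the kernel definitions added below, while the "skcipher" type string, the "ecb(aes)" algorithm, the map layout and the key handling are illustrative assumptions. A sleepable (here: syscall) program creates the context and stashes it in a map kptr; a TC/XDP program can later read it back and call bpf_crypto_decrypt()/bpf_crypto_encrypt() on dynptrs over the packet data:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* kfunc prototypes as a BPF program would declare them (assumed) */
struct bpf_crypto_ctx *bpf_crypto_ctx_create(const struct bpf_crypto_params *params,
					     u32 params__sz, int *err) __ksym;
void bpf_crypto_ctx_release(struct bpf_crypto_ctx *ctx) __ksym;

struct ctx_slot {
	struct bpf_crypto_ctx __kptr *ctx;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, struct ctx_slot);
	__uint(max_entries, 1);
} crypto_ctx_map SEC(".maps");

u8 cipher_key[16];	/* filled in from user space before the prog is run */

/* Context setup must happen in a sleepable program (GFP_KERNEL allocations) */
SEC("syscall")
int setup_crypto_ctx(void *args)
{
	struct bpf_crypto_params params = {
		.type	 = "skcipher",	/* backend name: assumed, registered separately */
		.algo	 = "ecb(aes)",	/* example algorithm, not mandated by the patch */
		.key_len = sizeof(cipher_key),
	};
	struct bpf_crypto_ctx *cctx, *old;
	struct ctx_slot *slot;
	u32 key = 0;
	int err = 0;

	slot = bpf_map_lookup_elem(&crypto_ctx_map, &key);
	if (!slot)
		return -1;

	__builtin_memcpy(params.key, cipher_key, sizeof(cipher_key));

	cctx = bpf_crypto_ctx_create(&params, sizeof(params), &err);
	if (!cctx)
		return err;

	/* store the context for non-sleepable TC/XDP users, drop any old one */
	old = bpf_kptr_xchg(&slot->ctx, cctx);
	if (old)
		bpf_crypto_ctx_release(old);

	return 0;
}

char LICENSE[] SEC("license") = "GPL";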
Signed-off-by: Vadim Fedorenko Link: https://lore.kernel.org/r/20240422225024.2847039-2-vadfed@meta.com Signed-off-by: Martin KaFai Lau --- include/linux/bpf.h | 1 + include/linux/bpf_crypto.h | 24 +++ kernel/bpf/Makefile | 3 + kernel/bpf/crypto.c | 385 +++++++++++++++++++++++++++++++++++++++++++++ kernel/bpf/helpers.c | 2 +- kernel/bpf/verifier.c | 1 + 6 files changed, 415 insertions(+), 1 deletion(-) create mode 100644 include/linux/bpf_crypto.h create mode 100644 kernel/bpf/crypto.c (limited to 'include/linux') diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 978200f6d925..364563b74db6 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1275,6 +1275,7 @@ int bpf_dynptr_check_size(u32 size); u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr); const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len); void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len); +bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr); #ifdef CONFIG_BPF_JIT int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); diff --git a/include/linux/bpf_crypto.h b/include/linux/bpf_crypto.h new file mode 100644 index 000000000000..a41e71d4e2d9 --- /dev/null +++ b/include/linux/bpf_crypto.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#ifndef _BPF_CRYPTO_H +#define _BPF_CRYPTO_H + +struct bpf_crypto_type { + void *(*alloc_tfm)(const char *algo); + void (*free_tfm)(void *tfm); + int (*has_algo)(const char *algo); + int (*setkey)(void *tfm, const u8 *key, unsigned int keylen); + int (*setauthsize)(void *tfm, unsigned int authsize); + int (*encrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv); + int (*decrypt)(void *tfm, const u8 *src, u8 *dst, unsigned int len, u8 *iv); + unsigned int (*ivsize)(void *tfm); + unsigned int (*statesize)(void *tfm); + u32 (*get_flags)(void *tfm); + struct module *owner; + char name[14]; +}; + +int bpf_crypto_register_type(const struct bpf_crypto_type *type); +int bpf_crypto_unregister_type(const struct bpf_crypto_type *type); + +#endif /* _BPF_CRYPTO_H */ diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 368c5d86b5b7..736bd22e5ce0 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -44,6 +44,9 @@ obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o obj-$(CONFIG_BPF_SYSCALL) += cpumask.o obj-${CONFIG_BPF_LSM} += bpf_lsm.o endif +ifeq ($(CONFIG_CRYPTO),y) +obj-$(CONFIG_BPF_SYSCALL) += crypto.o +endif obj-$(CONFIG_BPF_PRELOAD) += preload/ obj-$(CONFIG_BPF_SYSCALL) += relo_core.o diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c new file mode 100644 index 000000000000..2bee4af91e38 --- /dev/null +++ b/kernel/bpf/crypto.c @@ -0,0 +1,385 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2024 Meta, Inc */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct bpf_crypto_type_list { + const struct bpf_crypto_type *type; + struct list_head list; +}; + +/* BPF crypto initialization parameters struct */ +/** + * struct bpf_crypto_params - BPF crypto initialization parameters structure + * @type: The string of crypto operation type. + * @reserved: Reserved member, will be reused for more options in future + * Values: + * 0 + * @algo: The string of algorithm to initialize. + * @key: The cipher key used to init crypto algorithm. + * @key_len: The length of cipher key. + * @authsize: The length of authentication tag used by algorithm. 
+ */ +struct bpf_crypto_params { + char type[14]; + u8 reserved[2]; + char algo[128]; + u8 key[256]; + u32 key_len; + u32 authsize; +}; + +static LIST_HEAD(bpf_crypto_types); +static DECLARE_RWSEM(bpf_crypto_types_sem); + +/** + * struct bpf_crypto_ctx - refcounted BPF crypto context structure + * @type: The pointer to bpf crypto type + * @tfm: The pointer to instance of crypto API struct. + * @siv_len: Size of IV and state storage for cipher + * @rcu: The RCU head used to free the crypto context with RCU safety. + * @usage: Object reference counter. When the refcount goes to 0, the + * memory is released back to the BPF allocator, which provides + * RCU safety. + */ +struct bpf_crypto_ctx { + const struct bpf_crypto_type *type; + void *tfm; + u32 siv_len; + struct rcu_head rcu; + refcount_t usage; +}; + +int bpf_crypto_register_type(const struct bpf_crypto_type *type) +{ + struct bpf_crypto_type_list *node; + int err = -EEXIST; + + down_write(&bpf_crypto_types_sem); + list_for_each_entry(node, &bpf_crypto_types, list) { + if (!strcmp(node->type->name, type->name)) + goto unlock; + } + + node = kmalloc(sizeof(*node), GFP_KERNEL); + err = -ENOMEM; + if (!node) + goto unlock; + + node->type = type; + list_add(&node->list, &bpf_crypto_types); + err = 0; + +unlock: + up_write(&bpf_crypto_types_sem); + + return err; +} +EXPORT_SYMBOL_GPL(bpf_crypto_register_type); + +int bpf_crypto_unregister_type(const struct bpf_crypto_type *type) +{ + struct bpf_crypto_type_list *node; + int err = -ENOENT; + + down_write(&bpf_crypto_types_sem); + list_for_each_entry(node, &bpf_crypto_types, list) { + if (strcmp(node->type->name, type->name)) + continue; + + list_del(&node->list); + kfree(node); + err = 0; + break; + } + up_write(&bpf_crypto_types_sem); + + return err; +} +EXPORT_SYMBOL_GPL(bpf_crypto_unregister_type); + +static const struct bpf_crypto_type *bpf_crypto_get_type(const char *name) +{ + const struct bpf_crypto_type *type = ERR_PTR(-ENOENT); + struct bpf_crypto_type_list *node; + + down_read(&bpf_crypto_types_sem); + list_for_each_entry(node, &bpf_crypto_types, list) { + if (strcmp(node->type->name, name)) + continue; + + if (try_module_get(node->type->owner)) + type = node->type; + break; + } + up_read(&bpf_crypto_types_sem); + + return type; +} + +__bpf_kfunc_start_defs(); + +/** + * bpf_crypto_ctx_create() - Create a mutable BPF crypto context. + * + * Allocates a crypto context that can be used, acquired, and released by + * a BPF program. The crypto context returned by this function must either + * be embedded in a map as a kptr, or freed with bpf_crypto_ctx_release(). + * As crypto API functions use GFP_KERNEL allocations, this function can + * only be used in sleepable BPF programs. + * + * bpf_crypto_ctx_create() allocates memory for crypto context. + * It may return NULL if no memory is available. + * @params: pointer to struct bpf_crypto_params which contains all the + * details needed to initialise crypto context. + * @params__sz: size of steuct bpf_crypto_params usef by bpf program + * @err: integer to store error code when NULL is returned. 
+ */ +__bpf_kfunc struct bpf_crypto_ctx * +bpf_crypto_ctx_create(const struct bpf_crypto_params *params, u32 params__sz, + int *err) +{ + const struct bpf_crypto_type *type; + struct bpf_crypto_ctx *ctx; + + if (!params || params->reserved[0] || params->reserved[1] || + params__sz != sizeof(struct bpf_crypto_params)) { + *err = -EINVAL; + return NULL; + } + + type = bpf_crypto_get_type(params->type); + if (IS_ERR(type)) { + *err = PTR_ERR(type); + return NULL; + } + + if (!type->has_algo(params->algo)) { + *err = -EOPNOTSUPP; + goto err_module_put; + } + + if (!!params->authsize ^ !!type->setauthsize) { + *err = -EOPNOTSUPP; + goto err_module_put; + } + + if (!params->key_len || params->key_len > sizeof(params->key)) { + *err = -EINVAL; + goto err_module_put; + } + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) { + *err = -ENOMEM; + goto err_module_put; + } + + ctx->type = type; + ctx->tfm = type->alloc_tfm(params->algo); + if (IS_ERR(ctx->tfm)) { + *err = PTR_ERR(ctx->tfm); + goto err_free_ctx; + } + + if (params->authsize) { + *err = type->setauthsize(ctx->tfm, params->authsize); + if (*err) + goto err_free_tfm; + } + + *err = type->setkey(ctx->tfm, params->key, params->key_len); + if (*err) + goto err_free_tfm; + + if (type->get_flags(ctx->tfm) & CRYPTO_TFM_NEED_KEY) { + *err = -EINVAL; + goto err_free_tfm; + } + + ctx->siv_len = type->ivsize(ctx->tfm) + type->statesize(ctx->tfm); + + refcount_set(&ctx->usage, 1); + + return ctx; + +err_free_tfm: + type->free_tfm(ctx->tfm); +err_free_ctx: + kfree(ctx); +err_module_put: + module_put(type->owner); + + return NULL; +} + +static void crypto_free_cb(struct rcu_head *head) +{ + struct bpf_crypto_ctx *ctx; + + ctx = container_of(head, struct bpf_crypto_ctx, rcu); + ctx->type->free_tfm(ctx->tfm); + module_put(ctx->type->owner); + kfree(ctx); +} + +/** + * bpf_crypto_ctx_acquire() - Acquire a reference to a BPF crypto context. + * @ctx: The BPF crypto context being acquired. The ctx must be a trusted + * pointer. + * + * Acquires a reference to a BPF crypto context. The context returned by this function + * must either be embedded in a map as a kptr, or freed with + * bpf_crypto_ctx_release(). + */ +__bpf_kfunc struct bpf_crypto_ctx * +bpf_crypto_ctx_acquire(struct bpf_crypto_ctx *ctx) +{ + if (!refcount_inc_not_zero(&ctx->usage)) + return NULL; + return ctx; +} + +/** + * bpf_crypto_ctx_release() - Release a previously acquired BPF crypto context. + * @ctx: The crypto context being released. + * + * Releases a previously acquired reference to a BPF crypto context. When the final + * reference of the BPF crypto context has been released, its memory + * will be released. + */ +__bpf_kfunc void bpf_crypto_ctx_release(struct bpf_crypto_ctx *ctx) +{ + if (refcount_dec_and_test(&ctx->usage)) + call_rcu(&ctx->rcu, crypto_free_cb); +} + +static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx, + const struct bpf_dynptr_kern *src, + const struct bpf_dynptr_kern *dst, + const struct bpf_dynptr_kern *siv, + bool decrypt) +{ + u32 src_len, dst_len, siv_len; + const u8 *psrc; + u8 *pdst, *piv; + int err; + + if (__bpf_dynptr_is_rdonly(dst)) + return -EINVAL; + + siv_len = __bpf_dynptr_size(siv); + src_len = __bpf_dynptr_size(src); + dst_len = __bpf_dynptr_size(dst); + if (!src_len || !dst_len) + return -EINVAL; + + if (siv_len != ctx->siv_len) + return -EINVAL; + + psrc = __bpf_dynptr_data(src, src_len); + if (!psrc) + return -EINVAL; + pdst = __bpf_dynptr_data_rw(dst, dst_len); + if (!pdst) + return -EINVAL; + + piv = siv_len ? 
__bpf_dynptr_data_rw(siv, siv_len) : NULL; + if (siv_len && !piv) + return -EINVAL; + + err = decrypt ? ctx->type->decrypt(ctx->tfm, psrc, pdst, src_len, piv) + : ctx->type->encrypt(ctx->tfm, psrc, pdst, src_len, piv); + + return err; +} + +/** + * bpf_crypto_decrypt() - Decrypt buffer using configured context and IV provided. + * @ctx: The crypto context being used. The ctx must be a trusted pointer. + * @src: bpf_dynptr to the encrypted data. Must be a trusted pointer. + * @dst: bpf_dynptr to the buffer where to store the result. Must be a trusted pointer. + * @siv: bpf_dynptr to IV data and state data to be used by decryptor. + * + * Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured. + */ +__bpf_kfunc int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx, + const struct bpf_dynptr_kern *src, + const struct bpf_dynptr_kern *dst, + const struct bpf_dynptr_kern *siv) +{ + return bpf_crypto_crypt(ctx, src, dst, siv, true); +} + +/** + * bpf_crypto_encrypt() - Encrypt buffer using configured context and IV provided. + * @ctx: The crypto context being used. The ctx must be a trusted pointer. + * @src: bpf_dynptr to the plain data. Must be a trusted pointer. + * @dst: bpf_dynptr to buffer where to store the result. Must be a trusted pointer. + * @siv: bpf_dynptr to IV data and state data to be used by decryptor. + * + * Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured. + */ +__bpf_kfunc int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx, + const struct bpf_dynptr_kern *src, + const struct bpf_dynptr_kern *dst, + const struct bpf_dynptr_kern *siv) +{ + return bpf_crypto_crypt(ctx, src, dst, siv, false); +} + +__bpf_kfunc_end_defs(); + +BTF_KFUNCS_START(crypt_init_kfunc_btf_ids) +BTF_ID_FLAGS(func, bpf_crypto_ctx_create, KF_ACQUIRE | KF_RET_NULL | KF_SLEEPABLE) +BTF_ID_FLAGS(func, bpf_crypto_ctx_release, KF_RELEASE) +BTF_ID_FLAGS(func, bpf_crypto_ctx_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL) +BTF_KFUNCS_END(crypt_init_kfunc_btf_ids) + +static const struct btf_kfunc_id_set crypt_init_kfunc_set = { + .owner = THIS_MODULE, + .set = &crypt_init_kfunc_btf_ids, +}; + +BTF_KFUNCS_START(crypt_kfunc_btf_ids) +BTF_ID_FLAGS(func, bpf_crypto_decrypt, KF_RCU) +BTF_ID_FLAGS(func, bpf_crypto_encrypt, KF_RCU) +BTF_KFUNCS_END(crypt_kfunc_btf_ids) + +static const struct btf_kfunc_id_set crypt_kfunc_set = { + .owner = THIS_MODULE, + .set = &crypt_kfunc_btf_ids, +}; + +BTF_ID_LIST(bpf_crypto_dtor_ids) +BTF_ID(struct, bpf_crypto_ctx) +BTF_ID(func, bpf_crypto_ctx_release) + +static int __init crypto_kfunc_init(void) +{ + int ret; + const struct btf_id_dtor_kfunc bpf_crypto_dtors[] = { + { + .btf_id = bpf_crypto_dtor_ids[0], + .kfunc_btf_id = bpf_crypto_dtor_ids[1] + }, + }; + + ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &crypt_kfunc_set); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_ACT, &crypt_kfunc_set); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, &crypt_kfunc_set); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, + &crypt_init_kfunc_set); + return ret ?: register_btf_id_dtor_kfuncs(bpf_crypto_dtors, + ARRAY_SIZE(bpf_crypto_dtors), + THIS_MODULE); +} + +late_initcall(crypto_kfunc_init); diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 14c401b78e12..2a69a9a36c0f 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1583,7 +1583,7 @@ static const struct bpf_func_proto bpf_kptr_xchg_proto = { #define DYNPTR_SIZE_MASK 0xFFFFFF #define 
DYNPTR_RDONLY_BIT BIT(31) -static bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr) +bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr) { return ptr->size & DYNPTR_RDONLY_BIT; } diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 8312c7a0ee19..4e474ef44e9c 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -5310,6 +5310,7 @@ BTF_ID(struct, cgroup) BTF_ID(struct, bpf_cpumask) #endif BTF_ID(struct, task_struct) +BTF_ID(struct, bpf_crypto_ctx) BTF_SET_END(rcu_protected_types) static bool rcu_protected_object(const struct btf *btf, u32 btf_id) -- cgit From 4bcc2e91b9e77644ee04b290bfd2c1410d7c402e Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 11 Apr 2024 07:44:30 +0300 Subject: bus: ti-sysc: Drop legacy idle quirk handling There are no more users that need the legacy idle quirk so let's drop the legacy idle quirk handling. This simplifies the PM code to just sysc_pm_ops with unified handling for all the interconnect targets. Reviewed-by: Dhruva Gole Signed-off-by: Tony Lindgren --- drivers/bus/ti-sysc.c | 109 +--------------------------------- include/linux/platform_data/ti-sysc.h | 1 - 2 files changed, 2 insertions(+), 108 deletions(-) (limited to 'include/linux') diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 467702da0e82..8767e04d6c89 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -1469,8 +1469,7 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev) ddata = dev_get_drvdata(dev); - if (ddata->cfg.quirks & - (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) + if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE) return 0; if (!ddata->enabled) @@ -1488,8 +1487,7 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev) ddata = dev_get_drvdata(dev); - if (ddata->cfg.quirks & - (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE)) + if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE) return 0; if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) { @@ -2457,89 +2455,6 @@ static int __maybe_unused sysc_child_runtime_resume(struct device *dev) return pm_generic_runtime_resume(dev); } -#ifdef CONFIG_PM_SLEEP -static int sysc_child_suspend_noirq(struct device *dev) -{ - struct sysc *ddata; - int error; - - ddata = sysc_child_to_parent(dev); - - dev_dbg(ddata->dev, "%s %s\n", __func__, - ddata->name ? ddata->name : ""); - - error = pm_generic_suspend_noirq(dev); - if (error) { - dev_err(dev, "%s error at %i: %i\n", - __func__, __LINE__, error); - - return error; - } - - if (!pm_runtime_status_suspended(dev)) { - error = pm_generic_runtime_suspend(dev); - if (error) { - dev_dbg(dev, "%s busy at %i: %i\n", - __func__, __LINE__, error); - - return 0; - } - - error = sysc_runtime_suspend(ddata->dev); - if (error) { - dev_err(dev, "%s error at %i: %i\n", - __func__, __LINE__, error); - - return error; - } - - ddata->child_needs_resume = true; - } - - return 0; -} - -static int sysc_child_resume_noirq(struct device *dev) -{ - struct sysc *ddata; - int error; - - ddata = sysc_child_to_parent(dev); - - dev_dbg(ddata->dev, "%s %s\n", __func__, - ddata->name ? 
ddata->name : ""); - - if (ddata->child_needs_resume) { - ddata->child_needs_resume = false; - - error = sysc_runtime_resume(ddata->dev); - if (error) - dev_err(ddata->dev, - "%s runtime resume error: %i\n", - __func__, error); - - error = pm_generic_runtime_resume(dev); - if (error) - dev_err(ddata->dev, - "%s generic runtime resume: %i\n", - __func__, error); - } - - return pm_generic_resume_noirq(dev); -} -#endif - -static struct dev_pm_domain sysc_child_pm_domain = { - .ops = { - SET_RUNTIME_PM_OPS(sysc_child_runtime_suspend, - sysc_child_runtime_resume, - NULL) - USE_PLATFORM_PM_SLEEP_OPS - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(sysc_child_suspend_noirq, - sysc_child_resume_noirq) - } -}; - /* Caller needs to take list_lock if ever used outside of cpu_pm */ static void sysc_reinit_modules(struct sysc_soc_info *soc) { @@ -2610,25 +2525,6 @@ out_unlock: mutex_unlock(&sysc_soc->list_lock); } -/** - * sysc_legacy_idle_quirk - handle children in omap_device compatible way - * @ddata: device driver data - * @child: child device driver - * - * Allow idle for child devices as done with _od_runtime_suspend(). - * Otherwise many child devices will not idle because of the permanent - * parent usecount set in pm_runtime_irq_safe(). - * - * Note that the long term solution is to just modify the child device - * drivers to not set pm_runtime_irq_safe() and then this can be just - * dropped. - */ -static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child) -{ - if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE) - dev_pm_domain_set(child, &sysc_child_pm_domain); -} - static int sysc_notifier_call(struct notifier_block *nb, unsigned long event, void *device) { @@ -2645,7 +2541,6 @@ static int sysc_notifier_call(struct notifier_block *nb, error = sysc_child_add_clocks(ddata, dev); if (error) return error; - sysc_legacy_idle_quirk(ddata, dev); break; default: break; diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h index eb556f988d57..d8f15770a522 100644 --- a/include/linux/platform_data/ti-sysc.h +++ b/include/linux/platform_data/ti-sysc.h @@ -71,7 +71,6 @@ struct sysc_regbits { #define SYSC_QUIRK_SWSUP_SIDLE_ACT BIT(12) #define SYSC_QUIRK_SWSUP_SIDLE BIT(11) #define SYSC_QUIRK_EXT_OPT_CLOCK BIT(10) -#define SYSC_QUIRK_LEGACY_IDLE BIT(9) #define SYSC_QUIRK_RESET_STATUS BIT(8) #define SYSC_QUIRK_NO_IDLE BIT(7) #define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6) -- cgit From d850b5bae0f5e435360edf0474ff446622f0d899 Mon Sep 17 00:00:00 2001 From: Robin Gong Date: Fri, 19 Apr 2024 11:07:29 -0400 Subject: dmaengine: imx-sdma: Add i2c dma support New sdma script (sdma-6q: v3.6, sdma-7d: v4.6) support i2c at imx8mp and imx6ull. So add I2C dma support. 
Signed-off-by: Robin Gong Acked-by: Clark Wang Reviewed-by: Joy Zou Reviewed-by: Daniel Baluta Signed-off-by: Frank Li Link: https://lore.kernel.org/r/20240419150729.1071904-3-Frank.Li@nxp.com Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 7 +++++++ include/linux/dma/imx-dma.h | 1 + 2 files changed, 8 insertions(+) (limited to 'include/linux') diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 4a4d44ed03c8..003e1580b902 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -251,6 +251,8 @@ struct sdma_script_start_addrs { s32 sai_2_mcu_addr; s32 uart_2_mcu_rom_addr; s32 uartsh_2_mcu_rom_addr; + s32 i2c_2_mcu_addr; + s32 mcu_2_i2c_addr; /* End of v3 array */ union { s32 v3_end; s32 mcu_2_zqspi_addr; }; /* End of v4 array */ @@ -1082,6 +1084,11 @@ static int sdma_get_pc(struct sdma_channel *sdmac, per_2_emi = sdma->script_addrs->sai_2_mcu_addr; emi_2_per = sdma->script_addrs->mcu_2_sai_addr; break; + case IMX_DMATYPE_I2C: + per_2_emi = sdma->script_addrs->i2c_2_mcu_addr; + emi_2_per = sdma->script_addrs->mcu_2_i2c_addr; + sdmac->is_ram_script = true; + break; case IMX_DMATYPE_HDMI: emi_2_per = sdma->script_addrs->hdmi_dma_addr; sdmac->is_ram_script = true; diff --git a/include/linux/dma/imx-dma.h b/include/linux/dma/imx-dma.h index cfec5f946e23..76a8de9ae151 100644 --- a/include/linux/dma/imx-dma.h +++ b/include/linux/dma/imx-dma.h @@ -41,6 +41,7 @@ enum sdma_peripheral_type { IMX_DMATYPE_SAI, /* SAI */ IMX_DMATYPE_MULTI_SAI, /* MULTI FIFOs For Audio */ IMX_DMATYPE_HDMI, /* HDMI Audio */ + IMX_DMATYPE_I2C, /* I2C */ }; enum imx_dma_prio { -- cgit From 17553ba8e19dee8770b3dcc597d49dcc3418f3b0 Mon Sep 17 00:00:00 2001 From: Qiang Yu Date: Wed, 24 Apr 2024 11:21:53 +0800 Subject: bus: mhi: host: Add sysfs entry to force device to enter EDL Add sysfs entry to allow users of MHI bus to force device to enter EDL (Emergency Download) mode to download the device firmware. Since there is no guarantee that all the devices will support EDL mode, the sysfs entry is kept as an optional one and will appear only for the supported devices. Controllers supporting the EDL mode are expected to provide edl_trigger() callback that puts the device into EDL mode. Signed-off-by: Qiang Yu Reviewed-by: Jeffrey Hugo Reviewed-by: Manivannan Sadhasivam Link: https://lore.kernel.org/r/1713928915-18229-2-git-send-email-quic_qianyu@quicinc.com [mani: fixed the kernel version and reworded the commit message] Signed-off-by: Manivannan Sadhasivam --- Documentation/ABI/stable/sysfs-bus-mhi | 13 +++++++++++++ drivers/bus/mhi/host/init.c | 33 +++++++++++++++++++++++++++++++++ include/linux/mhi.h | 2 ++ 3 files changed, 48 insertions(+) (limited to 'include/linux') diff --git a/Documentation/ABI/stable/sysfs-bus-mhi b/Documentation/ABI/stable/sysfs-bus-mhi index 1a47f9e0cc84..8b9698fa0beb 100644 --- a/Documentation/ABI/stable/sysfs-bus-mhi +++ b/Documentation/ABI/stable/sysfs-bus-mhi @@ -29,3 +29,16 @@ Description: Initiates a SoC reset on the MHI controller. A SoC reset is This can be useful as a method of recovery if the device is non-responsive, or as a means of loading new firmware as a system administration task. + +What: /sys/bus/mhi/devices/.../trigger_edl +Date: April 2024 +KernelVersion: 6.10 +Contact: mhi@lists.linux.dev +Description: Writing a non-zero value to this file will force devices to + enter EDL (Emergency Download) mode. This entry only exists for + devices capable of entering the EDL mode using the standard EDL + triggering mechanism defined in the MHI spec v1.2. 
Once in EDL + mode, the flash programmer image can be downloaded to the + device to enter the flash programmer execution environment. + This can be useful if user wants to use QDL (Qualcomm Download, + which is used to download firmware over EDL) to update firmware. diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index 44f934981de8..7104c1856987 100644 --- a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -127,6 +127,30 @@ static ssize_t soc_reset_store(struct device *dev, } static DEVICE_ATTR_WO(soc_reset); +static ssize_t trigger_edl_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct mhi_device *mhi_dev = to_mhi_device(dev); + struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; + unsigned long val; + int ret; + + ret = kstrtoul(buf, 10, &val); + if (ret < 0) + return ret; + + if (!val) + return -EINVAL; + + ret = mhi_cntrl->edl_trigger(mhi_cntrl); + if (ret) + return ret; + + return count; +} +static DEVICE_ATTR_WO(trigger_edl); + static struct attribute *mhi_dev_attrs[] = { &dev_attr_serial_number.attr, &dev_attr_oem_pk_hash.attr, @@ -1018,6 +1042,12 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl, if (ret) goto err_release_dev; + if (mhi_cntrl->edl_trigger) { + ret = sysfs_create_file(&mhi_dev->dev.kobj, &dev_attr_trigger_edl.attr); + if (ret) + goto err_release_dev; + } + mhi_cntrl->mhi_dev = mhi_dev; mhi_create_debugfs(mhi_cntrl); @@ -1051,6 +1081,9 @@ void mhi_unregister_controller(struct mhi_controller *mhi_cntrl) mhi_deinit_free_irq(mhi_cntrl); mhi_destroy_debugfs(mhi_cntrl); + if (mhi_cntrl->edl_trigger) + sysfs_remove_file(&mhi_dev->dev.kobj, &dev_attr_trigger_edl.attr); + destroy_workqueue(mhi_cntrl->hiprio_wq); kfree(mhi_cntrl->mhi_cmd); kfree(mhi_cntrl->mhi_event); diff --git a/include/linux/mhi.h b/include/linux/mhi.h index cde01e133a1b..d968e1ab44dc 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -353,6 +353,7 @@ struct mhi_controller_config { * @read_reg: Read a MHI register via the physical link (required) * @write_reg: Write a MHI register via the physical link (required) * @reset: Controller specific reset function (optional) + * @edl_trigger: CB function to trigger EDL mode (optional) * @buffer_len: Bounce buffer length * @index: Index of the MHI controller instance * @bounce_buf: Use of bounce buffer @@ -435,6 +436,7 @@ struct mhi_controller { void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr, u32 val); void (*reset)(struct mhi_controller *mhi_cntrl); + int (*edl_trigger)(struct mhi_controller *mhi_cntrl); size_t buffer_len; int index; -- cgit From 553f94fc7667259e47a9318e9e5702c9a814d637 Mon Sep 17 00:00:00 2001 From: Qiang Yu Date: Wed, 24 Apr 2024 11:21:54 +0800 Subject: bus: mhi: host: Add a new API for getting channel doorbell offset Some controllers may want to access a specific doorbell register. Hence add a new API that reads the CHDBOFF register and returns the offset of the doorbell registers from MMIO base, so that the controller can calculate the address of the specific doorbell register by adding the register offset with doorbell offset and MMIO base address. 
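For illustration only (not part of this patch): a controller driver could turn the returned offset into the address of one channel's doorbell roughly as below. The helper name and the 8-byte per-channel doorbell stride are assumptions made for the sketch, not something this API defines.

	#include <linux/io.h>
	#include <linux/mhi.h>

	/* Hypothetical helper: compute the MMIO address of channel @chan's doorbell */
	static void __iomem *my_chan_doorbell(struct mhi_controller *mhi_cntrl, u32 chan)
	{
		u32 chdb_offset;
		int ret;

		/* CHDBOFF is returned relative to the MMIO register base */
		ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &chdb_offset);
		if (ret)
			return NULL;

		/* Assumes the usual MHI layout of one 8-byte doorbell register per channel */
		return mhi_cntrl->regs + chdb_offset + chan * 8;
	}
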
Signed-off-by: Qiang Yu Reviewed-by: Jeffrey Hugo Link: https://lore.kernel.org/r/1713928915-18229-3-git-send-email-quic_qianyu@quicinc.com [mani: reworded commit message and Kdoc] Signed-off-by: Manivannan Sadhasivam --- drivers/bus/mhi/host/init.c | 8 +++----- drivers/bus/mhi/host/main.c | 16 ++++++++++++++++ include/linux/mhi.h | 9 +++++++++ 3 files changed, 28 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index 7104c1856987..173f79918741 100644 --- a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -541,11 +541,9 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl) dev_dbg(dev, "Initializing MHI registers\n"); /* Read channel db offset */ - ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val); - if (ret) { - dev_err(dev, "Unable to read CHDBOFF register\n"); - return -EIO; - } + ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &val); + if (ret) + return ret; if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) { dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n", diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c index 15d657af9b5b..4de75674f193 100644 --- a/drivers/bus/mhi/host/main.c +++ b/drivers/bus/mhi/host/main.c @@ -1691,3 +1691,19 @@ void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) } } EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer); + +int mhi_get_channel_doorbell_offset(struct mhi_controller *mhi_cntrl, u32 *chdb_offset) +{ + struct device *dev = &mhi_cntrl->mhi_dev->dev; + void __iomem *base = mhi_cntrl->regs; + int ret; + + ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, chdb_offset); + if (ret) { + dev_err(dev, "Unable to read CHDBOFF register\n"); + return -EIO; + } + + return 0; +} +EXPORT_SYMBOL_GPL(mhi_get_channel_doorbell_offset); diff --git a/include/linux/mhi.h b/include/linux/mhi.h index d968e1ab44dc..b573f15762f8 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -816,4 +816,13 @@ int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, */ bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir); +/** + * mhi_get_channel_doorbell_offset - Get the channel doorbell offset + * @mhi_cntrl: MHI controller + * @chdb_offset: Read channel doorbell offset + * + * Return: 0 if the read succeeds, a negative error code otherwise + */ +int mhi_get_channel_doorbell_offset(struct mhi_controller *mhi_cntrl, u32 *chdb_offset); + #endif /* _MHI_H_ */ -- cgit From dbc8fc9d6de17b618591d4d2a6227dee27fe0f51 Mon Sep 17 00:00:00 2001 From: Shivaprasad G Bhat Date: Sat, 27 Jan 2024 12:57:00 -0600 Subject: powerpc/papr_scm: Move duplicate definitions to common header files papr_scm and ndtest share common PDSM payload structs like nd_papr_pdsm_health. Presently these structs are duplicated across papr_pdsm.h and ndtest.h header files. Since 'ndtest' is essentially arch independent and can run on platforms other than PPC64, a way needs to be deviced to avoid redundancy and duplication of PDSM structs in future. So the patch proposes moving the PDSM header from arch/powerpc/include- -/uapi/ to the generic include/uapi/linux directory. Also, there are some #defines common between papr_scm and ndtest which are not exported to the user space. So, move them to a header file which can be shared across ndtest and papr_scm via newly introduced include/linux/papr_scm.h. 
Signed-off-by: Shivaprasad G Bhat Signed-off-by: Vaibhav Jain Suggested-by: Aneesh Kumar K.V Link: https://lore.kernel.org/r/170638176942.112443.2937254675538057083.stgit@ltcd48-lp2.aus.stglab.ibm.com Signed-off-by: Ira Weiny --- MAINTAINERS | 2 + arch/powerpc/include/uapi/asm/papr_pdsm.h | 165 ------------------------------ arch/powerpc/platforms/pseries/papr_scm.c | 43 +------- include/linux/papr_scm.h | 49 +++++++++ include/uapi/linux/papr_pdsm.h | 165 ++++++++++++++++++++++++++++++ tools/testing/nvdimm/test/ndtest.c | 2 + tools/testing/nvdimm/test/ndtest.h | 31 ------ 7 files changed, 220 insertions(+), 237 deletions(-) delete mode 100644 arch/powerpc/include/uapi/asm/papr_pdsm.h create mode 100644 include/linux/papr_scm.h create mode 100644 include/uapi/linux/papr_pdsm.h (limited to 'include/linux') diff --git a/MAINTAINERS b/MAINTAINERS index ebf03f5f0619..0960c519f949 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -12497,6 +12497,8 @@ F: drivers/rtc/rtc-opal.c F: drivers/scsi/ibmvscsi/ F: drivers/tty/hvc/hvc_opal.c F: drivers/watchdog/wdrtas.c +F: include/linux/papr_scm.h +F: include/uapi/linux/papr_pdsm.h F: tools/testing/selftests/powerpc N: /pmac N: powermac diff --git a/arch/powerpc/include/uapi/asm/papr_pdsm.h b/arch/powerpc/include/uapi/asm/papr_pdsm.h deleted file mode 100644 index 17439925045c..000000000000 --- a/arch/powerpc/include/uapi/asm/papr_pdsm.h +++ /dev/null @@ -1,165 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * PAPR nvDimm Specific Methods (PDSM) and structs for libndctl - * - * (C) Copyright IBM 2020 - * - * Author: Vaibhav Jain - */ - -#ifndef _UAPI_ASM_POWERPC_PAPR_PDSM_H_ -#define _UAPI_ASM_POWERPC_PAPR_PDSM_H_ - -#include -#include - -/* - * PDSM Envelope: - * - * The ioctl ND_CMD_CALL exchange data between user-space and kernel via - * envelope which consists of 2 headers sections and payload sections as - * illustrated below: - * +-----------------+---------------+---------------------------+ - * | 64-Bytes | 8-Bytes | Max 184-Bytes | - * +-----------------+---------------+---------------------------+ - * | ND-HEADER | PDSM-HEADER | PDSM-PAYLOAD | - * +-----------------+---------------+---------------------------+ - * | nd_family | | | - * | nd_size_out | cmd_status | | - * | nd_size_in | reserved | nd_pdsm_payload | - * | nd_command | payload --> | | - * | nd_fw_size | | | - * | nd_payload ---> | | | - * +---------------+-----------------+---------------------------+ - * - * ND Header: - * This is the generic libnvdimm header described as 'struct nd_cmd_pkg' - * which is interpreted by libnvdimm before passed on to papr_scm. Important - * member fields used are: - * 'nd_family' : (In) NVDIMM_FAMILY_PAPR_SCM - * 'nd_size_in' : (In) PDSM-HEADER + PDSM-IN-PAYLOAD (usually 0) - * 'nd_size_out' : (In) PDSM-HEADER + PDSM-RETURN-PAYLOAD - * 'nd_command' : (In) One of PAPR_PDSM_XXX - * 'nd_fw_size' : (Out) PDSM-HEADER + size of actual payload returned - * - * PDSM Header: - * This is papr-scm specific header that precedes the payload. This is defined - * as nd_cmd_pdsm_pkg. Following fields aare available in this header: - * - * 'cmd_status' : (Out) Errors if any encountered while servicing PDSM. - * 'reserved' : Not used, reserved for future and should be set to 0. - * 'payload' : A union of all the possible payload structs - * - * PDSM Payload: - * - * The layout of the PDSM Payload is defined by various structs shared between - * papr_scm and libndctl so that contents of payload can be interpreted. 
As such - * its defined as a union of all possible payload structs as - * 'union nd_pdsm_payload'. Based on the value of 'nd_cmd_pkg.nd_command' - * appropriate member of the union is accessed. - */ - -/* Max payload size that we can handle */ -#define ND_PDSM_PAYLOAD_MAX_SIZE 184 - -/* Max payload size that we can handle */ -#define ND_PDSM_HDR_SIZE \ - (sizeof(struct nd_pkg_pdsm) - ND_PDSM_PAYLOAD_MAX_SIZE) - -/* Various nvdimm health indicators */ -#define PAPR_PDSM_DIMM_HEALTHY 0 -#define PAPR_PDSM_DIMM_UNHEALTHY 1 -#define PAPR_PDSM_DIMM_CRITICAL 2 -#define PAPR_PDSM_DIMM_FATAL 3 - -/* struct nd_papr_pdsm_health.extension_flags field flags */ - -/* Indicate that the 'dimm_fuel_gauge' field is valid */ -#define PDSM_DIMM_HEALTH_RUN_GAUGE_VALID 1 - -/* Indicate that the 'dimm_dsc' field is valid */ -#define PDSM_DIMM_DSC_VALID 2 - -/* - * Struct exchanged between kernel & ndctl in for PAPR_PDSM_HEALTH - * Various flags indicate the health status of the dimm. - * - * extension_flags : Any extension fields present in the struct. - * dimm_unarmed : Dimm not armed. So contents wont persist. - * dimm_bad_shutdown : Previous shutdown did not persist contents. - * dimm_bad_restore : Contents from previous shutdown werent restored. - * dimm_scrubbed : Contents of the dimm have been scrubbed. - * dimm_locked : Contents of the dimm cant be modified until CEC reboot - * dimm_encrypted : Contents of dimm are encrypted. - * dimm_health : Dimm health indicator. One of PAPR_PDSM_DIMM_XXXX - * dimm_fuel_gauge : Life remaining of DIMM as a percentage from 0-100 - */ -struct nd_papr_pdsm_health { - union { - struct { - __u32 extension_flags; - __u8 dimm_unarmed; - __u8 dimm_bad_shutdown; - __u8 dimm_bad_restore; - __u8 dimm_scrubbed; - __u8 dimm_locked; - __u8 dimm_encrypted; - __u16 dimm_health; - - /* Extension flag PDSM_DIMM_HEALTH_RUN_GAUGE_VALID */ - __u16 dimm_fuel_gauge; - - /* Extension flag PDSM_DIMM_DSC_VALID */ - __u64 dimm_dsc; - }; - __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE]; - }; -}; - -/* Flags for injecting specific smart errors */ -#define PDSM_SMART_INJECT_HEALTH_FATAL (1 << 0) -#define PDSM_SMART_INJECT_BAD_SHUTDOWN (1 << 1) - -struct nd_papr_pdsm_smart_inject { - union { - struct { - /* One or more of PDSM_SMART_INJECT_ */ - __u32 flags; - __u8 fatal_enable; - __u8 unsafe_shutdown_enable; - }; - __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE]; - }; -}; - -/* - * Methods to be embedded in ND_CMD_CALL request. These are sent to the kernel - * via 'nd_cmd_pkg.nd_command' member of the ioctl struct - */ -enum papr_pdsm { - PAPR_PDSM_MIN = 0x0, - PAPR_PDSM_HEALTH, - PAPR_PDSM_SMART_INJECT, - PAPR_PDSM_MAX, -}; - -/* Maximal union that can hold all possible payload types */ -union nd_pdsm_payload { - struct nd_papr_pdsm_health health; - struct nd_papr_pdsm_smart_inject smart_inject; - __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE]; -} __packed; - -/* - * PDSM-header + payload expected with ND_CMD_CALL ioctl from libnvdimm - * Valid member of union 'payload' is identified via 'nd_cmd_pkg.nd_command' - * that should always precede this struct when sent to papr_scm via CMD_CALL - * interface. 
- */ -struct nd_pkg_pdsm { - __s32 cmd_status; /* Out: Sub-cmd status returned back */ - __u16 reserved[2]; /* Ignored and to be set as '0' */ - union nd_pdsm_payload payload; -} __packed; - -#endif /* _UAPI_ASM_POWERPC_PAPR_PDSM_H_ */ diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index c233f9db039b..9b6420eb3567 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -16,7 +16,8 @@ #include #include -#include +#include +#include #include #include #include @@ -29,46 +30,6 @@ (1ul << ND_CMD_SET_CONFIG_DATA) | \ (1ul << ND_CMD_CALL)) -/* DIMM health bitmap indicators */ -/* SCM device is unable to persist memory contents */ -#define PAPR_PMEM_UNARMED (1ULL << (63 - 0)) -/* SCM device failed to persist memory contents */ -#define PAPR_PMEM_SHUTDOWN_DIRTY (1ULL << (63 - 1)) -/* SCM device contents are persisted from previous IPL */ -#define PAPR_PMEM_SHUTDOWN_CLEAN (1ULL << (63 - 2)) -/* SCM device contents are not persisted from previous IPL */ -#define PAPR_PMEM_EMPTY (1ULL << (63 - 3)) -/* SCM device memory life remaining is critically low */ -#define PAPR_PMEM_HEALTH_CRITICAL (1ULL << (63 - 4)) -/* SCM device will be garded off next IPL due to failure */ -#define PAPR_PMEM_HEALTH_FATAL (1ULL << (63 - 5)) -/* SCM contents cannot persist due to current platform health status */ -#define PAPR_PMEM_HEALTH_UNHEALTHY (1ULL << (63 - 6)) -/* SCM device is unable to persist memory contents in certain conditions */ -#define PAPR_PMEM_HEALTH_NON_CRITICAL (1ULL << (63 - 7)) -/* SCM device is encrypted */ -#define PAPR_PMEM_ENCRYPTED (1ULL << (63 - 8)) -/* SCM device has been scrubbed and locked */ -#define PAPR_PMEM_SCRUBBED_AND_LOCKED (1ULL << (63 - 9)) - -/* Bits status indicators for health bitmap indicating unarmed dimm */ -#define PAPR_PMEM_UNARMED_MASK (PAPR_PMEM_UNARMED | \ - PAPR_PMEM_HEALTH_UNHEALTHY) - -/* Bits status indicators for health bitmap indicating unflushed dimm */ -#define PAPR_PMEM_BAD_SHUTDOWN_MASK (PAPR_PMEM_SHUTDOWN_DIRTY) - -/* Bits status indicators for health bitmap indicating unrestored dimm */ -#define PAPR_PMEM_BAD_RESTORE_MASK (PAPR_PMEM_EMPTY) - -/* Bit status indicators for smart event notification */ -#define PAPR_PMEM_SMART_EVENT_MASK (PAPR_PMEM_HEALTH_CRITICAL | \ - PAPR_PMEM_HEALTH_FATAL | \ - PAPR_PMEM_HEALTH_UNHEALTHY) - -#define PAPR_SCM_PERF_STATS_EYECATCHER __stringify(SCMSTATS) -#define PAPR_SCM_PERF_STATS_VERSION 0x1 - /* Struct holding a single performance metric */ struct papr_scm_perf_stat { u8 stat_id[8]; diff --git a/include/linux/papr_scm.h b/include/linux/papr_scm.h new file mode 100644 index 000000000000..eb36453813db --- /dev/null +++ b/include/linux/papr_scm.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __LINUX_PAPR_SCM_H +#define __LINUX_PAPR_SCM_H + +/* DIMM health bitmap indicators */ +/* SCM device is unable to persist memory contents */ +#define PAPR_PMEM_UNARMED (1ULL << (63 - 0)) +/* SCM device failed to persist memory contents */ +#define PAPR_PMEM_SHUTDOWN_DIRTY (1ULL << (63 - 1)) +/* SCM device contents are persisted from previous IPL */ +#define PAPR_PMEM_SHUTDOWN_CLEAN (1ULL << (63 - 2)) +/* SCM device contents are not persisted from previous IPL */ +#define PAPR_PMEM_EMPTY (1ULL << (63 - 3)) +/* SCM device memory life remaining is critically low */ +#define PAPR_PMEM_HEALTH_CRITICAL (1ULL << (63 - 4)) +/* SCM device will be garded off next IPL due to failure */ +#define PAPR_PMEM_HEALTH_FATAL (1ULL << (63 - 
5)) +/* SCM contents cannot persist due to current platform health status */ +#define PAPR_PMEM_HEALTH_UNHEALTHY (1ULL << (63 - 6)) +/* SCM device is unable to persist memory contents in certain conditions */ +#define PAPR_PMEM_HEALTH_NON_CRITICAL (1ULL << (63 - 7)) +/* SCM device is encrypted */ +#define PAPR_PMEM_ENCRYPTED (1ULL << (63 - 8)) +/* SCM device has been scrubbed and locked */ +#define PAPR_PMEM_SCRUBBED_AND_LOCKED (1ULL << (63 - 9)) + +#define PAPR_PMEM_SAVE_FAILED (1ULL << (63 - 10)) + +/* Bits status indicators for health bitmap indicating unarmed dimm */ +#define PAPR_PMEM_UNARMED_MASK (PAPR_PMEM_UNARMED | \ + PAPR_PMEM_HEALTH_UNHEALTHY) + +/* Bits status indicators for health bitmap indicating unflushed dimm */ +#define PAPR_PMEM_BAD_SHUTDOWN_MASK (PAPR_PMEM_SHUTDOWN_DIRTY) + +/* Bits status indicators for health bitmap indicating unrestored dimm */ +#define PAPR_PMEM_BAD_RESTORE_MASK (PAPR_PMEM_EMPTY) + +/* Bit status indicators for smart event notification */ +#define PAPR_PMEM_SMART_EVENT_MASK (PAPR_PMEM_HEALTH_CRITICAL | \ + PAPR_PMEM_HEALTH_FATAL | \ + PAPR_PMEM_HEALTH_UNHEALTHY) + +#define PAPR_PMEM_SAVE_MASK (PAPR_PMEM_SAVE_FAILED) + +#define PAPR_SCM_PERF_STATS_EYECATCHER __stringify(SCMSTATS) +#define PAPR_SCM_PERF_STATS_VERSION 0x1 + +#endif /* __LINUX_PAPR_SCM_H */ diff --git a/include/uapi/linux/papr_pdsm.h b/include/uapi/linux/papr_pdsm.h new file mode 100644 index 000000000000..17439925045c --- /dev/null +++ b/include/uapi/linux/papr_pdsm.h @@ -0,0 +1,165 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * PAPR nvDimm Specific Methods (PDSM) and structs for libndctl + * + * (C) Copyright IBM 2020 + * + * Author: Vaibhav Jain + */ + +#ifndef _UAPI_ASM_POWERPC_PAPR_PDSM_H_ +#define _UAPI_ASM_POWERPC_PAPR_PDSM_H_ + +#include +#include + +/* + * PDSM Envelope: + * + * The ioctl ND_CMD_CALL exchange data between user-space and kernel via + * envelope which consists of 2 headers sections and payload sections as + * illustrated below: + * +-----------------+---------------+---------------------------+ + * | 64-Bytes | 8-Bytes | Max 184-Bytes | + * +-----------------+---------------+---------------------------+ + * | ND-HEADER | PDSM-HEADER | PDSM-PAYLOAD | + * +-----------------+---------------+---------------------------+ + * | nd_family | | | + * | nd_size_out | cmd_status | | + * | nd_size_in | reserved | nd_pdsm_payload | + * | nd_command | payload --> | | + * | nd_fw_size | | | + * | nd_payload ---> | | | + * +---------------+-----------------+---------------------------+ + * + * ND Header: + * This is the generic libnvdimm header described as 'struct nd_cmd_pkg' + * which is interpreted by libnvdimm before passed on to papr_scm. Important + * member fields used are: + * 'nd_family' : (In) NVDIMM_FAMILY_PAPR_SCM + * 'nd_size_in' : (In) PDSM-HEADER + PDSM-IN-PAYLOAD (usually 0) + * 'nd_size_out' : (In) PDSM-HEADER + PDSM-RETURN-PAYLOAD + * 'nd_command' : (In) One of PAPR_PDSM_XXX + * 'nd_fw_size' : (Out) PDSM-HEADER + size of actual payload returned + * + * PDSM Header: + * This is papr-scm specific header that precedes the payload. This is defined + * as nd_cmd_pdsm_pkg. Following fields aare available in this header: + * + * 'cmd_status' : (Out) Errors if any encountered while servicing PDSM. + * 'reserved' : Not used, reserved for future and should be set to 0. 
+ * 'payload' : A union of all the possible payload structs + * + * PDSM Payload: + * + * The layout of the PDSM Payload is defined by various structs shared between + * papr_scm and libndctl so that contents of payload can be interpreted. As such + * its defined as a union of all possible payload structs as + * 'union nd_pdsm_payload'. Based on the value of 'nd_cmd_pkg.nd_command' + * appropriate member of the union is accessed. + */ + +/* Max payload size that we can handle */ +#define ND_PDSM_PAYLOAD_MAX_SIZE 184 + +/* Max payload size that we can handle */ +#define ND_PDSM_HDR_SIZE \ + (sizeof(struct nd_pkg_pdsm) - ND_PDSM_PAYLOAD_MAX_SIZE) + +/* Various nvdimm health indicators */ +#define PAPR_PDSM_DIMM_HEALTHY 0 +#define PAPR_PDSM_DIMM_UNHEALTHY 1 +#define PAPR_PDSM_DIMM_CRITICAL 2 +#define PAPR_PDSM_DIMM_FATAL 3 + +/* struct nd_papr_pdsm_health.extension_flags field flags */ + +/* Indicate that the 'dimm_fuel_gauge' field is valid */ +#define PDSM_DIMM_HEALTH_RUN_GAUGE_VALID 1 + +/* Indicate that the 'dimm_dsc' field is valid */ +#define PDSM_DIMM_DSC_VALID 2 + +/* + * Struct exchanged between kernel & ndctl in for PAPR_PDSM_HEALTH + * Various flags indicate the health status of the dimm. + * + * extension_flags : Any extension fields present in the struct. + * dimm_unarmed : Dimm not armed. So contents wont persist. + * dimm_bad_shutdown : Previous shutdown did not persist contents. + * dimm_bad_restore : Contents from previous shutdown werent restored. + * dimm_scrubbed : Contents of the dimm have been scrubbed. + * dimm_locked : Contents of the dimm cant be modified until CEC reboot + * dimm_encrypted : Contents of dimm are encrypted. + * dimm_health : Dimm health indicator. One of PAPR_PDSM_DIMM_XXXX + * dimm_fuel_gauge : Life remaining of DIMM as a percentage from 0-100 + */ +struct nd_papr_pdsm_health { + union { + struct { + __u32 extension_flags; + __u8 dimm_unarmed; + __u8 dimm_bad_shutdown; + __u8 dimm_bad_restore; + __u8 dimm_scrubbed; + __u8 dimm_locked; + __u8 dimm_encrypted; + __u16 dimm_health; + + /* Extension flag PDSM_DIMM_HEALTH_RUN_GAUGE_VALID */ + __u16 dimm_fuel_gauge; + + /* Extension flag PDSM_DIMM_DSC_VALID */ + __u64 dimm_dsc; + }; + __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE]; + }; +}; + +/* Flags for injecting specific smart errors */ +#define PDSM_SMART_INJECT_HEALTH_FATAL (1 << 0) +#define PDSM_SMART_INJECT_BAD_SHUTDOWN (1 << 1) + +struct nd_papr_pdsm_smart_inject { + union { + struct { + /* One or more of PDSM_SMART_INJECT_ */ + __u32 flags; + __u8 fatal_enable; + __u8 unsafe_shutdown_enable; + }; + __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE]; + }; +}; + +/* + * Methods to be embedded in ND_CMD_CALL request. These are sent to the kernel + * via 'nd_cmd_pkg.nd_command' member of the ioctl struct + */ +enum papr_pdsm { + PAPR_PDSM_MIN = 0x0, + PAPR_PDSM_HEALTH, + PAPR_PDSM_SMART_INJECT, + PAPR_PDSM_MAX, +}; + +/* Maximal union that can hold all possible payload types */ +union nd_pdsm_payload { + struct nd_papr_pdsm_health health; + struct nd_papr_pdsm_smart_inject smart_inject; + __u8 buf[ND_PDSM_PAYLOAD_MAX_SIZE]; +} __packed; + +/* + * PDSM-header + payload expected with ND_CMD_CALL ioctl from libnvdimm + * Valid member of union 'payload' is identified via 'nd_cmd_pkg.nd_command' + * that should always precede this struct when sent to papr_scm via CMD_CALL + * interface. 
+ */ +struct nd_pkg_pdsm { + __s32 cmd_status; /* Out: Sub-cmd status returned back */ + __u16 reserved[2]; /* Ignored and to be set as '0' */ + union nd_pdsm_payload payload; +} __packed; + +#endif /* _UAPI_ASM_POWERPC_PAPR_PDSM_H_ */ diff --git a/tools/testing/nvdimm/test/ndtest.c b/tools/testing/nvdimm/test/ndtest.c index b8419f460368..fa9d1327fc71 100644 --- a/tools/testing/nvdimm/test/ndtest.c +++ b/tools/testing/nvdimm/test/ndtest.c @@ -13,6 +13,8 @@ #include #include #include +#include +#include #include "../watermark.h" #include "nfit_test.h" diff --git a/tools/testing/nvdimm/test/ndtest.h b/tools/testing/nvdimm/test/ndtest.h index 2c54c9cbb90c..8f27ad6f7319 100644 --- a/tools/testing/nvdimm/test/ndtest.h +++ b/tools/testing/nvdimm/test/ndtest.h @@ -5,37 +5,6 @@ #include #include -/* SCM device is unable to persist memory contents */ -#define PAPR_PMEM_UNARMED (1ULL << (63 - 0)) -/* SCM device failed to persist memory contents */ -#define PAPR_PMEM_SHUTDOWN_DIRTY (1ULL << (63 - 1)) -/* SCM device contents are not persisted from previous IPL */ -#define PAPR_PMEM_EMPTY (1ULL << (63 - 3)) -#define PAPR_PMEM_HEALTH_CRITICAL (1ULL << (63 - 4)) -/* SCM device will be garded off next IPL due to failure */ -#define PAPR_PMEM_HEALTH_FATAL (1ULL << (63 - 5)) -/* SCM contents cannot persist due to current platform health status */ -#define PAPR_PMEM_HEALTH_UNHEALTHY (1ULL << (63 - 6)) - -/* Bits status indicators for health bitmap indicating unarmed dimm */ -#define PAPR_PMEM_UNARMED_MASK (PAPR_PMEM_UNARMED | \ - PAPR_PMEM_HEALTH_UNHEALTHY) - -#define PAPR_PMEM_SAVE_FAILED (1ULL << (63 - 10)) - -/* Bits status indicators for health bitmap indicating unflushed dimm */ -#define PAPR_PMEM_BAD_SHUTDOWN_MASK (PAPR_PMEM_SHUTDOWN_DIRTY) - -/* Bits status indicators for health bitmap indicating unrestored dimm */ -#define PAPR_PMEM_BAD_RESTORE_MASK (PAPR_PMEM_EMPTY) - -/* Bit status indicators for smart event notification */ -#define PAPR_PMEM_SMART_EVENT_MASK (PAPR_PMEM_HEALTH_CRITICAL | \ - PAPR_PMEM_HEALTH_FATAL | \ - PAPR_PMEM_HEALTH_UNHEALTHY) - -#define PAPR_PMEM_SAVE_MASK (PAPR_PMEM_SAVE_FAILED) - struct ndtest_config; struct ndtest_priv { -- cgit From 5b0a67008b0d608bb7585fe4c3c0c09d5ceaf1c9 Mon Sep 17 00:00:00 2001 From: Christophe Leroy Date: Sat, 16 Mar 2024 12:52:41 +0100 Subject: mm: remove guard around pgd_offset_k() macro The last architecture redefining pgd_offset_k() was IA64 and it was removed by commit cf8e8658100d ("arch: Remove Itanium (IA-64) architecture") There is no need anymore to guard generic version of pgd_offset_k() with #ifndef pgd_offset_k Link: https://lkml.kernel.org/r/59d3f47d5615d18cca1986f269be2fcb3df34556.1710589838.git.christophe.leroy@csgroup.eu Signed-off-by: Christophe Leroy Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- include/linux/pgtable.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 85fc7554cd52..bd9c7180718c 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -149,9 +149,7 @@ static inline pgd_t *pgd_offset_pgd(pgd_t *pgd, unsigned long address) * a shortcut which implies the use of the kernel's pgd, instead * of a process's */ -#ifndef pgd_offset_k #define pgd_offset_k(address) pgd_offset(&init_mm, (address)) -#endif /* * In many cases it is known that a virtual address is mapped at PMD or PTE -- cgit From 91b71e78b8e478e1a9af36b15ff1d38810572866 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Sat, 16 Mar 2024 01:58:03 +0000 
Subject: mm: memcg: add NULL check to obj_cgroup_put() 9 out of 16 callers perform a NULL check before calling obj_cgroup_put(). Move the NULL check in the function, similar to mem_cgroup_put(). The unlikely() NULL check in current_objcg_update() was left alone to avoid dropping the unlikey() annotation as this a fast path. Link: https://lkml.kernel.org/r/20240316015803.2777252-1-yosryahmed@google.com Signed-off-by: Yosry Ahmed Acked-by: Johannes Weiner Acked-by: Roman Gushchin Cc: Michal Hocko Cc: Muchun Song Cc: Shakeel Butt Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 3 ++- kernel/bpf/memalloc.c | 6 ++---- mm/memcontrol.c | 18 ++++++------------ mm/zswap.c | 3 +-- 4 files changed, 11 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 394fd0a887ae..b6264796815d 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -818,7 +818,8 @@ static inline void obj_cgroup_get_many(struct obj_cgroup *objcg, static inline void obj_cgroup_put(struct obj_cgroup *objcg) { - percpu_ref_put(&objcg->refcnt); + if (objcg) + percpu_ref_put(&objcg->refcnt); } static inline bool mem_cgroup_tryget(struct mem_cgroup *memcg) diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c index 550f02e2cb13..a546aba46d5d 100644 --- a/kernel/bpf/memalloc.c +++ b/kernel/bpf/memalloc.c @@ -759,8 +759,7 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma) rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress); rcu_in_progress += atomic_read(&c->call_rcu_in_progress); } - if (ma->objcg) - obj_cgroup_put(ma->objcg); + obj_cgroup_put(ma->objcg); destroy_mem_alloc(ma, rcu_in_progress); } if (ma->caches) { @@ -776,8 +775,7 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma) rcu_in_progress += atomic_read(&c->call_rcu_in_progress); } } - if (ma->objcg) - obj_cgroup_put(ma->objcg); + obj_cgroup_put(ma->objcg); destroy_mem_alloc(ma, rcu_in_progress); } } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index fabce2b50c69..786c0fae6e30 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2369,8 +2369,7 @@ static void drain_local_stock(struct work_struct *dummy) clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); local_unlock_irqrestore(&memcg_stock.stock_lock, flags); - if (old) - obj_cgroup_put(old); + obj_cgroup_put(old); } /* @@ -3145,8 +3144,7 @@ static struct obj_cgroup *current_objcg_update(void) if (old) { old = (struct obj_cgroup *) ((unsigned long)old & ~CURRENT_OBJCG_UPDATE_FLAG); - if (old) - obj_cgroup_put(old); + obj_cgroup_put(old); old = NULL; } @@ -3418,8 +3416,7 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, mod_objcg_mlstate(objcg, pgdat, idx, nr); local_unlock_irqrestore(&memcg_stock.stock_lock, flags); - if (old) - obj_cgroup_put(old); + obj_cgroup_put(old); } static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) @@ -3546,8 +3543,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, } local_unlock_irqrestore(&memcg_stock.stock_lock, flags); - if (old) - obj_cgroup_put(old); + obj_cgroup_put(old); if (nr_pages) obj_cgroup_uncharge_pages(objcg, nr_pages); @@ -5468,8 +5464,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) { int node; - if (memcg->orig_objcg) - obj_cgroup_put(memcg->orig_objcg); + obj_cgroup_put(memcg->orig_objcg); for_each_node(node) free_mem_cgroup_per_node_info(memcg, node); @@ -6620,8 +6615,7 @@ static void mem_cgroup_exit(struct task_struct *task) objcg = (struct 
obj_cgroup *) ((unsigned long)objcg & ~CURRENT_OBJCG_UPDATE_FLAG); - if (objcg) - obj_cgroup_put(objcg); + obj_cgroup_put(objcg); /* * Some kernel allocations can happen after this point, diff --git a/mm/zswap.c b/mm/zswap.c index 6f8850c44b61..2817e2791c13 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -1618,8 +1618,7 @@ put_pool: freepage: zswap_entry_cache_free(entry); reject: - if (objcg) - obj_cgroup_put(objcg); + obj_cgroup_put(objcg); check_old: /* * If the zswap store fails or zswap is disabled, we must invalidate the -- cgit From 9636f055dae19ffae9d8bf21a68261b58c769987 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Mon, 18 Mar 2024 16:04:02 -0400 Subject: mm/treewide: remove pXd_huge() This API is not used anymore, drop it for the whole tree. Link: https://lkml.kernel.org/r/20240318200404.448346-13-peterx@redhat.com Signed-off-by: Peter Xu Cc: Alistair Popple Cc: Andreas Larsson Cc: "Aneesh Kumar K.V" Cc: Arnd Bergmann Cc: Bjorn Andersson Cc: Borislav Petkov Cc: Catalin Marinas Cc: Christophe Leroy Cc: Dave Hansen Cc: David S. Miller Cc: Fabio Estevam Cc: Ingo Molnar Cc: Jason Gunthorpe Cc: Konrad Dybcio Cc: Krzysztof Kozlowski Cc: Lucas Stach Cc: Mark Salter Cc: "Matthew Wilcox (Oracle)" Cc: Michael Ellerman Cc: Mike Rapoport (IBM) Cc: Muchun Song Cc: Naoya Horiguchi Cc: "Naveen N. Rao" Cc: Nicholas Piggin Cc: Russell King Cc: Shawn Guo Cc: Thomas Gleixner Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/arm/mm/Makefile | 1 - arch/arm/mm/hugetlbpage.c | 29 ------------------------ arch/arm64/mm/hugetlbpage.c | 10 -------- arch/loongarch/mm/hugetlbpage.c | 10 -------- arch/mips/include/asm/pgtable-32.h | 2 +- arch/mips/include/asm/pgtable-64.h | 2 +- arch/mips/mm/hugetlbpage.c | 10 -------- arch/parisc/mm/hugetlbpage.c | 11 --------- arch/powerpc/include/asm/book3s/64/pgtable-4k.h | 10 -------- arch/powerpc/include/asm/book3s/64/pgtable-64k.h | 25 -------------------- arch/powerpc/include/asm/nohash/pgtable.h | 10 -------- arch/riscv/mm/hugetlbpage.c | 10 -------- arch/s390/mm/hugetlbpage.c | 10 -------- arch/sh/mm/hugetlbpage.c | 10 -------- arch/sparc/mm/hugetlbpage.c | 10 -------- arch/x86/mm/hugetlbpage.c | 16 ------------- include/linux/hugetlb.h | 24 -------------------- 17 files changed, 2 insertions(+), 198 deletions(-) delete mode 100644 arch/arm/mm/hugetlbpage.c (limited to 'include/linux') diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 71b858c9b10c..1779e12db085 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -21,7 +21,6 @@ KASAN_SANITIZE_physaddr.o := n obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o -obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_ARM_PV_FIXUP) += pv-fixup-asm.o obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c deleted file mode 100644 index c2fa643f6bb5..000000000000 --- a/arch/arm/mm/hugetlbpage.c +++ /dev/null @@ -1,29 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * arch/arm/mm/hugetlbpage.c - * - * Copyright (C) 2012 ARM Ltd. 
- * - * Based on arch/x86/include/asm/hugetlb.h and Bill Carson's patches - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -int pud_huge(pud_t pud) -{ - return 0; -} - -int pmd_huge(pmd_t pmd) -{ - return pmd_leaf(pmd); -} diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index f494fc31201f..ca58210d6c07 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -79,16 +79,6 @@ bool arch_hugetlb_migration_supported(struct hstate *h) } #endif -int pmd_huge(pmd_t pmd) -{ - return pmd_leaf(pmd); -} - -int pud_huge(pud_t pud) -{ - return pud_leaf(pud); -} - static int find_num_contig(struct mm_struct *mm, unsigned long addr, pte_t *ptep, size_t *pgsize) { diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c index a4e78e74aa21..12222c56cb59 100644 --- a/arch/loongarch/mm/hugetlbpage.c +++ b/arch/loongarch/mm/hugetlbpage.c @@ -50,16 +50,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, return (pte_t *) pmd; } -int pmd_huge(pmd_t pmd) -{ - return (pmd_val(pmd) & _PAGE_HUGE) != 0; -} - -int pud_huge(pud_t pud) -{ - return (pud_val(pud) & _PAGE_HUGE) != 0; -} - uint64_t pmd_to_entrylo(unsigned long pmd_val) { uint64_t val; diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index 0e196650f4f4..92b7591aac2a 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -129,7 +129,7 @@ static inline int pmd_none(pmd_t pmd) static inline int pmd_bad(pmd_t pmd) { #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT - /* pmd_huge(pmd) but inline */ + /* pmd_leaf(pmd) but inline */ if (unlikely(pmd_val(pmd) & _PAGE_HUGE)) return 0; #endif diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index 20ca48c1b606..7c28510b3768 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -245,7 +245,7 @@ static inline int pmd_none(pmd_t pmd) static inline int pmd_bad(pmd_t pmd) { #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT - /* pmd_huge(pmd) but inline */ + /* pmd_leaf(pmd) but inline */ if (unlikely(pmd_val(pmd) & _PAGE_HUGE)) return 0; #endif diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c index 7eaff5b07873..0b9e15555b59 100644 --- a/arch/mips/mm/hugetlbpage.c +++ b/arch/mips/mm/hugetlbpage.c @@ -57,13 +57,3 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, } return (pte_t *) pmd; } - -int pmd_huge(pmd_t pmd) -{ - return (pmd_val(pmd) & _PAGE_HUGE) != 0; -} - -int pud_huge(pud_t pud) -{ - return (pud_val(pud) & _PAGE_HUGE) != 0; -} diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index a9f7e21f6656..0356199bd9e7 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -180,14 +180,3 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma, } return changed; } - - -int pmd_huge(pmd_t pmd) -{ - return 0; -} - -int pud_huge(pud_t pud) -{ - return 0; -} diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h index 92545981bb49..baf934578c3a 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h @@ -6,16 +6,6 @@ */ #ifndef __ASSEMBLY__ #ifdef CONFIG_HUGETLB_PAGE -static inline int pmd_huge(pmd_t pmd) -{ - return pmd_leaf(pmd); -} - -static inline int pud_huge(pud_t pud) -{ - return pud_leaf(pud); -} - /* * With radix , we have hugepage ptes in the pud and pmd entries. 
We don't * need to setup hugepage directory for them. Our pte and page directory format diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h index ced7ee8b42fc..6ac73da7b80e 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h @@ -4,31 +4,6 @@ #ifndef __ASSEMBLY__ #ifdef CONFIG_HUGETLB_PAGE -/* - * We have PGD_INDEX_SIZ = 12 and PTE_INDEX_SIZE = 8, so that we can have - * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD; - * - * Defined in such a way that we can optimize away code block at build time - * if CONFIG_HUGETLB_PAGE=n. - * - * returns true for pmd migration entries, THP, devmap, hugetlb - * But compile time dependent on CONFIG_HUGETLB_PAGE - */ -static inline int pmd_huge(pmd_t pmd) -{ - /* - * leaf pte for huge page - */ - return !!(pmd_raw(pmd) & cpu_to_be64(_PAGE_PTE)); -} - -static inline int pud_huge(pud_t pud) -{ - /* - * leaf pte for huge page - */ - return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE)); -} /* * With 64k page size, we have hugepage ptes in the pgd and pmd entries. We don't diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 427db14292c9..f5f39d4f03c8 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -351,16 +351,6 @@ static inline int hugepd_ok(hugepd_t hpd) #endif } -static inline int pmd_huge(pmd_t pmd) -{ - return 0; -} - -static inline int pud_huge(pud_t pud) -{ - return 0; -} - #define is_hugepd(hpd) (hugepd_ok(hpd)) #endif diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c index 5ef2a6891158..0ebd968b33c9 100644 --- a/arch/riscv/mm/hugetlbpage.c +++ b/arch/riscv/mm/hugetlbpage.c @@ -399,16 +399,6 @@ static bool is_napot_size(unsigned long size) #endif /*CONFIG_RISCV_ISA_SVNAPOT*/ -int pud_huge(pud_t pud) -{ - return pud_leaf(pud); -} - -int pmd_huge(pmd_t pmd) -{ - return pmd_leaf(pmd); -} - static bool __hugetlb_valid_size(unsigned long size) { if (size == HPAGE_SIZE) diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index c2e8242bd15d..ca43b6fce71c 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -233,16 +233,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, return (pte_t *) pmdp; } -int pmd_huge(pmd_t pmd) -{ - return pmd_leaf(pmd); -} - -int pud_huge(pud_t pud) -{ - return pud_leaf(pud); -} - bool __init arch_hugetlb_valid_size(unsigned long size) { if (MACHINE_HAS_EDAT1 && size == PMD_SIZE) diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index 6cb0ad73dbb9..ff209b55285a 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c @@ -70,13 +70,3 @@ pte_t *huge_pte_offset(struct mm_struct *mm, return pte; } - -int pmd_huge(pmd_t pmd) -{ - return 0; -} - -int pud_huge(pud_t pud) -{ - return 0; -} diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 532143f1bf63..8ed5bdf95d25 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -407,16 +407,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, return entry; } -int pmd_huge(pmd_t pmd) -{ - return pmd_leaf(pmd); -} - -int pud_huge(pud_t pud) -{ - return pud_leaf(pud); -} - static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr) { diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index 8362953a24ce..dab6db288e5b 100644 --- a/arch/x86/mm/hugetlbpage.c +++ 
b/arch/x86/mm/hugetlbpage.c @@ -19,22 +19,6 @@ #include #include -/* - * pmd_huge() returns 1 if @pmd is hugetlb related entry. - */ -int pmd_huge(pmd_t pmd) -{ - return pmd_leaf(pmd); -} - -/* - * pud_huge() returns 1 if @pud is hugetlb related entry. - */ -int pud_huge(pud_t pud) -{ - return pud_leaf(pud); -} - #ifdef CONFIG_HUGETLB_PAGE static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr, unsigned long len, diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 77b30a8c6076..300de33c6fde 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -272,13 +272,9 @@ void hugetlb_vma_unlock_write(struct vm_area_struct *vma); int hugetlb_vma_trylock_write(struct vm_area_struct *vma); void hugetlb_vma_assert_locked(struct vm_area_struct *vma); void hugetlb_vma_lock_release(struct kref *kref); - -int pmd_huge(pmd_t pmd); -int pud_huge(pud_t pud); long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot, unsigned long cp_flags); - bool is_hugetlb_entry_migration(pte_t pte); bool is_hugetlb_entry_hwpoisoned(pte_t pte); void hugetlb_unshare_all_pmds(struct vm_area_struct *vma); @@ -399,16 +395,6 @@ static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma) { } -static inline int pmd_huge(pmd_t pmd) -{ - return 0; -} - -static inline int pud_huge(pud_t pud) -{ - return 0; -} - static inline int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, unsigned long len) { @@ -493,16 +479,6 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm, static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { } #endif /* !CONFIG_HUGETLB_PAGE */ -/* - * hugepages at page global directory. If arch support - * hugepages at pgd level, they need to define this. - */ -#ifndef pgd_huge -#define pgd_huge(x) 0 -#endif -#ifndef p4d_huge -#define p4d_huge(x) 0 -#endif #ifndef pgd_write static inline int pgd_write(pgd_t pgd) -- cgit From 64078b3d57dd9aaa4dde4a4f5851a53a79ac4d42 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Mon, 18 Mar 2024 16:04:04 -0400 Subject: mm: document pXd_leaf() API There's one small section already, but since we're going to remove pXd_huge(), that comment may start to obsolete. Rewrite that section with more information, hopefully with that the API is crystal clear on what it implies. Link: https://lkml.kernel.org/r/20240318200404.448346-15-peterx@redhat.com Signed-off-by: Peter Xu Reviewed-by: Jason Gunthorpe Cc: Alistair Popple Cc: Andreas Larsson Cc: "Aneesh Kumar K.V" Cc: Arnd Bergmann Cc: Bjorn Andersson Cc: Borislav Petkov Cc: Catalin Marinas Cc: Christophe Leroy Cc: Dave Hansen Cc: David S. Miller Cc: Fabio Estevam Cc: Ingo Molnar Cc: Konrad Dybcio Cc: Krzysztof Kozlowski Cc: Lucas Stach Cc: Mark Salter Cc: "Matthew Wilcox (Oracle)" Cc: Michael Ellerman Cc: Mike Rapoport (IBM) Cc: Muchun Song Cc: Naoya Horiguchi Cc: "Naveen N. Rao" Cc: Nicholas Piggin Cc: Russell King Cc: Shawn Guo Cc: Thomas Gleixner Cc: Will Deacon Signed-off-by: Andrew Morton --- include/linux/pgtable.h | 24 +++++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index bd9c7180718c..2a1c044ae467 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1768,11 +1768,25 @@ typedef unsigned int pgtbl_mod_mask; #endif /* - * p?d_leaf() - true if this entry is a final mapping to a physical address. 
- * This differs from p?d_huge() by the fact that they are always available (if - * the architecture supports large pages at the appropriate level) even - * if CONFIG_HUGETLB_PAGE is not defined. - * Only meaningful when called on a valid entry. + * pXd_leaf() is the API to check whether a pgtable entry is a huge page + * mapping. It should work globally across all archs, without any + * dependency on CONFIG_* options. For architectures that do not support + * huge mappings on specific levels, below fallbacks will be used. + * + * A leaf pgtable entry should always imply the following: + * + * - It is a "present" entry. IOW, before using this API, please check it + * with pXd_present() first. NOTE: it may not always mean the "present + * bit" is set. For example, PROT_NONE entries are always "present". + * + * - It should _never_ be a swap entry of any type. Above "present" check + * should have guarded this, but let's be crystal clear on this. + * + * - It should contain a huge PFN, which points to a huge page larger than + * PAGE_SIZE of the platform. The PFN format isn't important here. + * + * - It should cover all kinds of huge mappings (e.g., pXd_trans_huge(), + * pXd_devmap(), or hugetlb mappings). */ #ifndef pgd_leaf #define pgd_leaf(x) false -- cgit From 91cdcd8d624bfdf05e8db9d572759516f3c786b8 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Tue, 12 Mar 2024 11:34:11 -0400 Subject: mm: zswap: optimize zswap pool size tracking Profiling the munmap() of a zswapped memory region shows 60% of the total cycles currently going into updating the zswap_pool_total_size. There are three consumers of this counter: - store, to enforce the globally configured pool limit - meminfo & debugfs, to report the size to the user - shrink, to determine the batch size for each cycle Instead of aggregating everytime an entry enters or exits the zswap pool, aggregate the value from the zpools on-demand: - Stores aggregate the counter anyway upon success. Aggregating to check the limit instead is the same amount of work. - Meminfo & debugfs might benefit somewhat from a pre-aggregated counter, but aren't exactly hotpaths. - Shrinking can aggregate once for every cycle instead of doing it for every freed entry. As the shrinker might work on tens or hundreds of objects per scan cycle, this is a large reduction in aggregations. The paths that benefit dramatically are swapin, swapoff, and unmaps. There could be millions of pages being processed until somebody asks for the pool size again. This eliminates the pool size updates from those paths entirely. Top profile entries for a 24G range munmap(), before: 38.54% zswap-unmap [kernel.kallsyms] [k] zs_zpool_total_size 12.51% zswap-unmap [kernel.kallsyms] [k] zpool_get_total_size 9.10% zswap-unmap [kernel.kallsyms] [k] zswap_update_total_size 2.95% zswap-unmap [kernel.kallsyms] [k] obj_cgroup_uncharge_zswap 2.88% zswap-unmap [kernel.kallsyms] [k] __slab_free 2.86% zswap-unmap [kernel.kallsyms] [k] xas_store and after: 7.70% zswap-unmap [kernel.kallsyms] [k] __slab_free 7.16% zswap-unmap [kernel.kallsyms] [k] obj_cgroup_uncharge_zswap 6.74% zswap-unmap [kernel.kallsyms] [k] xas_store It was also briefly considered to move to a single atomic in zswap that is updated by the backends, since zswap only cares about the sum of all pools anyway. However, zram directly needs per-pool information out of zsmalloc. To keep the backend from having to update two atomics every time, I opted for the lazy aggregation instead for now. 
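As a rough illustration of the lazy-aggregation pattern chosen here (a standalone model, not the kernel code; the pool array and helper below are made up, while the real zswap_total_pages() in the diff walks the zpools under RCU): the hot store/free paths only touch their own pool's counter, and the global total is summed only when a consumer asks for it.

#include <stdio.h>

#define NR_POOLS 4

static unsigned long pool_pages[NR_POOLS];      /* per-pool page counts */

/* Lazy aggregation: recompute the total on demand instead of on every update. */
static unsigned long total_pages(void)
{
        unsigned long total = 0;
        int i;

        for (i = 0; i < NR_POOLS; i++)
                total += pool_pages[i];
        return total;
}

int main(void)
{
        /* Hot paths (store/free) update only their own pool... */
        pool_pages[0] += 100;
        pool_pages[1] += 50;
        pool_pages[0] -= 10;

        /* ...and the sum is computed only when somebody needs it. */
        printf("total: %lu pages\n", total_pages());
        return 0;
}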
Link: https://lkml.kernel.org/r/20240312153901.3441-1-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Yosry Ahmed Reviewed-by: Chengming Zhou Reviewed-by: Nhat Pham Signed-off-by: Andrew Morton --- fs/proc/meminfo.c | 3 +- include/linux/zswap.h | 2 +- mm/zswap.c | 101 +++++++++++++++++++++++++------------------------- 3 files changed, 52 insertions(+), 54 deletions(-) (limited to 'include/linux') diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 45af9a989d40..245171d9164b 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -89,8 +89,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) show_val_kb(m, "SwapTotal: ", i.totalswap); show_val_kb(m, "SwapFree: ", i.freeswap); #ifdef CONFIG_ZSWAP - seq_printf(m, "Zswap: %8lu kB\n", - (unsigned long)(zswap_pool_total_size >> 10)); + show_val_kb(m, "Zswap: ", zswap_total_pages()); seq_printf(m, "Zswapped: %8lu kB\n", (unsigned long)atomic_read(&zswap_stored_pages) << (PAGE_SHIFT - 10)); diff --git a/include/linux/zswap.h b/include/linux/zswap.h index 341aea490070..2a85b941db97 100644 --- a/include/linux/zswap.h +++ b/include/linux/zswap.h @@ -7,7 +7,6 @@ struct lruvec; -extern u64 zswap_pool_total_size; extern atomic_t zswap_stored_pages; #ifdef CONFIG_ZSWAP @@ -27,6 +26,7 @@ struct zswap_lruvec_state { atomic_long_t nr_zswap_protected; }; +unsigned long zswap_total_pages(void); bool zswap_store(struct folio *folio); bool zswap_load(struct folio *folio); void zswap_invalidate(swp_entry_t swp); diff --git a/mm/zswap.c b/mm/zswap.c index 2817e2791c13..a56b76852664 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -43,8 +43,6 @@ /********************************* * statistics **********************************/ -/* Total bytes used by the compressed storage */ -u64 zswap_pool_total_size; /* The number of compressed pages currently stored in zswap */ atomic_t zswap_stored_pages = ATOMIC_INIT(0); /* The number of same-value filled pages currently stored in zswap */ @@ -265,45 +263,6 @@ static inline struct zswap_tree *swap_zswap_tree(swp_entry_t swp) pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \ zpool_get_type((p)->zpools[0])) -static bool zswap_is_full(void) -{ - return totalram_pages() * zswap_max_pool_percent / 100 < - DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE); -} - -static bool zswap_can_accept(void) -{ - return totalram_pages() * zswap_accept_thr_percent / 100 * - zswap_max_pool_percent / 100 > - DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE); -} - -static u64 get_zswap_pool_size(struct zswap_pool *pool) -{ - u64 pool_size = 0; - int i; - - for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) - pool_size += zpool_get_total_size(pool->zpools[i]); - - return pool_size; -} - -static void zswap_update_total_size(void) -{ - struct zswap_pool *pool; - u64 total = 0; - - rcu_read_lock(); - - list_for_each_entry_rcu(pool, &zswap_pools, list) - total += get_zswap_pool_size(pool); - - rcu_read_unlock(); - - zswap_pool_total_size = total; -} - /********************************* * pool functions **********************************/ @@ -541,6 +500,33 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) return NULL; } +static unsigned long zswap_max_pages(void) +{ + return totalram_pages() * zswap_max_pool_percent / 100; +} + +static unsigned long zswap_accept_thr_pages(void) +{ + return zswap_max_pages() * zswap_accept_thr_percent / 100; +} + +unsigned long zswap_total_pages(void) +{ + struct zswap_pool *pool; + u64 total = 0; + + rcu_read_lock(); + list_for_each_entry_rcu(pool, &zswap_pools, list) { + int i; + + 
for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) + total += zpool_get_total_size(pool->zpools[i]); + } + rcu_read_unlock(); + + return total >> PAGE_SHIFT; +} + /********************************* * param callbacks **********************************/ @@ -913,7 +899,6 @@ static void zswap_entry_free(struct zswap_entry *entry) } zswap_entry_cache_free(entry); atomic_dec(&zswap_stored_pages); - zswap_update_total_size(); } /* @@ -1344,7 +1329,7 @@ static unsigned long zswap_shrinker_count(struct shrinker *shrinker, nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT; nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED); } else { - nr_backing = zswap_pool_total_size >> PAGE_SHIFT; + nr_backing = zswap_total_pages(); nr_stored = atomic_read(&zswap_nr_stored); } @@ -1412,6 +1397,10 @@ static void shrink_worker(struct work_struct *w) { struct mem_cgroup *memcg; int ret, failures = 0; + unsigned long thr; + + /* Reclaim down to the accept threshold */ + thr = zswap_accept_thr_pages(); /* global reclaim will select cgroup in a round-robin fashion. */ do { @@ -1459,10 +1448,9 @@ static void shrink_worker(struct work_struct *w) break; if (ret && ++failures == MAX_RECLAIM_RETRIES) break; - resched: cond_resched(); - } while (!zswap_can_accept()); + } while (zswap_total_pages() > thr); } static int zswap_is_page_same_filled(void *ptr, unsigned long *value) @@ -1503,6 +1491,7 @@ bool zswap_store(struct folio *folio) struct zswap_entry *entry, *dupentry; struct obj_cgroup *objcg = NULL; struct mem_cgroup *memcg = NULL; + unsigned long max_pages, cur_pages; VM_WARN_ON_ONCE(!folio_test_locked(folio)); VM_WARN_ON_ONCE(!folio_test_swapcache(folio)); @@ -1514,6 +1503,7 @@ bool zswap_store(struct folio *folio) if (!zswap_enabled) goto check_old; + /* Check cgroup limits */ objcg = get_obj_cgroup_from_folio(folio); if (objcg && !obj_cgroup_may_zswap(objcg)) { memcg = get_mem_cgroup_from_objcg(objcg); @@ -1524,15 +1514,18 @@ bool zswap_store(struct folio *folio) mem_cgroup_put(memcg); } - /* reclaim space if needed */ - if (zswap_is_full()) { + /* Check global limits */ + cur_pages = zswap_total_pages(); + max_pages = zswap_max_pages(); + + if (cur_pages >= max_pages) { zswap_pool_limit_hit++; zswap_pool_reached_full = true; goto shrink; } if (zswap_pool_reached_full) { - if (!zswap_can_accept()) + if (cur_pages > zswap_accept_thr_pages()) goto shrink; else zswap_pool_reached_full = false; @@ -1608,7 +1601,6 @@ insert_entry: /* update stats */ atomic_inc(&zswap_stored_pages); - zswap_update_total_size(); count_vm_event(ZSWPOUT); return true; @@ -1752,6 +1744,13 @@ void zswap_swapoff(int type) static struct dentry *zswap_debugfs_root; +static int debugfs_get_total_size(void *data, u64 *val) +{ + *val = zswap_total_pages() * PAGE_SIZE; + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(total_size_fops, debugfs_get_total_size, NULL, "%llu\n"); + static int zswap_debugfs_init(void) { if (!debugfs_initialized()) @@ -1773,8 +1772,8 @@ static int zswap_debugfs_init(void) zswap_debugfs_root, &zswap_reject_compress_poor); debugfs_create_u64("written_back_pages", 0444, zswap_debugfs_root, &zswap_written_back_pages); - debugfs_create_u64("pool_total_size", 0444, - zswap_debugfs_root, &zswap_pool_total_size); + debugfs_create_file("pool_total_size", 0444, + zswap_debugfs_root, NULL, &total_size_fops); debugfs_create_atomic_t("stored_pages", 0444, zswap_debugfs_root, &zswap_stored_pages); debugfs_create_atomic_t("same_filled_pages", 0444, -- cgit From 4196b48ddd382419e51658bf94a25af195ba9450 Mon Sep 17 00:00:00 2001 From: Johannes 
Weiner Date: Tue, 12 Mar 2024 11:34:12 -0400 Subject: mm: zpool: return pool size in pages All zswap backends track their pool sizes in pages. Currently they multiply by PAGE_SIZE for zswap, only for zswap to divide again in order to do limit math. Report pages directly. Link: https://lkml.kernel.org/r/20240312153901.3441-2-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Yosry Ahmed Reviewed-by: Chengming Zhou Reviewed-by: Nhat Pham Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- include/linux/zpool.h | 4 ++-- mm/z3fold.c | 10 +++++----- mm/zbud.c | 10 +++++----- mm/zpool.c | 10 +++++----- mm/zsmalloc.c | 6 +++--- mm/zswap.c | 6 +++--- 6 files changed, 23 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/include/linux/zpool.h b/include/linux/zpool.h index 3296438eec06..a67d62b79698 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -53,7 +53,7 @@ void *zpool_map_handle(struct zpool *pool, unsigned long handle, void zpool_unmap_handle(struct zpool *pool, unsigned long handle); -u64 zpool_get_total_size(struct zpool *pool); +u64 zpool_get_total_pages(struct zpool *pool); /** @@ -91,7 +91,7 @@ struct zpool_driver { enum zpool_mapmode mm); void (*unmap)(void *pool, unsigned long handle); - u64 (*total_size)(void *pool); + u64 (*total_pages)(void *pool); }; void zpool_register_driver(struct zpool_driver *driver); diff --git a/mm/z3fold.c b/mm/z3fold.c index 7ab05621052d..2ebfed32871b 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -1237,12 +1237,12 @@ static void z3fold_unmap(struct z3fold_pool *pool, unsigned long handle) } /** - * z3fold_get_pool_size() - gets the z3fold pool size in pages + * z3fold_get_pool_pages() - gets the z3fold pool size in pages * @pool: pool whose size is being queried * * Returns: size in pages of the given pool. */ -static u64 z3fold_get_pool_size(struct z3fold_pool *pool) +static u64 z3fold_get_pool_pages(struct z3fold_pool *pool) { return atomic64_read(&pool->pages_nr); } @@ -1402,9 +1402,9 @@ static void z3fold_zpool_unmap(void *pool, unsigned long handle) z3fold_unmap(pool, handle); } -static u64 z3fold_zpool_total_size(void *pool) +static u64 z3fold_zpool_total_pages(void *pool) { - return z3fold_get_pool_size(pool) * PAGE_SIZE; + return z3fold_get_pool_pages(pool); } static struct zpool_driver z3fold_zpool_driver = { @@ -1417,7 +1417,7 @@ static struct zpool_driver z3fold_zpool_driver = { .free = z3fold_zpool_free, .map = z3fold_zpool_map, .unmap = z3fold_zpool_unmap, - .total_size = z3fold_zpool_total_size, + .total_pages = z3fold_zpool_total_pages, }; MODULE_ALIAS("zpool-z3fold"); diff --git a/mm/zbud.c b/mm/zbud.c index 2190cc1f37b3..e9836fff9438 100644 --- a/mm/zbud.c +++ b/mm/zbud.c @@ -365,13 +365,13 @@ static void zbud_unmap(struct zbud_pool *pool, unsigned long handle) } /** - * zbud_get_pool_size() - gets the zbud pool size in pages + * zbud_get_pool_pages() - gets the zbud pool size in pages * @pool: pool whose size is being queried * * Returns: size in pages of the given pool. The pool lock need not be * taken to access pages_nr. 
*/ -static u64 zbud_get_pool_size(struct zbud_pool *pool) +static u64 zbud_get_pool_pages(struct zbud_pool *pool) { return pool->pages_nr; } @@ -410,9 +410,9 @@ static void zbud_zpool_unmap(void *pool, unsigned long handle) zbud_unmap(pool, handle); } -static u64 zbud_zpool_total_size(void *pool) +static u64 zbud_zpool_total_pages(void *pool) { - return zbud_get_pool_size(pool) * PAGE_SIZE; + return zbud_get_pool_pages(pool); } static struct zpool_driver zbud_zpool_driver = { @@ -425,7 +425,7 @@ static struct zpool_driver zbud_zpool_driver = { .free = zbud_zpool_free, .map = zbud_zpool_map, .unmap = zbud_zpool_unmap, - .total_size = zbud_zpool_total_size, + .total_pages = zbud_zpool_total_pages, }; MODULE_ALIAS("zpool-zbud"); diff --git a/mm/zpool.c b/mm/zpool.c index 846410479c2f..b9fda1fa857d 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -321,16 +321,16 @@ void zpool_unmap_handle(struct zpool *zpool, unsigned long handle) } /** - * zpool_get_total_size() - The total size of the pool + * zpool_get_total_pages() - The total size of the pool * @zpool: The zpool to check * - * This returns the total size in bytes of the pool. + * This returns the total size in pages of the pool. * - * Returns: Total size of the zpool in bytes. + * Returns: Total size of the zpool in pages. */ -u64 zpool_get_total_size(struct zpool *zpool) +u64 zpool_get_total_pages(struct zpool *zpool) { - return zpool->driver->total_size(zpool->pool); + return zpool->driver->total_pages(zpool->pool); } /** diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 7d7cb3eaabe0..b42d3545ca85 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -399,9 +399,9 @@ static void zs_zpool_unmap(void *pool, unsigned long handle) zs_unmap_object(pool, handle); } -static u64 zs_zpool_total_size(void *pool) +static u64 zs_zpool_total_pages(void *pool) { - return zs_get_total_pages(pool) << PAGE_SHIFT; + return zs_get_total_pages(pool); } static struct zpool_driver zs_zpool_driver = { @@ -414,7 +414,7 @@ static struct zpool_driver zs_zpool_driver = { .free = zs_zpool_free, .map = zs_zpool_map, .unmap = zs_zpool_unmap, - .total_size = zs_zpool_total_size, + .total_pages = zs_zpool_total_pages, }; MODULE_ALIAS("zpool-zsmalloc"); diff --git a/mm/zswap.c b/mm/zswap.c index a56b76852664..8721980a2274 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -513,18 +513,18 @@ static unsigned long zswap_accept_thr_pages(void) unsigned long zswap_total_pages(void) { struct zswap_pool *pool; - u64 total = 0; + unsigned long total = 0; rcu_read_lock(); list_for_each_entry_rcu(pool, &zswap_pools, list) { int i; for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) - total += zpool_get_total_size(pool->zpools[i]); + total += zpool_get_total_pages(pool->zpools[i]); } rcu_read_unlock(); - return total >> PAGE_SHIFT; + return total; } /********************************* -- cgit From f8fd525ba3a298d5791536bd8ca39be7a7103296 Mon Sep 17 00:00:00 2001 From: Donet Tom Date: Fri, 8 Mar 2024 09:15:37 -0600 Subject: mm/mempolicy: use numa_node_id() instead of cpu_to_node() Patch series "Allow migrate on protnone reference with MPOL_PREFERRED_MANY policy:, v4. This patchset is to optimize the cross-socket memory access with MPOL_PREFERRED_MANY policy. To test this patch we ran the following test on a 3 node system. Node 0 - 2GB - Tier 1 Node 1 - 11GB - Tier 1 Node 6 - 10GB - Tier 2 Below changes are made to memcached to set the memory policy, It select Node0 and Node1 as preferred nodes. 
#include #include unsigned long nodemask; int ret; nodemask = 0x03; ret = set_mempolicy(MPOL_PREFERRED_MANY | MPOL_F_NUMA_BALANCING, &nodemask, 10); /* If MPOL_F_NUMA_BALANCING isn't supported, * fall back to MPOL_PREFERRED_MANY */ if (ret < 0 && errno == EINVAL){ printf("set mem policy normal\n"); ret = set_mempolicy(MPOL_PREFERRED_MANY, &nodemask, 10); } if (ret < 0) { perror("Failed to call set_mempolicy"); exit(-1); } Test Procedure: =============== 1. Make sure memory tiring and demotion are enabled. 2. Start memcached. # ./memcached -b 100000 -m 204800 -u root -c 1000000 -t 7 -d -s "/tmp/memcached.sock" 3. Run memtier_benchmark to store 3200000 keys. #./memtier_benchmark -S "/tmp/memcached.sock" --protocol=memcache_binary --threads=1 --pipeline=1 --ratio=1:0 --key-pattern=S:S --key-minimum=1 --key-maximum=3200000 -n allkeys -c 1 -R -x 1 -d 1024 4. Start a memory eater on node 0 and 1. This will demote all memcached pages to node 6. 5. Make sure all the memcached pages got demoted to lower tier by reading /proc//numa_maps. # cat /proc/2771/numa_maps --- default anon=1009 dirty=1009 active=0 N6=1009 kernelpagesize_kB=64 default anon=1009 dirty=1009 active=0 N6=1009 kernelpagesize_kB=64 --- 6. Kill memory eater. 7. Read the pgpromote_success counter. 8. Start reading the keys by running memtier_benchmark. #./memtier_benchmark -S "/tmp/memcached.sock" --protocol=memcache_binary --pipeline=1 --distinct-client-seed --ratio=0:3 --key-pattern=R:R --key-minimum=1 --key-maximum=3200000 -n allkeys --threads=64 -c 1 -R -x 6 9. Read the pgpromote_success counter. Test Results: ============= Without Patch ------------------ 1. pgpromote_success before test Node 0: pgpromote_success 11 Node 1: pgpromote_success 140974 pgpromote_success after test Node 0: pgpromote_success 11 Node 1: pgpromote_success 140974 2. Memtier-benchmark result. AGGREGATED AVERAGE RESULTS (6 runs) ================================================================== Type Ops/sec Hits/sec Misses/sec Avg. Latency p50 Latency ------------------------------------------------------------------ Sets 0.00 --- --- --- --- Gets 305792.03 305791.93 0.10 0.18949 0.16700 Waits 0.00 --- --- --- --- Totals 305792.03 305791.93 0.10 0.18949 0.16700 ====================================== p99 Latency p99.9 Latency KB/sec ------------------------------------- --- --- 0.00 0.44700 1.71100 11542.69 --- --- --- 0.44700 1.71100 11542.69 With Patch --------------- 1. pgpromote_success before test Node 0: pgpromote_success 5 Node 1: pgpromote_success 89386 pgpromote_success after test Node 0: pgpromote_success 57895 Node 1: pgpromote_success 141463 2. Memtier-benchmark result. AGGREGATED AVERAGE RESULTS (6 runs) ==================================================================== Type Ops/sec Hits/sec Misses/sec Avg. Latency p50 Latency -------------------------------------------------------------------- Sets 0.00 --- --- --- --- Gets 521942.24 521942.07 0.17 0.11459 0.10300 Waits 0.00 --- --- --- --- Totals 521942.24 521942.07 0.17 0.11459 0.10300 ======================================= p99 Latency p99.9 Latency KB/sec --------------------------------------- --- --- 0.00 0.23100 0.31900 19701.68 --- --- --- 0.23100 0.31900 19701.68 Test Result Analysis: ===================== 1. With patch we could observe pages are getting promoted. 2. Memtier-benchmark results shows that, with the patch, performance has increased more than 50%. 
Ops/sec without fix - 305792.03 Ops/sec with fix - 521942.24 This patch (of 2): Instead of using 'cpu_to_node()', we use 'numa_node_id()', which is quicker. smp_processor_id is guaranteed to be stable in the 'mpol_misplaced()' function because it is called with ptl held. lockdep_assert_held was added to ensure that. No functional change in this patch. [donettom@linux.ibm.com: add "* @vmf: structure describing the fault" comment] Link: https://lkml.kernel.org/r/d8b993ea9dccfac0bc3ed61d3a81f4ac5f376e46.1711002865.git.donettom@linux.ibm.com Link: https://lkml.kernel.org/r/cover.1711373653.git.donettom@linux.ibm.com Link: https://lkml.kernel.org/r/6059f034f436734b472d066db69676fb3a459864.1711373653.git.donettom@linux.ibm.com Link: https://lkml.kernel.org/r/cover.1709909210.git.donettom@linux.ibm.com Link: https://lkml.kernel.org/r/744646531af02cc687cde8ae788fb1779e99d02c.1709909210.git.donettom@linux.ibm.com Signed-off-by: Aneesh Kumar K.V (IBM) Signed-off-by: Donet Tom Cc: Andrea Arcangeli Cc: Dan Williams Cc: Dave Hansen Cc: Feng Tang Cc: Huang, Ying Cc: Hugh Dickins Cc: Ingo Molnar Cc: Johannes Weiner Cc: Kefeng Wang Cc: "Matthew Wilcox (Oracle)" Cc: Mel Gorman Cc: Michal Hocko Cc: Peter Zijlstra Cc: Rik van Riel Cc: Suren Baghdasaryan Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- include/linux/mempolicy.h | 5 +++-- mm/huge_memory.c | 2 +- mm/internal.h | 2 +- mm/memory.c | 8 +++++--- mm/mempolicy.c | 14 ++++++++++---- 5 files changed, 20 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 931b118336f4..1add16f21612 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -167,7 +167,8 @@ extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol); /* Check if a vma is migratable */ extern bool vma_migratable(struct vm_area_struct *vma); -int mpol_misplaced(struct folio *, struct vm_area_struct *, unsigned long); +int mpol_misplaced(struct folio *folio, struct vm_fault *vmf, + unsigned long addr); extern void mpol_put_task_policy(struct task_struct *); static inline bool mpol_is_preferred_many(struct mempolicy *pol) @@ -282,7 +283,7 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol) #endif static inline int mpol_misplaced(struct folio *folio, - struct vm_area_struct *vma, + struct vm_fault *vmf, unsigned long address) { return -1; /* no node preference */ diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 89f58c7603b2..dd88bb3ea196 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1754,7 +1754,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) */ if (node_is_toptier(nid)) last_cpupid = folio_last_cpupid(folio); - target_nid = numa_migrate_prep(folio, vma, haddr, nid, &flags); + target_nid = numa_migrate_prep(folio, vmf, haddr, nid, &flags); if (target_nid == NUMA_NO_NODE) { folio_put(folio); goto out_map; diff --git a/mm/internal.h b/mm/internal.h index 07ad2675a88b..85c3db43454d 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1087,7 +1087,7 @@ void vunmap_range_noflush(unsigned long start, unsigned long end); void __vunmap_range_noflush(unsigned long start, unsigned long end); -int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma, +int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf, unsigned long addr, int page_nid, int *flags); void free_zone_device_page(struct page *page); diff --git a/mm/memory.c b/mm/memory.c index e0576f2a8d23..c859a09b4f72 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5035,9 
+5035,11 @@ static vm_fault_t do_fault(struct vm_fault *vmf) return ret; } -int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma, +int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf, unsigned long addr, int page_nid, int *flags) { + struct vm_area_struct *vma = vmf->vma; + folio_get(folio); /* Record the current PID acceesing VMA */ @@ -5049,7 +5051,7 @@ int numa_migrate_prep(struct folio *folio, struct vm_area_struct *vma, *flags |= TNF_FAULT_LOCAL; } - return mpol_misplaced(folio, vma, addr); + return mpol_misplaced(folio, vmf, addr); } static vm_fault_t do_numa_page(struct vm_fault *vmf) @@ -5123,7 +5125,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) last_cpupid = (-1 & LAST_CPUPID_MASK); else last_cpupid = folio_last_cpupid(folio); - target_nid = numa_migrate_prep(folio, vma, vmf->address, nid, &flags); + target_nid = numa_migrate_prep(folio, vmf, vmf->address, nid, &flags); if (target_nid == NUMA_NO_NODE) { folio_put(folio); goto out_map; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 0fe77738d971..aa48376e2d34 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2718,7 +2718,7 @@ static void sp_free(struct sp_node *n) * mpol_misplaced - check whether current folio node is valid in policy * * @folio: folio to be checked - * @vma: vm area where folio mapped + * @vmf: structure describing the fault * @addr: virtual address in @vma for shared policy lookup and interleave policy * * Lookup current policy node id for vma,addr and "compare to" folio's @@ -2728,18 +2728,24 @@ static void sp_free(struct sp_node *n) * Return: NUMA_NO_NODE if the page is in a node that is valid for this * policy, or a suitable node ID to allocate a replacement folio from. */ -int mpol_misplaced(struct folio *folio, struct vm_area_struct *vma, +int mpol_misplaced(struct folio *folio, struct vm_fault *vmf, unsigned long addr) { struct mempolicy *pol; pgoff_t ilx; struct zoneref *z; int curnid = folio_nid(folio); + struct vm_area_struct *vma = vmf->vma; int thiscpu = raw_smp_processor_id(); - int thisnid = cpu_to_node(thiscpu); + int thisnid = numa_node_id(); int polnid = NUMA_NO_NODE; int ret = NUMA_NO_NODE; + /* + * Make sure ptl is held so that we don't preempt and we + * have a stable smp processor id + */ + lockdep_assert_held(vmf->ptl); pol = get_vma_policy(vma, addr, folio_order(folio), &ilx); if (!(pol->flags & MPOL_F_MOF)) goto out; @@ -2781,7 +2787,7 @@ int mpol_misplaced(struct folio *folio, struct vm_area_struct *vma, if (node_isset(curnid, pol->nodes)) goto out; z = first_zones_zonelist( - node_zonelist(numa_node_id(), GFP_HIGHUSER), + node_zonelist(thisnid, GFP_HIGHUSER), gfp_zone(GFP_HIGHUSER), &pol->nodes); polnid = zone_to_nid(z->zone); -- cgit From 0069455bcbf9ea73ffe4553ed6d2b4e4cad703de Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 21 Mar 2024 09:36:23 -0700 Subject: fix missing vmalloc.h includes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "Memory allocation profiling", v6. Overview: Low overhead [1] per-callsite memory allocation profiling. Not just for debug kernels, overhead low enough to be deployed in production. 
Example output: root@moria-kvm:~# sort -rn /proc/allocinfo 127664128 31168 mm/page_ext.c:270 func:alloc_page_ext 56373248 4737 mm/slub.c:2259 func:alloc_slab_page 14880768 3633 mm/readahead.c:247 func:page_cache_ra_unbounded 14417920 3520 mm/mm_init.c:2530 func:alloc_large_system_hash 13377536 234 block/blk-mq.c:3421 func:blk_mq_alloc_rqs 11718656 2861 mm/filemap.c:1919 func:__filemap_get_folio 9192960 2800 kernel/fork.c:307 func:alloc_thread_stack_node 4206592 4 net/netfilter/nf_conntrack_core.c:2567 func:nf_ct_alloc_hashtable 4136960 1010 drivers/staging/ctagmod/ctagmod.c:20 [ctagmod] func:ctagmod_start 3940352 962 mm/memory.c:4214 func:alloc_anon_folio 2894464 22613 fs/kernfs/dir.c:615 func:__kernfs_new_node ... Usage: kconfig options: - CONFIG_MEM_ALLOC_PROFILING - CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT - CONFIG_MEM_ALLOC_PROFILING_DEBUG adds warnings for allocations that weren't accounted because of a missing annotation sysctl: /proc/sys/vm/mem_profiling Runtime info: /proc/allocinfo Notes: [1]: Overhead To measure the overhead we are comparing the following configurations: (1) Baseline with CONFIG_MEMCG_KMEM=n (2) Disabled by default (CONFIG_MEM_ALLOC_PROFILING=y && CONFIG_MEM_ALLOC_PROFILING_BY_DEFAULT=n) (3) Enabled by default (CONFIG_MEM_ALLOC_PROFILING=y && CONFIG_MEM_ALLOC_PROFILING_BY_DEFAULT=y) (4) Enabled at runtime (CONFIG_MEM_ALLOC_PROFILING=y && CONFIG_MEM_ALLOC_PROFILING_BY_DEFAULT=n && /proc/sys/vm/mem_profiling=1) (5) Baseline with CONFIG_MEMCG_KMEM=y && allocating with __GFP_ACCOUNT (6) Disabled by default (CONFIG_MEM_ALLOC_PROFILING=y && CONFIG_MEM_ALLOC_PROFILING_BY_DEFAULT=n) && CONFIG_MEMCG_KMEM=y (7) Enabled by default (CONFIG_MEM_ALLOC_PROFILING=y && CONFIG_MEM_ALLOC_PROFILING_BY_DEFAULT=y) && CONFIG_MEMCG_KMEM=y Performance overhead: To evaluate performance we implemented an in-kernel test executing multiple get_free_page/free_page and kmalloc/kfree calls with allocation sizes growing from 8 to 240 bytes with CPU frequency set to max and CPU affinity set to a specific CPU to minimize the noise. Below are results from running the test on Ubuntu 22.04.2 LTS with 6.8.0-rc1 kernel on 56 core Intel Xeon: kmalloc pgalloc (1 baseline) 6.764s 16.902s (2 default disabled) 6.793s (+0.43%) 17.007s (+0.62%) (3 default enabled) 7.197s (+6.40%) 23.666s (+40.02%) (4 runtime enabled) 7.405s (+9.48%) 23.901s (+41.41%) (5 memcg) 13.388s (+97.94%) 48.460s (+186.71%) (6 def disabled+memcg) 13.332s (+97.10%) 48.105s (+184.61%) (7 def enabled+memcg) 13.446s (+98.78%) 54.963s (+225.18%) Memory overhead: Kernel size: text data bss dec diff (1) 26515311 18890222 17018880 62424413 (2) 26524728 19423818 16740352 62688898 264485 (3) 26524724 19423818 16740352 62688894 264481 (4) 26524728 19423818 16740352 62688898 264485 (5) 26541782 18964374 16957440 62463596 39183 Memory consumption on a 56 core Intel CPU with 125GB of memory: Code tags: 192 kB PageExts: 262144 kB (256MB) SlabExts: 9876 kB (9.6MB) PcpuExts: 512 kB (0.5MB) Total overhead is 0.2% of total memory. 
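(For scale: the dominant term above is the page extension vector at 262144 kB = 256 MB; against the machine's 125 GB of memory that is roughly 256 / 128000 ≈ 0.2%, and the slab, percpu and code-tag extensions barely move it, which is where the 0.2% figure comes from.)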
Benchmarks: Hackbench tests run 100 times: hackbench -s 512 -l 200 -g 15 -f 25 -P baseline disabled profiling enabled profiling avg 0.3543 0.3559 (+0.0016) 0.3566 (+0.0023) stdev 0.0137 0.0188 0.0077 hackbench -l 10000 baseline disabled profiling enabled profiling avg 6.4218 6.4306 (+0.0088) 6.5077 (+0.0859) stdev 0.0933 0.0286 0.0489 stress-ng tests: stress-ng --class memory --seq 4 -t 60 stress-ng --class cpu --seq 4 -t 60 Results posted at: https://evilpiepirate.org/~kent/memalloc_prof_v4_stress-ng/ [2] https://lore.kernel.org/all/20240306182440.2003814-1-surenb@google.com/ This patch (of 37): The next patch drops vmalloc.h from a system header in order to fix a circular dependency; this adds it to all the files that were pulling it in implicitly. [kent.overstreet@linux.dev: fix arch/alpha/lib/memcpy.c] Link: https://lkml.kernel.org/r/20240327002152.3339937-1-kent.overstreet@linux.dev [surenb@google.com: fix arch/x86/mm/numa_32.c] Link: https://lkml.kernel.org/r/20240402180933.1663992-1-surenb@google.com [kent.overstreet@linux.dev: a few places were depending on sizes.h] Link: https://lkml.kernel.org/r/20240404034744.1664840-1-kent.overstreet@linux.dev [arnd@arndb.de: fix mm/kasan/hw_tags.c] Link: https://lkml.kernel.org/r/20240404124435.3121534-1-arnd@kernel.org [surenb@google.com: fix arc build] Link: https://lkml.kernel.org/r/20240405225115.431056-1-surenb@google.com Link: https://lkml.kernel.org/r/20240321163705.3067592-1-surenb@google.com Link: https://lkml.kernel.org/r/20240321163705.3067592-2-surenb@google.com Signed-off-by: Kent Overstreet Signed-off-by: Suren Baghdasaryan Signed-off-by: Arnd Bergmann Reviewed-by: Pasha Tatashin Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Miguel Ojeda Cc: Peter Zijlstra Cc: Tejun Heo Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- arch/alpha/lib/checksum.c | 1 + arch/alpha/lib/fpreg.c | 1 + arch/arc/include/asm/mmu-arcv2.h | 2 ++ arch/arm/kernel/irq.c | 1 + arch/arm/kernel/traps.c | 1 + arch/arm64/kernel/efi.c | 1 + arch/loongarch/include/asm/kfence.h | 1 + arch/powerpc/kernel/iommu.c | 1 + arch/powerpc/mm/mem.c | 1 + arch/riscv/kernel/elf_kexec.c | 1 + arch/riscv/kernel/probes/kprobes.c | 1 + arch/s390/kernel/cert_store.c | 1 + arch/s390/kernel/ipl.c | 1 + arch/x86/include/asm/io.h | 1 + arch/x86/kernel/cpu/sgx/main.c | 1 + arch/x86/kernel/irq_64.c | 1 + arch/x86/mm/fault.c | 1 + arch/x86/mm/numa_32.c | 1 + drivers/accel/ivpu/ivpu_mmu_context.c | 1 + drivers/gpu/drm/gma500/mmu.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_pages.c | 1 + drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c | 1 + drivers/gpu/drm/i915/gt/shmem_utils.c | 1 + drivers/gpu/drm/i915/gvt/firmware.c | 1 + drivers/gpu/drm/i915/gvt/gtt.c | 1 + drivers/gpu/drm/i915/gvt/handlers.c | 1 + drivers/gpu/drm/i915/gvt/mmio.c | 1 + drivers/gpu/drm/i915/gvt/vgpu.c | 1 + drivers/gpu/drm/i915/intel_gvt.c | 1 + drivers/gpu/drm/imagination/pvr_vm_mips.c | 1 + drivers/gpu/drm/mediatek/mtk_drm_gem.c | 1 + drivers/gpu/drm/omapdrm/omap_gem.c | 1 + drivers/gpu/drm/v3d/v3d_bo.c | 1 + drivers/gpu/drm/vmwgfx/vmwgfx_binding.c | 1 + drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c | 1 + drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c | 1 + drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 1 + drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 1 + drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 1 + drivers/gpu/drm/xen/xen_drm_front_gem.c | 1 + 
drivers/hwtracing/coresight/coresight-trbe.c | 1 + drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c | 1 + drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c | 1 + drivers/net/ethernet/microsoft/mana/hw_channel.c | 1 + drivers/platform/x86/uv_sysfs.c | 1 + drivers/scsi/mpi3mr/mpi3mr_transport.c | 2 ++ drivers/vfio/pci/pds/dirty.c | 1 + drivers/virt/acrn/mm.c | 1 + drivers/virtio/virtio_mem.c | 1 + include/asm-generic/io.h | 1 + include/linux/io.h | 1 + include/linux/pds/pds_common.h | 2 ++ include/rdma/rdmavt_qp.h | 1 + mm/debug_vm_pgtable.c | 1 + mm/kasan/hw_tags.c | 1 + sound/pci/hda/cs35l41_hda.c | 1 + 56 files changed, 59 insertions(+) (limited to 'include/linux') diff --git a/arch/alpha/lib/checksum.c b/arch/alpha/lib/checksum.c index 3f35c3ed6948..c29b98ef9c82 100644 --- a/arch/alpha/lib/checksum.c +++ b/arch/alpha/lib/checksum.c @@ -14,6 +14,7 @@ #include #include +#include static inline unsigned short from64to16(unsigned long x) { diff --git a/arch/alpha/lib/fpreg.c b/arch/alpha/lib/fpreg.c index 7c08b225261c..3d32165043f8 100644 --- a/arch/alpha/lib/fpreg.c +++ b/arch/alpha/lib/fpreg.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) diff --git a/arch/arc/include/asm/mmu-arcv2.h b/arch/arc/include/asm/mmu-arcv2.h index ed9036d4ede3..d85dc0721907 100644 --- a/arch/arc/include/asm/mmu-arcv2.h +++ b/arch/arc/include/asm/mmu-arcv2.h @@ -9,6 +9,8 @@ #ifndef _ASM_ARC_MMU_ARCV2_H #define _ASM_ARC_MMU_ARCV2_H +#include + /* * TLB Management regs */ diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index fe28fc1f759d..dab42d066d06 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 72c82a4d63ac..480e307501bb 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 9afcc690fe73..4a92096db34e 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include diff --git a/arch/loongarch/include/asm/kfence.h b/arch/loongarch/include/asm/kfence.h index a6a5760da3a3..92636e82957c 100644 --- a/arch/loongarch/include/asm/kfence.h +++ b/arch/loongarch/include/asm/kfence.h @@ -10,6 +10,7 @@ #define _ASM_LOONGARCH_KFENCE_H #include +#include #include #include diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index 1185efebf032..65468d0829e1 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 3a440004b97d..a197d4c2244b 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c index 54260c16f991..11c0d2e0becf 100644 --- a/arch/riscv/kernel/elf_kexec.c +++ b/arch/riscv/kernel/elf_kexec.c @@ -19,6 +19,7 @@ #include #include #include +#include #include int arch_kimage_file_post_load_cleanup(struct kimage *image) diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c index 2f08c14a933d..71a8b8945b26 100644 --- a/arch/riscv/kernel/probes/kprobes.c +++ 
b/arch/riscv/kernel/probes/kprobes.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/s390/kernel/cert_store.c b/arch/s390/kernel/cert_store.c index 554447768bdd..bf983513dd33 100644 --- a/arch/s390/kernel/cert_store.c +++ b/arch/s390/kernel/cert_store.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 1486350a4177..5d6d381aa0be 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 294cd2a40818..7452fc193b4f 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h @@ -42,6 +42,7 @@ #include #include #include +#include #define build_mmio_read(name, size, type, reg, barrier) \ static inline type name(const volatile void __iomem *addr) \ diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c index 166692f2d501..27892e57c4ef 100644 --- a/arch/x86/kernel/cpu/sgx/main.c +++ b/arch/x86/kernel/cpu/sgx/main.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include "driver.h" #include "encl.h" diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index fe0c859873d1..ade0043ce56e 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 622d12ec7f08..a4cc20d0036d 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -20,6 +20,7 @@ #include /* efi_crash_gracefully_on_page_fault()*/ #include #include /* find_and_lock_vma() */ +#include #include /* boot_cpu_has, ... */ #include /* dotraplinkage, ... 
*/ diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 025fd7ea5d69..65fda406e6f2 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -24,6 +24,7 @@ #include #include +#include #include #include "numa_internal.h" diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c index fe6161299236..128aef8e5a19 100644 --- a/drivers/accel/ivpu/ivpu_mmu_context.c +++ b/drivers/accel/ivpu/ivpu_mmu_context.c @@ -6,6 +6,7 @@ #include #include #include +#include #include diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c index a70b01ccdf70..4d78b33eaa82 100644 --- a/drivers/gpu/drm/gma500/mmu.c +++ b/drivers/gpu/drm/gma500/mmu.c @@ -5,6 +5,7 @@ **************************************************************************/ #include +#include #include "mmu.h" #include "psb_drv.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 0ba955611dfb..8780aa243105 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -5,6 +5,7 @@ */ #include +#include #include "gt/intel_gt.h" #include "gt/intel_tlb.h" diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c index b2a5882b8f81..075657018739 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c @@ -4,6 +4,7 @@ * Copyright © 2016 Intel Corporation */ +#include #include "mock_dmabuf.h" static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment, diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c index bccc3a1200bc..1fb6ff77fd89 100644 --- a/drivers/gpu/drm/i915/gt/shmem_utils.c +++ b/drivers/gpu/drm/i915/gt/shmem_utils.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "i915_drv.h" #include "gem/i915_gem_object.h" diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 4dd52ac2043e..d800d267f0e9 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -30,6 +30,7 @@ #include #include +#include #include "i915_drv.h" #include "gvt.h" diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 094fca9b0e73..58cca4906f41 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -39,6 +39,7 @@ #include "trace.h" #include "gt/intel_gt_regs.h" +#include #if defined(VERBOSE_DEBUG) #define gvt_vdbg_mm(fmt, args...) 
gvt_dbg_mm(fmt, ##args) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index efcb00472be2..ea9c30092767 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -52,6 +52,7 @@ #include "display/skl_watermark_regs.h" #include "display/vlv_dsi_pll_regs.h" #include "gt/intel_gt_regs.h" +#include /* XXX FIXME i915 has changed PP_XXX definition */ #define PCH_PP_STATUS _MMIO(0xc7200) diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 5b5def6ddef7..780762f28aa4 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c @@ -33,6 +33,7 @@ * */ +#include #include "i915_drv.h" #include "i915_reg.h" #include "gvt.h" diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 08ad1bd651f1..63c751ca4119 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -34,6 +34,7 @@ #include "i915_drv.h" #include "gvt.h" #include "i915_pvinfo.h" +#include void populate_pvinfo_page(struct intel_vgpu *vgpu) { diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index 9b6d87c8b583..5a01d60e5186 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c @@ -28,6 +28,7 @@ #include "gt/intel_context.h" #include "gt/intel_ring.h" #include "gt/shmem_utils.h" +#include /** * DOC: Intel GVT-g host support diff --git a/drivers/gpu/drm/imagination/pvr_vm_mips.c b/drivers/gpu/drm/imagination/pvr_vm_mips.c index b7fef3c797e6..6563dcde109c 100644 --- a/drivers/gpu/drm/imagination/pvr_vm_mips.c +++ b/drivers/gpu/drm/imagination/pvr_vm_mips.c @@ -14,6 +14,7 @@ #include #include #include +#include /** * pvr_vm_mips_init() - Initialise MIPS FW pagetable diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index 4f2e3feabc0f..3e519869b632 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -4,6 +4,7 @@ */ #include +#include #include #include diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 3421e8389222..9ea0c64c26b5 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index a07ede668cc1..a165cbcdd27b 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -21,6 +21,7 @@ #include #include +#include #include "v3d_drv.h" #include "uapi/drm/v3d_drm.h" diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c index ae2de914eb89..2731f6ded1c2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c @@ -54,6 +54,7 @@ #include "vmwgfx_drv.h" #include "vmwgfx_binding.h" #include "device_include/svga3d_reg.h" +#include #define VMW_BINDING_RT_BIT 0 #define VMW_BINDING_PS_BIT 1 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c index 195ff8792e5a..dd4ca6a9c690 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c @@ -31,6 +31,7 @@ #include #include +#include bool vmw_supports_3d(struct vmw_private *dev_priv) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c b/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c index 829df395c2ed..6e6beff9e262 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c +++ 
b/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c @@ -25,6 +25,7 @@ * **************************************************************************/ +#include #include "vmwgfx_devcaps.h" #include "vmwgfx_drv.h" diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 0a304706e013..f8504a2e049e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -53,6 +53,7 @@ #include #include #include +#include #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index cc3086e649eb..2e52d73eba48 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -35,6 +35,7 @@ #include #include +#include /* * Helper macro to get dx_ctx_node if available otherwise print an error diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index a1da5678c731..835d1eed8dd9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -31,6 +31,7 @@ #include #include +#include int vmw_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index 3ad2b4cfd1f0..63112ed975c4 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c index 6136776482e6..96a32b213669 100644 --- a/drivers/hwtracing/coresight/coresight-trbe.c +++ b/drivers/hwtracing/coresight/coresight-trbe.c @@ -17,6 +17,7 @@ #include #include +#include #include "coresight-self-hosted-trace.h" #include "coresight-trbe.h" diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c index 2e2c3be8a0b4..e6eb98d70f3c 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "octep_config.h" #include "octep_main.h" diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c index 2eab21e43048..445b626efe11 100644 --- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c +++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c @@ -7,6 +7,7 @@ #include #include #include +#include #include "octep_vf_config.h" #include "octep_vf_main.h" diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index 2729a2c5acf9..11021c34e47e 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -3,6 +3,7 @@ #include #include +#include static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id) { diff --git a/drivers/platform/x86/uv_sysfs.c b/drivers/platform/x86/uv_sysfs.c index 38d1b692d3c0..40e010877189 100644 --- a/drivers/platform/x86/uv_sysfs.c +++ b/drivers/platform/x86/uv_sysfs.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c index d32ad46318cb..dabb91f0f75d 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_transport.c 
+++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c @@ -7,6 +7,8 @@ * */ +#include + #include "mpi3mr.h" /** diff --git a/drivers/vfio/pci/pds/dirty.c b/drivers/vfio/pci/pds/dirty.c index 68e8f006dfdb..c51f5e4c3dd6 100644 --- a/drivers/vfio/pci/pds/dirty.c +++ b/drivers/vfio/pci/pds/dirty.c @@ -3,6 +3,7 @@ #include #include +#include #include #include diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c index fa5d9ca6be57..c088ee1f1180 100644 --- a/drivers/virt/acrn/mm.c +++ b/drivers/virt/acrn/mm.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "acrn_drv.h" diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index 8e3223294442..e8355f55a8f7 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -21,6 +21,7 @@ #include #include #include +#include #include diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index bac63e874c7b..d4e1f042c9eb 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -9,6 +9,7 @@ #include /* I/O is all done through memory accesses */ #include /* for memset() and memcpy() */ +#include #include #include diff --git a/include/linux/io.h b/include/linux/io.h index 235ba7d80a8f..e8ee40ee1843 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -6,6 +6,7 @@ #ifndef _LINUX_IO_H #define _LINUX_IO_H +#include #include #include #include diff --git a/include/linux/pds/pds_common.h b/include/linux/pds/pds_common.h index 30581e2e04cc..5802e1deef24 100644 --- a/include/linux/pds/pds_common.h +++ b/include/linux/pds/pds_common.h @@ -4,6 +4,8 @@ #ifndef _PDS_COMMON_H_ #define _PDS_COMMON_H_ +#include + #define PDS_CORE_DRV_NAME "pds_core" /* the device's internal addressing uses up to 52 bits */ diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h index 2e58d5e6ac0e..d67892944193 100644 --- a/include/rdma/rdmavt_qp.h +++ b/include/rdma/rdmavt_qp.h @@ -11,6 +11,7 @@ #include #include #include +#include /* * Atomic bit definitions for r_aflags. */ diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c index 65c19025da3d..f1c9a2c5abc0 100644 --- a/mm/debug_vm_pgtable.c +++ b/mm/debug_vm_pgtable.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c index 2b994092a2d4..9958ebc15d38 100644 --- a/mm/kasan/hw_tags.c +++ b/mm/kasan/hw_tags.c @@ -16,6 +16,7 @@ #include #include #include +#include #include "kasan.h" diff --git a/sound/pci/hda/cs35l41_hda.c b/sound/pci/hda/cs35l41_hda.c index d3fa6e136744..990b5bd717a1 100644 --- a/sound/pci/hda/cs35l41_hda.c +++ b/sound/pci/hda/cs35l41_hda.c @@ -13,6 +13,7 @@ #include #include #include +#include #include "hda_local.h" #include "hda_auto_parser.h" #include "hda_jack.h" -- cgit From a5674119f0faa46f6ebdfcfa92342535b7f54e67 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 21 Mar 2024 09:36:27 -0700 Subject: fs: convert alloc_inode_sb() to a macro MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We're introducing alloc tagging, which tracks memory allocations by callsite. Converting alloc_inode_sb() to a macro means allocations will be tracked by its caller, which is a bit more useful. 
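As a side note on the mechanism (a standalone C sketch, not the kernel's actual alloc-tag machinery, under the assumption that the callsite is captured where the allocation construct expands): anything recording the callsite, __FILE__/__LINE__ in this toy example, expands at the point of use when wrapped in a macro, so the allocation is attributed to the filesystem calling alloc_inode_sb() rather than to the header that defines it.

#include <stdio.h>
#include <stdlib.h>

/* In a function, the recorded location always names the function body itself. */
static void *alloc_fn(size_t size)
{
        printf("fn    : %s:%d\n", __FILE__, __LINE__);
        return malloc(size);
}

/* In a macro, it expands at the call site, so each caller is distinct. */
#define alloc_macro(size) \
        (printf("macro : %s:%d\n", __FILE__, __LINE__), malloc(size))

int main(void)
{
        void *a = alloc_fn(32);         /* reports the line inside alloc_fn() */
        void *b = alloc_macro(32);      /* reports this line */

        free(a);
        free(b);
        return 0;
}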
Link: https://lkml.kernel.org/r/20240321163705.3067592-6-surenb@google.com Signed-off-by: Kent Overstreet Signed-off-by: Suren Baghdasaryan Cc: Alexander Viro Reviewed-by: Kees Cook Reviewed-by: Pasha Tatashin Tested-by: Kees Cook Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Miguel Ojeda Cc: Peter Zijlstra Cc: Tejun Heo Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/fs.h | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fs.h b/include/linux/fs.h index 8dfd53b52744..4f2e2e5b6ab1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3085,11 +3085,7 @@ int setattr_should_drop_sgid(struct mnt_idmap *idmap, * This must be used for allocating filesystems specific inodes to set * up the inode reclaim context correctly. */ -static inline void * -alloc_inode_sb(struct super_block *sb, struct kmem_cache *cache, gfp_t gfp) -{ - return kmem_cache_alloc_lru(cache, &sb->s_inode_lru, gfp); -} +#define alloc_inode_sb(_sb, _cache, _gfp) kmem_cache_alloc_lru(_cache, &_sb->s_inode_lru, _gfp) extern void __insert_inode_hash(struct inode *, unsigned long hashval); static inline void insert_inode_hash(struct inode *inode) -- cgit From 21c690a349baab895dc68ab70d291e1598d7109d Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:28 -0700 Subject: mm: introduce slabobj_ext to support slab object extensions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently slab pages can store only vectors of obj_cgroup pointers in page->memcg_data. Introduce slabobj_ext structure to allow more data to be stored for each slab object. Wrap obj_cgroup into slabobj_ext to support current functionality while allowing to extend slabobj_ext in the future. 
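For intuition, a standalone model of the layout this enables (illustrative only: fake_slab, obj_ext() and nr_objects are made up here, while struct slabobj_ext mirrors the definition added below): each object in a slab gets one slot in a vector of extension records, so future per-object data can be added as new members without changing the slab-to-vector plumbing.

#include <stdlib.h>

struct obj_cgroup;                      /* opaque for this sketch */

struct slabobj_ext {
        struct obj_cgroup *objcg;       /* sole member today, as in the diff */
        /* future per-object fields would be appended here */
};

struct fake_slab {
        unsigned int nr_objects;
        struct slabobj_ext *obj_exts;   /* one entry per object */
};

/* Hypothetical helper: extension slot for the object at index 'i'. */
static struct slabobj_ext *obj_ext(struct fake_slab *slab, unsigned int i)
{
        return i < slab->nr_objects ? &slab->obj_exts[i] : NULL;
}

int main(void)
{
        struct fake_slab slab = { .nr_objects = 16 };

        slab.obj_exts = calloc(slab.nr_objects, sizeof(*slab.obj_exts));
        if (!slab.obj_exts)
                return 1;

        /* e.g. record the charging cgroup for object 3 */
        obj_ext(&slab, 3)->objcg = NULL;        /* stands in for a real obj_cgroup */

        free(slab.obj_exts);
        return 0;
}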
Link: https://lkml.kernel.org/r/20240321163705.3067592-7-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Pasha Tatashin Reviewed-by: Vlastimil Babka Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Kent Overstreet Cc: Miguel Ojeda Cc: Peter Zijlstra Cc: Tejun Heo Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 20 ++++++--- include/linux/mm_types.h | 4 +- init/Kconfig | 4 ++ mm/kfence/core.c | 14 +++---- mm/kfence/kfence.h | 4 +- mm/memcontrol.c | 56 ++++--------------------- mm/page_owner.c | 2 +- mm/slab.h | 52 ++++++++++++----------- mm/slub.c | 101 ++++++++++++++++++++++++++++++++++++--------- 9 files changed, 145 insertions(+), 112 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b6264796815d..99f423742324 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -349,8 +349,8 @@ struct mem_cgroup { extern struct mem_cgroup *root_mem_cgroup; enum page_memcg_data_flags { - /* page->memcg_data is a pointer to an objcgs vector */ - MEMCG_DATA_OBJCGS = (1UL << 0), + /* page->memcg_data is a pointer to an slabobj_ext vector */ + MEMCG_DATA_OBJEXTS = (1UL << 0), /* page has been accounted as a non-slab kernel page */ MEMCG_DATA_KMEM = (1UL << 1), /* the next bit after the last actual flag */ @@ -388,7 +388,7 @@ static inline struct mem_cgroup *__folio_memcg(struct folio *folio) unsigned long memcg_data = folio->memcg_data; VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); - VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio); + VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio); VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio); return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); @@ -409,7 +409,7 @@ static inline struct obj_cgroup *__folio_objcg(struct folio *folio) unsigned long memcg_data = folio->memcg_data; VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); - VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJCGS, folio); + VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio); VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio); return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); @@ -506,7 +506,7 @@ static inline struct mem_cgroup *folio_memcg_check(struct folio *folio) */ unsigned long memcg_data = READ_ONCE(folio->memcg_data); - if (memcg_data & MEMCG_DATA_OBJCGS) + if (memcg_data & MEMCG_DATA_OBJEXTS) return NULL; if (memcg_data & MEMCG_DATA_KMEM) { @@ -552,7 +552,7 @@ retry: static inline bool folio_memcg_kmem(struct folio *folio) { VM_BUG_ON_PGFLAGS(PageTail(&folio->page), &folio->page); - VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJCGS, folio); + VM_BUG_ON_FOLIO(folio->memcg_data & MEMCG_DATA_OBJEXTS, folio); return folio->memcg_data & MEMCG_DATA_KMEM; } @@ -1633,6 +1633,14 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, } #endif /* CONFIG_MEMCG */ +/* + * Extended information for slab objects stored as an array in page->memcg_data + * if MEMCG_DATA_OBJEXTS is set. 
+ */ +struct slabobj_ext { + struct obj_cgroup *objcg; +} __aligned(8); + static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx) { __mod_lruvec_kmem_state(p, idx, 1); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5240bd7bca33..4ae4684d1add 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -169,7 +169,7 @@ struct page { /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */ atomic_t _refcount; -#ifdef CONFIG_MEMCG +#ifdef CONFIG_SLAB_OBJ_EXT unsigned long memcg_data; #endif @@ -331,7 +331,7 @@ struct folio { }; atomic_t _mapcount; atomic_t _refcount; -#ifdef CONFIG_MEMCG +#ifdef CONFIG_SLAB_OBJ_EXT unsigned long memcg_data; #endif #if defined(WANT_PAGE_VIRTUAL) diff --git a/init/Kconfig b/init/Kconfig index aa02aec6aa7d..10d4a638d9ae 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -929,6 +929,9 @@ config NUMA_BALANCING_DEFAULT_ENABLED If set, automatic NUMA balancing will be enabled if running on a NUMA machine. +config SLAB_OBJ_EXT + bool + menuconfig CGROUPS bool "Control Group support" select KERNFS @@ -962,6 +965,7 @@ config MEMCG bool "Memory controller" select PAGE_COUNTER select EVENTFD + select SLAB_OBJ_EXT help Provides control over the memory footprint of tasks in a cgroup. diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 8350f5c06f2e..964b8482275b 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -595,9 +595,9 @@ static unsigned long kfence_init_pool(void) continue; __folio_set_slab(slab_folio(slab)); -#ifdef CONFIG_MEMCG - slab->memcg_data = (unsigned long)&kfence_metadata_init[i / 2 - 1].objcg | - MEMCG_DATA_OBJCGS; +#ifdef CONFIG_MEMCG_KMEM + slab->obj_exts = (unsigned long)&kfence_metadata_init[i / 2 - 1].obj_exts | + MEMCG_DATA_OBJEXTS; #endif } @@ -645,8 +645,8 @@ reset_slab: if (!i || (i % 2)) continue; -#ifdef CONFIG_MEMCG - slab->memcg_data = 0; +#ifdef CONFIG_MEMCG_KMEM + slab->obj_exts = 0; #endif __folio_clear_slab(slab_folio(slab)); } @@ -1139,8 +1139,8 @@ void __kfence_free(void *addr) { struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); -#ifdef CONFIG_MEMCG - KFENCE_WARN_ON(meta->objcg); +#ifdef CONFIG_MEMCG_KMEM + KFENCE_WARN_ON(meta->obj_exts.objcg); #endif /* * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h index f46fbb03062b..084f5f36e8e7 100644 --- a/mm/kfence/kfence.h +++ b/mm/kfence/kfence.h @@ -97,8 +97,8 @@ struct kfence_metadata { struct kfence_track free_track; /* For updating alloc_covered on frees. */ u32 alloc_stack_hash; -#ifdef CONFIG_MEMCG - struct obj_cgroup *objcg; +#ifdef CONFIG_MEMCG_KMEM + struct slabobj_ext obj_exts; #endif }; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 786c0fae6e30..896b4bf05b9c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2977,13 +2977,6 @@ void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg) } #ifdef CONFIG_MEMCG_KMEM -/* - * The allocated objcg pointers array is not accounted directly. - * Moreover, it should not come from DMA buffer and is not readily - * reclaimable. So those GFP bits should be masked off. 
- */ -#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \ - __GFP_ACCOUNT | __GFP_NOFAIL) /* * mod_objcg_mlstate() may be called with irq enabled, so @@ -3003,62 +2996,27 @@ static inline void mod_objcg_mlstate(struct obj_cgroup *objcg, rcu_read_unlock(); } -int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s, - gfp_t gfp, bool new_slab) -{ - unsigned int objects = objs_per_slab(s, slab); - unsigned long memcg_data; - void *vec; - - gfp &= ~OBJCGS_CLEAR_MASK; - vec = kcalloc_node(objects, sizeof(struct obj_cgroup *), gfp, - slab_nid(slab)); - if (!vec) - return -ENOMEM; - - memcg_data = (unsigned long) vec | MEMCG_DATA_OBJCGS; - if (new_slab) { - /* - * If the slab is brand new and nobody can yet access its - * memcg_data, no synchronization is required and memcg_data can - * be simply assigned. - */ - slab->memcg_data = memcg_data; - } else if (cmpxchg(&slab->memcg_data, 0, memcg_data)) { - /* - * If the slab is already in use, somebody can allocate and - * assign obj_cgroups in parallel. In this case the existing - * objcg vector should be reused. - */ - kfree(vec); - return 0; - } - - kmemleak_not_leak(vec); - return 0; -} - static __always_inline struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p) { /* * Slab objects are accounted individually, not per-page. * Memcg membership data for each individual object is saved in - * slab->memcg_data. + * slab->obj_exts. */ if (folio_test_slab(folio)) { - struct obj_cgroup **objcgs; + struct slabobj_ext *obj_exts; struct slab *slab; unsigned int off; slab = folio_slab(folio); - objcgs = slab_objcgs(slab); - if (!objcgs) + obj_exts = slab_obj_exts(slab); + if (!obj_exts) return NULL; off = obj_to_index(slab->slab_cache, slab, p); - if (objcgs[off]) - return obj_cgroup_memcg(objcgs[off]); + if (obj_exts[off].objcg) + return obj_cgroup_memcg(obj_exts[off].objcg); return NULL; } @@ -3066,7 +3024,7 @@ struct mem_cgroup *mem_cgroup_from_obj_folio(struct folio *folio, void *p) /* * folio_memcg_check() is used here, because in theory we can encounter * a folio where the slab flag has been cleared already, but - * slab->memcg_data has not been freed yet + * slab->obj_exts has not been freed yet * folio_memcg_check() will guarantee that a proper memory * cgroup pointer or NULL will be returned. 
*/ diff --git a/mm/page_owner.c b/mm/page_owner.c index 742f432e5bf0..75c23302868a 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -515,7 +515,7 @@ static inline int print_page_owner_memcg(char *kbuf, size_t count, int ret, if (!memcg_data) goto out_unlock; - if (memcg_data & MEMCG_DATA_OBJCGS) + if (memcg_data & MEMCG_DATA_OBJEXTS) ret += scnprintf(kbuf + ret, count - ret, "Slab cache page\n"); diff --git a/mm/slab.h b/mm/slab.h index d2bc9b191222..1c16dc8344fa 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -87,8 +87,8 @@ struct slab { unsigned int __unused; atomic_t __page_refcount; -#ifdef CONFIG_MEMCG - unsigned long memcg_data; +#ifdef CONFIG_SLAB_OBJ_EXT + unsigned long obj_exts; #endif }; @@ -97,8 +97,8 @@ struct slab { SLAB_MATCH(flags, __page_flags); SLAB_MATCH(compound_head, slab_cache); /* Ensure bit 0 is clear */ SLAB_MATCH(_refcount, __page_refcount); -#ifdef CONFIG_MEMCG -SLAB_MATCH(memcg_data, memcg_data); +#ifdef CONFIG_SLAB_OBJ_EXT +SLAB_MATCH(memcg_data, obj_exts); #endif #undef SLAB_MATCH static_assert(sizeof(struct slab) <= sizeof(struct page)); @@ -536,42 +536,44 @@ static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t fla return false; } -#ifdef CONFIG_MEMCG_KMEM +#ifdef CONFIG_SLAB_OBJ_EXT + /* - * slab_objcgs - get the object cgroups vector associated with a slab + * slab_obj_exts - get the pointer to the slab object extension vector + * associated with a slab. * @slab: a pointer to the slab struct * - * Returns a pointer to the object cgroups vector associated with the slab, + * Returns a pointer to the object extension vector associated with the slab, * or NULL if no such vector has been associated yet. */ -static inline struct obj_cgroup **slab_objcgs(struct slab *slab) +static inline struct slabobj_ext *slab_obj_exts(struct slab *slab) { - unsigned long memcg_data = READ_ONCE(slab->memcg_data); + unsigned long obj_exts = READ_ONCE(slab->obj_exts); - VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS), +#ifdef CONFIG_MEMCG + VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS), slab_page(slab)); - VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab)); + VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab)); - return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + return (struct slabobj_ext *)(obj_exts & ~MEMCG_DATA_FLAGS_MASK); +#else + return (struct slabobj_ext *)obj_exts; +#endif } -int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s, - gfp_t gfp, bool new_slab); -void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, - enum node_stat_item idx, int nr); -#else /* CONFIG_MEMCG_KMEM */ -static inline struct obj_cgroup **slab_objcgs(struct slab *slab) +#else /* CONFIG_SLAB_OBJ_EXT */ + +static inline struct slabobj_ext *slab_obj_exts(struct slab *slab) { return NULL; } -static inline int memcg_alloc_slab_cgroups(struct slab *slab, - struct kmem_cache *s, gfp_t gfp, - bool new_slab) -{ - return 0; -} -#endif /* CONFIG_MEMCG_KMEM */ +#endif /* CONFIG_SLAB_OBJ_EXT */ + +#ifdef CONFIG_MEMCG_KMEM +void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, + enum node_stat_item idx, int nr); +#endif size_t __ksize(const void *objp); diff --git a/mm/slub.c b/mm/slub.c index bc9f40889834..5c896c76812d 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1871,13 +1871,78 @@ static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s) NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B; } -#ifdef CONFIG_MEMCG_KMEM -static inline void 
memcg_free_slab_cgroups(struct slab *slab) +#ifdef CONFIG_SLAB_OBJ_EXT + +/* + * The allocated objcg pointers array is not accounted directly. + * Moreover, it should not come from DMA buffer and is not readily + * reclaimable. So those GFP bits should be masked off. + */ +#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \ + __GFP_ACCOUNT | __GFP_NOFAIL) + +static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, + gfp_t gfp, bool new_slab) { - kfree(slab_objcgs(slab)); - slab->memcg_data = 0; + unsigned int objects = objs_per_slab(s, slab); + unsigned long obj_exts; + void *vec; + + gfp &= ~OBJCGS_CLEAR_MASK; + vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp, + slab_nid(slab)); + if (!vec) + return -ENOMEM; + + obj_exts = (unsigned long)vec; +#ifdef CONFIG_MEMCG + obj_exts |= MEMCG_DATA_OBJEXTS; +#endif + if (new_slab) { + /* + * If the slab is brand new and nobody can yet access its + * obj_exts, no synchronization is required and obj_exts can + * be simply assigned. + */ + slab->obj_exts = obj_exts; + } else if (cmpxchg(&slab->obj_exts, 0, obj_exts)) { + /* + * If the slab is already in use, somebody can allocate and + * assign slabobj_exts in parallel. In this case the existing + * objcg vector should be reused. + */ + kfree(vec); + return 0; + } + + kmemleak_not_leak(vec); + return 0; } +static inline void free_slab_obj_exts(struct slab *slab) +{ + struct slabobj_ext *obj_exts; + + obj_exts = slab_obj_exts(slab); + if (!obj_exts) + return; + + kfree(obj_exts); + slab->obj_exts = 0; +} +#else /* CONFIG_SLAB_OBJ_EXT */ +static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, + gfp_t gfp, bool new_slab) +{ + return 0; +} + +static inline void free_slab_obj_exts(struct slab *slab) +{ +} +#endif /* CONFIG_SLAB_OBJ_EXT */ + +#ifdef CONFIG_MEMCG_KMEM static inline size_t obj_full_size(struct kmem_cache *s) { /* @@ -1956,15 +2021,15 @@ static void __memcg_slab_post_alloc_hook(struct kmem_cache *s, if (likely(p[i])) { slab = virt_to_slab(p[i]); - if (!slab_objcgs(slab) && - memcg_alloc_slab_cgroups(slab, s, flags, false)) { + if (!slab_obj_exts(slab) && + alloc_slab_obj_exts(slab, s, flags, false)) { obj_cgroup_uncharge(objcg, obj_full_size(s)); continue; } off = obj_to_index(s, slab, p[i]); obj_cgroup_get(objcg); - slab_objcgs(slab)[off] = objcg; + slab_obj_exts(slab)[off].objcg = objcg; mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s), obj_full_size(s)); } else { @@ -1985,18 +2050,18 @@ void memcg_slab_post_alloc_hook(struct kmem_cache *s, struct obj_cgroup *objcg, static void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, int objects, - struct obj_cgroup **objcgs) + struct slabobj_ext *obj_exts) { for (int i = 0; i < objects; i++) { struct obj_cgroup *objcg; unsigned int off; off = obj_to_index(s, slab, p[i]); - objcg = objcgs[off]; + objcg = obj_exts[off].objcg; if (!objcg) continue; - objcgs[off] = NULL; + obj_exts[off].objcg = NULL; obj_cgroup_uncharge(objcg, obj_full_size(s)); mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s), -obj_full_size(s)); @@ -2008,16 +2073,16 @@ static __fastpath_inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p, int objects) { - struct obj_cgroup **objcgs; + struct slabobj_ext *obj_exts; if (!memcg_kmem_online()) return; - objcgs = slab_objcgs(slab); - if (likely(!objcgs)) + obj_exts = slab_obj_exts(slab); + if (likely(!obj_exts)) return; - __memcg_slab_free_hook(s, slab, p, objects, objcgs); + __memcg_slab_free_hook(s, slab, 
p, objects, obj_exts); } static inline @@ -2028,10 +2093,6 @@ void memcg_slab_alloc_error_hook(struct kmem_cache *s, int objects, obj_cgroup_uncharge(objcg, objects * obj_full_size(s)); } #else /* CONFIG_MEMCG_KMEM */ -static inline void memcg_free_slab_cgroups(struct slab *slab) -{ -} - static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s, struct list_lru *lru, struct obj_cgroup **objcgp, @@ -2298,7 +2359,7 @@ static __always_inline void account_slab(struct slab *slab, int order, struct kmem_cache *s, gfp_t gfp) { if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT)) - memcg_alloc_slab_cgroups(slab, s, gfp, true); + alloc_slab_obj_exts(slab, s, gfp, true); mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), PAGE_SIZE << order); @@ -2308,7 +2369,7 @@ static __always_inline void unaccount_slab(struct slab *slab, int order, struct kmem_cache *s) { if (memcg_kmem_online()) - memcg_free_slab_cgroups(slab); + free_slab_obj_exts(slab); mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s), -(PAGE_SIZE << order)); -- cgit From 768c33be1b3166791bd554abd105154719eba43d Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:29 -0700 Subject: mm: introduce __GFP_NO_OBJ_EXT flag to selectively prevent slabobj_ext creation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce __GFP_NO_OBJ_EXT flag in order to prevent recursive allocations when allocating slabobj_ext on a slab. Link: https://lkml.kernel.org/r/20240321163705.3067592-8-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Kees Cook Reviewed-by: Pasha Tatashin Reviewed-by: Vlastimil Babka Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Kent Overstreet Cc: Miguel Ojeda Cc: Peter Zijlstra Cc: Tejun Heo Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/gfp_types.h | 11 +++++++++++ mm/slub.c | 2 ++ 2 files changed, 13 insertions(+) (limited to 'include/linux') diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h index 13becafe41df..313be4ad79fd 100644 --- a/include/linux/gfp_types.h +++ b/include/linux/gfp_types.h @@ -54,6 +54,9 @@ enum { #endif #ifdef CONFIG_LOCKDEP ___GFP_NOLOCKDEP_BIT, +#endif +#ifdef CONFIG_SLAB_OBJ_EXT + ___GFP_NO_OBJ_EXT_BIT, #endif ___GFP_LAST_BIT }; @@ -95,6 +98,11 @@ enum { #else #define ___GFP_NOLOCKDEP 0 #endif +#ifdef CONFIG_SLAB_OBJ_EXT +#define ___GFP_NO_OBJ_EXT BIT(___GFP_NO_OBJ_EXT_BIT) +#else +#define ___GFP_NO_OBJ_EXT 0 +#endif /* * Physical address zone modifiers (see linux/mmzone.h - low four bits) @@ -135,12 +143,15 @@ enum { * node with no fallbacks or placement policy enforcements. * * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg. + * + * %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension. 
*/ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) #define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) +#define __GFP_NO_OBJ_EXT ((__force gfp_t)___GFP_NO_OBJ_EXT) /** * DOC: Watermark modifiers diff --git a/mm/slub.c b/mm/slub.c index 5c896c76812d..2cb53642a091 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1889,6 +1889,8 @@ static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, void *vec; gfp &= ~OBJCGS_CLEAR_MASK; + /* Prevent recursive extension vector allocation */ + gfp |= __GFP_NO_OBJ_EXT; vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp, slab_nid(slab)); if (!vec) -- cgit From 45012241ec5d5870d986e66c8ce20a0f6212e6f8 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:30 -0700 Subject: mm/slab: introduce SLAB_NO_OBJ_EXT to avoid obj_ext creation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Slab extension objects can't be allocated before slab infrastructure is initialized. Some caches, like kmem_cache and kmem_cache_node, are created before slab infrastructure is initialized. Objects from these caches can't have extension objects. Introduce SLAB_NO_OBJ_EXT slab flag to mark these caches and avoid creating extensions for objects allocated from these slabs. Link: https://lkml.kernel.org/r/20240321163705.3067592-9-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Kees Cook Reviewed-by: Pasha Tatashin Reviewed-by: Vlastimil Babka Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Kent Overstreet Cc: Miguel Ojeda Cc: Peter Zijlstra Cc: Tejun Heo Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/slab.h | 10 ++++++++++ mm/slub.c | 5 +++-- 2 files changed, 13 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slab.h b/include/linux/slab.h index e53cbfa18325..68ff754b85a4 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -56,6 +56,9 @@ enum _slab_flag_bits { #endif _SLAB_OBJECT_POISON, _SLAB_CMPXCHG_DOUBLE, +#ifdef CONFIG_SLAB_OBJ_EXT + _SLAB_NO_OBJ_EXT, +#endif _SLAB_FLAGS_LAST_BIT }; @@ -202,6 +205,13 @@ enum _slab_flag_bits { #endif #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ +/* Slab created using create_boot_cache */ +#ifdef CONFIG_SLAB_OBJ_EXT +#define SLAB_NO_OBJ_EXT __SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT) +#else +#define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED +#endif + /* * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. 
* diff --git a/mm/slub.c b/mm/slub.c index 2cb53642a091..666dcc3b8a26 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5693,7 +5693,8 @@ void __init kmem_cache_init(void) node_set(node, slab_nodes); create_boot_cache(kmem_cache_node, "kmem_cache_node", - sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0); + sizeof(struct kmem_cache_node), + SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); @@ -5703,7 +5704,7 @@ void __init kmem_cache_init(void) create_boot_cache(kmem_cache, "kmem_cache", offsetof(struct kmem_cache, node) + nr_node_ids * sizeof(struct kmem_cache_node *), - SLAB_HWCACHE_ALIGN, 0, 0); + SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0); kmem_cache = bootstrap(&boot_kmem_cache); kmem_cache_node = bootstrap(&boot_kmem_cache_node); -- cgit From 53ce720359b8f4b531c5b8bd7fa06d74bca45f0a Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:31 -0700 Subject: slab: objext: introduce objext_flags as extension to page_memcg_data_flags MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce objext_flags to store additional objext flags unrelated to memcg. Link: https://lkml.kernel.org/r/20240321163705.3067592-10-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Kees Cook Reviewed-by: Pasha Tatashin Reviewed-by: Vlastimil Babka Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Kent Overstreet Cc: Miguel Ojeda Cc: Peter Zijlstra Cc: Tejun Heo Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 29 ++++++++++++++++++++++------- mm/slab.h | 5 +---- 2 files changed, 23 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 99f423742324..12afc2647cf0 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -357,7 +357,22 @@ enum page_memcg_data_flags { __NR_MEMCG_DATA_FLAGS = (1UL << 2), }; -#define MEMCG_DATA_FLAGS_MASK (__NR_MEMCG_DATA_FLAGS - 1) +#define __FIRST_OBJEXT_FLAG __NR_MEMCG_DATA_FLAGS + +#else /* CONFIG_MEMCG */ + +#define __FIRST_OBJEXT_FLAG (1UL << 0) + +#endif /* CONFIG_MEMCG */ + +enum objext_flags { + /* the next bit after the last actual flag */ + __NR_OBJEXTS_FLAGS = __FIRST_OBJEXT_FLAG, +}; + +#define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1) + +#ifdef CONFIG_MEMCG static inline bool folio_memcg_kmem(struct folio *folio); @@ -391,7 +406,7 @@ static inline struct mem_cgroup *__folio_memcg(struct folio *folio) VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio); VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio); - return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); } /* @@ -412,7 +427,7 @@ static inline struct obj_cgroup *__folio_objcg(struct folio *folio) VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio); VM_BUG_ON_FOLIO(!(memcg_data & MEMCG_DATA_KMEM), folio); - return (struct obj_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + return (struct obj_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); } /* @@ -469,11 +484,11 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio) if (memcg_data & MEMCG_DATA_KMEM) { struct obj_cgroup *objcg; - objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK); 
return obj_cgroup_memcg(objcg); } - return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); } /* @@ -512,11 +527,11 @@ static inline struct mem_cgroup *folio_memcg_check(struct folio *folio) if (memcg_data & MEMCG_DATA_KMEM) { struct obj_cgroup *objcg; - objcg = (void *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK); return obj_cgroup_memcg(objcg); } - return (struct mem_cgroup *)(memcg_data & ~MEMCG_DATA_FLAGS_MASK); + return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); } static inline struct mem_cgroup *page_memcg_check(struct page *page) diff --git a/mm/slab.h b/mm/slab.h index 1c16dc8344fa..65db525e93af 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -554,11 +554,8 @@ static inline struct slabobj_ext *slab_obj_exts(struct slab *slab) VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS), slab_page(slab)); VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab)); - - return (struct slabobj_ext *)(obj_exts & ~MEMCG_DATA_FLAGS_MASK); -#else - return (struct slabobj_ext *)obj_exts; #endif + return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK); } #else /* CONFIG_SLAB_OBJ_EXT */ -- cgit From 916cc5167cc6f81651f4460d9a35c939ad18ecff Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:32 -0700 Subject: lib: code tagging framework MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add basic infrastructure to support code tagging which stores tag common information consisting of the module name, function, file name and line number. Provide functions to register a new code tag type and navigate between code tags. Link: https://lkml.kernel.org/r/20240321163705.3067592-11-surenb@google.com Co-developed-by: Kent Overstreet Signed-off-by: Kent Overstreet Signed-off-by: Suren Baghdasaryan Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/codetag.h | 68 +++++++++++++++ lib/Kconfig.debug | 4 + lib/Makefile | 1 + lib/codetag.c | 219 ++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 292 insertions(+) create mode 100644 include/linux/codetag.h create mode 100644 lib/codetag.c (limited to 'include/linux') diff --git a/include/linux/codetag.h b/include/linux/codetag.h new file mode 100644 index 000000000000..7734269cdb63 --- /dev/null +++ b/include/linux/codetag.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * code tagging framework + */ +#ifndef _LINUX_CODETAG_H +#define _LINUX_CODETAG_H + +#include + +struct codetag_iterator; +struct codetag_type; +struct codetag_module; +struct seq_buf; +struct module; + +/* + * An instance of this structure is created in a special ELF section at every + * code location being tagged. At runtime, the special section is treated as + * an array of these. 
+ */ +struct codetag { + unsigned int flags; /* used in later patches */ + unsigned int lineno; + const char *modname; + const char *function; + const char *filename; +} __aligned(8); + +union codetag_ref { + struct codetag *ct; +}; + +struct codetag_type_desc { + const char *section; + size_t tag_size; +}; + +struct codetag_iterator { + struct codetag_type *cttype; + struct codetag_module *cmod; + unsigned long mod_id; + struct codetag *ct; +}; + +#ifdef MODULE +#define CT_MODULE_NAME KBUILD_MODNAME +#else +#define CT_MODULE_NAME NULL +#endif + +#define CODE_TAG_INIT { \ + .modname = CT_MODULE_NAME, \ + .function = __func__, \ + .filename = __FILE__, \ + .lineno = __LINE__, \ + .flags = 0, \ +} + +void codetag_lock_module_list(struct codetag_type *cttype, bool lock); +struct codetag_iterator codetag_get_ct_iter(struct codetag_type *cttype); +struct codetag *codetag_next_ct(struct codetag_iterator *iter); + +void codetag_to_text(struct seq_buf *out, struct codetag *ct); + +struct codetag_type * +codetag_register_type(const struct codetag_type_desc *desc); + +#endif /* _LINUX_CODETAG_H */ diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index c63a5fbf1f1c..015fc6ee9849 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -968,6 +968,10 @@ config DEBUG_STACKOVERFLOW If in doubt, say "N". +config CODE_TAGGING + bool + select KALLSYMS + source "lib/Kconfig.kasan" source "lib/Kconfig.kfence" source "lib/Kconfig.kmsan" diff --git a/lib/Makefile b/lib/Makefile index ffc6b2341b45..910335da8f13 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -233,6 +233,7 @@ obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \ of-reconfig-notifier-error-inject.o obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o +obj-$(CONFIG_CODE_TAGGING) += codetag.o lib-$(CONFIG_GENERIC_BUG) += bug.o obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o diff --git a/lib/codetag.c b/lib/codetag.c new file mode 100644 index 000000000000..8b5b89ad508d --- /dev/null +++ b/lib/codetag.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include + +struct codetag_type { + struct list_head link; + unsigned int count; + struct idr mod_idr; + struct rw_semaphore mod_lock; /* protects mod_idr */ + struct codetag_type_desc desc; +}; + +struct codetag_range { + struct codetag *start; + struct codetag *stop; +}; + +struct codetag_module { + struct module *mod; + struct codetag_range range; +}; + +static DEFINE_MUTEX(codetag_lock); +static LIST_HEAD(codetag_types); + +void codetag_lock_module_list(struct codetag_type *cttype, bool lock) +{ + if (lock) + down_read(&cttype->mod_lock); + else + up_read(&cttype->mod_lock); +} + +struct codetag_iterator codetag_get_ct_iter(struct codetag_type *cttype) +{ + struct codetag_iterator iter = { + .cttype = cttype, + .cmod = NULL, + .mod_id = 0, + .ct = NULL, + }; + + return iter; +} + +static inline struct codetag *get_first_module_ct(struct codetag_module *cmod) +{ + return cmod->range.start < cmod->range.stop ? cmod->range.start : NULL; +} + +static inline +struct codetag *get_next_module_ct(struct codetag_iterator *iter) +{ + struct codetag *res = (struct codetag *) + ((char *)iter->ct + iter->cttype->desc.tag_size); + + return res < iter->cmod->range.stop ? 
res : NULL; +} + +struct codetag *codetag_next_ct(struct codetag_iterator *iter) +{ + struct codetag_type *cttype = iter->cttype; + struct codetag_module *cmod; + struct codetag *ct; + + lockdep_assert_held(&cttype->mod_lock); + + if (unlikely(idr_is_empty(&cttype->mod_idr))) + return NULL; + + ct = NULL; + while (true) { + cmod = idr_find(&cttype->mod_idr, iter->mod_id); + + /* If module was removed move to the next one */ + if (!cmod) + cmod = idr_get_next_ul(&cttype->mod_idr, + &iter->mod_id); + + /* Exit if no more modules */ + if (!cmod) + break; + + if (cmod != iter->cmod) { + iter->cmod = cmod; + ct = get_first_module_ct(cmod); + } else + ct = get_next_module_ct(iter); + + if (ct) + break; + + iter->mod_id++; + } + + iter->ct = ct; + return ct; +} + +void codetag_to_text(struct seq_buf *out, struct codetag *ct) +{ + if (ct->modname) + seq_buf_printf(out, "%s:%u [%s] func:%s", + ct->filename, ct->lineno, + ct->modname, ct->function); + else + seq_buf_printf(out, "%s:%u func:%s", + ct->filename, ct->lineno, ct->function); +} + +static inline size_t range_size(const struct codetag_type *cttype, + const struct codetag_range *range) +{ + return ((char *)range->stop - (char *)range->start) / + cttype->desc.tag_size; +} + +#ifdef CONFIG_MODULES +static void *get_symbol(struct module *mod, const char *prefix, const char *name) +{ + DECLARE_SEQ_BUF(sb, KSYM_NAME_LEN); + const char *buf; + + seq_buf_printf(&sb, "%s%s", prefix, name); + if (seq_buf_has_overflowed(&sb)) + return NULL; + + buf = seq_buf_str(&sb); + return mod ? + (void *)find_kallsyms_symbol_value(mod, buf) : + (void *)kallsyms_lookup_name(buf); +} + +static struct codetag_range get_section_range(struct module *mod, + const char *section) +{ + return (struct codetag_range) { + get_symbol(mod, "__start_", section), + get_symbol(mod, "__stop_", section), + }; +} + +static int codetag_module_init(struct codetag_type *cttype, struct module *mod) +{ + struct codetag_range range; + struct codetag_module *cmod; + int err; + + range = get_section_range(mod, cttype->desc.section); + if (!range.start || !range.stop) { + pr_warn("Failed to load code tags of type %s from the module %s\n", + cttype->desc.section, + mod ? 
mod->name : "(built-in)"); + return -EINVAL; + } + + /* Ignore empty ranges */ + if (range.start == range.stop) + return 0; + + BUG_ON(range.start > range.stop); + + cmod = kmalloc(sizeof(*cmod), GFP_KERNEL); + if (unlikely(!cmod)) + return -ENOMEM; + + cmod->mod = mod; + cmod->range = range; + + down_write(&cttype->mod_lock); + err = idr_alloc(&cttype->mod_idr, cmod, 0, 0, GFP_KERNEL); + if (err >= 0) + cttype->count += range_size(cttype, &range); + up_write(&cttype->mod_lock); + + if (err < 0) { + kfree(cmod); + return err; + } + + return 0; +} + +#else /* CONFIG_MODULES */ +static int codetag_module_init(struct codetag_type *cttype, struct module *mod) { return 0; } +#endif /* CONFIG_MODULES */ + +struct codetag_type * +codetag_register_type(const struct codetag_type_desc *desc) +{ + struct codetag_type *cttype; + int err; + + BUG_ON(desc->tag_size <= 0); + + cttype = kzalloc(sizeof(*cttype), GFP_KERNEL); + if (unlikely(!cttype)) + return ERR_PTR(-ENOMEM); + + cttype->desc = *desc; + idr_init(&cttype->mod_idr); + init_rwsem(&cttype->mod_lock); + + err = codetag_module_init(cttype, NULL); + if (unlikely(err)) { + kfree(cttype); + return ERR_PTR(err); + } + + mutex_lock(&codetag_lock); + list_add_tail(&cttype->link, &codetag_types); + mutex_unlock(&codetag_lock); + + return cttype; +} -- cgit From a473573964e51dcb6efc182f773cd3924be4a184 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:33 -0700 Subject: lib: code tagging module support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for code tagging from dynamically loaded modules. Link: https://lkml.kernel.org/r/20240321163705.3067592-12-surenb@google.com Signed-off-by: Suren Baghdasaryan Co-developed-by: Kent Overstreet Signed-off-by: Kent Overstreet Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/codetag.h | 12 ++++++++++ kernel/module/main.c | 4 ++++ lib/codetag.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 72 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/codetag.h b/include/linux/codetag.h index 7734269cdb63..c44f5b83f24d 100644 --- a/include/linux/codetag.h +++ b/include/linux/codetag.h @@ -33,6 +33,10 @@ union codetag_ref { struct codetag_type_desc { const char *section; size_t tag_size; + void (*module_load)(struct codetag_type *cttype, + struct codetag_module *cmod); + void (*module_unload)(struct codetag_type *cttype, + struct codetag_module *cmod); }; struct codetag_iterator { @@ -65,4 +69,12 @@ void codetag_to_text(struct seq_buf *out, struct codetag *ct); struct codetag_type * codetag_register_type(const struct codetag_type_desc *desc); +#if defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) +void codetag_load_module(struct module *mod); +void codetag_unload_module(struct module *mod); +#else +static inline void codetag_load_module(struct module *mod) {} +static inline void codetag_unload_module(struct module *mod) {} +#endif + #endif /* _LINUX_CODETAG_H */ diff --git a/kernel/module/main.c b/kernel/module/main.c index e1e8a7a9d6c1..ffa6b3e9cb43 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -56,6 +56,7 @@ #include #include #include +#include #include 
#include #include "internal.h" @@ -1242,6 +1243,7 @@ static void free_module(struct module *mod) { trace_module_free(mod); + codetag_unload_module(mod); mod_sysfs_teardown(mod); /* @@ -2995,6 +2997,8 @@ static int load_module(struct load_info *info, const char __user *uargs, /* Get rid of temporary copy. */ free_copy(info, flags); + codetag_load_module(mod); + /* Done! */ trace_module_load(mod); diff --git a/lib/codetag.c b/lib/codetag.c index 8b5b89ad508d..54d2828eba25 100644 --- a/lib/codetag.c +++ b/lib/codetag.c @@ -124,15 +124,20 @@ static void *get_symbol(struct module *mod, const char *prefix, const char *name { DECLARE_SEQ_BUF(sb, KSYM_NAME_LEN); const char *buf; + void *ret; seq_buf_printf(&sb, "%s%s", prefix, name); if (seq_buf_has_overflowed(&sb)) return NULL; buf = seq_buf_str(&sb); - return mod ? + preempt_disable(); + ret = mod ? (void *)find_kallsyms_symbol_value(mod, buf) : (void *)kallsyms_lookup_name(buf); + preempt_enable(); + + return ret; } static struct codetag_range get_section_range(struct module *mod, @@ -173,8 +178,11 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod) down_write(&cttype->mod_lock); err = idr_alloc(&cttype->mod_idr, cmod, 0, 0, GFP_KERNEL); - if (err >= 0) + if (err >= 0) { cttype->count += range_size(cttype, &range); + if (cttype->desc.module_load) + cttype->desc.module_load(cttype, cmod); + } up_write(&cttype->mod_lock); if (err < 0) { @@ -185,6 +193,52 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod) return 0; } +void codetag_load_module(struct module *mod) +{ + struct codetag_type *cttype; + + if (!mod) + return; + + mutex_lock(&codetag_lock); + list_for_each_entry(cttype, &codetag_types, link) + codetag_module_init(cttype, mod); + mutex_unlock(&codetag_lock); +} + +void codetag_unload_module(struct module *mod) +{ + struct codetag_type *cttype; + + if (!mod) + return; + + mutex_lock(&codetag_lock); + list_for_each_entry(cttype, &codetag_types, link) { + struct codetag_module *found = NULL; + struct codetag_module *cmod; + unsigned long mod_id, tmp; + + down_write(&cttype->mod_lock); + idr_for_each_entry_ul(&cttype->mod_idr, cmod, tmp, mod_id) { + if (cmod->mod && cmod->mod == mod) { + found = cmod; + break; + } + } + if (found) { + if (cttype->desc.module_unload) + cttype->desc.module_unload(cttype, cmod); + + cttype->count -= range_size(cttype, &cmod->range); + idr_remove(&cttype->mod_idr, mod_id); + kfree(cmod); + } + up_write(&cttype->mod_lock); + } + mutex_unlock(&codetag_lock); +} + #else /* CONFIG_MODULES */ static int codetag_module_init(struct codetag_type *cttype, struct module *mod) { return 0; } #endif /* CONFIG_MODULES */ -- cgit From 47a92dfbe01f41bcbf359250ccb3caa589763abf Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:34 -0700 Subject: lib: prevent module unloading if memory is not freed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Skip freeing module's data section if there are non-zero allocation tags because otherwise, once these allocations are freed, the access to their code tag would cause UAF. 
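A sketch of the resulting callback contract (illustrative only; the "my_tags"
type and my_tags_in_use() predicate are made up): a code tag type may now veto
the release of the module's core data sections, where its tags live, by
returning false from ->module_unload():

    #include <linux/codetag.h>

    static bool my_module_unload(struct codetag_type *cttype,
                                 struct codetag_module *cmod)
    {
            /* false => free_mod_mem() keeps the core data sections around */
            return !my_tags_in_use(cttype, cmod);
    }

    static const struct codetag_type_desc my_desc = {
            .section        = "my_tags",
            .tag_size       = sizeof(struct codetag),
            .module_unload  = my_module_unload,
    };
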
Link: https://lkml.kernel.org/r/20240321163705.3067592-13-surenb@google.com Signed-off-by: Suren Baghdasaryan Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Kent Overstreet Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/codetag.h | 6 +++--- kernel/module/main.c | 27 +++++++++++++++++++-------- lib/codetag.c | 11 ++++++++--- 3 files changed, 30 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/codetag.h b/include/linux/codetag.h index c44f5b83f24d..bfd0ba5c4185 100644 --- a/include/linux/codetag.h +++ b/include/linux/codetag.h @@ -35,7 +35,7 @@ struct codetag_type_desc { size_t tag_size; void (*module_load)(struct codetag_type *cttype, struct codetag_module *cmod); - void (*module_unload)(struct codetag_type *cttype, + bool (*module_unload)(struct codetag_type *cttype, struct codetag_module *cmod); }; @@ -71,10 +71,10 @@ codetag_register_type(const struct codetag_type_desc *desc); #if defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) void codetag_load_module(struct module *mod); -void codetag_unload_module(struct module *mod); +bool codetag_unload_module(struct module *mod); #else static inline void codetag_load_module(struct module *mod) {} -static inline void codetag_unload_module(struct module *mod) {} +static inline bool codetag_unload_module(struct module *mod) { return true; } #endif #endif /* _LINUX_CODETAG_H */ diff --git a/kernel/module/main.c b/kernel/module/main.c index ffa6b3e9cb43..2d25eebc549d 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -1211,15 +1211,19 @@ static void *module_memory_alloc(unsigned int size, enum mod_mem_type type) return module_alloc(size); } -static void module_memory_free(void *ptr, enum mod_mem_type type) +static void module_memory_free(void *ptr, enum mod_mem_type type, + bool unload_codetags) { + if (!unload_codetags && mod_mem_type_is_core_data(type)) + return; + if (mod_mem_use_vmalloc(type)) vfree(ptr); else module_memfree(ptr); } -static void free_mod_mem(struct module *mod) +static void free_mod_mem(struct module *mod, bool unload_codetags) { for_each_mod_mem_type(type) { struct module_memory *mod_mem = &mod->mem[type]; @@ -1230,20 +1234,27 @@ static void free_mod_mem(struct module *mod) /* Free lock-classes; relies on the preceding sync_rcu(). */ lockdep_free_key_range(mod_mem->base, mod_mem->size); if (mod_mem->size) - module_memory_free(mod_mem->base, type); + module_memory_free(mod_mem->base, type, + unload_codetags); } /* MOD_DATA hosts mod, so free it at last */ lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size); - module_memory_free(mod->mem[MOD_DATA].base, MOD_DATA); + module_memory_free(mod->mem[MOD_DATA].base, MOD_DATA, unload_codetags); } /* Free a module, remove from lists, etc. 
*/ static void free_module(struct module *mod) { + bool unload_codetags; + trace_module_free(mod); - codetag_unload_module(mod); + unload_codetags = codetag_unload_module(mod); + if (!unload_codetags) + pr_warn("%s: memory allocation(s) from the module still alive, cannot unload cleanly\n", + mod->name); + mod_sysfs_teardown(mod); /* @@ -1285,7 +1296,7 @@ static void free_module(struct module *mod) kfree(mod->args); percpu_modfree(mod); - free_mod_mem(mod); + free_mod_mem(mod, unload_codetags); } void *__symbol_get(const char *symbol) @@ -2298,7 +2309,7 @@ static int move_module(struct module *mod, struct load_info *info) return 0; out_enomem: for (t--; t >= 0; t--) - module_memory_free(mod->mem[t].base, t); + module_memory_free(mod->mem[t].base, t, true); return ret; } @@ -2428,7 +2439,7 @@ static void module_deallocate(struct module *mod, struct load_info *info) percpu_modfree(mod); module_arch_freeing_init(mod); - free_mod_mem(mod); + free_mod_mem(mod, true); } int __weak module_finalize(const Elf_Ehdr *hdr, diff --git a/lib/codetag.c b/lib/codetag.c index 54d2828eba25..408062f722ce 100644 --- a/lib/codetag.c +++ b/lib/codetag.c @@ -5,6 +5,7 @@ #include #include #include +#include struct codetag_type { struct list_head link; @@ -206,12 +207,13 @@ void codetag_load_module(struct module *mod) mutex_unlock(&codetag_lock); } -void codetag_unload_module(struct module *mod) +bool codetag_unload_module(struct module *mod) { struct codetag_type *cttype; + bool unload_ok = true; if (!mod) - return; + return true; mutex_lock(&codetag_lock); list_for_each_entry(cttype, &codetag_types, link) { @@ -228,7 +230,8 @@ void codetag_unload_module(struct module *mod) } if (found) { if (cttype->desc.module_unload) - cttype->desc.module_unload(cttype, cmod); + if (!cttype->desc.module_unload(cttype, cmod)) + unload_ok = false; cttype->count -= range_size(cttype, &cmod->range); idr_remove(&cttype->mod_idr, mod_id); @@ -237,6 +240,8 @@ void codetag_unload_module(struct module *mod) up_write(&cttype->mod_lock); } mutex_unlock(&codetag_lock); + + return unload_ok; } #else /* CONFIG_MODULES */ -- cgit From 22d407b164ff79de42d21f37d99f9ee7abdd51c8 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:35 -0700 Subject: lib: add allocation tagging support for memory allocation profiling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce CONFIG_MEM_ALLOC_PROFILING which provides definitions to easily instrument memory allocators. It registers an "alloc_tags" codetag type with /proc/allocinfo interface to output allocation tag information when the feature is enabled. CONFIG_MEM_ALLOC_PROFILING_DEBUG is provided for debugging the memory allocation profiling instrumentation. Memory allocation profiling can be enabled or disabled at runtime using /proc/sys/vm/mem_profiling sysctl when CONFIG_MEM_ALLOC_PROFILING_DEBUG=n. CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT enables memory allocation profiling by default. 
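As a rough sketch of how an allocator consumes the new helpers (the real
kmalloc and page hooks arrive in later patches of this series; my_alloc(),
my_free() and the caller-supplied codetag_ref are made up, and
CONFIG_MEM_ALLOC_PROFILING=y is assumed):

    #include <linux/alloc_tag.h>
    #include <linux/slab.h>

    DEFINE_ALLOC_TAG(_example_tag);     /* lands in the "alloc_tags" section */

    static void *my_alloc(size_t size, union codetag_ref *ref)
    {
            void *p = kmalloc(size, GFP_KERNEL);

            if (p)
                    alloc_tag_add(ref, &_example_tag, size); /* calls++, bytes += size */
            return p;
    }

    static void my_free(void *p, size_t size, union codetag_ref *ref)
    {
            alloc_tag_sub(ref, size);   /* bytes -= size, calls--, ref->ct = NULL */
            kfree(p);
    }
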
[surenb@google.com: Documentation/filesystems/proc.rst: fix allocinfo title] Link: https://lkml.kernel.org/r/20240326073813.727090-1-surenb@google.com [surenb@google.com: do limited memory accounting for modules with ARCH_NEEDS_WEAK_PER_CPU] Link: https://lkml.kernel.org/r/20240402180933.1663992-2-surenb@google.com [klarasmodin@gmail.com: explicitly include irqflags.h in alloc_tag.h] Link: https://lkml.kernel.org/r/20240407133252.173636-1-klarasmodin@gmail.com [surenb@google.com: fix alloc_tag_init() to prevent passing NULL to PTR_ERR()] Link: https://lkml.kernel.org/r/20240417003349.2520094-1-surenb@google.com Link: https://lkml.kernel.org/r/20240321163705.3067592-14-surenb@google.com Signed-off-by: Suren Baghdasaryan Co-developed-by: Kent Overstreet Signed-off-by: Kent Overstreet Signed-off-by: Klara Modin Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- Documentation/admin-guide/sysctl/vm.rst | 16 ++++ Documentation/filesystems/proc.rst | 29 ++++++ include/asm-generic/codetag.lds.h | 14 +++ include/asm-generic/vmlinux.lds.h | 3 + include/linux/alloc_tag.h | 156 ++++++++++++++++++++++++++++++++ include/linux/sched.h | 24 +++++ lib/Kconfig.debug | 25 +++++ lib/Makefile | 2 + lib/alloc_tag.c | 152 +++++++++++++++++++++++++++++++ scripts/module.lds.S | 7 ++ 10 files changed, 428 insertions(+) create mode 100644 include/asm-generic/codetag.lds.h create mode 100644 include/linux/alloc_tag.h create mode 100644 lib/alloc_tag.c (limited to 'include/linux') diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst index c59889de122b..e86c968a7a0e 100644 --- a/Documentation/admin-guide/sysctl/vm.rst +++ b/Documentation/admin-guide/sysctl/vm.rst @@ -43,6 +43,7 @@ Currently, these files are in /proc/sys/vm: - legacy_va_layout - lowmem_reserve_ratio - max_map_count +- mem_profiling (only if CONFIG_MEM_ALLOC_PROFILING=y) - memory_failure_early_kill - memory_failure_recovery - min_free_kbytes @@ -425,6 +426,21 @@ e.g., up to one or two maps per allocation. The default value is 65530. +mem_profiling +============== + +Enable memory profiling (when CONFIG_MEM_ALLOC_PROFILING=y) + +1: Enable memory profiling. + +0: Disable memory profiling. + +Enabling memory profiling introduces a small performance overhead for all +memory allocations. + +The default value depends on CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT. + + memory_failure_early_kill: ========================== diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index c6a6b9df2104..245269dd6e02 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -688,6 +688,7 @@ files are there, and which are missing. ============ =============================================================== File Content ============ =============================================================== + allocinfo Memory allocations profiling information apm Advanced power management info bootconfig Kernel command line obtained from boot config, and, if there were kernel parameters from the @@ -953,6 +954,34 @@ also be allocatable although a lot of filesystem metadata may have to be reclaimed to achieve this. 
+allocinfo +~~~~~~~~~ + +Provides information about memory allocations at all locations in the code +base. Each allocation in the code is identified by its source file, line +number, module (if originates from a loadable module) and the function calling +the allocation. The number of bytes allocated and number of calls at each +location are reported. + +Example output. + +:: + + > sort -rn /proc/allocinfo + 127664128 31168 mm/page_ext.c:270 func:alloc_page_ext + 56373248 4737 mm/slub.c:2259 func:alloc_slab_page + 14880768 3633 mm/readahead.c:247 func:page_cache_ra_unbounded + 14417920 3520 mm/mm_init.c:2530 func:alloc_large_system_hash + 13377536 234 block/blk-mq.c:3421 func:blk_mq_alloc_rqs + 11718656 2861 mm/filemap.c:1919 func:__filemap_get_folio + 9192960 2800 kernel/fork.c:307 func:alloc_thread_stack_node + 4206592 4 net/netfilter/nf_conntrack_core.c:2567 func:nf_ct_alloc_hashtable + 4136960 1010 drivers/staging/ctagmod/ctagmod.c:20 [ctagmod] func:ctagmod_start + 3940352 962 mm/memory.c:4214 func:alloc_anon_folio + 2894464 22613 fs/kernfs/dir.c:615 func:__kernfs_new_node + ... + + meminfo ~~~~~~~ diff --git a/include/asm-generic/codetag.lds.h b/include/asm-generic/codetag.lds.h new file mode 100644 index 000000000000..64f536b80380 --- /dev/null +++ b/include/asm-generic/codetag.lds.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_GENERIC_CODETAG_LDS_H +#define __ASM_GENERIC_CODETAG_LDS_H + +#define SECTION_WITH_BOUNDARIES(_name) \ + . = ALIGN(8); \ + __start_##_name = .; \ + KEEP(*(_name)) \ + __stop_##_name = .; + +#define CODETAG_SECTIONS() \ + SECTION_WITH_BOUNDARIES(alloc_tags) + +#endif /* __ASM_GENERIC_CODETAG_LDS_H */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index f7749d0f2562..3e4497b5135a 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -50,6 +50,8 @@ * [__nosave_begin, __nosave_end] for the nosave data */ +#include + #ifndef LOAD_OFFSET #define LOAD_OFFSET 0 #endif @@ -366,6 +368,7 @@ . = ALIGN(8); \ BOUNDED_SECTION_BY(__dyndbg_classes, ___dyndbg_classes) \ BOUNDED_SECTION_BY(__dyndbg, ___dyndbg) \ + CODETAG_SECTIONS() \ LIKELY_PROFILE() \ BRANCH_PROFILE() \ TRACE_PRINTKS() \ diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h new file mode 100644 index 000000000000..13561223fdab --- /dev/null +++ b/include/linux/alloc_tag.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * allocation tagging + */ +#ifndef _LINUX_ALLOC_TAG_H +#define _LINUX_ALLOC_TAG_H + +#include +#include +#include +#include +#include +#include +#include +#include + +struct alloc_tag_counters { + u64 bytes; + u64 calls; +}; + +/* + * An instance of this structure is created in a special ELF section at every + * allocation callsite. At runtime, the special section is treated as + * an array of these. Embedded codetag utilizes codetag framework. + */ +struct alloc_tag { + struct codetag ct; + struct alloc_tag_counters __percpu *counters; +} __aligned(8); + +#ifdef CONFIG_MEM_ALLOC_PROFILING + +static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct) +{ + return container_of(ct, struct alloc_tag, ct); +} + +#ifdef ARCH_NEEDS_WEAK_PER_CPU +/* + * When percpu variables are required to be defined as weak, static percpu + * variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION). + * Instead we will accound all module allocations to a single counter. 
+ */ +DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag); + +#define DEFINE_ALLOC_TAG(_alloc_tag) \ + static struct alloc_tag _alloc_tag __used __aligned(8) \ + __section("alloc_tags") = { \ + .ct = CODE_TAG_INIT, \ + .counters = &_shared_alloc_tag }; + +#else /* ARCH_NEEDS_WEAK_PER_CPU */ + +#define DEFINE_ALLOC_TAG(_alloc_tag) \ + static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \ + static struct alloc_tag _alloc_tag __used __aligned(8) \ + __section("alloc_tags") = { \ + .ct = CODE_TAG_INIT, \ + .counters = &_alloc_tag_cntr }; + +#endif /* ARCH_NEEDS_WEAK_PER_CPU */ + +DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT, + mem_alloc_profiling_key); + +static inline bool mem_alloc_profiling_enabled(void) +{ + return static_branch_maybe(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT, + &mem_alloc_profiling_key); +} + +static inline struct alloc_tag_counters alloc_tag_read(struct alloc_tag *tag) +{ + struct alloc_tag_counters v = { 0, 0 }; + struct alloc_tag_counters *counter; + int cpu; + + for_each_possible_cpu(cpu) { + counter = per_cpu_ptr(tag->counters, cpu); + v.bytes += counter->bytes; + v.calls += counter->calls; + } + + return v; +} + +#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG +static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) +{ + WARN_ONCE(ref && ref->ct, + "alloc_tag was not cleared (got tag for %s:%u)\n", + ref->ct->filename, ref->ct->lineno); + + WARN_ONCE(!tag, "current->alloc_tag not set"); +} + +static inline void alloc_tag_sub_check(union codetag_ref *ref) +{ + WARN_ONCE(ref && !ref->ct, "alloc_tag was not set\n"); +} +#else +static inline void alloc_tag_add_check(union codetag_ref *ref, struct alloc_tag *tag) {} +static inline void alloc_tag_sub_check(union codetag_ref *ref) {} +#endif + +/* Caller should verify both ref and tag to be valid */ +static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag) +{ + ref->ct = &tag->ct; + /* + * We need in increment the call counter every time we have a new + * allocation or when we split a large allocation into smaller ones. + * Each new reference for every sub-allocation needs to increment call + * counter because when we free each part the counter will be decremented. 
+ */ + this_cpu_inc(tag->counters->calls); +} + +static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes) +{ + alloc_tag_add_check(ref, tag); + if (!ref || !tag) + return; + + __alloc_tag_ref_set(ref, tag); + this_cpu_add(tag->counters->bytes, bytes); +} + +static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) +{ + struct alloc_tag *tag; + + alloc_tag_sub_check(ref); + if (!ref || !ref->ct) + return; + + tag = ct_to_alloc_tag(ref->ct); + + this_cpu_sub(tag->counters->bytes, bytes); + this_cpu_dec(tag->counters->calls); + + ref->ct = NULL; +} + +#else /* CONFIG_MEM_ALLOC_PROFILING */ + +#define DEFINE_ALLOC_TAG(_alloc_tag) +static inline bool mem_alloc_profiling_enabled(void) { return false; } +static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, + size_t bytes) {} +static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {} + +#endif /* CONFIG_MEM_ALLOC_PROFILING */ + +#endif /* _LINUX_ALLOC_TAG_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 3c2abbc587b4..4118b3f959c3 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -770,6 +770,10 @@ struct task_struct { unsigned int flags; unsigned int ptrace; +#ifdef CONFIG_MEM_ALLOC_PROFILING + struct alloc_tag *alloc_tag; +#endif + #ifdef CONFIG_SMP int on_cpu; struct __call_single_node wake_entry; @@ -810,6 +814,7 @@ struct task_struct { struct task_group *sched_task_group; #endif + #ifdef CONFIG_UCLAMP_TASK /* * Clamp values requested for a scheduling entity. @@ -2187,4 +2192,23 @@ static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); } extern void sched_set_stop_task(int cpu, struct task_struct *stop); +#ifdef CONFIG_MEM_ALLOC_PROFILING +static inline struct alloc_tag *alloc_tag_save(struct alloc_tag *tag) +{ + swap(current->alloc_tag, tag); + return tag; +} + +static inline void alloc_tag_restore(struct alloc_tag *tag, struct alloc_tag *old) +{ +#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG + WARN(current->alloc_tag != tag, "current->alloc_tag was changed:\n"); +#endif + current->alloc_tag = old; +} +#else +#define alloc_tag_save(_tag) NULL +#define alloc_tag_restore(_tag, _old) do {} while (0) +#endif + #endif diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 015fc6ee9849..fa7aa32ba11a 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -972,6 +972,31 @@ config CODE_TAGGING bool select KALLSYMS +config MEM_ALLOC_PROFILING + bool "Enable memory allocation profiling" + default n + depends on PROC_FS + depends on !DEBUG_FORCE_WEAK_PER_CPU + select CODE_TAGGING + help + Track allocation source code and record total allocation size + initiated at that code location. The mechanism can be used to track + memory leaks with a low performance and memory impact. + +config MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT + bool "Enable memory allocation profiling by default" + default y + depends on MEM_ALLOC_PROFILING + +config MEM_ALLOC_PROFILING_DEBUG + bool "Memory allocation profiler debugging" + default n + depends on MEM_ALLOC_PROFILING + select MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT + help + Adds warnings with helpful error messages for memory allocation + profiling. 
+ source "lib/Kconfig.kasan" source "lib/Kconfig.kfence" source "lib/Kconfig.kmsan" diff --git a/lib/Makefile b/lib/Makefile index 910335da8f13..2f4e17bfb299 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -234,6 +234,8 @@ obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o obj-$(CONFIG_CODE_TAGGING) += codetag.o +obj-$(CONFIG_MEM_ALLOC_PROFILING) += alloc_tag.o + lib-$(CONFIG_GENERIC_BUG) += bug.o obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c new file mode 100644 index 000000000000..331dd17650f3 --- /dev/null +++ b/lib/alloc_tag.c @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include +#include + +static struct codetag_type *alloc_tag_cttype; + +DEFINE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag); +EXPORT_SYMBOL(_shared_alloc_tag); + +DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT, + mem_alloc_profiling_key); + +static void *allocinfo_start(struct seq_file *m, loff_t *pos) +{ + struct codetag_iterator *iter; + struct codetag *ct; + loff_t node = *pos; + + iter = kzalloc(sizeof(*iter), GFP_KERNEL); + m->private = iter; + if (!iter) + return NULL; + + codetag_lock_module_list(alloc_tag_cttype, true); + *iter = codetag_get_ct_iter(alloc_tag_cttype); + while ((ct = codetag_next_ct(iter)) != NULL && node) + node--; + + return ct ? iter : NULL; +} + +static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos) +{ + struct codetag_iterator *iter = (struct codetag_iterator *)arg; + struct codetag *ct = codetag_next_ct(iter); + + (*pos)++; + if (!ct) + return NULL; + + return iter; +} + +static void allocinfo_stop(struct seq_file *m, void *arg) +{ + struct codetag_iterator *iter = (struct codetag_iterator *)m->private; + + if (iter) { + codetag_lock_module_list(alloc_tag_cttype, false); + kfree(iter); + } +} + +static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct) +{ + struct alloc_tag *tag = ct_to_alloc_tag(ct); + struct alloc_tag_counters counter = alloc_tag_read(tag); + s64 bytes = counter.bytes; + + seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls); + codetag_to_text(out, ct); + seq_buf_putc(out, ' '); + seq_buf_putc(out, '\n'); +} + +static int allocinfo_show(struct seq_file *m, void *arg) +{ + struct codetag_iterator *iter = (struct codetag_iterator *)arg; + char *bufp; + size_t n = seq_get_buf(m, &bufp); + struct seq_buf buf; + + seq_buf_init(&buf, bufp, n); + alloc_tag_to_text(&buf, iter->ct); + seq_commit(m, seq_buf_used(&buf)); + return 0; +} + +static const struct seq_operations allocinfo_seq_op = { + .start = allocinfo_start, + .next = allocinfo_next, + .stop = allocinfo_stop, + .show = allocinfo_show, +}; + +static void __init procfs_init(void) +{ + proc_create_seq("allocinfo", 0444, NULL, &allocinfo_seq_op); +} + +static bool alloc_tag_module_unload(struct codetag_type *cttype, + struct codetag_module *cmod) +{ + struct codetag_iterator iter = codetag_get_ct_iter(cttype); + struct alloc_tag_counters counter; + bool module_unused = true; + struct alloc_tag *tag; + struct codetag *ct; + + for (ct = codetag_next_ct(&iter); ct; ct = codetag_next_ct(&iter)) { + if (iter.cmod != cmod) + continue; + + tag = ct_to_alloc_tag(ct); + counter = alloc_tag_read(tag); + + if (WARN(counter.bytes, + "%s:%u module %s func:%s has %llu allocated at module unload", + ct->filename, ct->lineno, ct->modname, ct->function, counter.bytes)) + module_unused = false; + } + + 
return module_unused; +} + +static struct ctl_table memory_allocation_profiling_sysctls[] = { + { + .procname = "mem_profiling", + .data = &mem_alloc_profiling_key, +#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG + .mode = 0444, +#else + .mode = 0644, +#endif + .proc_handler = proc_do_static_key, + }, + { } +}; + +static int __init alloc_tag_init(void) +{ + const struct codetag_type_desc desc = { + .section = "alloc_tags", + .tag_size = sizeof(struct alloc_tag), + .module_unload = alloc_tag_module_unload, + }; + + alloc_tag_cttype = codetag_register_type(&desc); + if (IS_ERR(alloc_tag_cttype)) + return PTR_ERR(alloc_tag_cttype); + + register_sysctl_init("vm", memory_allocation_profiling_sysctls); + procfs_init(); + + return 0; +} +module_init(alloc_tag_init); diff --git a/scripts/module.lds.S b/scripts/module.lds.S index bf5bcf2836d8..45c67a0994f3 100644 --- a/scripts/module.lds.S +++ b/scripts/module.lds.S @@ -9,6 +9,8 @@ #define DISCARD_EH_FRAME *(.eh_frame) #endif +#include + SECTIONS { /DISCARD/ : { *(.discard) @@ -47,12 +49,17 @@ SECTIONS { .data : { *(.data .data.[0-9a-zA-Z_]*) *(.data..L*) + CODETAG_SECTIONS() } .rodata : { *(.rodata .rodata.[0-9a-zA-Z_]*) *(.rodata..L*) } +#else + .data : { + CODETAG_SECTIONS() + } #endif } -- cgit From dcfe378c81f72f146890ce1dcfdcc742d3b66924 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:36 -0700 Subject: lib: introduce support for page allocation tagging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce helper functions to easily instrument page allocators by storing a pointer to the allocation tag associated with the code that allocated the page in a page_ext field. Link: https://lkml.kernel.org/r/20240321163705.3067592-15-surenb@google.com Signed-off-by: Suren Baghdasaryan Co-developed-by: Kent Overstreet Signed-off-by: Kent Overstreet Reviewed-by: Vlastimil Babka Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/page_ext.h | 1 - include/linux/pgalloc_tag.h | 78 +++++++++++++++++++++++++++++++++++++++++++++ lib/Kconfig.debug | 1 + lib/alloc_tag.c | 17 ++++++++++ mm/mm_init.c | 1 + mm/page_alloc.c | 4 +++ mm/page_ext.c | 4 +++ 7 files changed, 105 insertions(+), 1 deletion(-) create mode 100644 include/linux/pgalloc_tag.h (limited to 'include/linux') diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index be98564191e6..07e0656898f9 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -4,7 +4,6 @@ #include #include -#include struct pglist_data; diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h new file mode 100644 index 000000000000..66bd021eb46e --- /dev/null +++ b/include/linux/pgalloc_tag.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * page allocation tagging + */ +#ifndef _LINUX_PGALLOC_TAG_H +#define _LINUX_PGALLOC_TAG_H + +#include + +#ifdef CONFIG_MEM_ALLOC_PROFILING + +#include + +extern struct page_ext_operations page_alloc_tagging_ops; +extern struct page_ext *page_ext_get(struct page *page); +extern void page_ext_put(struct page_ext *page_ext); + +static inline union codetag_ref *codetag_ref_from_page_ext(struct page_ext *page_ext) +{ + return (void *)page_ext + page_alloc_tagging_ops.offset; +} + +static 
inline struct page_ext *page_ext_from_codetag_ref(union codetag_ref *ref) +{ + return (void *)ref - page_alloc_tagging_ops.offset; +} + +/* Should be called only if mem_alloc_profiling_enabled() */ +static inline union codetag_ref *get_page_tag_ref(struct page *page) +{ + if (page) { + struct page_ext *page_ext = page_ext_get(page); + + if (page_ext) + return codetag_ref_from_page_ext(page_ext); + } + return NULL; +} + +static inline void put_page_tag_ref(union codetag_ref *ref) +{ + page_ext_put(page_ext_from_codetag_ref(ref)); +} + +static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, + unsigned int nr) +{ + if (mem_alloc_profiling_enabled()) { + union codetag_ref *ref = get_page_tag_ref(page); + + if (ref) { + alloc_tag_add(ref, task->alloc_tag, PAGE_SIZE * nr); + put_page_tag_ref(ref); + } + } +} + +static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) +{ + if (mem_alloc_profiling_enabled()) { + union codetag_ref *ref = get_page_tag_ref(page); + + if (ref) { + alloc_tag_sub(ref, PAGE_SIZE * nr); + put_page_tag_ref(ref); + } + } +} + +#else /* CONFIG_MEM_ALLOC_PROFILING */ + +static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, + unsigned int nr) {} +static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} + +#endif /* CONFIG_MEM_ALLOC_PROFILING */ + +#endif /* _LINUX_PGALLOC_TAG_H */ diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index fa7aa32ba11a..a8f77de5e8ea 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -978,6 +978,7 @@ config MEM_ALLOC_PROFILING depends on PROC_FS depends on !DEBUG_FORCE_WEAK_PER_CPU select CODE_TAGGING + select PAGE_EXTENSION help Track allocation source code and record total allocation size initiated at that code location. 
The mechanism can be used to track diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index 331dd17650f3..59bb71036837 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include #include @@ -118,6 +119,22 @@ static bool alloc_tag_module_unload(struct codetag_type *cttype, return module_unused; } +static __init bool need_page_alloc_tagging(void) +{ + return true; +} + +static __init void init_page_alloc_tagging(void) +{ +} + +struct page_ext_operations page_alloc_tagging_ops = { + .size = sizeof(union codetag_ref), + .need = need_page_alloc_tagging, + .init = init_page_alloc_tagging, +}; +EXPORT_SYMBOL(page_alloc_tagging_ops); + static struct ctl_table memory_allocation_profiling_sysctls[] = { { .procname = "mem_profiling", diff --git a/mm/mm_init.c b/mm/mm_init.c index 549e76af8f82..2fd9bf044a79 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5083ac034d26..3fd273c22749 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -54,6 +54,7 @@ #include #include #include +#include #include #include "internal.h" #include "shuffle.h" @@ -1101,6 +1102,7 @@ __always_inline bool free_pages_prepare(struct page *page, /* Do not let hwpoison pages hit pcplists/buddy */ reset_page_owner(page, order); page_table_check_free(page, order); + pgalloc_tag_sub(page, 1 << order); return false; } @@ -1140,6 +1142,7 @@ __always_inline bool free_pages_prepare(struct page *page, page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; reset_page_owner(page, order); page_table_check_free(page, order); + pgalloc_tag_sub(page, 1 << order); if (!PageHighMem(page)) { debug_check_no_locks_freed(page_address(page), @@ -1533,6 +1536,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order, set_page_owner(page, order, gfp_flags); page_table_check_alloc(page, order); + pgalloc_tag_add(page, current, 1 << order); } static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, diff --git a/mm/page_ext.c b/mm/page_ext.c index 4548fcc66d74..3c58fe8a24df 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c @@ -10,6 +10,7 @@ #include #include #include +#include /* * struct page extension @@ -82,6 +83,9 @@ static struct page_ext_operations *page_ext_ops[] __initdata = { #if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT) &page_idle_ops, #endif +#ifdef CONFIG_MEM_ALLOC_PROFILING + &page_alloc_tagging_ops, +#endif #ifdef CONFIG_PAGE_TABLE_CHECK &page_table_check_ops, #endif -- cgit From ccdabb1d7f7a09810495ac57c0154a9913791dd2 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:38 -0700 Subject: mm: percpu: increase PERCPU_MODULE_RESERVE to accommodate allocation tags MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As each allocation tag generates a per-cpu variable, more space is required to store them. Increase PERCPU_MODULE_RESERVE to provide enough area. A better long-term solution would be to allocate this memory dynamically. 
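
A rough sizing sketch (back-of-envelope arithmetic, not part of the patch; it
assumes struct alloc_tag_counters is two 64-bit counters, i.e. 16 bytes per
tag per CPU):

	8 << 10 =  8192 bytes ( 8 KiB)   -- module reserve without profiling
	8 << 13 = 65536 bytes (64 KiB)   -- module reserve with profiling
	(65536 - 8192) / 16 ~= 3584      -- extra headroom, i.e. roughly that many
	                                    tagged allocation call sites across all
	                                    loaded modules
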
[surenb@google.com: increase PERCPU_MODULE_RESERVE to accommodate allocation tags] Link: https://lkml.kernel.org/r/20240406214044.1114406-1-surenb@google.com Link: https://lkml.kernel.org/r/20240321163705.3067592-17-surenb@google.com Signed-off-by: Suren Baghdasaryan Signed-off-by: Kent Overstreet Tested-by: Kees Cook Cc: Peter Zijlstra Cc: Tejun Heo Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/percpu.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 8c677f185901..9dfa7b526e70 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -14,7 +14,11 @@ /* enough to cover all DEFINE_PER_CPUs in modules */ #ifdef CONFIG_MODULES +#ifdef CONFIG_MEM_ALLOC_PROFILING +#define PERCPU_MODULE_RESERVE (8 << 13) +#else #define PERCPU_MODULE_RESERVE (8 << 10) +#endif #else #define PERCPU_MODULE_RESERVE 0 #endif -- cgit From 8a2f11878771da65b8ac135c73b47dae13afbd62 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:39 -0700 Subject: change alloc_pages name in dma_map_ops to avoid name conflicts MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After redefining alloc_pages, all uses of that name are being replaced. Change the conflicting names to prevent preprocessor from replacing them when it's not intended. Link: https://lkml.kernel.org/r/20240321163705.3067592-18-surenb@google.com Signed-off-by: Suren Baghdasaryan Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Kent Overstreet Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- arch/alpha/kernel/pci_iommu.c | 2 +- arch/mips/jazz/jazzdma.c | 2 +- arch/powerpc/kernel/dma-iommu.c | 2 +- arch/powerpc/platforms/ps3/system-bus.c | 4 ++-- arch/powerpc/platforms/pseries/vio.c | 2 +- arch/x86/kernel/amd_gart_64.c | 2 +- drivers/iommu/dma-iommu.c | 2 +- drivers/parisc/ccio-dma.c | 2 +- drivers/parisc/sba_iommu.c | 2 +- drivers/xen/grant-dma-ops.c | 2 +- drivers/xen/swiotlb-xen.c | 2 +- include/linux/dma-map-ops.h | 2 +- kernel/dma/mapping.c | 4 ++-- 13 files changed, 15 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index c81183935e97..7fcf3e9b7103 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -929,7 +929,7 @@ const struct dma_map_ops alpha_pci_ops = { .dma_supported = alpha_pci_supported, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; EXPORT_SYMBOL(alpha_pci_ops); diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c index eabddb89d221..c97b089b9902 100644 --- a/arch/mips/jazz/jazzdma.c +++ b/arch/mips/jazz/jazzdma.c @@ -617,7 +617,7 @@ const struct dma_map_ops jazz_dma_ops = { .sync_sg_for_device = jazz_dma_sync_sg_for_device, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = 
dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; EXPORT_SYMBOL(jazz_dma_ops); diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 8920862ffd79..f0ae39e77e37 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c @@ -216,6 +216,6 @@ const struct dma_map_ops dma_iommu_ops = { .get_required_mask = dma_iommu_get_required_mask, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c index d6b5f5ecd515..56dc6b29a3e7 100644 --- a/arch/powerpc/platforms/ps3/system-bus.c +++ b/arch/powerpc/platforms/ps3/system-bus.c @@ -695,7 +695,7 @@ static const struct dma_map_ops ps3_sb_dma_ops = { .unmap_page = ps3_unmap_page, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; @@ -709,7 +709,7 @@ static const struct dma_map_ops ps3_ioc0_dma_ops = { .unmap_page = ps3_unmap_page, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c index 90ff85c879bf..477c1d5e1737 100644 --- a/arch/powerpc/platforms/pseries/vio.c +++ b/arch/powerpc/platforms/pseries/vio.c @@ -611,7 +611,7 @@ static const struct dma_map_ops vio_dma_mapping_ops = { .get_required_mask = dma_iommu_get_required_mask, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 2ae98f754e59..c884deca839b 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -676,7 +676,7 @@ static const struct dma_map_ops gart_dma_ops = { .get_sgtable = dma_common_get_sgtable, .dma_supported = dma_direct_supported, .get_required_mask = dma_direct_get_required_mask, - .alloc_pages = dma_direct_alloc_pages, + .alloc_pages_op = dma_direct_alloc_pages, .free_pages = dma_direct_free_pages, }; diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index e4cb26f6a943..07d087eecc17 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -1723,7 +1723,7 @@ static const struct dma_map_ops iommu_dma_ops = { .flags = DMA_F_PCI_P2PDMA_SUPPORTED, .alloc = iommu_dma_alloc, .free = iommu_dma_free, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, .alloc_noncontiguous = iommu_dma_alloc_noncontiguous, .free_noncontiguous = iommu_dma_free_noncontiguous, diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 9ce0d20a6c58..feef537257d0 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c @@ -1022,7 +1022,7 @@ static const struct dma_map_ops ccio_ops = { .map_sg = ccio_map_sg, .unmap_sg = ccio_unmap_sg, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; diff --git a/drivers/parisc/sba_iommu.c 
b/drivers/parisc/sba_iommu.c index 784037837f65..fc3863c09f83 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1090,7 +1090,7 @@ static const struct dma_map_ops sba_ops = { .map_sg = sba_map_sg, .unmap_sg = sba_unmap_sg, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, }; diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c index 76f6f26265a3..29257d2639db 100644 --- a/drivers/xen/grant-dma-ops.c +++ b/drivers/xen/grant-dma-ops.c @@ -282,7 +282,7 @@ static int xen_grant_dma_supported(struct device *dev, u64 mask) static const struct dma_map_ops xen_grant_dma_ops = { .alloc = xen_grant_dma_alloc, .free = xen_grant_dma_free, - .alloc_pages = xen_grant_dma_alloc_pages, + .alloc_pages_op = xen_grant_dma_alloc_pages, .free_pages = xen_grant_dma_free_pages, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c index 0e6c6c25d154..1c4ef5111651 100644 --- a/drivers/xen/swiotlb-xen.c +++ b/drivers/xen/swiotlb-xen.c @@ -403,7 +403,7 @@ const struct dma_map_ops xen_swiotlb_dma_ops = { .dma_supported = xen_swiotlb_dma_supported, .mmap = dma_common_mmap, .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, + .alloc_pages_op = dma_common_alloc_pages, .free_pages = dma_common_free_pages, .max_mapping_size = swiotlb_max_mapping_size, }; diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 4abc60f04209..9ee319851b5f 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -29,7 +29,7 @@ struct dma_map_ops { unsigned long attrs); void (*free)(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle, unsigned long attrs); - struct page *(*alloc_pages)(struct device *dev, size_t size, + struct page *(*alloc_pages_op)(struct device *dev, size_t size, dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp); void (*free_pages)(struct device *dev, size_t size, struct page *vaddr, diff --git a/kernel/dma/mapping.c b/kernel/dma/mapping.c index 58db8fd70471..5e2d51e1cdf6 100644 --- a/kernel/dma/mapping.c +++ b/kernel/dma/mapping.c @@ -570,9 +570,9 @@ static struct page *__dma_alloc_pages(struct device *dev, size_t size, size = PAGE_ALIGN(size); if (dma_alloc_direct(dev, ops)) return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp); - if (!ops->alloc_pages) + if (!ops->alloc_pages_op) return NULL; - return ops->alloc_pages(dev, size, dma_handle, dir, gfp); + return ops->alloc_pages_op(dev, size, dma_handle, dir, gfp); } struct page *dma_alloc_pages(struct device *dev, size_t size, -- cgit From b951aaff503502a7fe066eeed2744ba8a6413c89 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:40 -0700 Subject: mm: enable page allocation tagging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Redefine page allocators to record allocation tags upon their invocation. Instrument post_alloc_hook and free_pages_prepare to modify current allocation tag. 
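
For illustration, a minimal sketch of what a page allocation call site looks
like once these wrappers are in place (the caller below is hypothetical; the
expansion is paraphrased from the alloc_hooks()/DEFINE_ALLOC_TAG() macros
introduced earlier in this series):

	static struct page *example_get_page(void)	/* hypothetical caller */
	{
		/*
		 * alloc_pages(GFP_KERNEL, 0) is now a macro and expands
		 * (approximately) to:
		 *
		 *	DEFINE_ALLOC_TAG(_alloc_tag);	// one static tag per call site
		 *	struct alloc_tag *_old = alloc_tag_save(&_alloc_tag);
		 *	struct page *_res = alloc_pages_noprof(GFP_KERNEL, 0);
		 *	alloc_tag_restore(&_alloc_tag, _old);
		 *
		 * post_alloc_hook() then charges current->alloc_tag via
		 * pgalloc_tag_add(page, current, 1 << order), and
		 * free_pages_prepare() undoes it via pgalloc_tag_sub().
		 */
		return alloc_pages(GFP_KERNEL, 0);
	}
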
[surenb@google.com: undo _noprof additions in the documentation] Link: https://lkml.kernel.org/r/20240326231453.1206227-3-surenb@google.com Link: https://lkml.kernel.org/r/20240321163705.3067592-19-surenb@google.com Signed-off-by: Suren Baghdasaryan Co-developed-by: Kent Overstreet Signed-off-by: Kent Overstreet Reviewed-by: Kees Cook Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/alloc_tag.h | 14 ++++++ include/linux/gfp.h | 126 +++++++++++++++++++++++++++++----------------- include/linux/pagemap.h | 9 ++-- mm/compaction.c | 7 ++- mm/filemap.c | 6 +-- mm/mempolicy.c | 46 ++++++++--------- mm/page_alloc.c | 52 +++++++++---------- 7 files changed, 157 insertions(+), 103 deletions(-) (limited to 'include/linux') diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index 13561223fdab..8932187124da 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -153,4 +153,18 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {} #endif /* CONFIG_MEM_ALLOC_PROFILING */ +#define alloc_hooks_tag(_tag, _do_alloc) \ +({ \ + struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag); \ + typeof(_do_alloc) _res = _do_alloc; \ + alloc_tag_restore(_tag, _old); \ + _res; \ +}) + +#define alloc_hooks(_do_alloc) \ +({ \ + DEFINE_ALLOC_TAG(_alloc_tag); \ + alloc_hooks_tag(&_alloc_tag, _do_alloc); \ +}) + #endif /* _LINUX_ALLOC_TAG_H */ diff --git a/include/linux/gfp.h b/include/linux/gfp.h index c775ea3c6015..450c2cbcf04b 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -6,6 +6,8 @@ #include #include +#include +#include struct vm_area_struct; struct mempolicy; @@ -175,42 +177,46 @@ static inline void arch_free_page(struct page *page, int order) { } static inline void arch_alloc_page(struct page *page, int order) { } #endif -struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, +struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask); -struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, +#define __alloc_pages(...) alloc_hooks(__alloc_pages_noprof(__VA_ARGS__)) + +struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask); +#define __folio_alloc(...) alloc_hooks(__folio_alloc_noprof(__VA_ARGS__)) -unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, +unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, nodemask_t *nodemask, int nr_pages, struct list_head *page_list, struct page **page_array); +#define __alloc_pages_bulk(...) alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__)) -unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp, +unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp, unsigned long nr_pages, struct page **page_array); +#define alloc_pages_bulk_array_mempolicy(...) 
\ + alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__)) /* Bulk allocate order-0 pages */ -static inline unsigned long -alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list) -{ - return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, list, NULL); -} +#define alloc_pages_bulk_list(_gfp, _nr_pages, _list) \ + __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _list, NULL) -static inline unsigned long -alloc_pages_bulk_array(gfp_t gfp, unsigned long nr_pages, struct page **page_array) -{ - return __alloc_pages_bulk(gfp, numa_mem_id(), NULL, nr_pages, NULL, page_array); -} +#define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array) \ + __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, NULL, _page_array) static inline unsigned long -alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array) +alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages, + struct page **page_array) { if (nid == NUMA_NO_NODE) nid = numa_mem_id(); - return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array); + return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, NULL, page_array); } +#define alloc_pages_bulk_array_node(...) \ + alloc_hooks(alloc_pages_bulk_array_node_noprof(__VA_ARGS__)) + static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask) { gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN); @@ -230,82 +236,104 @@ static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask) * online. For more general interface, see alloc_pages_node(). */ static inline struct page * -__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) +__alloc_pages_node_noprof(int nid, gfp_t gfp_mask, unsigned int order) { VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); warn_if_node_offline(nid, gfp_mask); - return __alloc_pages(gfp_mask, order, nid, NULL); + return __alloc_pages_noprof(gfp_mask, order, nid, NULL); } +#define __alloc_pages_node(...) alloc_hooks(__alloc_pages_node_noprof(__VA_ARGS__)) + static inline -struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid) +struct folio *__folio_alloc_node_noprof(gfp_t gfp, unsigned int order, int nid) { VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES); warn_if_node_offline(nid, gfp); - return __folio_alloc(gfp, order, nid, NULL); + return __folio_alloc_noprof(gfp, order, nid, NULL); } +#define __folio_alloc_node(...) alloc_hooks(__folio_alloc_node_noprof(__VA_ARGS__)) + /* * Allocate pages, preferring the node given as nid. When nid == NUMA_NO_NODE, * prefer the current CPU's closest node. Otherwise node must be valid and * online. */ -static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, - unsigned int order) +static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask, + unsigned int order) { if (nid == NUMA_NO_NODE) nid = numa_mem_id(); - return __alloc_pages_node(nid, gfp_mask, order); + return __alloc_pages_node_noprof(nid, gfp_mask, order); } +#define alloc_pages_node(...) 
alloc_hooks(alloc_pages_node_noprof(__VA_ARGS__)) + #ifdef CONFIG_NUMA -struct page *alloc_pages(gfp_t gfp, unsigned int order); -struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, +struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order); +struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order, struct mempolicy *mpol, pgoff_t ilx, int nid); -struct folio *folio_alloc(gfp_t gfp, unsigned int order); -struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, +struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order); +struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, bool hugepage); #else -static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order) +static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order) { - return alloc_pages_node(numa_node_id(), gfp_mask, order); + return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order); } -static inline struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, +static inline struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order, struct mempolicy *mpol, pgoff_t ilx, int nid) { - return alloc_pages(gfp, order); + return alloc_pages_noprof(gfp, order); } -static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order) +static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order) { return __folio_alloc_node(gfp, order, numa_node_id()); } -#define vma_alloc_folio(gfp, order, vma, addr, hugepage) \ - folio_alloc(gfp, order) +#define vma_alloc_folio_noprof(gfp, order, vma, addr, hugepage) \ + folio_alloc_noprof(gfp, order) #endif + +#define alloc_pages(...) alloc_hooks(alloc_pages_noprof(__VA_ARGS__)) +#define alloc_pages_mpol(...) alloc_hooks(alloc_pages_mpol_noprof(__VA_ARGS__)) +#define folio_alloc(...) alloc_hooks(folio_alloc_noprof(__VA_ARGS__)) +#define vma_alloc_folio(...) alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__)) + #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) -static inline struct page *alloc_page_vma(gfp_t gfp, + +static inline struct page *alloc_page_vma_noprof(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) { - struct folio *folio = vma_alloc_folio(gfp, 0, vma, addr, false); + struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr, false); return &folio->page; } +#define alloc_page_vma(...) alloc_hooks(alloc_page_vma_noprof(__VA_ARGS__)) + +extern unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order); +#define __get_free_pages(...) alloc_hooks(get_free_pages_noprof(__VA_ARGS__)) -extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); -extern unsigned long get_zeroed_page(gfp_t gfp_mask); +extern unsigned long get_zeroed_page_noprof(gfp_t gfp_mask); +#define get_zeroed_page(...) alloc_hooks(get_zeroed_page_noprof(__VA_ARGS__)) + +void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) __alloc_size(1); +#define alloc_pages_exact(...) alloc_hooks(alloc_pages_exact_noprof(__VA_ARGS__)) -void *alloc_pages_exact(size_t size, gfp_t gfp_mask) __alloc_size(1); void free_pages_exact(void *virt, size_t size); -__meminit void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2); -#define __get_free_page(gfp_mask) \ - __get_free_pages((gfp_mask), 0) +__meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) __alloc_size(2); +#define alloc_pages_exact_nid(...) 
\ + alloc_hooks(alloc_pages_exact_nid_noprof(__VA_ARGS__)) + +#define __get_free_page(gfp_mask) \ + __get_free_pages((gfp_mask), 0) -#define __get_dma_pages(gfp_mask, order) \ - __get_free_pages((gfp_mask) | GFP_DMA, (order)) +#define __get_dma_pages(gfp_mask, order) \ + __get_free_pages((gfp_mask) | GFP_DMA, (order)) extern void __free_pages(struct page *page, unsigned int order); extern void free_pages(unsigned long addr, unsigned int order); @@ -374,10 +402,14 @@ extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma); #ifdef CONFIG_CONTIG_ALLOC /* The below functions must be run on a range from a single zone. */ -extern int alloc_contig_range(unsigned long start, unsigned long end, +extern int alloc_contig_range_noprof(unsigned long start, unsigned long end, unsigned migratetype, gfp_t gfp_mask); -extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, - int nid, nodemask_t *nodemask); +#define alloc_contig_range(...) alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__)) + +extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, + int nid, nodemask_t *nodemask); +#define alloc_contig_pages(...) alloc_hooks(alloc_contig_pages_noprof(__VA_ARGS__)) + #endif void free_contig_range(unsigned long pfn, unsigned long nr_pages); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 2df35e65557d..35636e67e2e1 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -542,14 +542,17 @@ static inline void *detach_page_private(struct page *page) #endif #ifdef CONFIG_NUMA -struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order); +struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order); #else -static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order) +static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order) { - return folio_alloc(gfp, order); + return folio_alloc_noprof(gfp, order); } #endif +#define filemap_alloc_folio(...) \ + alloc_hooks(filemap_alloc_folio_noprof(__VA_ARGS__)) + static inline struct page *__page_cache_alloc(gfp_t gfp) { return &filemap_alloc_folio(gfp, 0)->page; diff --git a/mm/compaction.c b/mm/compaction.c index 807b58e6eb68..70b01190d2f3 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1851,7 +1851,7 @@ static void isolate_freepages(struct compact_control *cc) * This is a migrate-callback that "allocates" freepages by taking pages * from the isolated freelists in the block we are migrating to. */ -static struct folio *compaction_alloc(struct folio *src, unsigned long data) +static struct folio *compaction_alloc_noprof(struct folio *src, unsigned long data) { struct compact_control *cc = (struct compact_control *)data; struct folio *dst; @@ -1898,6 +1898,11 @@ again: return page_rmappable_folio(&dst->page); } +static struct folio *compaction_alloc(struct folio *src, unsigned long data) +{ + return alloc_hooks(compaction_alloc_noprof(src, data)); +} + /* * This is a migrate-callback that "frees" freepages back to the isolated * freelist. 
All pages on the freelist are from the same zone, so there is no diff --git a/mm/filemap.c b/mm/filemap.c index 30de18c4fd28..60890758d660 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -966,7 +966,7 @@ int filemap_add_folio(struct address_space *mapping, struct folio *folio, EXPORT_SYMBOL_GPL(filemap_add_folio); #ifdef CONFIG_NUMA -struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order) +struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order) { int n; struct folio *folio; @@ -981,9 +981,9 @@ struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order) return folio; } - return folio_alloc(gfp, order); + return folio_alloc_noprof(gfp, order); } -EXPORT_SYMBOL(filemap_alloc_folio); +EXPORT_SYMBOL(filemap_alloc_folio_noprof); #endif /* diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 13100a290918..0b3def99174a 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2201,9 +2201,9 @@ static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order, */ preferred_gfp = gfp | __GFP_NOWARN; preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); - page = __alloc_pages(preferred_gfp, order, nid, nodemask); + page = __alloc_pages_noprof(preferred_gfp, order, nid, nodemask); if (!page) - page = __alloc_pages(gfp, order, nid, NULL); + page = __alloc_pages_noprof(gfp, order, nid, NULL); return page; } @@ -2218,7 +2218,7 @@ static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order, * * Return: The page on success or NULL if allocation fails. */ -struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, +struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order, struct mempolicy *pol, pgoff_t ilx, int nid) { nodemask_t *nodemask; @@ -2249,7 +2249,7 @@ struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, * First, try to allocate THP only on local node, but * don't reclaim unnecessarily, just compact. */ - page = __alloc_pages_node(nid, + page = __alloc_pages_node_noprof(nid, gfp | __GFP_THISNODE | __GFP_NORETRY, order); if (page || !(gfp & __GFP_DIRECT_RECLAIM)) return page; @@ -2262,7 +2262,7 @@ struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, } } - page = __alloc_pages(gfp, order, nid, nodemask); + page = __alloc_pages_noprof(gfp, order, nid, nodemask); if (unlikely(pol->mode == MPOL_INTERLEAVE) && page) { /* skip NUMA_INTERLEAVE_HIT update if numa stats is disabled */ @@ -2293,7 +2293,7 @@ struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, * * Return: The folio on success or NULL if allocation fails. */ -struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, +struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned long addr, bool hugepage) { struct mempolicy *pol; @@ -2301,12 +2301,12 @@ struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma, struct page *page; pol = get_vma_policy(vma, addr, order, &ilx); - page = alloc_pages_mpol(gfp | __GFP_COMP, order, - pol, ilx, numa_node_id()); + page = alloc_pages_mpol_noprof(gfp | __GFP_COMP, order, + pol, ilx, numa_node_id()); mpol_cond_put(pol); return page_rmappable_folio(page); } -EXPORT_SYMBOL(vma_alloc_folio); +EXPORT_SYMBOL(vma_alloc_folio_noprof); /** * alloc_pages - Allocate pages. @@ -2322,7 +2322,7 @@ EXPORT_SYMBOL(vma_alloc_folio); * flags are used. * Return: The page on success or NULL if allocation fails. 
*/ -struct page *alloc_pages(gfp_t gfp, unsigned int order) +struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order) { struct mempolicy *pol = &default_policy; @@ -2333,16 +2333,16 @@ struct page *alloc_pages(gfp_t gfp, unsigned int order) if (!in_interrupt() && !(gfp & __GFP_THISNODE)) pol = get_task_policy(current); - return alloc_pages_mpol(gfp, order, - pol, NO_INTERLEAVE_INDEX, numa_node_id()); + return alloc_pages_mpol_noprof(gfp, order, pol, NO_INTERLEAVE_INDEX, + numa_node_id()); } -EXPORT_SYMBOL(alloc_pages); +EXPORT_SYMBOL(alloc_pages_noprof); -struct folio *folio_alloc(gfp_t gfp, unsigned int order) +struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order) { - return page_rmappable_folio(alloc_pages(gfp | __GFP_COMP, order)); + return page_rmappable_folio(alloc_pages_noprof(gfp | __GFP_COMP, order)); } -EXPORT_SYMBOL(folio_alloc); +EXPORT_SYMBOL(folio_alloc_noprof); static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp, struct mempolicy *pol, unsigned long nr_pages, @@ -2361,13 +2361,13 @@ static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp, for (i = 0; i < nodes; i++) { if (delta) { - nr_allocated = __alloc_pages_bulk(gfp, + nr_allocated = alloc_pages_bulk_noprof(gfp, interleave_nodes(pol), NULL, nr_pages_per_node + 1, NULL, page_array); delta--; } else { - nr_allocated = __alloc_pages_bulk(gfp, + nr_allocated = alloc_pages_bulk_noprof(gfp, interleave_nodes(pol), NULL, nr_pages_per_node, NULL, page_array); } @@ -2504,11 +2504,11 @@ static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid, preferred_gfp = gfp | __GFP_NOWARN; preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); - nr_allocated = __alloc_pages_bulk(preferred_gfp, nid, &pol->nodes, + nr_allocated = alloc_pages_bulk_noprof(preferred_gfp, nid, &pol->nodes, nr_pages, NULL, page_array); if (nr_allocated < nr_pages) - nr_allocated += __alloc_pages_bulk(gfp, numa_node_id(), NULL, + nr_allocated += alloc_pages_bulk_noprof(gfp, numa_node_id(), NULL, nr_pages - nr_allocated, NULL, page_array + nr_allocated); return nr_allocated; @@ -2520,7 +2520,7 @@ static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid, * It can accelerate memory allocation especially interleaving * allocate memory. */ -unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp, +unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp, unsigned long nr_pages, struct page **page_array) { struct mempolicy *pol = &default_policy; @@ -2544,8 +2544,8 @@ unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp, nid = numa_node_id(); nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid); - return __alloc_pages_bulk(gfp, nid, nodemask, - nr_pages, NULL, page_array); + return alloc_pages_bulk_noprof(gfp, nid, nodemask, + nr_pages, NULL, page_array); } int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3fd273c22749..d3afaf8f983a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4391,7 +4391,7 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, * * Returns the number of pages on the list or array. 
*/ -unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, +unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, nodemask_t *nodemask, int nr_pages, struct list_head *page_list, struct page **page_array) @@ -4527,7 +4527,7 @@ failed_irq: pcp_trylock_finish(UP_flags); failed: - page = __alloc_pages(gfp, 0, preferred_nid, nodemask); + page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask); if (page) { if (page_list) list_add(&page->lru, page_list); @@ -4538,13 +4538,13 @@ failed: goto out; } -EXPORT_SYMBOL_GPL(__alloc_pages_bulk); +EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof); /* * This is the 'heart' of the zoned buddy allocator. */ -struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, - nodemask_t *nodemask) +struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, + int preferred_nid, nodemask_t *nodemask) { struct page *page; unsigned int alloc_flags = ALLOC_WMARK_LOW; @@ -4606,38 +4606,38 @@ out: return page; } -EXPORT_SYMBOL(__alloc_pages); +EXPORT_SYMBOL(__alloc_pages_noprof); -struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid, +struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, nodemask_t *nodemask) { - struct page *page = __alloc_pages(gfp | __GFP_COMP, order, + struct page *page = __alloc_pages_noprof(gfp | __GFP_COMP, order, preferred_nid, nodemask); return page_rmappable_folio(page); } -EXPORT_SYMBOL(__folio_alloc); +EXPORT_SYMBOL(__folio_alloc_noprof); /* * Common helper functions. Never use with __GFP_HIGHMEM because the returned * address cannot represent highmem pages. Use alloc_pages and then kmap if * you need to access high mem. */ -unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) +unsigned long get_free_pages_noprof(gfp_t gfp_mask, unsigned int order) { struct page *page; - page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); + page = alloc_pages_noprof(gfp_mask & ~__GFP_HIGHMEM, order); if (!page) return 0; return (unsigned long) page_address(page); } -EXPORT_SYMBOL(__get_free_pages); +EXPORT_SYMBOL(get_free_pages_noprof); -unsigned long get_zeroed_page(gfp_t gfp_mask) +unsigned long get_zeroed_page_noprof(gfp_t gfp_mask) { - return __get_free_page(gfp_mask | __GFP_ZERO); + return get_free_pages_noprof(gfp_mask | __GFP_ZERO, 0); } -EXPORT_SYMBOL(get_zeroed_page); +EXPORT_SYMBOL(get_zeroed_page_noprof); /** * __free_pages - Free pages allocated with alloc_pages(). @@ -4853,7 +4853,7 @@ static void *make_alloc_exact(unsigned long addr, unsigned int order, * * Return: pointer to the allocated area or %NULL in case of error. */ -void *alloc_pages_exact(size_t size, gfp_t gfp_mask) +void *alloc_pages_exact_noprof(size_t size, gfp_t gfp_mask) { unsigned int order = get_order(size); unsigned long addr; @@ -4861,10 +4861,10 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask) if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); - addr = __get_free_pages(gfp_mask, order); + addr = get_free_pages_noprof(gfp_mask, order); return make_alloc_exact(addr, order, size); } -EXPORT_SYMBOL(alloc_pages_exact); +EXPORT_SYMBOL(alloc_pages_exact_noprof); /** * alloc_pages_exact_nid - allocate an exact number of physically-contiguous @@ -4878,7 +4878,7 @@ EXPORT_SYMBOL(alloc_pages_exact); * * Return: pointer to the allocated area or %NULL in case of error. 
*/ -void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) +void * __meminit alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mask) { unsigned int order = get_order(size); struct page *p; @@ -4886,7 +4886,7 @@ void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); - p = alloc_pages_node(nid, gfp_mask, order); + p = alloc_pages_node_noprof(nid, gfp_mask, order); if (!p) return NULL; return make_alloc_exact((unsigned long)page_address(p), order, size); @@ -6343,7 +6343,7 @@ int __alloc_contig_migrate_range(struct compact_control *cc, * pages which PFN is in [start, end) are allocated for the caller and * need to be freed with free_contig_range(). */ -int alloc_contig_range(unsigned long start, unsigned long end, +int alloc_contig_range_noprof(unsigned long start, unsigned long end, unsigned migratetype, gfp_t gfp_mask) { unsigned long outer_start, outer_end; @@ -6467,15 +6467,15 @@ done: undo_isolate_page_range(start, end, migratetype); return ret; } -EXPORT_SYMBOL(alloc_contig_range); +EXPORT_SYMBOL(alloc_contig_range_noprof); static int __alloc_contig_pages(unsigned long start_pfn, unsigned long nr_pages, gfp_t gfp_mask) { unsigned long end_pfn = start_pfn + nr_pages; - return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE, - gfp_mask); + return alloc_contig_range_noprof(start_pfn, end_pfn, MIGRATE_MOVABLE, + gfp_mask); } static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn, @@ -6530,8 +6530,8 @@ static bool zone_spans_last_pfn(const struct zone *zone, * * Return: pointer to contiguous pages on success, or NULL if not successful. */ -struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, - int nid, nodemask_t *nodemask) +struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, + int nid, nodemask_t *nodemask) { unsigned long ret, pfn, flags; struct zonelist *zonelist; -- cgit From be25d1d4e822a703b19043baff916aec6ac4362d Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:41 -0700 Subject: mm: create new codetag references during page splitting MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a high-order page is split into smaller ones, each newly split page should get its codetag. After the split each split page will be referencing the original codetag. The codetag's "bytes" counter remains the same because the amount of allocated memory has not changed, however the "calls" counter gets increased to keep the counter correct when these individual pages get freed. 
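
A short worked example of the accounting described above (illustrative numbers
only; the counters are the per-CPU sums reported by alloc_tag_read()):

	/*
	 * One order-3 allocation from a single tagged call site, later split
	 * into eight order-0 pages:
	 *
	 *   after alloc_pages(gfp, 3):             calls == 1, bytes == 8 * PAGE_SIZE
	 *   after pgalloc_tag_split(page, 1 << 3): calls == 8, bytes == 8 * PAGE_SIZE
	 *                                          (seven new refs point at the original tag)
	 *   each free of one split page:           calls -= 1, bytes -= PAGE_SIZE
	 *
	 * so both counters return to zero once all eight pages are freed.
	 */
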
Link: https://lkml.kernel.org/r/20240321163705.3067592-20-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Vlastimil Babka Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Kent Overstreet Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/alloc_tag.h | 9 +++++++++ include/linux/pgalloc_tag.h | 30 ++++++++++++++++++++++++++++++ mm/huge_memory.c | 2 ++ mm/page_alloc.c | 2 ++ 4 files changed, 43 insertions(+) (limited to 'include/linux') diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index 8932187124da..2aea7afa1fab 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -117,6 +117,15 @@ static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag this_cpu_inc(tag->counters->calls); } +static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag) +{ + alloc_tag_add_check(ref, tag); + if (!ref || !tag) + return; + + __alloc_tag_ref_set(ref, tag); +} + static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes) { alloc_tag_add_check(ref, tag); diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index 66bd021eb46e..093edf98c3d7 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -67,11 +67,41 @@ static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) } } +static inline void pgalloc_tag_split(struct page *page, unsigned int nr) +{ + int i; + struct page_ext *page_ext; + union codetag_ref *ref; + struct alloc_tag *tag; + + if (!mem_alloc_profiling_enabled()) + return; + + page_ext = page_ext_get(page); + if (unlikely(!page_ext)) + return; + + ref = codetag_ref_from_page_ext(page_ext); + if (!ref->ct) + goto out; + + tag = ct_to_alloc_tag(ref->ct); + page_ext = page_ext_next(page_ext); + for (i = 1; i < nr; i++) { + /* Set new reference to point to the original tag */ + alloc_tag_ref_set(codetag_ref_from_page_ext(page_ext), tag); + page_ext = page_ext_next(page_ext); + } +out: + page_ext_put(page_ext); +} + #else /* CONFIG_MEM_ALLOC_PROFILING */ static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, unsigned int nr) {} static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} +static inline void pgalloc_tag_split(struct page *page, unsigned int nr) {} #endif /* CONFIG_MEM_ALLOC_PROFILING */ diff --git a/mm/huge_memory.c b/mm/huge_memory.c index dd88bb3ea196..25191ab70631 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include @@ -2946,6 +2947,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, /* Caller disabled irqs, so they are still disabled here */ split_page_owner(head, order, new_order); + pgalloc_tag_split(head, 1 << order); /* See comment in __split_huge_page_tail() */ if (folio_test_anon(folio)) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d3afaf8f983a..e453ee22d489 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2630,6 +2630,7 @@ void split_page(struct page *page, unsigned int order) for (i = 1; i < (1 << order); i++) set_page_refcounted(page + i); split_page_owner(page, order, 0); + pgalloc_tag_split(page, 1 << order); split_page_memcg(page, order, 0); } EXPORT_SYMBOL_GPL(split_page); @@ -4827,6 +4828,7 @@ static 
void *make_alloc_exact(unsigned long addr, unsigned int order, struct page *last = page + nr; split_page_owner(page, order, 0); + pgalloc_tag_split(page, 1 << order); split_page_memcg(page, order, 0); while (page < --last) set_page_refcounted(last); -- cgit From cc92eba1c88b1f74e0f044df2738f4e4b22f1e4e Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:42 -0700 Subject: mm: fix non-compound multi-order memory accounting in __free_pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a non-compound multi-order page is freed, it is possible that a speculative reference keeps the page pinned. In this case we free all pages except for the first page, which will be freed later by the last put_page(). However the page passed to put_page() is indistinguishable from an order-0 page, so it cannot do the accounting, just as it cannot free the subsequent pages. Do the accounting here, where we free the pages. Link: https://lkml.kernel.org/r/20240321163705.3067592-21-surenb@google.com Reported-by: Vlastimil Babka Signed-off-by: Suren Baghdasaryan Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Kent Overstreet Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/pgalloc_tag.h | 24 ++++++++++++++++++++++++ mm/page_alloc.c | 5 ++++- 2 files changed, 28 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index 093edf98c3d7..50d212330bbb 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -96,12 +96,36 @@ out: page_ext_put(page_ext); } +static inline struct alloc_tag *pgalloc_tag_get(struct page *page) +{ + struct alloc_tag *tag = NULL; + + if (mem_alloc_profiling_enabled()) { + union codetag_ref *ref = get_page_tag_ref(page); + + alloc_tag_sub_check(ref); + if (ref && ref->ct) + tag = ct_to_alloc_tag(ref->ct); + put_page_tag_ref(ref); + } + + return tag; +} + +static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) +{ + if (mem_alloc_profiling_enabled() && tag) + this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); +} + #else /* CONFIG_MEM_ALLOC_PROFILING */ static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, unsigned int nr) {} static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} static inline void pgalloc_tag_split(struct page *page, unsigned int nr) {} +static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; } +static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} #endif /* CONFIG_MEM_ALLOC_PROFILING */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e453ee22d489..e1241ecef271 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4664,12 +4664,15 @@ void __free_pages(struct page *page, unsigned int order) { /* get PageHead before we drop reference */ int head = PageHead(page); + struct alloc_tag *tag = pgalloc_tag_get(page); if (put_page_testzero(page)) free_the_page(page, order); - else if (!head) + else if (!head) { + pgalloc_tag_sub_pages(tag, (1 << order) - 1); while (order-- > 0) free_the_page(page + (1 << order), order); + } } EXPORT_SYMBOL(__free_pages); -- cgit From c789b5fe38f386bbdec8c9c41e9af52d3775e16c Mon Sep 17 00:00:00 2001 From: Suren 
Baghdasaryan Date: Thu, 21 Mar 2024 09:36:44 -0700 Subject: lib: add codetag reference into slabobj_ext MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To store code tag for every slab object, a codetag reference is embedded into slabobj_ext when CONFIG_MEM_ALLOC_PROFILING=y. Link: https://lkml.kernel.org/r/20240321163705.3067592-23-surenb@google.com Signed-off-by: Suren Baghdasaryan Co-developed-by: Kent Overstreet Signed-off-by: Kent Overstreet Reviewed-by: Vlastimil Babka Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 5 +++++ lib/Kconfig.debug | 1 + 2 files changed, 6 insertions(+) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 12afc2647cf0..24a6df30be49 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1653,7 +1653,12 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, * if MEMCG_DATA_OBJEXTS is set. */ struct slabobj_ext { +#ifdef CONFIG_MEMCG_KMEM struct obj_cgroup *objcg; +#endif +#ifdef CONFIG_MEM_ALLOC_PROFILING + union codetag_ref ref; +#endif } __aligned(8); static inline void __inc_lruvec_kmem_state(void *p, enum node_stat_item idx) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index a8f77de5e8ea..9d2685ec4592 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -979,6 +979,7 @@ config MEM_ALLOC_PROFILING depends on !DEBUG_FORCE_WEAK_PER_CPU select CODE_TAGGING select PAGE_EXTENSION + select SLAB_OBJ_EXT help Track allocation source code and record total allocation size initiated at that code location. The mechanism can be used to track -- cgit From 7bd230a26648ac68ab3731ebbc449090f0ac6a37 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:47 -0700 Subject: mm/slab: enable slab allocation tagging for kmalloc and friends MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Redefine kmalloc, krealloc, kzalloc, kcalloc, etc. to record allocations and deallocations done by these functions. 
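
A minimal usage sketch (the function below and its buffer are made up, not
part of the patch) showing what the redefinition means for an ordinary call
site:

	static int example_init(void)		/* hypothetical call site */
	{
		void *buf;

		/*
		 * kmalloc() now expands through alloc_hooks() to
		 * kmalloc_noprof(), so this line gets its own static
		 * alloc_tag keyed to this file:line.
		 */
		buf = kmalloc(512, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		/*
		 * While the buffer is live, the tag for this call site is
		 * expected to show up in /proc/allocinfo with calls == 1 and
		 * a non-zero byte count (formatting per alloc_tag_to_text()).
		 */
		kfree(buf);	/* kfree() drops the tag's counters again */
		return 0;
	}
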
[surenb@google.com: undo _noprof additions in the documentation] Link: https://lkml.kernel.org/r/20240326231453.1206227-7-surenb@google.com [rdunlap@infradead.org: fix kcalloc() kernel-doc warnings] Link: https://lkml.kernel.org/r/20240327044649.9199-1-rdunlap@infradead.org Link: https://lkml.kernel.org/r/20240321163705.3067592-26-surenb@google.com Signed-off-by: Suren Baghdasaryan Co-developed-by: Kent Overstreet Signed-off-by: Kent Overstreet Signed-off-by: Randy Dunlap Reviewed-by: Kees Cook Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/fortify-string.h | 5 +- include/linux/slab.h | 169 +++++++++++++++++++++-------------------- include/linux/string.h | 4 +- mm/slab_common.c | 6 +- mm/slub.c | 50 ++++++------ mm/util.c | 16 ++-- 6 files changed, 127 insertions(+), 123 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index 6aeebe0a6777..b24d62bad0b3 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -725,9 +725,9 @@ __FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size) return __real_memchr_inv(p, c, size); } -extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup) +extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup_noprof) __realloc_size(2); -__FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp) +__FORTIFY_INLINE void *kmemdup_noprof(const void * const POS0 p, size_t size, gfp_t gfp) { const size_t p_size = __struct_size(p); @@ -737,6 +737,7 @@ __FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size, NULL); return __real_kmemdup(p, size, gfp); } +#define kmemdup(...) alloc_hooks(kmemdup_noprof(__VA_ARGS__)) /** * strcpy - Copy a string into another string buffer diff --git a/include/linux/slab.h b/include/linux/slab.h index 68ff754b85a4..c0be1cd03cf6 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -271,7 +271,10 @@ int kmem_cache_shrink(struct kmem_cache *s); /* * Common kmalloc functions provided by all allocators */ -void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2); +void * __must_check krealloc_noprof(const void *objp, size_t new_size, + gfp_t flags) __realloc_size(2); +#define krealloc(...) alloc_hooks(krealloc_noprof(__VA_ARGS__)) + void kfree(const void *objp); void kfree_sensitive(const void *objp); size_t __ksize(const void *objp); @@ -523,7 +526,10 @@ static __always_inline unsigned int __kmalloc_index(size_t size, static_assert(PAGE_SHIFT <= 20); #define kmalloc_index(s) __kmalloc_index(s, true) -void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1); +#include + +void *__kmalloc_noprof(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1); +#define __kmalloc(...) 
alloc_hooks(__kmalloc_noprof(__VA_ARGS__)) /** * kmem_cache_alloc - Allocate an object @@ -535,9 +541,14 @@ void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_siz * * Return: pointer to the new object or %NULL in case of error */ -void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc; -void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, - gfp_t gfpflags) __assume_slab_alignment __malloc; +void *kmem_cache_alloc_noprof(struct kmem_cache *cachep, + gfp_t flags) __assume_slab_alignment __malloc; +#define kmem_cache_alloc(...) alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__)) + +void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru, + gfp_t gfpflags) __assume_slab_alignment __malloc; +#define kmem_cache_alloc_lru(...) alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__)) + void kmem_cache_free(struct kmem_cache *s, void *objp); /* @@ -548,29 +559,40 @@ void kmem_cache_free(struct kmem_cache *s, void *objp); * Note that interrupts must be enabled when calling these functions. */ void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p); -int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p); + +int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, void **p); +#define kmem_cache_alloc_bulk(...) alloc_hooks(kmem_cache_alloc_bulk_noprof(__VA_ARGS__)) static __always_inline void kfree_bulk(size_t size, void **p) { kmem_cache_free_bulk(NULL, size, p); } -void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment +void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __alloc_size(1); -void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment - __malloc; +#define __kmalloc_node(...) alloc_hooks(__kmalloc_node_noprof(__VA_ARGS__)) + +void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags, + int node) __assume_slab_alignment __malloc; +#define kmem_cache_alloc_node(...) alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__)) -void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size) +void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t flags, size_t size) __assume_kmalloc_alignment __alloc_size(3); -void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, - int node, size_t size) __assume_kmalloc_alignment +void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags, + int node, size_t size) __assume_kmalloc_alignment __alloc_size(4); -void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment +#define kmalloc_trace(...) alloc_hooks(kmalloc_trace_noprof(__VA_ARGS__)) + +#define kmalloc_node_trace(...) alloc_hooks(kmalloc_node_trace_noprof(__VA_ARGS__)) + +void *kmalloc_large_noprof(size_t size, gfp_t flags) __assume_page_alignment __alloc_size(1); +#define kmalloc_large(...) alloc_hooks(kmalloc_large_noprof(__VA_ARGS__)) -void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment +void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) __assume_page_alignment __alloc_size(1); +#define kmalloc_large_node(...) alloc_hooks(kmalloc_large_node_noprof(__VA_ARGS__)) /** * kmalloc - allocate kernel memory @@ -626,37 +648,39 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align * Try really hard to succeed the allocation but fail * eventually. 
*/ -static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags) +static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags) { if (__builtin_constant_p(size) && size) { unsigned int index; if (size > KMALLOC_MAX_CACHE_SIZE) - return kmalloc_large(size, flags); + return kmalloc_large_noprof(size, flags); index = kmalloc_index(size); - return kmalloc_trace( + return kmalloc_trace_noprof( kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index], flags, size); } - return __kmalloc(size, flags); + return __kmalloc_noprof(size, flags); } +#define kmalloc(...) alloc_hooks(kmalloc_noprof(__VA_ARGS__)) -static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node) +static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node) { if (__builtin_constant_p(size) && size) { unsigned int index; if (size > KMALLOC_MAX_CACHE_SIZE) - return kmalloc_large_node(size, flags, node); + return kmalloc_large_node_noprof(size, flags, node); index = kmalloc_index(size); - return kmalloc_node_trace( + return kmalloc_node_trace_noprof( kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index], flags, node, size); } - return __kmalloc_node(size, flags, node); + return __kmalloc_node_noprof(size, flags, node); } +#define kmalloc_node(...) alloc_hooks(kmalloc_node_noprof(__VA_ARGS__)) /** * kmalloc_array - allocate memory for an array. @@ -664,16 +688,17 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ -static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags) +static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t size, gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; if (__builtin_constant_p(n) && __builtin_constant_p(size)) - return kmalloc(bytes, flags); - return __kmalloc(bytes, flags); + return kmalloc_noprof(bytes, flags); + return kmalloc_noprof(bytes, flags); } +#define kmalloc_array(...) alloc_hooks(kmalloc_array_noprof(__VA_ARGS__)) /** * krealloc_array - reallocate memory for an array. @@ -682,18 +707,19 @@ static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_ * @new_size: new size of a single member of the array * @flags: the type of memory to allocate (see kmalloc) */ -static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p, - size_t new_n, - size_t new_size, - gfp_t flags) +static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p, + size_t new_n, + size_t new_size, + gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(new_n, new_size, &bytes))) return NULL; - return krealloc(p, bytes, flags); + return krealloc_noprof(p, bytes, flags); } +#define krealloc_array(...) alloc_hooks(krealloc_array_noprof(__VA_ARGS__)) /** * kcalloc - allocate memory for an array. The memory is set to zero. @@ -701,16 +727,12 @@ static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p, * @size: element size. * @flags: the type of memory to allocate (see kmalloc). 
*/ -static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags) -{ - return kmalloc_array(n, size, flags | __GFP_ZERO); -} +#define kcalloc(n, size, flags) kmalloc_array(n, size, (flags) | __GFP_ZERO) -void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node, +void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node, unsigned long caller) __alloc_size(1); -#define kmalloc_node_track_caller(size, flags, node) \ - __kmalloc_node_track_caller(size, flags, node, \ - _RET_IP_) +#define kmalloc_node_track_caller(...) \ + alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_)) /* * kmalloc_track_caller is a special version of kmalloc that records the @@ -720,11 +742,9 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node, * allocator where we care about the real place the memory allocation * request comes from. */ -#define kmalloc_track_caller(size, flags) \ - __kmalloc_node_track_caller(size, flags, \ - NUMA_NO_NODE, _RET_IP_) +#define kmalloc_track_caller(...) kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE) -static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags, +static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node) { size_t bytes; @@ -732,75 +752,56 @@ static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; if (__builtin_constant_p(n) && __builtin_constant_p(size)) - return kmalloc_node(bytes, flags, node); - return __kmalloc_node(bytes, flags, node); + return kmalloc_node_noprof(bytes, flags, node); + return __kmalloc_node_noprof(bytes, flags, node); } +#define kmalloc_array_node(...) alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__)) -static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node) -{ - return kmalloc_array_node(n, size, flags | __GFP_ZERO, node); -} +#define kcalloc_node(_n, _size, _flags, _node) \ + kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node) /* * Shortcuts */ -static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags) -{ - return kmem_cache_alloc(k, flags | __GFP_ZERO); -} +#define kmem_cache_zalloc(_k, _flags) kmem_cache_alloc(_k, (_flags)|__GFP_ZERO) /** * kzalloc - allocate memory. The memory is set to zero. * @size: how many bytes of memory are required. * @flags: the type of memory to allocate (see kmalloc). */ -static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags) +static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags) { - return kmalloc(size, flags | __GFP_ZERO); + return kmalloc_noprof(size, flags | __GFP_ZERO); } +#define kzalloc(...) alloc_hooks(kzalloc_noprof(__VA_ARGS__)) +#define kzalloc_node(_size, _flags, _node) kmalloc_node(_size, (_flags)|__GFP_ZERO, _node) -/** - * kzalloc_node - allocate zeroed memory from a particular memory node. - * @size: how many bytes of memory are required. - * @flags: the type of memory to allocate (see kmalloc). - * @node: memory node from which to allocate - */ -static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node) -{ - return kmalloc_node(size, flags | __GFP_ZERO, node); -} +extern void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node) __alloc_size(1); +#define kvmalloc_node(...) 
alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__)) -extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1); -static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags) -{ - return kvmalloc_node(size, flags, NUMA_NO_NODE); -} -static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node) -{ - return kvmalloc_node(size, flags | __GFP_ZERO, node); -} -static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags) -{ - return kvmalloc(size, flags | __GFP_ZERO); -} +#define kvmalloc(_size, _flags) kvmalloc_node(_size, _flags, NUMA_NO_NODE) +#define kvzalloc(_size, _flags) kvmalloc(_size, _flags|__GFP_ZERO) + +#define kvzalloc_node(_size, _flags, _node) kvmalloc_node(_size, _flags|__GFP_ZERO, _node) -static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags) +static inline __alloc_size(1, 2) void *kvmalloc_array_noprof(size_t n, size_t size, gfp_t flags) { size_t bytes; if (unlikely(check_mul_overflow(n, size, &bytes))) return NULL; - return kvmalloc(bytes, flags); + return kvmalloc_node_noprof(bytes, flags, NUMA_NO_NODE); } -static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags) -{ - return kvmalloc_array(n, size, flags | __GFP_ZERO); -} +#define kvmalloc_array(...) alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__)) +#define kvcalloc(_n, _size, _flags) kvmalloc_array(_n, _size, _flags|__GFP_ZERO) -extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags) +extern void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags) __realloc_size(3); +#define kvrealloc(...) alloc_hooks(kvrealloc_noprof(__VA_ARGS__)) + extern void kvfree(const void *addr); DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T)) diff --git a/include/linux/string.h b/include/linux/string.h index 9ba8b4597009..793c27ad7c0d 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -282,7 +282,9 @@ extern void kfree_const(const void *x); extern char *kstrdup(const char *s, gfp_t gfp) __malloc; extern const char *kstrdup_const(const char *s, gfp_t gfp); extern char *kstrndup(const char *s, size_t len, gfp_t gfp); -extern void *kmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2); +extern void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp) __realloc_size(2); +#define kmemdup(...) alloc_hooks(kmemdup_noprof(__VA_ARGS__)) + extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2); extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp); extern void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp); diff --git a/mm/slab_common.c b/mm/slab_common.c index f5234672f03c..3179a6aeffc5 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -1189,7 +1189,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags) return (void *)p; } - ret = kmalloc_track_caller(new_size, flags); + ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_); if (ret && p) { /* Disable KASAN checks as the object's redzone is accessed. 
*/ kasan_disable_current(); @@ -1213,7 +1213,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags) * * Return: pointer to the allocated memory or %NULL in case of error */ -void *krealloc(const void *p, size_t new_size, gfp_t flags) +void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags) { void *ret; @@ -1228,7 +1228,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags) return ret; } -EXPORT_SYMBOL(krealloc); +EXPORT_SYMBOL(krealloc_noprof); /** * kfree_sensitive - Clear sensitive information in memory before freeing diff --git a/mm/slub.c b/mm/slub.c index 5840ab963319..0fc891d44cff 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -4002,7 +4002,7 @@ out: return object; } -void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) +void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags) { void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_, s->object_size); @@ -4011,9 +4011,9 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) return ret; } -EXPORT_SYMBOL(kmem_cache_alloc); +EXPORT_SYMBOL(kmem_cache_alloc_noprof); -void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, +void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru, gfp_t gfpflags) { void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_, @@ -4023,7 +4023,7 @@ void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru, return ret; } -EXPORT_SYMBOL(kmem_cache_alloc_lru); +EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof); /** * kmem_cache_alloc_node - Allocate an object on the specified node @@ -4038,7 +4038,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_lru); * * Return: pointer to the new object or %NULL in case of error */ -void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) +void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node) { void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size); @@ -4046,7 +4046,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node) return ret; } -EXPORT_SYMBOL(kmem_cache_alloc_node); +EXPORT_SYMBOL(kmem_cache_alloc_node_noprof); /* * To avoid unnecessary overhead, we pass through large allocation requests @@ -4063,7 +4063,7 @@ static void *__kmalloc_large_node(size_t size, gfp_t flags, int node) flags = kmalloc_fix_flags(flags); flags |= __GFP_COMP; - folio = (struct folio *)alloc_pages_node(node, flags, order); + folio = (struct folio *)alloc_pages_node_noprof(node, flags, order); if (folio) { ptr = folio_address(folio); lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, @@ -4078,7 +4078,7 @@ static void *__kmalloc_large_node(size_t size, gfp_t flags, int node) return ptr; } -void *kmalloc_large(size_t size, gfp_t flags) +void *kmalloc_large_noprof(size_t size, gfp_t flags) { void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE); @@ -4086,9 +4086,9 @@ void *kmalloc_large(size_t size, gfp_t flags) flags, NUMA_NO_NODE); return ret; } -EXPORT_SYMBOL(kmalloc_large); +EXPORT_SYMBOL(kmalloc_large_noprof); -void *kmalloc_large_node(size_t size, gfp_t flags, int node) +void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) { void *ret = __kmalloc_large_node(size, flags, node); @@ -4096,7 +4096,7 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) flags, node); return ret; } -EXPORT_SYMBOL(kmalloc_large_node); +EXPORT_SYMBOL(kmalloc_large_node_noprof); static __always_inline void *__do_kmalloc_node(size_t size, gfp_t flags, int node, @@ -4123,26 +4123,26 @@ void 
*__do_kmalloc_node(size_t size, gfp_t flags, int node, return ret; } -void *__kmalloc_node(size_t size, gfp_t flags, int node) +void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node) { return __do_kmalloc_node(size, flags, node, _RET_IP_); } -EXPORT_SYMBOL(__kmalloc_node); +EXPORT_SYMBOL(__kmalloc_node_noprof); -void *__kmalloc(size_t size, gfp_t flags) +void *__kmalloc_noprof(size_t size, gfp_t flags) { return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_); } -EXPORT_SYMBOL(__kmalloc); +EXPORT_SYMBOL(__kmalloc_noprof); -void *__kmalloc_node_track_caller(size_t size, gfp_t flags, - int node, unsigned long caller) +void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, + int node, unsigned long caller) { return __do_kmalloc_node(size, flags, node, caller); } -EXPORT_SYMBOL(__kmalloc_node_track_caller); +EXPORT_SYMBOL(kmalloc_node_track_caller_noprof); -void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) +void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size) { void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_, size); @@ -4152,9 +4152,9 @@ void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) ret = kasan_kmalloc(s, ret, size, gfpflags); return ret; } -EXPORT_SYMBOL(kmalloc_trace); +EXPORT_SYMBOL(kmalloc_trace_noprof); -void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, +void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags, int node, size_t size) { void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size); @@ -4164,7 +4164,7 @@ void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags, ret = kasan_kmalloc(s, ret, size, gfpflags); return ret; } -EXPORT_SYMBOL(kmalloc_node_trace); +EXPORT_SYMBOL(kmalloc_node_trace_noprof); static noinline void free_to_partial_list( struct kmem_cache *s, struct slab *slab, @@ -4769,8 +4769,8 @@ error: #endif /* CONFIG_SLUB_TINY */ /* Note that interrupts must be enabled when calling this function. */ -int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, - void **p) +int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, + void **p) { int i; struct obj_cgroup *objcg = NULL; @@ -4798,7 +4798,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, return i; } -EXPORT_SYMBOL(kmem_cache_alloc_bulk); +EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof); /* diff --git a/mm/util.c b/mm/util.c index 669397235787..cee3563d2071 100644 --- a/mm/util.c +++ b/mm/util.c @@ -124,16 +124,16 @@ EXPORT_SYMBOL(kstrndup); * Return: newly allocated copy of @src or %NULL in case of error, * result is physically contiguous. Use kfree() to free. */ -void *kmemdup(const void *src, size_t len, gfp_t gfp) +void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp) { void *p; - p = kmalloc_track_caller(len, gfp); + p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_); if (p) memcpy(p, src, len); return p; } -EXPORT_SYMBOL(kmemdup); +EXPORT_SYMBOL(kmemdup_noprof); /** * kmemdup_array - duplicate a given array. 
@@ -609,7 +609,7 @@ EXPORT_SYMBOL(vm_mmap); * * Return: pointer to the allocated memory of %NULL in case of failure */ -void *kvmalloc_node(size_t size, gfp_t flags, int node) +void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node) { gfp_t kmalloc_flags = flags; void *ret; @@ -631,7 +631,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node) kmalloc_flags &= ~__GFP_NOFAIL; } - ret = kmalloc_node(size, kmalloc_flags, node); + ret = kmalloc_node_noprof(size, kmalloc_flags, node); /* * It doesn't really make sense to fallback to vmalloc for sub page @@ -660,7 +660,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node) flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, node, __builtin_return_address(0)); } -EXPORT_SYMBOL(kvmalloc_node); +EXPORT_SYMBOL(kvmalloc_node_noprof); /** * kvfree() - Free memory. @@ -699,7 +699,7 @@ void kvfree_sensitive(const void *addr, size_t len) } EXPORT_SYMBOL(kvfree_sensitive); -void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags) +void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags) { void *newp; @@ -712,7 +712,7 @@ void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags) kvfree(p); return newp; } -EXPORT_SYMBOL(kvrealloc); +EXPORT_SYMBOL(kvrealloc_noprof); /** * __vmalloc_array - allocate memory for a virtually contiguous array. -- cgit From e26d8769da6d4c7319fdd4ddb497caa7d87a6c5c Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 21 Mar 2024 09:36:48 -0700 Subject: mempool: hook up to memory allocation profiling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds hooks to mempools for correctly annotating mempool-backed allocations at the correct source line, so they show up correctly in /sys/kernel/debug/allocations. Various inline functions are converted to wrappers so that we can invoke alloc_hooks() in fewer places. 
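As a concrete illustration of the resulting API, a kmalloc-backed pool on the driver side keeps exactly the same calls; only the attribution changes. The foo_* names below are made up for this sketch and assume CONFIG_MEM_ALLOC_PROFILING=y:

    /* Hypothetical mempool user -- illustrative only. */
    #include <linux/mempool.h>
    #include <linux/slab.h>

    #define FOO_POOL_MIN_NR   8
    #define FOO_ELEM_SIZE     256

    static mempool_t *foo_pool;

    static int foo_pool_init(void)
    {
            /*
             * mempool_create_kmalloc_pool() is now a macro chain ending in
             * alloc_hooks(mempool_create_node_noprof(...)), so the min_nr
             * preallocated elements are charged to this call site rather
             * than to mm/mempool.c.
             */
            foo_pool = mempool_create_kmalloc_pool(FOO_POOL_MIN_NR, FOO_ELEM_SIZE);
            return foo_pool ? 0 : -ENOMEM;
    }

    static void *foo_buf_get(gfp_t gfp)
    {
            /*
             * mempool_alloc() likewise expands to
             * alloc_hooks(mempool_alloc_noprof(...)); if the pool must
             * refill through mempool_kmalloc() -> kmalloc_noprof(), the new
             * element is still accounted to this caller, not to the
             * mempool core.
             */
            return mempool_alloc(foo_pool, gfp);
    }

    static void foo_buf_put(void *buf)
    {
            mempool_free(buf, foo_pool);
    }

    static void foo_pool_exit(void)
    {
            mempool_destroy(foo_pool);
    }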
[surenb@google.com: undo _noprof additions in the documentation] Link: https://lkml.kernel.org/r/20240326231453.1206227-4-surenb@google.com [surenb@google.com: add missing mempool_create_node documentation] Link: https://lkml.kernel.org/r/20240402180835.1661905-1-surenb@google.com Link: https://lkml.kernel.org/r/20240321163705.3067592-27-surenb@google.com Signed-off-by: Kent Overstreet Signed-off-by: Suren Baghdasaryan Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/mempool.h | 73 ++++++++++++++++++++++++------------------------- mm/mempool.c | 36 ++++++++++-------------- 2 files changed, 50 insertions(+), 59 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mempool.h b/include/linux/mempool.h index 16c5cc807ff6..7b151441341b 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h @@ -5,6 +5,8 @@ #ifndef _LINUX_MEMPOOL_H #define _LINUX_MEMPOOL_H +#include +#include #include #include @@ -39,18 +41,32 @@ void mempool_exit(mempool_t *pool); int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int node_id); -int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, + +int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data); +#define mempool_init(...) \ + alloc_hooks(mempool_init_noprof(__VA_ARGS__)) extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data); -extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, + +extern mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn, mempool_free_t *free_fn, void *pool_data, gfp_t gfp_mask, int nid); +#define mempool_create_node(...) \ + alloc_hooks(mempool_create_node_noprof(__VA_ARGS__)) + +#define mempool_create(_min_nr, _alloc_fn, _free_fn, _pool_data) \ + mempool_create_node(_min_nr, _alloc_fn, _free_fn, _pool_data, \ + GFP_KERNEL, NUMA_NO_NODE) extern int mempool_resize(mempool_t *pool, int new_min_nr); extern void mempool_destroy(mempool_t *pool); -extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc; + +extern void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) __malloc; +#define mempool_alloc(...) 
\ + alloc_hooks(mempool_alloc_noprof(__VA_ARGS__)) + extern void *mempool_alloc_preallocated(mempool_t *pool) __malloc; extern void mempool_free(void *element, mempool_t *pool); @@ -62,19 +78,10 @@ extern void mempool_free(void *element, mempool_t *pool); void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); void mempool_free_slab(void *element, void *pool_data); -static inline int -mempool_init_slab_pool(mempool_t *pool, int min_nr, struct kmem_cache *kc) -{ - return mempool_init(pool, min_nr, mempool_alloc_slab, - mempool_free_slab, (void *) kc); -} - -static inline mempool_t * -mempool_create_slab_pool(int min_nr, struct kmem_cache *kc) -{ - return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, - (void *) kc); -} +#define mempool_init_slab_pool(_pool, _min_nr, _kc) \ + mempool_init(_pool, (_min_nr), mempool_alloc_slab, mempool_free_slab, (void *)(_kc)) +#define mempool_create_slab_pool(_min_nr, _kc) \ + mempool_create((_min_nr), mempool_alloc_slab, mempool_free_slab, (void *)(_kc)) /* * a mempool_alloc_t and a mempool_free_t to kmalloc and kfree the @@ -83,17 +90,12 @@ mempool_create_slab_pool(int min_nr, struct kmem_cache *kc) void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data); void mempool_kfree(void *element, void *pool_data); -static inline int mempool_init_kmalloc_pool(mempool_t *pool, int min_nr, size_t size) -{ - return mempool_init(pool, min_nr, mempool_kmalloc, - mempool_kfree, (void *) size); -} - -static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size) -{ - return mempool_create(min_nr, mempool_kmalloc, mempool_kfree, - (void *) size); -} +#define mempool_init_kmalloc_pool(_pool, _min_nr, _size) \ + mempool_init(_pool, (_min_nr), mempool_kmalloc, mempool_kfree, \ + (void *)(unsigned long)(_size)) +#define mempool_create_kmalloc_pool(_min_nr, _size) \ + mempool_create((_min_nr), mempool_kmalloc, mempool_kfree, \ + (void *)(unsigned long)(_size)) void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data); void mempool_kvfree(void *element, void *pool_data); @@ -115,16 +117,11 @@ static inline mempool_t *mempool_create_kvmalloc_pool(int min_nr, size_t size) void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data); void mempool_free_pages(void *element, void *pool_data); -static inline int mempool_init_page_pool(mempool_t *pool, int min_nr, int order) -{ - return mempool_init(pool, min_nr, mempool_alloc_pages, - mempool_free_pages, (void *)(long)order); -} - -static inline mempool_t *mempool_create_page_pool(int min_nr, int order) -{ - return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages, - (void *)(long)order); -} +#define mempool_init_page_pool(_pool, _min_nr, _order) \ + mempool_init(_pool, (_min_nr), mempool_alloc_pages, \ + mempool_free_pages, (void *)(long)(_order)) +#define mempool_create_page_pool(_min_nr, _order) \ + mempool_create((_min_nr), mempool_alloc_pages, \ + mempool_free_pages, (void *)(long)(_order)) #endif /* _LINUX_MEMPOOL_H */ diff --git a/mm/mempool.c b/mm/mempool.c index 076c736f5f1f..6ece63a00acf 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -240,22 +240,24 @@ EXPORT_SYMBOL(mempool_init_node); * * Return: %0 on success, negative error code otherwise. 
*/ -int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data) +int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data) { return mempool_init_node(pool, min_nr, alloc_fn, free_fn, pool_data, GFP_KERNEL, NUMA_NO_NODE); } -EXPORT_SYMBOL(mempool_init); +EXPORT_SYMBOL(mempool_init_noprof); /** - * mempool_create - create a memory pool + * mempool_create_node - create a memory pool * @min_nr: the minimum number of elements guaranteed to be * allocated for this pool. * @alloc_fn: user-defined element-allocation function. * @free_fn: user-defined element-freeing function. * @pool_data: optional private data available to the user-defined functions. + * @gfp_mask: memory allocation flags + * @node_id: numa node to allocate on * * this function creates and allocates a guaranteed size, preallocated * memory pool. The pool can be used from the mempool_alloc() and mempool_free() @@ -265,17 +267,9 @@ EXPORT_SYMBOL(mempool_init); * * Return: pointer to the created memory pool object or %NULL on error. */ -mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data) -{ - return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data, - GFP_KERNEL, NUMA_NO_NODE); -} -EXPORT_SYMBOL(mempool_create); - -mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, - mempool_free_t *free_fn, void *pool_data, - gfp_t gfp_mask, int node_id) +mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn, + mempool_free_t *free_fn, void *pool_data, + gfp_t gfp_mask, int node_id) { mempool_t *pool; @@ -291,7 +285,7 @@ mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, return pool; } -EXPORT_SYMBOL(mempool_create_node); +EXPORT_SYMBOL(mempool_create_node_noprof); /** * mempool_resize - resize an existing memory pool @@ -387,7 +381,7 @@ EXPORT_SYMBOL(mempool_resize); * * Return: pointer to the allocated element or %NULL on error. 
*/ -void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) +void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) { void *element; unsigned long flags; @@ -454,7 +448,7 @@ repeat_alloc: finish_wait(&pool->wait, &wait); goto repeat_alloc; } -EXPORT_SYMBOL(mempool_alloc); +EXPORT_SYMBOL(mempool_alloc_noprof); /** * mempool_alloc_preallocated - allocate an element from preallocated elements @@ -562,7 +556,7 @@ void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data) { struct kmem_cache *mem = pool_data; VM_BUG_ON(mem->ctor); - return kmem_cache_alloc(mem, gfp_mask); + return kmem_cache_alloc_noprof(mem, gfp_mask); } EXPORT_SYMBOL(mempool_alloc_slab); @@ -580,7 +574,7 @@ EXPORT_SYMBOL(mempool_free_slab); void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data) { size_t size = (size_t)pool_data; - return kmalloc(size, gfp_mask); + return kmalloc_noprof(size, gfp_mask); } EXPORT_SYMBOL(mempool_kmalloc); @@ -610,7 +604,7 @@ EXPORT_SYMBOL(mempool_kvfree); void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data) { int order = (int)(long)pool_data; - return alloc_pages(gfp_mask, order); + return alloc_pages_noprof(gfp_mask, order); } EXPORT_SYMBOL(mempool_alloc_pages); -- cgit From 24e44cc22aa3112082f2ee23137d048c73ca96d5 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:51 -0700 Subject: mm: percpu: enable per-cpu allocation tagging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Redefine __alloc_percpu, __alloc_percpu_gfp and __alloc_reserved_percpu to record allocations and deallocations done by these functions. [surenb@google.com: undo _noprof additions in the documentation] Link: https://lkml.kernel.org/r/20240326231453.1206227-6-surenb@google.com Link: https://lkml.kernel.org/r/20240321163705.3067592-30-surenb@google.com Signed-off-by: Kent Overstreet Signed-off-by: Suren Baghdasaryan Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/percpu.h | 23 +++++++++++++------ mm/percpu.c | 62 +++++--------------------------------------------- 2 files changed, 22 insertions(+), 63 deletions(-) (limited to 'include/linux') diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 9dfa7b526e70..13a82f11e4fd 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -2,6 +2,7 @@ #ifndef __LINUX_PERCPU_H #define __LINUX_PERCPU_H +#include #include #include #include @@ -9,6 +10,7 @@ #include #include #include +#include #include @@ -125,7 +127,6 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn); #endif -extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __alloc_size(1); extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr); extern bool is_kernel_percpu_address(unsigned long addr); @@ -133,14 +134,16 @@ extern bool is_kernel_percpu_address(unsigned long addr); extern void __init setup_per_cpu_areas(void); #endif -extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1); -extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1); -extern void free_percpu(void __percpu *__pdata); +extern void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool 
reserved, + gfp_t gfp) __alloc_size(1); extern size_t pcpu_alloc_size(void __percpu *__pdata); -DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T)) - -extern phys_addr_t per_cpu_ptr_to_phys(void *addr); +#define __alloc_percpu_gfp(_size, _align, _gfp) \ + alloc_hooks(pcpu_alloc_noprof(_size, _align, false, _gfp)) +#define __alloc_percpu(_size, _align) \ + alloc_hooks(pcpu_alloc_noprof(_size, _align, false, GFP_KERNEL)) +#define __alloc_reserved_percpu(_size, _align) \ + alloc_hooks(pcpu_alloc_noprof(_size, _align, true, GFP_KERNEL)) #define alloc_percpu_gfp(type, gfp) \ (typeof(type) __percpu *)__alloc_percpu_gfp(sizeof(type), \ @@ -149,6 +152,12 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr); (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ __alignof__(type)) +extern void free_percpu(void __percpu *__pdata); + +DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T)) + +extern phys_addr_t per_cpu_ptr_to_phys(void *addr); + extern unsigned long pcpu_nr_pages(void); #endif /* __LINUX_PERCPU_H */ diff --git a/mm/percpu.c b/mm/percpu.c index 90e9e4004ac9..474e3683b74d 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1740,7 +1740,7 @@ static void pcpu_alloc_tag_free_hook(struct pcpu_chunk *chunk, int off, size_t s * RETURNS: * Percpu pointer to the allocated area on success, NULL on failure. */ -static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, +void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved, gfp_t gfp) { gfp_t pcpu_gfp; @@ -1907,6 +1907,8 @@ area_found: pcpu_memcg_post_alloc_hook(objcg, chunk, off, size); + pcpu_alloc_tag_alloc_hook(chunk, off, size); + return ptr; fail_unlock: @@ -1935,61 +1937,7 @@ fail: return NULL; } - -/** - * __alloc_percpu_gfp - allocate dynamic percpu area - * @size: size of area to allocate in bytes - * @align: alignment of area (max PAGE_SIZE) - * @gfp: allocation flags - * - * Allocate zero-filled percpu area of @size bytes aligned at @align. If - * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can - * be called from any context but is a lot more likely to fail. If @gfp - * has __GFP_NOWARN then no warning will be triggered on invalid or failed - * allocation requests. - * - * RETURNS: - * Percpu pointer to the allocated area on success, NULL on failure. - */ -void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) -{ - return pcpu_alloc(size, align, false, gfp); -} -EXPORT_SYMBOL_GPL(__alloc_percpu_gfp); - -/** - * __alloc_percpu - allocate dynamic percpu area - * @size: size of area to allocate in bytes - * @align: alignment of area (max PAGE_SIZE) - * - * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL). - */ -void __percpu *__alloc_percpu(size_t size, size_t align) -{ - return pcpu_alloc(size, align, false, GFP_KERNEL); -} -EXPORT_SYMBOL_GPL(__alloc_percpu); - -/** - * __alloc_reserved_percpu - allocate reserved percpu area - * @size: size of area to allocate in bytes - * @align: alignment of area (max PAGE_SIZE) - * - * Allocate zero-filled percpu area of @size bytes aligned at @align - * from reserved percpu area if arch has set it up; otherwise, - * allocation is served from the same dynamic area. Might sleep. - * Might trigger writeouts. - * - * CONTEXT: - * Does GFP_KERNEL allocation. - * - * RETURNS: - * Percpu pointer to the allocated area on success, NULL on failure. 
- */ -void __percpu *__alloc_reserved_percpu(size_t size, size_t align) -{ - return pcpu_alloc(size, align, true, GFP_KERNEL); -} +EXPORT_SYMBOL_GPL(pcpu_alloc_noprof); /** * pcpu_balance_free - manage the amount of free chunks @@ -2328,6 +2276,8 @@ void free_percpu(void __percpu *ptr) spin_lock_irqsave(&pcpu_lock, flags); size = pcpu_free_area(chunk, off); + pcpu_alloc_tag_free_hook(chunk, off, size); + pcpu_memcg_free_hook(chunk, off, size); /* -- cgit From 88ae5fb755b0d45f790b392f578924d1ca5693a7 Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 21 Mar 2024 09:36:52 -0700 Subject: mm: vmalloc: enable memory allocation profiling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This wrapps all external vmalloc allocation functions with the alloc_hooks() wrapper, and switches internal allocations to _noprof variants where appropriate, for the new memory allocation profiling feature. [surenb@google.com: arch/um: fix forward declaration for vmalloc] Link: https://lkml.kernel.org/r/20240326073750.726636-1-surenb@google.com [surenb@google.com: undo _noprof additions in the documentation] Link: https://lkml.kernel.org/r/20240326231453.1206227-5-surenb@google.com Link: https://lkml.kernel.org/r/20240321163705.3067592-31-surenb@google.com Signed-off-by: Kent Overstreet Signed-off-by: Suren Baghdasaryan Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- arch/um/include/shared/um_malloc.h | 3 +- drivers/staging/media/atomisp/pci/hmm/hmm.c | 2 +- include/linux/vmalloc.h | 60 +++++++++++++++++------ kernel/kallsyms_selftest.c | 2 +- mm/nommu.c | 56 +++++++++++----------- mm/util.c | 18 +++---- mm/vmalloc.c | 74 ++++++++++++++--------------- 7 files changed, 123 insertions(+), 92 deletions(-) (limited to 'include/linux') diff --git a/arch/um/include/shared/um_malloc.h b/arch/um/include/shared/um_malloc.h index 13da93284c2c..bf503658f08e 100644 --- a/arch/um/include/shared/um_malloc.h +++ b/arch/um/include/shared/um_malloc.h @@ -11,7 +11,8 @@ extern void *uml_kmalloc(int size, int flags); extern void kfree(const void *ptr); -extern void *vmalloc(unsigned long size); +extern void *vmalloc_noprof(unsigned long size); +#define vmalloc(...) 
vmalloc_noprof(__VA_ARGS__) extern void vfree(void *ptr); #endif /* __UM_MALLOC_H__ */ diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c index bb12644fd033..3e2899ad8517 100644 --- a/drivers/staging/media/atomisp/pci/hmm/hmm.c +++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c @@ -205,7 +205,7 @@ static ia_css_ptr __hmm_alloc(size_t bytes, enum hmm_bo_type type, } dev_dbg(atomisp_dev, "pages: 0x%08x (%zu bytes), type: %d, vmalloc %p\n", - bo->start, bytes, type, vmalloc); + bo->start, bytes, type, vmalloc_noprof); return bo->start; diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 98ea90e90439..e4a631ec430b 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -2,6 +2,8 @@ #ifndef _LINUX_VMALLOC_H #define _LINUX_VMALLOC_H +#include +#include #include #include #include @@ -138,26 +140,54 @@ extern unsigned long vmalloc_nr_pages(void); static inline unsigned long vmalloc_nr_pages(void) { return 0; } #endif -extern void *vmalloc(unsigned long size) __alloc_size(1); -extern void *vzalloc(unsigned long size) __alloc_size(1); -extern void *vmalloc_user(unsigned long size) __alloc_size(1); -extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1); -extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1); -extern void *vmalloc_32(unsigned long size) __alloc_size(1); -extern void *vmalloc_32_user(unsigned long size) __alloc_size(1); -extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1); -extern void *__vmalloc_node_range(unsigned long size, unsigned long align, +extern void *vmalloc_noprof(unsigned long size) __alloc_size(1); +#define vmalloc(...) alloc_hooks(vmalloc_noprof(__VA_ARGS__)) + +extern void *vzalloc_noprof(unsigned long size) __alloc_size(1); +#define vzalloc(...) alloc_hooks(vzalloc_noprof(__VA_ARGS__)) + +extern void *vmalloc_user_noprof(unsigned long size) __alloc_size(1); +#define vmalloc_user(...) alloc_hooks(vmalloc_user_noprof(__VA_ARGS__)) + +extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1); +#define vmalloc_node(...) alloc_hooks(vmalloc_node_noprof(__VA_ARGS__)) + +extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1); +#define vzalloc_node(...) alloc_hooks(vzalloc_node_noprof(__VA_ARGS__)) + +extern void *vmalloc_32_noprof(unsigned long size) __alloc_size(1); +#define vmalloc_32(...) alloc_hooks(vmalloc_32_noprof(__VA_ARGS__)) + +extern void *vmalloc_32_user_noprof(unsigned long size) __alloc_size(1); +#define vmalloc_32_user(...) alloc_hooks(vmalloc_32_user_noprof(__VA_ARGS__)) + +extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1); +#define __vmalloc(...) alloc_hooks(__vmalloc_noprof(__VA_ARGS__)) + +extern void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags, int node, const void *caller) __alloc_size(1); -void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, +#define __vmalloc_node_range(...) alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__)) + +void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask, int node, const void *caller) __alloc_size(1); -void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1); +#define __vmalloc_node(...) 
alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__)) + +void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1); +#define vmalloc_huge(...) alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__)) + +extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2); +#define __vmalloc_array(...) alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__)) + +extern void *vmalloc_array_noprof(size_t n, size_t size) __alloc_size(1, 2); +#define vmalloc_array(...) alloc_hooks(vmalloc_array_noprof(__VA_ARGS__)) + +extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2); +#define __vcalloc(...) alloc_hooks(__vcalloc_noprof(__VA_ARGS__)) -extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2); -extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2); -extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2); -extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2); +extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2); +#define vcalloc(...) alloc_hooks(vcalloc_noprof(__VA_ARGS__)) extern void vfree(const void *addr); extern void vfree_atomic(const void *addr); diff --git a/kernel/kallsyms_selftest.c b/kernel/kallsyms_selftest.c index 8a689b4ff4f9..2f84896a7bcb 100644 --- a/kernel/kallsyms_selftest.c +++ b/kernel/kallsyms_selftest.c @@ -82,7 +82,7 @@ static struct test_item test_items[] = { ITEM_FUNC(kallsyms_test_func_static), ITEM_FUNC(kallsyms_test_func), ITEM_FUNC(kallsyms_test_func_weak), - ITEM_FUNC(vmalloc), + ITEM_FUNC(vmalloc_noprof), ITEM_FUNC(vfree), #ifdef CONFIG_KALLSYMS_ALL ITEM_DATA(kallsyms_test_var_bss_static), diff --git a/mm/nommu.c b/mm/nommu.c index 5ec8f44e7ce9..88b646ea6d30 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -137,28 +137,28 @@ void vfree(const void *addr) } EXPORT_SYMBOL(vfree); -void *__vmalloc(unsigned long size, gfp_t gfp_mask) +void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) { /* * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc() * returns only a logical address. */ - return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM); + return kmalloc_noprof(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM); } -EXPORT_SYMBOL(__vmalloc); +EXPORT_SYMBOL(__vmalloc_noprof); -void *__vmalloc_node_range(unsigned long size, unsigned long align, +void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags, int node, const void *caller) { - return __vmalloc(size, gfp_mask); + return __vmalloc_noprof(size, gfp_mask); } -void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask, +void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask, int node, const void *caller) { - return __vmalloc(size, gfp_mask); + return __vmalloc_noprof(size, gfp_mask); } static void *__vmalloc_user_flags(unsigned long size, gfp_t flags) @@ -179,11 +179,11 @@ static void *__vmalloc_user_flags(unsigned long size, gfp_t flags) return ret; } -void *vmalloc_user(unsigned long size) +void *vmalloc_user_noprof(unsigned long size) { return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO); } -EXPORT_SYMBOL(vmalloc_user); +EXPORT_SYMBOL(vmalloc_user_noprof); struct page *vmalloc_to_page(const void *addr) { @@ -217,13 +217,13 @@ long vread_iter(struct iov_iter *iter, const char *addr, size_t count) * For tight control over page level allocator and protection flags * use __vmalloc() instead. 
*/ -void *vmalloc(unsigned long size) +void *vmalloc_noprof(unsigned long size) { - return __vmalloc(size, GFP_KERNEL); + return __vmalloc_noprof(size, GFP_KERNEL); } -EXPORT_SYMBOL(vmalloc); +EXPORT_SYMBOL(vmalloc_noprof); -void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc); +void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc_noprof); /* * vzalloc - allocate virtually contiguous memory with zero fill @@ -237,11 +237,11 @@ void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc) * For tight control over page level allocator and protection flags * use __vmalloc() instead. */ -void *vzalloc(unsigned long size) +void *vzalloc_noprof(unsigned long size) { - return __vmalloc(size, GFP_KERNEL | __GFP_ZERO); + return __vmalloc_noprof(size, GFP_KERNEL | __GFP_ZERO); } -EXPORT_SYMBOL(vzalloc); +EXPORT_SYMBOL(vzalloc_noprof); /** * vmalloc_node - allocate memory on a specific node @@ -254,11 +254,11 @@ EXPORT_SYMBOL(vzalloc); * For tight control over page level allocator and protection flags * use __vmalloc() instead. */ -void *vmalloc_node(unsigned long size, int node) +void *vmalloc_node_noprof(unsigned long size, int node) { - return vmalloc(size); + return vmalloc_noprof(size); } -EXPORT_SYMBOL(vmalloc_node); +EXPORT_SYMBOL(vmalloc_node_noprof); /** * vzalloc_node - allocate memory on a specific node with zero fill @@ -272,11 +272,11 @@ EXPORT_SYMBOL(vmalloc_node); * For tight control over page level allocator and protection flags * use __vmalloc() instead. */ -void *vzalloc_node(unsigned long size, int node) +void *vzalloc_node_noprof(unsigned long size, int node) { - return vzalloc(size); + return vzalloc_noprof(size); } -EXPORT_SYMBOL(vzalloc_node); +EXPORT_SYMBOL(vzalloc_node_noprof); /** * vmalloc_32 - allocate virtually contiguous memory (32bit addressable) @@ -285,11 +285,11 @@ EXPORT_SYMBOL(vzalloc_node); * Allocate enough 32bit PA addressable pages to cover @size from the * page level allocator and map them into contiguous kernel virtual space. */ -void *vmalloc_32(unsigned long size) +void *vmalloc_32_noprof(unsigned long size) { - return __vmalloc(size, GFP_KERNEL); + return __vmalloc_noprof(size, GFP_KERNEL); } -EXPORT_SYMBOL(vmalloc_32); +EXPORT_SYMBOL(vmalloc_32_noprof); /** * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory @@ -301,15 +301,15 @@ EXPORT_SYMBOL(vmalloc_32); * VM_USERMAP is set on the corresponding VMA so that subsequent calls to * remap_vmalloc_range() are permissible. */ -void *vmalloc_32_user(unsigned long size) +void *vmalloc_32_user_noprof(unsigned long size) { /* * We'll have to sort out the ZONE_DMA bits for 64-bit, * but for now this can simply use vmalloc_user() directly. */ - return vmalloc_user(size); + return vmalloc_user_noprof(size); } -EXPORT_SYMBOL(vmalloc_32_user); +EXPORT_SYMBOL(vmalloc_32_user_noprof); void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot) { diff --git a/mm/util.c b/mm/util.c index cee3563d2071..a9e911b22b99 100644 --- a/mm/util.c +++ b/mm/util.c @@ -656,7 +656,7 @@ void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node) * about the resulting pointer, and cannot play * protection games. */ - return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, + return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END, flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, node, __builtin_return_address(0)); } @@ -720,7 +720,7 @@ EXPORT_SYMBOL(kvrealloc_noprof); * @size: element size. 
* @flags: the type of memory to allocate (see kmalloc). */ -void *__vmalloc_array(size_t n, size_t size, gfp_t flags) +void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) { size_t bytes; @@ -728,18 +728,18 @@ void *__vmalloc_array(size_t n, size_t size, gfp_t flags) return NULL; return __vmalloc(bytes, flags); } -EXPORT_SYMBOL(__vmalloc_array); +EXPORT_SYMBOL(__vmalloc_array_noprof); /** * vmalloc_array - allocate memory for a virtually contiguous array. * @n: number of elements. * @size: element size. */ -void *vmalloc_array(size_t n, size_t size) +void *vmalloc_array_noprof(size_t n, size_t size) { return __vmalloc_array(n, size, GFP_KERNEL); } -EXPORT_SYMBOL(vmalloc_array); +EXPORT_SYMBOL(vmalloc_array_noprof); /** * __vcalloc - allocate and zero memory for a virtually contiguous array. @@ -747,22 +747,22 @@ EXPORT_SYMBOL(vmalloc_array); * @size: element size. * @flags: the type of memory to allocate (see kmalloc). */ -void *__vcalloc(size_t n, size_t size, gfp_t flags) +void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) { return __vmalloc_array(n, size, flags | __GFP_ZERO); } -EXPORT_SYMBOL(__vcalloc); +EXPORT_SYMBOL(__vcalloc_noprof); /** * vcalloc - allocate and zero memory for a virtually contiguous array. * @n: number of elements. * @size: element size. */ -void *vcalloc(size_t n, size_t size) +void *vcalloc_noprof(size_t n, size_t size) { return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO); } -EXPORT_SYMBOL(vcalloc); +EXPORT_SYMBOL(vcalloc_noprof); struct anon_vma *folio_anon_vma(struct folio *folio) { diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 68fa001648cc..61af431bb6b8 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3523,12 +3523,12 @@ vm_area_alloc_pages(gfp_t gfp, int nid, * but mempolicy wants to alloc memory by interleaving. */ if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE) - nr = alloc_pages_bulk_array_mempolicy(bulk_gfp, + nr = alloc_pages_bulk_array_mempolicy_noprof(bulk_gfp, nr_pages_request, pages + nr_allocated); else - nr = alloc_pages_bulk_array_node(bulk_gfp, nid, + nr = alloc_pages_bulk_array_node_noprof(bulk_gfp, nid, nr_pages_request, pages + nr_allocated); @@ -3558,9 +3558,9 @@ vm_area_alloc_pages(gfp_t gfp, int nid, break; if (nid == NUMA_NO_NODE) - page = alloc_pages(alloc_gfp, order); + page = alloc_pages_noprof(alloc_gfp, order); else - page = alloc_pages_node(nid, alloc_gfp, order); + page = alloc_pages_node_noprof(nid, alloc_gfp, order); if (unlikely(!page)) { if (!nofail) break; @@ -3617,10 +3617,10 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, /* Please note that the recursion is strictly bounded. 
*/ if (array_size > PAGE_SIZE) { - area->pages = __vmalloc_node(array_size, 1, nested_gfp, node, + area->pages = __vmalloc_node_noprof(array_size, 1, nested_gfp, node, area->caller); } else { - area->pages = kmalloc_node(array_size, nested_gfp, node); + area->pages = kmalloc_node_noprof(array_size, nested_gfp, node); } if (!area->pages) { @@ -3730,7 +3730,7 @@ fail: * * Return: the address of the area or %NULL on failure */ -void *__vmalloc_node_range(unsigned long size, unsigned long align, +void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align, unsigned long start, unsigned long end, gfp_t gfp_mask, pgprot_t prot, unsigned long vm_flags, int node, const void *caller) @@ -3877,10 +3877,10 @@ fail: * * Return: pointer to the allocated memory or %NULL on error */ -void *__vmalloc_node(unsigned long size, unsigned long align, +void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask, int node, const void *caller) { - return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, + return __vmalloc_node_range_noprof(size, align, VMALLOC_START, VMALLOC_END, gfp_mask, PAGE_KERNEL, 0, node, caller); } /* @@ -3889,15 +3889,15 @@ void *__vmalloc_node(unsigned long size, unsigned long align, * than that. */ #ifdef CONFIG_TEST_VMALLOC_MODULE -EXPORT_SYMBOL_GPL(__vmalloc_node); +EXPORT_SYMBOL_GPL(__vmalloc_node_noprof); #endif -void *__vmalloc(unsigned long size, gfp_t gfp_mask) +void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) { - return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE, + return __vmalloc_node_noprof(size, 1, gfp_mask, NUMA_NO_NODE, __builtin_return_address(0)); } -EXPORT_SYMBOL(__vmalloc); +EXPORT_SYMBOL(__vmalloc_noprof); /** * vmalloc - allocate virtually contiguous memory @@ -3911,12 +3911,12 @@ EXPORT_SYMBOL(__vmalloc); * * Return: pointer to the allocated memory or %NULL on error */ -void *vmalloc(unsigned long size) +void *vmalloc_noprof(unsigned long size) { - return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE, + return __vmalloc_node_noprof(size, 1, GFP_KERNEL, NUMA_NO_NODE, __builtin_return_address(0)); } -EXPORT_SYMBOL(vmalloc); +EXPORT_SYMBOL(vmalloc_noprof); /** * vmalloc_huge - allocate virtually contiguous memory, allow huge pages @@ -3930,13 +3930,13 @@ EXPORT_SYMBOL(vmalloc); * * Return: pointer to the allocated memory or %NULL on error */ -void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) +void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) { - return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END, + return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END, gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, NUMA_NO_NODE, __builtin_return_address(0)); } -EXPORT_SYMBOL_GPL(vmalloc_huge); +EXPORT_SYMBOL_GPL(vmalloc_huge_noprof); /** * vzalloc - allocate virtually contiguous memory with zero fill @@ -3951,12 +3951,12 @@ EXPORT_SYMBOL_GPL(vmalloc_huge); * * Return: pointer to the allocated memory or %NULL on error */ -void *vzalloc(unsigned long size) +void *vzalloc_noprof(unsigned long size) { - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, + return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, __builtin_return_address(0)); } -EXPORT_SYMBOL(vzalloc); +EXPORT_SYMBOL(vzalloc_noprof); /** * vmalloc_user - allocate zeroed virtually contiguous memory for userspace @@ -3967,14 +3967,14 @@ EXPORT_SYMBOL(vzalloc); * * Return: pointer to the allocated memory or %NULL on error */ -void *vmalloc_user(unsigned long size) +void 
*vmalloc_user_noprof(unsigned long size) { - return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, + return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL, VM_USERMAP, NUMA_NO_NODE, __builtin_return_address(0)); } -EXPORT_SYMBOL(vmalloc_user); +EXPORT_SYMBOL(vmalloc_user_noprof); /** * vmalloc_node - allocate memory on a specific node @@ -3989,12 +3989,12 @@ EXPORT_SYMBOL(vmalloc_user); * * Return: pointer to the allocated memory or %NULL on error */ -void *vmalloc_node(unsigned long size, int node) +void *vmalloc_node_noprof(unsigned long size, int node) { - return __vmalloc_node(size, 1, GFP_KERNEL, node, + return __vmalloc_node_noprof(size, 1, GFP_KERNEL, node, __builtin_return_address(0)); } -EXPORT_SYMBOL(vmalloc_node); +EXPORT_SYMBOL(vmalloc_node_noprof); /** * vzalloc_node - allocate memory on a specific node with zero fill @@ -4007,12 +4007,12 @@ EXPORT_SYMBOL(vmalloc_node); * * Return: pointer to the allocated memory or %NULL on error */ -void *vzalloc_node(unsigned long size, int node) +void *vzalloc_node_noprof(unsigned long size, int node) { - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node, + return __vmalloc_node_noprof(size, 1, GFP_KERNEL | __GFP_ZERO, node, __builtin_return_address(0)); } -EXPORT_SYMBOL(vzalloc_node); +EXPORT_SYMBOL(vzalloc_node_noprof); #if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32) #define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL) @@ -4035,12 +4035,12 @@ EXPORT_SYMBOL(vzalloc_node); * * Return: pointer to the allocated memory or %NULL on error */ -void *vmalloc_32(unsigned long size) +void *vmalloc_32_noprof(unsigned long size) { - return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE, + return __vmalloc_node_noprof(size, 1, GFP_VMALLOC32, NUMA_NO_NODE, __builtin_return_address(0)); } -EXPORT_SYMBOL(vmalloc_32); +EXPORT_SYMBOL(vmalloc_32_noprof); /** * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory @@ -4051,14 +4051,14 @@ EXPORT_SYMBOL(vmalloc_32); * * Return: pointer to the allocated memory or %NULL on error */ -void *vmalloc_32_user(unsigned long size) +void *vmalloc_32_user_noprof(unsigned long size) { - return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END, + return __vmalloc_node_range_noprof(size, SHMLBA, VMALLOC_START, VMALLOC_END, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL, VM_USERMAP, NUMA_NO_NODE, __builtin_return_address(0)); } -EXPORT_SYMBOL(vmalloc_32_user); +EXPORT_SYMBOL(vmalloc_32_user_noprof); /* * Atomically zero bytes in the iterator. -- cgit From 9e54dd8b64dccdf5336172bf905017a6f97ee18d Mon Sep 17 00:00:00 2001 From: Kent Overstreet Date: Thu, 21 Mar 2024 09:36:53 -0700 Subject: rhashtable: plumb through alloc tag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This gives better memory allocation profiling results; rhashtable allocations will be accounted to the code that initialized the rhashtable. 
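In practice this means the table remembers the codetag of whoever called rhashtable_init(), and the buckets it allocates internally are charged to that call site when CONFIG_MEM_ALLOC_PROFILING is enabled. A minimal usage sketch, assuming a made-up object type and setup function (my_obj, my_params and my_table_setup are hypothetical; only the rhashtable API is real):

  #include <linux/rhashtable.h>

  struct my_obj {
          u32 key;
          struct rhash_head node;
  };

  static const struct rhashtable_params my_params = {
          .key_len     = sizeof(u32),
          .key_offset  = offsetof(struct my_obj, key),
          .head_offset = offsetof(struct my_obj, node),
  };

  static struct rhashtable my_table;

  static int my_table_setup(void)
  {
          /*
           * rhashtable_init() is now alloc_hooks(rhashtable_init_noprof(...)),
           * so the codetag created at this call site is recorded in
           * ht->alloc_tag and reused for the table's internal kmalloc/kvmalloc
           * calls shown in the diff below.
           */
          return rhashtable_init(&my_table, &my_params);
  }
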
[surenb@google.com: undo _noprof additions in the documentation] Link: https://lkml.kernel.org/r/20240326231453.1206227-1-surenb@google.com Link: https://lkml.kernel.org/r/20240321163705.3067592-32-surenb@google.com Signed-off-by: Kent Overstreet Signed-off-by: Suren Baghdasaryan Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/alloc_tag.h | 3 +++ include/linux/rhashtable-types.h | 11 +++++++++-- lib/rhashtable.c | 22 ++++++++++++++-------- 3 files changed, 26 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index 2aea7afa1fab..0567766e50bd 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -152,6 +152,8 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) ref->ct = NULL; } +#define alloc_tag_record(p) ((p) = current->alloc_tag) + #else /* CONFIG_MEM_ALLOC_PROFILING */ #define DEFINE_ALLOC_TAG(_alloc_tag) @@ -159,6 +161,7 @@ static inline bool mem_alloc_profiling_enabled(void) { return false; } static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes) {} static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {} +#define alloc_tag_record(p) do {} while (0) #endif /* CONFIG_MEM_ALLOC_PROFILING */ diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h index b6f3797277ff..015c8298bebc 100644 --- a/include/linux/rhashtable-types.h +++ b/include/linux/rhashtable-types.h @@ -9,6 +9,7 @@ #ifndef _LINUX_RHASHTABLE_TYPES_H #define _LINUX_RHASHTABLE_TYPES_H +#include #include #include #include @@ -88,6 +89,9 @@ struct rhashtable { struct mutex mutex; spinlock_t lock; atomic_t nelems; +#ifdef CONFIG_MEM_ALLOC_PROFILING + struct alloc_tag *alloc_tag; +#endif }; /** @@ -127,9 +131,12 @@ struct rhashtable_iter { bool end_of_table; }; -int rhashtable_init(struct rhashtable *ht, +int rhashtable_init_noprof(struct rhashtable *ht, const struct rhashtable_params *params); -int rhltable_init(struct rhltable *hlt, +#define rhashtable_init(...) alloc_hooks(rhashtable_init_noprof(__VA_ARGS__)) + +int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params); +#define rhltable_init(...) 
alloc_hooks(rhltable_init_noprof(__VA_ARGS__)) #endif /* _LINUX_RHASHTABLE_TYPES_H */ diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 6ae2ba8e06a2..dbbed19f8fff 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -130,7 +130,8 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht, if (ntbl) return ntbl; - ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC); + ntbl = alloc_hooks_tag(ht->alloc_tag, + kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO)); if (ntbl && leaf) { for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++) @@ -157,7 +158,8 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht, size = sizeof(*tbl) + sizeof(tbl->buckets[0]); - tbl = kzalloc(size, gfp); + tbl = alloc_hooks_tag(ht->alloc_tag, + kmalloc_noprof(size, gfp|__GFP_ZERO)); if (!tbl) return NULL; @@ -181,7 +183,9 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, int i; static struct lock_class_key __key; - tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp); + tbl = alloc_hooks_tag(ht->alloc_tag, + kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets), + gfp|__GFP_ZERO, NUMA_NO_NODE)); size = nbuckets; @@ -1016,7 +1020,7 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed) * .obj_hashfn = my_hash_fn, * }; */ -int rhashtable_init(struct rhashtable *ht, +int rhashtable_init_noprof(struct rhashtable *ht, const struct rhashtable_params *params) { struct bucket_table *tbl; @@ -1031,6 +1035,8 @@ int rhashtable_init(struct rhashtable *ht, spin_lock_init(&ht->lock); memcpy(&ht->p, params, sizeof(*params)); + alloc_tag_record(ht->alloc_tag); + if (params->min_size) ht->p.min_size = roundup_pow_of_two(params->min_size); @@ -1076,7 +1082,7 @@ int rhashtable_init(struct rhashtable *ht, return 0; } -EXPORT_SYMBOL_GPL(rhashtable_init); +EXPORT_SYMBOL_GPL(rhashtable_init_noprof); /** * rhltable_init - initialize a new hash list table @@ -1087,15 +1093,15 @@ EXPORT_SYMBOL_GPL(rhashtable_init); * * See documentation for rhashtable_init. */ -int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params) +int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params) { int err; - err = rhashtable_init(&hlt->ht, params); + err = rhashtable_init_noprof(&hlt->ht, params); hlt->ht.rhlist = true; return err; } -EXPORT_SYMBOL_GPL(rhltable_init); +EXPORT_SYMBOL_GPL(rhltable_init_noprof); static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj, void (*free_fn)(void *ptr, void *arg), -- cgit From 1438d349d16b78d88f9e978a4a5496f078c8191b Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:54 -0700 Subject: lib: add memory allocations report in show_mem() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Include allocations in show_mem reports. 
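The interesting piece of the patch below is alloc_tag_top_users(), which keeps only the "count" largest tags, sorted by bytes, using a bounded insertion into a small array rather than sorting every codetag. A standalone C model of that selection step, with simplified types and made-up sample data, for illustration only:

  #include <stddef.h>
  #include <stdio.h>
  #include <string.h>

  struct tag_bytes {
          const char *name;
          long long bytes;
  };

  /* Insert n into the top[] array of at most "count" entries, largest first. */
  static size_t top_insert(struct tag_bytes *top, size_t nr, size_t count,
                           struct tag_bytes n)
  {
          size_t i;

          for (i = 0; i < nr; i++)
                  if (n.bytes > top[i].bytes)
                          break;

          if (i < count) {
                  nr -= (nr == count);    /* array full: drop the smallest */
                  memmove(&top[i + 1], &top[i], sizeof(top[0]) * (nr - i));
                  top[i] = n;
                  nr++;
          }
          return nr;
  }

  int main(void)
  {
          struct tag_bytes in[] = {
                  { "a.c:10", 100 }, { "b.c:20", 400 }, { "c.c:30", 250 },
                  { "d.c:40",  50 }, { "e.c:50", 300 },
          };
          struct tag_bytes top[3];
          size_t i, nr = 0;

          for (i = 0; i < sizeof(in) / sizeof(in[0]); i++)
                  nr = top_insert(top, nr, 3, in[i]);

          for (i = 0; i < nr; i++)
                  printf("%12lld %s\n", top[i].bytes, top[i].name);
          return 0;
  }

This prints the three largest entries (400, 300, 250); __show_mem() does the same over real codetags and emits the result with pr_notice().
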
Link: https://lkml.kernel.org/r/20240321163705.3067592-33-surenb@google.com Signed-off-by: Kent Overstreet Signed-off-by: Suren Baghdasaryan Reviewed-by: Vlastimil Babka Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/alloc_tag.h | 7 +++++++ include/linux/codetag.h | 1 + lib/alloc_tag.c | 38 ++++++++++++++++++++++++++++++++++++++ lib/codetag.c | 5 +++++ mm/show_mem.c | 26 ++++++++++++++++++++++++++ 5 files changed, 77 insertions(+) (limited to 'include/linux') diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index 0567766e50bd..d18388c0e9e1 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -31,6 +31,13 @@ struct alloc_tag { #ifdef CONFIG_MEM_ALLOC_PROFILING +struct codetag_bytes { + struct codetag *ct; + s64 bytes; +}; + +size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep); + static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct) { return container_of(ct, struct alloc_tag, ct); diff --git a/include/linux/codetag.h b/include/linux/codetag.h index bfd0ba5c4185..c2a579ccd455 100644 --- a/include/linux/codetag.h +++ b/include/linux/codetag.h @@ -61,6 +61,7 @@ struct codetag_iterator { } void codetag_lock_module_list(struct codetag_type *cttype, bool lock); +bool codetag_trylock_module_list(struct codetag_type *cttype); struct codetag_iterator codetag_get_ct_iter(struct codetag_type *cttype); struct codetag *codetag_next_ct(struct codetag_iterator *iter); diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index 6bd8e85beaf7..26af9982ddc4 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -89,6 +89,44 @@ static const struct seq_operations allocinfo_seq_op = { .show = allocinfo_show, }; +size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep) +{ + struct codetag_iterator iter; + struct codetag *ct; + struct codetag_bytes n; + unsigned int i, nr = 0; + + if (can_sleep) + codetag_lock_module_list(alloc_tag_cttype, true); + else if (!codetag_trylock_module_list(alloc_tag_cttype)) + return 0; + + iter = codetag_get_ct_iter(alloc_tag_cttype); + while ((ct = codetag_next_ct(&iter))) { + struct alloc_tag_counters counter = alloc_tag_read(ct_to_alloc_tag(ct)); + + n.ct = ct; + n.bytes = counter.bytes; + + for (i = 0; i < nr; i++) + if (n.bytes > tags[i].bytes) + break; + + if (i < count) { + nr -= nr == count; + memmove(&tags[i + 1], + &tags[i], + sizeof(tags[0]) * (nr - i)); + nr++; + tags[i] = n; + } + } + + codetag_lock_module_list(alloc_tag_cttype, false); + + return nr; +} + static void __init procfs_init(void) { proc_create_seq("allocinfo", 0444, NULL, &allocinfo_seq_op); diff --git a/lib/codetag.c b/lib/codetag.c index 408062f722ce..5ace625f2328 100644 --- a/lib/codetag.c +++ b/lib/codetag.c @@ -36,6 +36,11 @@ void codetag_lock_module_list(struct codetag_type *cttype, bool lock) up_read(&cttype->mod_lock); } +bool codetag_trylock_module_list(struct codetag_type *cttype) +{ + return down_read_trylock(&cttype->mod_lock) != 0; +} + struct codetag_iterator codetag_get_ct_iter(struct codetag_type *cttype) { struct codetag_iterator iter = { diff --git a/mm/show_mem.c b/mm/show_mem.c index 8dcfafbd283c..bdb439551eef 100644 --- a/mm/show_mem.c +++ b/mm/show_mem.c @@ -423,4 +423,30 @@ void 
__show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) #ifdef CONFIG_MEMORY_FAILURE printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages)); #endif +#ifdef CONFIG_MEM_ALLOC_PROFILING + { + struct codetag_bytes tags[10]; + size_t i, nr; + + nr = alloc_tag_top_users(tags, ARRAY_SIZE(tags), false); + if (nr) { + pr_notice("Memory allocations:\n"); + for (i = 0; i < nr; i++) { + struct codetag *ct = tags[i].ct; + struct alloc_tag *tag = ct_to_alloc_tag(ct); + struct alloc_tag_counters counter = alloc_tag_read(tag); + + /* Same as alloc_tag_to_text() but w/o intermediate buffer */ + if (ct->modname) + pr_notice("%12lli %8llu %s:%u [%s] func:%s\n", + counter.bytes, counter.calls, ct->filename, + ct->lineno, ct->modname, ct->function); + else + pr_notice("%12lli %8llu %s:%u func:%s\n", + counter.bytes, counter.calls, ct->filename, + ct->lineno, ct->function); + } + } + } +#endif } -- cgit From 239d6c96d86f8a42d799b9a85c09cc7f36aef3f8 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:55 -0700 Subject: codetag: debug: skip objext checking when it's for objext itself MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit objext objects are created with __GFP_NO_OBJ_EXT flag and therefore have no corresponding objext themselves (otherwise we would get an infinite recursion). When freeing these objects their codetag will be empty and when CONFIG_MEM_ALLOC_PROFILING_DEBUG is enabled this will lead to false warnings. Introduce CODETAG_EMPTY special codetag value to mark allocations which intentionally lack codetag to avoid these warnings. Set objext codetags to CODETAG_EMPTY before freeing to indicate that the codetag is expected to be empty. Link: https://lkml.kernel.org/r/20240321163705.3067592-34-surenb@google.com Signed-off-by: Suren Baghdasaryan Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Kent Overstreet Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/alloc_tag.h | 26 ++++++++++++++++++++++++++ mm/slub.c | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+) (limited to 'include/linux') diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index d18388c0e9e1..3af5d6a5ced4 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -29,6 +29,27 @@ struct alloc_tag { struct alloc_tag_counters __percpu *counters; } __aligned(8); +#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG + +#define CODETAG_EMPTY ((void *)1) + +static inline bool is_codetag_empty(union codetag_ref *ref) +{ + return ref->ct == CODETAG_EMPTY; +} + +static inline void set_codetag_empty(union codetag_ref *ref) +{ + if (ref) + ref->ct = CODETAG_EMPTY; +} + +#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ + +static inline bool is_codetag_empty(union codetag_ref *ref) { return false; } + +#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ + #ifdef CONFIG_MEM_ALLOC_PROFILING struct codetag_bytes { @@ -151,6 +172,11 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) if (!ref || !ref->ct) return; + if (is_codetag_empty(ref)) { + ref->ct = NULL; + return; + } + tag = ct_to_alloc_tag(ref->ct); this_cpu_sub(tag->counters->bytes, bytes); diff --git a/mm/slub.c b/mm/slub.c index 0fc891d44cff..00f461826462 100644 --- a/mm/slub.c +++ 
b/mm/slub.c @@ -1873,6 +1873,30 @@ static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s) #ifdef CONFIG_SLAB_OBJ_EXT +#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG + +static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) +{ + struct slabobj_ext *slab_exts; + struct slab *obj_exts_slab; + + obj_exts_slab = virt_to_slab(obj_exts); + slab_exts = slab_obj_exts(obj_exts_slab); + if (slab_exts) { + unsigned int offs = obj_to_index(obj_exts_slab->slab_cache, + obj_exts_slab, obj_exts); + /* codetag should be NULL */ + WARN_ON(slab_exts[offs].ref.ct); + set_codetag_empty(&slab_exts[offs].ref); + } +} + +#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ + +static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {} + +#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ + /* * The allocated objcg pointers array is not accounted directly. * Moreover, it should not come from DMA buffer and is not readily @@ -1913,6 +1937,7 @@ static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, * assign slabobj_exts in parallel. In this case the existing * objcg vector should be reused. */ + mark_objexts_empty(vec); kfree(vec); return 0; } @@ -1929,6 +1954,14 @@ static inline void free_slab_obj_exts(struct slab *slab) if (!obj_exts) return; + /* + * obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its + * corresponding extension will be NULL. alloc_tag_sub() will throw a + * warning if slab has extensions but the extension of an object is + * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that + * the extension for obj_exts is expected to be NULL. + */ + mark_objexts_empty(obj_exts); kfree(obj_exts); slab->obj_exts = 0; } -- cgit From d224eb0287fbd84f4f13eca042c7f08f87138f3b Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:56 -0700 Subject: codetag: debug: mark codetags for reserved pages as empty MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To avoid debug warnings while freeing reserved pages which were not allocated with usual allocators, mark their codetags as empty before freeing. 
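This reuses the CODETAG_EMPTY sentinel introduced in the previous patch: reserved pages never passed through the instrumented allocator, so their tag reference is deliberately marked empty and the CONFIG_MEM_ALLOC_PROFILING_DEBUG check stays quiet. A small standalone model of the idea, with simplified types (illustrative only, not the kernel implementation):

  #include <stdio.h>

  #define CODETAG_EMPTY ((void *)1)

  struct codetag_ref { void *ct; };

  static void release_ref(struct codetag_ref *ref)
  {
          if (ref->ct == CODETAG_EMPTY) {         /* intentionally untagged */
                  ref->ct = NULL;
                  return;
          }
          if (!ref->ct) {                         /* unexpectedly untagged */
                  fprintf(stderr, "free of untagged allocation\n");
                  return;
          }
          /* normal case: subtract the tag's counters, elided in this model */
          ref->ct = NULL;
  }

  int main(void)
  {
          struct codetag_ref reserved_page = { .ct = CODETAG_EMPTY };
          struct codetag_ref buggy_page    = { .ct = NULL };

          release_ref(&reserved_page);    /* silent */
          release_ref(&buggy_page);       /* warns */
          return 0;
  }
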
Link: https://lkml.kernel.org/r/20240321163705.3067592-35-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Kees Cook Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Kent Overstreet Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/alloc_tag.h | 1 + include/linux/mm.h | 9 +++++++++ include/linux/pgalloc_tag.h | 2 ++ mm/mm_init.c | 12 +++++++++++- 4 files changed, 23 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index 3af5d6a5ced4..afc9e259a2d3 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -47,6 +47,7 @@ static inline void set_codetag_empty(union codetag_ref *ref) #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ static inline bool is_codetag_empty(union codetag_ref *ref) { return false; } +static inline void set_codetag_empty(union codetag_ref *ref) {} #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ diff --git a/include/linux/mm.h b/include/linux/mm.h index b6bdaa18b9e9..0dbd737cfabd 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -3134,6 +3135,14 @@ extern void reserve_bootmem_region(phys_addr_t start, /* Free the reserved page into the buddy system, so it gets managed. */ static inline void free_reserved_page(struct page *page) { + if (mem_alloc_profiling_enabled()) { + union codetag_ref *ref = get_page_tag_ref(page); + + if (ref) { + set_codetag_empty(ref); + put_page_tag_ref(ref); + } + } ClearPageReserved(page); init_page_count(page); __free_page(page); diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index 50d212330bbb..62d8dad74b37 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -120,6 +120,8 @@ static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) #else /* CONFIG_MEM_ALLOC_PROFILING */ +static inline union codetag_ref *get_page_tag_ref(struct page *page) { return NULL; } +static inline void put_page_tag_ref(union codetag_ref *ref) {} static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, unsigned int nr) {} static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} diff --git a/mm/mm_init.c b/mm/mm_init.c index 2fd9bf044a79..f45c2b32ba82 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -2567,7 +2567,6 @@ void __init set_dma_reserve(unsigned long new_dma_reserve) void __init memblock_free_pages(struct page *page, unsigned long pfn, unsigned int order) { - if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) { int nid = early_pfn_to_nid(pfn); @@ -2579,6 +2578,17 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn, /* KMSAN will take care of these pages. 
*/ return; } + + /* pages were reserved and not allocated */ + if (mem_alloc_profiling_enabled()) { + union codetag_ref *ref = get_page_tag_ref(page); + + if (ref) { + set_codetag_empty(ref); + put_page_tag_ref(ref); + } + } + __free_pages_core(page, order); } -- cgit From 09c46563ff6d5f090211e48ff1fdba0ec7f4c97f Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 21 Mar 2024 09:36:57 -0700 Subject: codetag: debug: introduce OBJEXTS_ALLOC_FAIL to mark failed slab_ext allocations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If slabobj_ext vector allocation for a slab object fails and later on it succeeds for another object in the same slab, the slabobj_ext for the original object will be NULL and will be flagged in case when CONFIG_MEM_ALLOC_PROFILING_DEBUG is enabled. Mark failed slabobj_ext vector allocations using a new objext_flags flag stored in the lower bits of slab->obj_exts. When new allocation succeeds it marks all tag references in the same slabobj_ext vector as empty to avoid warnings implemented by CONFIG_MEM_ALLOC_PROFILING_DEBUG checks. Link: https://lkml.kernel.org/r/20240321163705.3067592-36-surenb@google.com Signed-off-by: Suren Baghdasaryan Tested-by: Kees Cook Cc: Alexander Viro Cc: Alex Gaynor Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Benno Lossin Cc: "Björn Roy Baron" Cc: Boqun Feng Cc: Christoph Lameter Cc: Dennis Zhou Cc: Gary Guo Cc: Kent Overstreet Cc: Miguel Ojeda Cc: Pasha Tatashin Cc: Peter Zijlstra Cc: Tejun Heo Cc: Vlastimil Babka Cc: Wedson Almeida Filho Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 4 +++- mm/slub.c | 46 +++++++++++++++++++++++++++++++++++++++------- 2 files changed, 42 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 24a6df30be49..8f332b4ae84c 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -366,8 +366,10 @@ enum page_memcg_data_flags { #endif /* CONFIG_MEMCG */ enum objext_flags { + /* slabobj_ext vector failed to allocate */ + OBJEXTS_ALLOC_FAIL = __FIRST_OBJEXT_FLAG, /* the next bit after the last actual flag */ - __NR_OBJEXTS_FLAGS = __FIRST_OBJEXT_FLAG, + __NR_OBJEXTS_FLAGS = (__FIRST_OBJEXT_FLAG << 1), }; #define OBJEXTS_FLAGS_MASK (__NR_OBJEXTS_FLAGS - 1) diff --git a/mm/slub.c b/mm/slub.c index 00f461826462..be047279c9e9 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1891,9 +1891,33 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) } } +static inline void mark_failed_objexts_alloc(struct slab *slab) +{ + slab->obj_exts = OBJEXTS_ALLOC_FAIL; +} + +static inline void handle_failed_objexts_alloc(unsigned long obj_exts, + struct slabobj_ext *vec, unsigned int objects) +{ + /* + * If vector previously failed to allocate then we have live + * objects with no tag reference. Mark all references in this + * vector as empty to avoid warnings later on. 
+ */ + if (obj_exts & OBJEXTS_ALLOC_FAIL) { + unsigned int i; + + for (i = 0; i < objects; i++) + set_codetag_empty(&vec[i].ref); + } +} + #else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {} +static inline void mark_failed_objexts_alloc(struct slab *slab) {} +static inline void handle_failed_objexts_alloc(unsigned long obj_exts, + struct slabobj_ext *vec, unsigned int objects) {} #endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */ @@ -1909,29 +1933,37 @@ static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s, gfp_t gfp, bool new_slab) { unsigned int objects = objs_per_slab(s, slab); - unsigned long obj_exts; - void *vec; + unsigned long new_exts; + unsigned long old_exts; + struct slabobj_ext *vec; gfp &= ~OBJCGS_CLEAR_MASK; /* Prevent recursive extension vector allocation */ gfp |= __GFP_NO_OBJ_EXT; vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp, slab_nid(slab)); - if (!vec) + if (!vec) { + /* Mark vectors which failed to allocate */ + if (new_slab) + mark_failed_objexts_alloc(slab); + return -ENOMEM; + } - obj_exts = (unsigned long)vec; + new_exts = (unsigned long)vec; #ifdef CONFIG_MEMCG - obj_exts |= MEMCG_DATA_OBJEXTS; + new_exts |= MEMCG_DATA_OBJEXTS; #endif + old_exts = slab->obj_exts; + handle_failed_objexts_alloc(old_exts, vec, objects); if (new_slab) { /* * If the slab is brand new and nobody can yet access its * obj_exts, no synchronization is required and obj_exts can * be simply assigned. */ - slab->obj_exts = obj_exts; - } else if (cmpxchg(&slab->obj_exts, 0, obj_exts)) { + slab->obj_exts = new_exts; + } else if (cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) { /* * If the slab is already in use, somebody can allocate and * assign slabobj_exts in parallel. In this case the existing -- cgit From 2c321f3f70bc284510598f712b702ce8d60c4d14 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Sun, 14 Apr 2024 19:07:31 -0700 Subject: mm: change inlined allocation helpers to account at the call site Main goal of memory allocation profiling patchset is to provide accounting that is cheap enough to run in production. To achieve that we inject counters using codetags at the allocation call sites to account every time allocation is made. This injection allows us to perform accounting efficiently because injected counters are immediately available as opposed to the alternative methods, such as using _RET_IP_, which would require counter lookup and appropriate locking that makes accounting much more expensive. This method requires all allocation functions to inject separate counters at their call sites so that their callers can be individually accounted. Counter injection is implemented by allocation hooks which should wrap all allocation functions. Inlined functions which perform allocations but do not use allocation hooks are directly charged for the allocations they perform. In most cases these functions are just specialized allocation wrappers used from multiple places to allocate objects of a specific type. It would be more useful to do the accounting at their call sites instead. Instrument these helpers to do accounting at the call site. Simple inlined allocation wrappers are converted directly into macros. More complex allocators or allocators with documentation are converted into _noprof versions and allocation hooks are added. This allows memory allocation profiling mechanism to charge allocations to the callers of these functions. 
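The conversion pattern is easiest to see on a single helper. An illustrative sketch using a made-up wrapper (my_buf_alloc is hypothetical; kzalloc_noprof and alloc_hooks() are the real primitives used throughout the diff below):

  /*
   * Before the series, a helper like this would itself be charged for
   * every allocation it performs:
   *
   *      static inline void *my_buf_alloc(size_t len)
   *      {
   *              return kzalloc(len, GFP_KERNEL);
   *      }
   *
   * After the conversion, the counter is planted at each caller instead:
   */
  static inline void *my_buf_alloc_noprof(size_t len)
  {
          return kzalloc_noprof(len, GFP_KERNEL);
  }
  #define my_buf_alloc(...)       alloc_hooks(my_buf_alloc_noprof(__VA_ARGS__))

Trivial one-line wrappers take the shorter route and simply become macros (see jbd2_alloc_handle() or sk_psock_init_link() below), so the hook already present inside kmalloc()/kmem_cache_zalloc() lands at the caller.
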
Link: https://lkml.kernel.org/r/20240415020731.1152108-1-surenb@google.com Signed-off-by: Suren Baghdasaryan Acked-by: Jan Kara [jbd2] Cc: Anna Schumaker Cc: Arnd Bergmann Cc: Benjamin Tissoires Cc: Christoph Lameter Cc: David Rientjes Cc: David S. Miller Cc: Dennis Zhou Cc: Eric Dumazet Cc: Herbert Xu Cc: Jakub Kicinski Cc: Jakub Sitnicki Cc: Jiri Kosina Cc: Joerg Roedel Cc: Joonsoo Kim Cc: Kent Overstreet Cc: Matthew Wilcox (Oracle) Cc: Paolo Abeni Cc: Pekka Enberg Cc: Tejun Heo Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Vlastimil Babka Cc: Will Deacon Signed-off-by: Andrew Morton --- drivers/iommu/amd/amd_iommu.h | 5 +++-- fs/nfs/iostat.h | 5 +---- include/acpi/platform/aclinuxex.h | 19 ++++++------------ include/asm-generic/pgalloc.h | 35 ++++++++++++++++++++------------- include/crypto/hash.h | 7 ++++--- include/crypto/internal/acompress.h | 5 +++-- include/crypto/skcipher.h | 7 ++++--- include/linux/bpf.h | 33 ++++++++----------------------- include/linux/bpfptr.h | 5 +++-- include/linux/dma-fence-chain.h | 6 ++---- include/linux/hid_bpf.h | 6 ++---- include/linux/jbd2.h | 12 ++++------- include/linux/mm.h | 5 +++-- include/linux/mm_types.h | 5 +++-- include/linux/percpu.h | 3 +++ include/linux/ptr_ring.h | 28 +++++++++++++++----------- include/linux/skb_array.h | 19 ++++++++++-------- include/linux/skbuff.h | 20 ++++++++----------- include/linux/skmsg.h | 8 +++----- include/linux/slab.h | 5 +++++ include/linux/sockptr.h | 10 ++++++---- include/net/netlabel.h | 16 +++++++++------ include/net/netlink.h | 5 +++-- include/net/request_sock.h | 5 +++-- include/net/tcx.h | 5 +++-- net/sunrpc/auth_gss/auth_gss_internal.h | 6 ++++-- 26 files changed, 142 insertions(+), 143 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index f482aab420f7..52575ba9a141 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -134,13 +134,14 @@ static inline int get_pci_sbdf_id(struct pci_dev *pdev) return PCI_SEG_DEVID_TO_SBDF(seg, devid); } -static inline void *alloc_pgtable_page(int nid, gfp_t gfp) +static inline void *alloc_pgtable_page_noprof(int nid, gfp_t gfp) { struct page *page; - page = alloc_pages_node(nid, gfp | __GFP_ZERO, 0); + page = alloc_pages_node_noprof(nid, gfp | __GFP_ZERO, 0); return page ? page_address(page) : NULL; } +#define alloc_pgtable_page(...) alloc_hooks(alloc_pgtable_page_noprof(__VA_ARGS__)) /* * This must be called after device probe completes. During probe diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h index 5aa776b5a3e7..b17a9eb9b148 100644 --- a/fs/nfs/iostat.h +++ b/fs/nfs/iostat.h @@ -46,10 +46,7 @@ static inline void nfs_add_stats(const struct inode *inode, nfs_add_server_stats(NFS_SERVER(inode), stat, addend); } -static inline struct nfs_iostats __percpu *nfs_alloc_iostats(void) -{ - return alloc_percpu(struct nfs_iostats); -} +#define nfs_alloc_iostats() alloc_percpu(struct nfs_iostats) static inline void nfs_free_iostats(struct nfs_iostats __percpu *stats) { diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h index 600d4e2641da..62cac266a1c8 100644 --- a/include/acpi/platform/aclinuxex.h +++ b/include/acpi/platform/aclinuxex.h @@ -47,26 +47,19 @@ acpi_status acpi_os_terminate(void); * However, boot has (system_state != SYSTEM_RUNNING) * to quiet __might_sleep() in kmalloc() and resume does not. */ -static inline void *acpi_os_allocate(acpi_size size) -{ - return kmalloc(size, irqs_disabled()? 
GFP_ATOMIC : GFP_KERNEL); -} +#define acpi_os_allocate(_size) \ + kmalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL) -static inline void *acpi_os_allocate_zeroed(acpi_size size) -{ - return kzalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL); -} +#define acpi_os_allocate_zeroed(_size) \ + kzalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL) static inline void acpi_os_free(void *memory) { kfree(memory); } -static inline void *acpi_os_acquire_object(acpi_cache_t * cache) -{ - return kmem_cache_zalloc(cache, - irqs_disabled()? GFP_ATOMIC : GFP_KERNEL); -} +#define acpi_os_acquire_object(_cache) \ + kmem_cache_zalloc(_cache, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL) static inline acpi_thread_id acpi_os_get_thread_id(void) { diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index 879e5f8aa5e9..7c48f5fbf8aa 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -16,15 +16,16 @@ * * Return: pointer to the allocated memory or %NULL on error */ -static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm) +static inline pte_t *__pte_alloc_one_kernel_noprof(struct mm_struct *mm) { - struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & + struct ptdesc *ptdesc = pagetable_alloc_noprof(GFP_PGTABLE_KERNEL & ~__GFP_HIGHMEM, 0); if (!ptdesc) return NULL; return ptdesc_address(ptdesc); } +#define __pte_alloc_one_kernel(...) alloc_hooks(__pte_alloc_one_kernel_noprof(__VA_ARGS__)) #ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL /** @@ -33,10 +34,11 @@ static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm) * * Return: pointer to the allocated memory or %NULL on error */ -static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) +static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm) { - return __pte_alloc_one_kernel(mm); + return __pte_alloc_one_kernel_noprof(mm); } +#define pte_alloc_one_kernel(...) alloc_hooks(pte_alloc_one_kernel_noprof(__VA_ARGS__)) #endif /** @@ -61,11 +63,11 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) * * Return: `struct page` referencing the ptdesc or %NULL on error */ -static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp) +static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp) { struct ptdesc *ptdesc; - ptdesc = pagetable_alloc(gfp, 0); + ptdesc = pagetable_alloc_noprof(gfp, 0); if (!ptdesc) return NULL; if (!pagetable_pte_ctor(ptdesc)) { @@ -75,6 +77,7 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp) return ptdesc_page(ptdesc); } +#define __pte_alloc_one(...) alloc_hooks(__pte_alloc_one_noprof(__VA_ARGS__)) #ifndef __HAVE_ARCH_PTE_ALLOC_ONE /** @@ -85,10 +88,11 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp) * * Return: `struct page` referencing the ptdesc or %NULL on error */ -static inline pgtable_t pte_alloc_one(struct mm_struct *mm) +static inline pgtable_t pte_alloc_one_noprof(struct mm_struct *mm) { - return __pte_alloc_one(mm, GFP_PGTABLE_USER); + return __pte_alloc_one_noprof(mm, GFP_PGTABLE_USER); } +#define pte_alloc_one(...) 
alloc_hooks(pte_alloc_one_noprof(__VA_ARGS__)) #endif /* @@ -124,14 +128,14 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page) * * Return: pointer to the allocated memory or %NULL on error */ -static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +static inline pmd_t *pmd_alloc_one_noprof(struct mm_struct *mm, unsigned long addr) { struct ptdesc *ptdesc; gfp_t gfp = GFP_PGTABLE_USER; if (mm == &init_mm) gfp = GFP_PGTABLE_KERNEL; - ptdesc = pagetable_alloc(gfp, 0); + ptdesc = pagetable_alloc_noprof(gfp, 0); if (!ptdesc) return NULL; if (!pagetable_pmd_ctor(ptdesc)) { @@ -140,6 +144,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) } return ptdesc_address(ptdesc); } +#define pmd_alloc_one(...) alloc_hooks(pmd_alloc_one_noprof(__VA_ARGS__)) #endif #ifndef __HAVE_ARCH_PMD_FREE @@ -157,7 +162,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) #if CONFIG_PGTABLE_LEVELS > 3 -static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr) +static inline pud_t *__pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr) { gfp_t gfp = GFP_PGTABLE_USER; struct ptdesc *ptdesc; @@ -166,13 +171,14 @@ static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr) gfp = GFP_PGTABLE_KERNEL; gfp &= ~__GFP_HIGHMEM; - ptdesc = pagetable_alloc(gfp, 0); + ptdesc = pagetable_alloc_noprof(gfp, 0); if (!ptdesc) return NULL; pagetable_pud_ctor(ptdesc); return ptdesc_address(ptdesc); } +#define __pud_alloc_one(...) alloc_hooks(__pud_alloc_one_noprof(__VA_ARGS__)) #ifndef __HAVE_ARCH_PUD_ALLOC_ONE /** @@ -184,10 +190,11 @@ static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr) * * Return: pointer to the allocated memory or %NULL on error */ -static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) +static inline pud_t *pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr) { - return __pud_alloc_one(mm, addr); + return __pud_alloc_one_noprof(mm, addr); } +#define pud_alloc_one(...) alloc_hooks(pud_alloc_one_noprof(__VA_ARGS__)) #endif static inline void __pud_free(struct mm_struct *mm, pud_t *pud) diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 5d61f576cfc8..e5181cc9b7c5 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -578,19 +578,20 @@ static inline void ahash_request_set_tfm(struct ahash_request *req, * * Return: allocated request handle in case of success, or NULL if out of memory */ -static inline struct ahash_request *ahash_request_alloc( +static inline struct ahash_request *ahash_request_alloc_noprof( struct crypto_ahash *tfm, gfp_t gfp) { struct ahash_request *req; - req = kmalloc(sizeof(struct ahash_request) + - crypto_ahash_reqsize(tfm), gfp); + req = kmalloc_noprof(sizeof(struct ahash_request) + + crypto_ahash_reqsize(tfm), gfp); if (likely(req)) ahash_request_set_tfm(req, tfm); return req; } +#define ahash_request_alloc(...) 
alloc_hooks(ahash_request_alloc_noprof(__VA_ARGS__)) /** * ahash_request_free() - zeroize and free the request data structure diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h index 4ac46bafba9d..2a67793f52ad 100644 --- a/include/crypto/internal/acompress.h +++ b/include/crypto/internal/acompress.h @@ -69,15 +69,16 @@ static inline void acomp_request_complete(struct acomp_req *req, crypto_request_complete(&req->base, err); } -static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm) +static inline struct acomp_req *__acomp_request_alloc_noprof(struct crypto_acomp *tfm) { struct acomp_req *req; - req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); + req = kzalloc_noprof(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); if (likely(req)) acomp_request_set_tfm(req, tfm); return req; } +#define __acomp_request_alloc(...) alloc_hooks(__acomp_request_alloc_noprof(__VA_ARGS__)) static inline void __acomp_request_free(struct acomp_req *req) { diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index c8857d7bdb37..6c5330e316b0 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -861,19 +861,20 @@ static inline struct skcipher_request *skcipher_request_cast( * * Return: allocated request handle in case of success, or NULL if out of memory */ -static inline struct skcipher_request *skcipher_request_alloc( +static inline struct skcipher_request *skcipher_request_alloc_noprof( struct crypto_skcipher *tfm, gfp_t gfp) { struct skcipher_request *req; - req = kmalloc(sizeof(struct skcipher_request) + - crypto_skcipher_reqsize(tfm), gfp); + req = kmalloc_noprof(sizeof(struct skcipher_request) + + crypto_skcipher_reqsize(tfm), gfp); if (likely(req)) skcipher_request_set_tfm(req, tfm); return req; } +#define skcipher_request_alloc(...) 
alloc_hooks(skcipher_request_alloc_noprof(__VA_ARGS__)) /** * skcipher_request_free() - zeroize and free request data structure diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 890e152d553e..a86da1f38b3c 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2244,31 +2244,14 @@ void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align, gfp_t flags); #else -static inline void * -bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, - int node) -{ - return kmalloc_node(size, flags, node); -} - -static inline void * -bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags) -{ - return kzalloc(size, flags); -} - -static inline void * -bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags) -{ - return kvcalloc(n, size, flags); -} - -static inline void __percpu * -bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align, - gfp_t flags) -{ - return __alloc_percpu_gfp(size, align, flags); -} +#define bpf_map_kmalloc_node(_map, _size, _flags, _node) \ + kmalloc_node(_size, _flags, _node) +#define bpf_map_kzalloc(_map, _size, _flags) \ + kzalloc(_size, _flags) +#define bpf_map_kvcalloc(_map, _n, _size, _flags) \ + kvcalloc(_n, _size, _flags) +#define bpf_map_alloc_percpu(_map, _size, _align, _flags) \ + __alloc_percpu_gfp(_size, _align, _flags) #endif static inline int diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h index 79b2f78eec1a..1af241525a17 100644 --- a/include/linux/bpfptr.h +++ b/include/linux/bpfptr.h @@ -65,9 +65,9 @@ static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset, return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size); } -static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len) +static inline void *kvmemdup_bpfptr_noprof(bpfptr_t src, size_t len) { - void *p = kvmalloc(len, GFP_USER | __GFP_NOWARN); + void *p = kvmalloc_noprof(len, GFP_USER | __GFP_NOWARN); if (!p) return ERR_PTR(-ENOMEM); @@ -77,6 +77,7 @@ static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len) } return p; } +#define kvmemdup_bpfptr(...) alloc_hooks(kvmemdup_bpfptr_noprof(__VA_ARGS__)) static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count) { diff --git a/include/linux/dma-fence-chain.h b/include/linux/dma-fence-chain.h index 4bdf0b96da28..ad9e2506c2f4 100644 --- a/include/linux/dma-fence-chain.h +++ b/include/linux/dma-fence-chain.h @@ -86,10 +86,8 @@ dma_fence_chain_contained(struct dma_fence *fence) * * Returns a new struct dma_fence_chain object or NULL on failure. 
*/ -static inline struct dma_fence_chain *dma_fence_chain_alloc(void) -{ - return kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL); -}; +#define dma_fence_chain_alloc() \ + ((struct dma_fence_chain *)kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL)) /** * dma_fence_chain_free diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h index 7118ac28d468..ca70eeb6393e 100644 --- a/include/linux/hid_bpf.h +++ b/include/linux/hid_bpf.h @@ -149,10 +149,8 @@ static inline int hid_bpf_connect_device(struct hid_device *hdev) { return 0; } static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {} static inline void hid_bpf_destroy_device(struct hid_device *hid) {} static inline void hid_bpf_device_init(struct hid_device *hid) {} -static inline u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size) -{ - return kmemdup(rdesc, *size, GFP_KERNEL); -} +#define call_hid_bpf_rdesc_fixup(_hdev, _rdesc, _size) \ + ((u8 *)kmemdup(_rdesc, *(_size), GFP_KERNEL)) #endif /* CONFIG_HID_BPF */ diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 971f3e826e15..9512fe332668 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1586,10 +1586,8 @@ void jbd2_journal_put_journal_head(struct journal_head *jh); */ extern struct kmem_cache *jbd2_handle_cache; -static inline handle_t *jbd2_alloc_handle(gfp_t gfp_flags) -{ - return kmem_cache_zalloc(jbd2_handle_cache, gfp_flags); -} +#define jbd2_alloc_handle(_gfp_flags) \ + ((handle_t *)kmem_cache_zalloc(jbd2_handle_cache, _gfp_flags)) static inline void jbd2_free_handle(handle_t *handle) { @@ -1602,10 +1600,8 @@ static inline void jbd2_free_handle(handle_t *handle) */ extern struct kmem_cache *jbd2_inode_cache; -static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags) -{ - return kmem_cache_alloc(jbd2_inode_cache, gfp_flags); -} +#define jbd2_alloc_inode(_gfp_flags) \ + ((struct jbd2_inode *)kmem_cache_alloc(jbd2_inode_cache, _gfp_flags)) static inline void jbd2_free_inode(struct jbd2_inode *jinode) { diff --git a/include/linux/mm.h b/include/linux/mm.h index 0dbd737cfabd..60eabc6c8e00 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2860,12 +2860,13 @@ static inline bool pagetable_is_reserved(struct ptdesc *pt) * * Return: The ptdesc describing the allocated page tables. */ -static inline struct ptdesc *pagetable_alloc(gfp_t gfp, unsigned int order) +static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int order) { - struct page *page = alloc_pages(gfp | __GFP_COMP, order); + struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order); return page_ptdesc(page); } +#define pagetable_alloc(...) alloc_hooks(pagetable_alloc_noprof(__VA_ARGS__)) /** * pagetable_free - Free pagetables diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 4ae4684d1add..6fef56938a17 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -1170,14 +1170,15 @@ static inline void mm_init_cid(struct mm_struct *mm) cpumask_clear(mm_cidmask(mm)); } -static inline int mm_alloc_cid(struct mm_struct *mm) +static inline int mm_alloc_cid_noprof(struct mm_struct *mm) { - mm->pcpu_cid = alloc_percpu(struct mm_cid); + mm->pcpu_cid = alloc_percpu_noprof(struct mm_cid); if (!mm->pcpu_cid) return -ENOMEM; mm_init_cid(mm); return 0; } +#define mm_alloc_cid(...) 
alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__)) static inline void mm_destroy_cid(struct mm_struct *mm) { diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 13a82f11e4fd..03053de557cf 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -151,6 +151,9 @@ extern size_t pcpu_alloc_size(void __percpu *__pdata); #define alloc_percpu(type) \ (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ __alignof__(type)) +#define alloc_percpu_noprof(type) \ + ((typeof(type) __percpu *)pcpu_alloc_noprof(sizeof(type), \ + __alignof__(type), false, GFP_KERNEL)) extern void free_percpu(void __percpu *__pdata); diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 808f9d3ee546..fd037c127bb0 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h @@ -464,11 +464,11 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, /* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See * documentation for vmalloc for which of them are legal. */ -static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) +static inline void **__ptr_ring_init_queue_alloc_noprof(unsigned int size, gfp_t gfp) { if (size > KMALLOC_MAX_SIZE / sizeof(void *)) return NULL; - return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO); + return kvmalloc_array_noprof(size, sizeof(void *), gfp | __GFP_ZERO); } static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) @@ -484,9 +484,9 @@ static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) r->batch = 1; } -static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) +static inline int ptr_ring_init_noprof(struct ptr_ring *r, int size, gfp_t gfp) { - r->queue = __ptr_ring_init_queue_alloc(size, gfp); + r->queue = __ptr_ring_init_queue_alloc_noprof(size, gfp); if (!r->queue) return -ENOMEM; @@ -497,6 +497,7 @@ static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) return 0; } +#define ptr_ring_init(...) alloc_hooks(ptr_ring_init_noprof(__VA_ARGS__)) /* * Return entries into ring. Destroy entries that don't fit. @@ -587,11 +588,11 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, * In particular if you consume ring in interrupt or BH context, you must * disable interrupts/BH when doing so. */ -static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, +static inline int ptr_ring_resize_noprof(struct ptr_ring *r, int size, gfp_t gfp, void (*destroy)(void *)) { unsigned long flags; - void **queue = __ptr_ring_init_queue_alloc(size, gfp); + void **queue = __ptr_ring_init_queue_alloc_noprof(size, gfp); void **old; if (!queue) @@ -609,6 +610,7 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, return 0; } +#define ptr_ring_resize(...) alloc_hooks(ptr_ring_resize_noprof(__VA_ARGS__)) /* * Note: producer lock is nested within consumer lock, so if you @@ -616,21 +618,21 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, * In particular if you consume ring in interrupt or BH context, you must * disable interrupts/BH when doing so. 
*/ -static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, - unsigned int nrings, - int size, - gfp_t gfp, void (*destroy)(void *)) +static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings, + unsigned int nrings, + int size, + gfp_t gfp, void (*destroy)(void *)) { unsigned long flags; void ***queues; int i; - queues = kmalloc_array(nrings, sizeof(*queues), gfp); + queues = kmalloc_array_noprof(nrings, sizeof(*queues), gfp); if (!queues) goto noqueues; for (i = 0; i < nrings; ++i) { - queues[i] = __ptr_ring_init_queue_alloc(size, gfp); + queues[i] = __ptr_ring_init_queue_alloc_noprof(size, gfp); if (!queues[i]) goto nomem; } @@ -660,6 +662,8 @@ nomem: noqueues: return -ENOMEM; } +#define ptr_ring_resize_multiple(...) \ + alloc_hooks(ptr_ring_resize_multiple_noprof(__VA_ARGS__)) static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *)) { diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h index e2d45b7cb619..926496c9cc9c 100644 --- a/include/linux/skb_array.h +++ b/include/linux/skb_array.h @@ -177,10 +177,11 @@ static inline int skb_array_peek_len_any(struct skb_array *a) return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag); } -static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp) +static inline int skb_array_init_noprof(struct skb_array *a, int size, gfp_t gfp) { - return ptr_ring_init(&a->ring, size, gfp); + return ptr_ring_init_noprof(&a->ring, size, gfp); } +#define skb_array_init(...) alloc_hooks(skb_array_init_noprof(__VA_ARGS__)) static void __skb_array_destroy_skb(void *ptr) { @@ -198,15 +199,17 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp) return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb); } -static inline int skb_array_resize_multiple(struct skb_array **rings, - int nrings, unsigned int size, - gfp_t gfp) +static inline int skb_array_resize_multiple_noprof(struct skb_array **rings, + int nrings, unsigned int size, + gfp_t gfp) { BUILD_BUG_ON(offsetof(struct skb_array, ring)); - return ptr_ring_resize_multiple((struct ptr_ring **)rings, - nrings, size, gfp, - __skb_array_destroy_skb); + return ptr_ring_resize_multiple_noprof((struct ptr_ring **)rings, + nrings, size, gfp, + __skb_array_destroy_skb); } +#define skb_array_resize_multiple(...) \ + alloc_hooks(skb_array_resize_multiple_noprof(__VA_ARGS__)) static inline void skb_array_cleanup(struct skb_array *a) { diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 9d24aec064e8..b72920c9887b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3371,7 +3371,7 @@ void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason); * * %NULL is returned if there is no free memory. */ -static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, +static inline struct page *__dev_alloc_pages_noprof(gfp_t gfp_mask, unsigned int order) { /* This piece of code contains several assumptions. @@ -3384,13 +3384,11 @@ static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, */ gfp_mask |= __GFP_COMP | __GFP_MEMALLOC; - return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); + return alloc_pages_node_noprof(NUMA_NO_NODE, gfp_mask, order); } +#define __dev_alloc_pages(...) 
alloc_hooks(__dev_alloc_pages_noprof(__VA_ARGS__)) -static inline struct page *dev_alloc_pages(unsigned int order) -{ - return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order); -} +#define dev_alloc_pages(_order) __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, _order) /** * __dev_alloc_page - allocate a page for network Rx @@ -3400,15 +3398,13 @@ static inline struct page *dev_alloc_pages(unsigned int order) * * %NULL is returned if there is no free memory. */ -static inline struct page *__dev_alloc_page(gfp_t gfp_mask) +static inline struct page *__dev_alloc_page_noprof(gfp_t gfp_mask) { - return __dev_alloc_pages(gfp_mask, 0); + return __dev_alloc_pages_noprof(gfp_mask, 0); } +#define __dev_alloc_page(...) alloc_hooks(__dev_alloc_page_noprof(__VA_ARGS__)) -static inline struct page *dev_alloc_page(void) -{ - return dev_alloc_pages(0); -} +#define dev_alloc_page() dev_alloc_pages(0) /** * dev_page_is_reusable - check whether a page can be reused for network Rx diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index e65ec3fd2799..78efc5b20284 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -410,11 +410,9 @@ void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock); int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock, struct sk_msg *msg); -static inline struct sk_psock_link *sk_psock_init_link(void) -{ - return kzalloc(sizeof(struct sk_psock_link), - GFP_ATOMIC | __GFP_NOWARN); -} +#define sk_psock_init_link() \ + ((struct sk_psock_link *)kzalloc(sizeof(struct sk_psock_link), \ + GFP_ATOMIC | __GFP_NOWARN)) static inline void sk_psock_free_link(struct sk_psock_link *link) { diff --git a/include/linux/slab.h b/include/linux/slab.h index c0be1cd03cf6..4cc37ef22aae 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -744,6 +744,9 @@ void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node, */ #define kmalloc_track_caller(...) kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE) +#define kmalloc_track_caller_noprof(...) \ + kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_) + static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node) { @@ -781,6 +784,7 @@ extern void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node) __alloc_si #define kvmalloc_node(...) alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__)) #define kvmalloc(_size, _flags) kvmalloc_node(_size, _flags, NUMA_NO_NODE) +#define kvmalloc_noprof(_size, _flags) kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE) #define kvzalloc(_size, _flags) kvmalloc(_size, _flags|__GFP_ZERO) #define kvzalloc_node(_size, _flags, _node) kvmalloc_node(_size, _flags|__GFP_ZERO, _node) @@ -797,6 +801,7 @@ static inline __alloc_size(1, 2) void *kvmalloc_array_noprof(size_t n, size_t si #define kvmalloc_array(...) 
alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__)) #define kvcalloc(_n, _size, _flags) kvmalloc_array(_n, _size, _flags|__GFP_ZERO) +#define kvcalloc_noprof(_n, _size, _flags) kvmalloc_array_noprof(_n, _size, _flags|__GFP_ZERO) extern void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags) __realloc_size(3); diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index 317200cd3a60..fc5a206c4043 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -117,9 +117,9 @@ static inline int copy_to_sockptr(sockptr_t dst, const void *src, size_t size) return copy_to_sockptr_offset(dst, 0, src, size); } -static inline void *memdup_sockptr(sockptr_t src, size_t len) +static inline void *memdup_sockptr_noprof(sockptr_t src, size_t len) { - void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); + void *p = kmalloc_track_caller_noprof(len, GFP_USER | __GFP_NOWARN); if (!p) return ERR_PTR(-ENOMEM); @@ -129,10 +129,11 @@ static inline void *memdup_sockptr(sockptr_t src, size_t len) } return p; } +#define memdup_sockptr(...) alloc_hooks(memdup_sockptr_noprof(__VA_ARGS__)) -static inline void *memdup_sockptr_nul(sockptr_t src, size_t len) +static inline void *memdup_sockptr_nul_noprof(sockptr_t src, size_t len) { - char *p = kmalloc_track_caller(len + 1, GFP_KERNEL); + char *p = kmalloc_track_caller_noprof(len + 1, GFP_KERNEL); if (!p) return ERR_PTR(-ENOMEM); @@ -143,6 +144,7 @@ static inline void *memdup_sockptr_nul(sockptr_t src, size_t len) p[len] = '\0'; return p; } +#define memdup_sockptr_nul(...) alloc_hooks(memdup_sockptr_nul_noprof(__VA_ARGS__)) static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count) { diff --git a/include/net/netlabel.h b/include/net/netlabel.h index f3ab0b8a4b18..c31bd96dafdb 100644 --- a/include/net/netlabel.h +++ b/include/net/netlabel.h @@ -274,15 +274,17 @@ struct netlbl_calipso_ops { * on success, NULL on failure. * */ -static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc(gfp_t flags) +static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc_noprof(gfp_t flags) { struct netlbl_lsm_cache *cache; - cache = kzalloc(sizeof(*cache), flags); + cache = kzalloc_noprof(sizeof(*cache), flags); if (cache) refcount_set(&cache->refcount, 1); return cache; } +#define netlbl_secattr_cache_alloc(...) \ + alloc_hooks(netlbl_secattr_cache_alloc_noprof(__VA_ARGS__)) /** * netlbl_secattr_cache_free - Frees a netlbl_lsm_cache struct @@ -311,10 +313,11 @@ static inline void netlbl_secattr_cache_free(struct netlbl_lsm_cache *cache) * on failure. * */ -static inline struct netlbl_lsm_catmap *netlbl_catmap_alloc(gfp_t flags) +static inline struct netlbl_lsm_catmap *netlbl_catmap_alloc_noprof(gfp_t flags) { - return kzalloc(sizeof(struct netlbl_lsm_catmap), flags); + return kzalloc_noprof(sizeof(struct netlbl_lsm_catmap), flags); } +#define netlbl_catmap_alloc(...) alloc_hooks(netlbl_catmap_alloc_noprof(__VA_ARGS__)) /** * netlbl_catmap_free - Free a LSM secattr catmap @@ -376,10 +379,11 @@ static inline void netlbl_secattr_destroy(struct netlbl_lsm_secattr *secattr) * pointer on success, or NULL on failure. * */ -static inline struct netlbl_lsm_secattr *netlbl_secattr_alloc(gfp_t flags) +static inline struct netlbl_lsm_secattr *netlbl_secattr_alloc_noprof(gfp_t flags) { - return kzalloc(sizeof(struct netlbl_lsm_secattr), flags); + return kzalloc_noprof(sizeof(struct netlbl_lsm_secattr), flags); } +#define netlbl_secattr_alloc(...) 
alloc_hooks(netlbl_secattr_alloc_noprof(__VA_ARGS__)) /** * netlbl_secattr_free - Frees a netlbl_lsm_secattr struct diff --git a/include/net/netlink.h b/include/net/netlink.h index c19ff921b661..972b5484fa6f 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -1891,10 +1891,11 @@ static inline struct nla_bitfield32 nla_get_bitfield32(const struct nlattr *nla) * @src: netlink attribute to duplicate from * @gfp: GFP mask */ -static inline void *nla_memdup(const struct nlattr *src, gfp_t gfp) +static inline void *nla_memdup_noprof(const struct nlattr *src, gfp_t gfp) { - return kmemdup(nla_data(src), nla_len(src), gfp); + return kmemdup_noprof(nla_data(src), nla_len(src), gfp); } +#define nla_memdup(...) alloc_hooks(nla_memdup_noprof(__VA_ARGS__)) /** * nla_nest_start_noflag - Start a new level of nested attributes diff --git a/include/net/request_sock.h b/include/net/request_sock.h index 004e651e6067..29495c331d20 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -127,12 +127,12 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb, } static inline struct request_sock * -reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener, +reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener, bool attach_listener) { struct request_sock *req; - req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN); + req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN); if (!req) return NULL; req->rsk_listener = NULL; @@ -157,6 +157,7 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener, return req; } +#define reqsk_alloc(...) alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__)) static inline void __reqsk_free(struct request_sock *req) { diff --git a/include/net/tcx.h b/include/net/tcx.h index 04be9377785d..72a3e75e539f 100644 --- a/include/net/tcx.h +++ b/include/net/tcx.h @@ -75,9 +75,9 @@ tcx_entry_fetch(struct net_device *dev, bool ingress) return rcu_dereference_rtnl(dev->tcx_egress); } -static inline struct bpf_mprog_entry *tcx_entry_create(void) +static inline struct bpf_mprog_entry *tcx_entry_create_noprof(void) { - struct tcx_entry *tcx = kzalloc(sizeof(*tcx), GFP_KERNEL); + struct tcx_entry *tcx = kzalloc_noprof(sizeof(*tcx), GFP_KERNEL); if (tcx) { bpf_mprog_bundle_init(&tcx->bundle); @@ -85,6 +85,7 @@ static inline struct bpf_mprog_entry *tcx_entry_create(void) } return NULL; } +#define tcx_entry_create(...) 
alloc_hooks(tcx_entry_create_noprof(__VA_ARGS__)) static inline void tcx_entry_free(struct bpf_mprog_entry *entry) { diff --git a/net/sunrpc/auth_gss/auth_gss_internal.h b/net/sunrpc/auth_gss/auth_gss_internal.h index c53b329092d4..4ebc1b7043d9 100644 --- a/net/sunrpc/auth_gss/auth_gss_internal.h +++ b/net/sunrpc/auth_gss/auth_gss_internal.h @@ -23,7 +23,7 @@ simple_get_bytes(const void *p, const void *end, void *res, size_t len) } static inline const void * -simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) +simple_get_netobj_noprof(const void *p, const void *end, struct xdr_netobj *dest) { const void *q; unsigned int len; @@ -35,7 +35,7 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) if (unlikely(q > end || q < p)) return ERR_PTR(-EFAULT); if (len) { - dest->data = kmemdup(p, len, GFP_KERNEL); + dest->data = kmemdup_noprof(p, len, GFP_KERNEL); if (unlikely(dest->data == NULL)) return ERR_PTR(-ENOMEM); } else @@ -43,3 +43,5 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) dest->len = len; return q; } + +#define simple_get_netobj(...) alloc_hooks(simple_get_netobj_noprof(__VA_ARGS__)) -- cgit From 85edc15a4c606094a14c36ebf5bceea7f9a3e395 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 21 Mar 2024 14:24:41 +0000 Subject: mm: remove folio_prep_large_rmappable() Now that prep_compound_page() initialises folio->_deferred_list, folio_prep_large_rmappable()'s only purpose is to set the large_rmappable flag, so inline it into the two callers. Take the opportunity to convert the large_rmappable definition from PAGEFLAG to FOLIO_FLAG and remove the existance of PageTestLargeRmappable and friends. Link: https://lkml.kernel.org/r/20240321142448.1645400-4-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Acked-by: Vlastimil Babka Cc: Miaohe Lin Cc: Muchun Song Cc: Oscar Salvador Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 3 --- include/linux/page-flags.h | 4 ++-- mm/huge_memory.c | 9 +-------- mm/internal.h | 3 ++- 4 files changed, 5 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index de0c89105076..0e16451adaba 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -263,7 +263,6 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); -void folio_prep_large_rmappable(struct folio *folio); bool can_split_folio(struct folio *folio, int *pextra_pins); int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, unsigned int new_order); @@ -411,8 +410,6 @@ static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, return 0; } -static inline void folio_prep_large_rmappable(struct folio *folio) {} - #define transparent_hugepage_flags 0UL #define thp_get_unmapped_area NULL diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 4bf1c25fd1dc..6fb3cd42ee59 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -868,9 +868,9 @@ static inline void ClearPageCompound(struct page *page) BUG_ON(!PageHead(page)); ClearPageHead(page); } -PAGEFLAG(LargeRmappable, large_rmappable, PF_SECOND) +FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE) #else -TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable) +FOLIO_FLAG_FALSE(large_rmappable) #endif #define 
PG_head_mask ((1UL << PG_head)) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 4cc7133aaa4b..16b2c5622fb1 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -789,13 +789,6 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio) } #endif -void folio_prep_large_rmappable(struct folio *folio) -{ - if (!folio || !folio_test_large(folio)) - return; - folio_set_large_rmappable(folio); -} - static inline bool is_transparent_hugepage(struct folio *folio) { if (!folio_test_large(folio)) @@ -2862,7 +2855,7 @@ static void __split_huge_page_tail(struct folio *folio, int tail, clear_compound_head(page_tail); if (new_order) { prep_compound_page(page_tail, new_order); - folio_prep_large_rmappable(new_folio); + folio_set_large_rmappable(new_folio); } /* Finally unfreeze refcount. Additional reference from page cache. */ diff --git a/mm/internal.h b/mm/internal.h index 5c0c57c9cd19..ab8250d8a591 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -513,7 +513,8 @@ static inline struct folio *page_rmappable_folio(struct page *page) { struct folio *folio = (struct folio *)page; - folio_prep_large_rmappable(folio); + if (folio && folio_test_large(folio)) + folio_set_large_rmappable(folio); return folio; } -- cgit From 8682a7be36d8c6ebd484753034a716a13f8a1f54 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 21 Mar 2024 14:24:44 +0000 Subject: mm: remove a call to compound_head() from is_page_hwpoison() We can call it only once instead of twice. Link: https://lkml.kernel.org/r/20240321142448.1645400-7-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Reviewed-by: Miaohe Lin Acked-by: Vlastimil Babka Cc: Muchun Song Cc: Oscar Salvador Signed-off-by: Andrew Morton --- include/linux/page-flags.h | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 6fb3cd42ee59..94eb8a11a321 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -1065,11 +1065,14 @@ static inline bool PageHuge(const struct page *page) * best effort only and inherently racy: there is no way to synchronize with * failing hardware. */ -static inline bool is_page_hwpoison(struct page *page) +static inline bool is_page_hwpoison(const struct page *page) { + const struct folio *folio; + if (PageHWPoison(page)) return true; - return PageHuge(page) && PageHWPoison(compound_head(page)); + folio = page_folio(page); + return folio_test_hugetlb(folio) && PageHWPoison(&folio->page); } extern bool is_free_buddy_page(struct page *page); -- cgit From 46df8e73a4a3f1445f2a8429111e72ede1f4d291 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 21 Mar 2024 14:24:45 +0000 Subject: mm: free up PG_slab Reclaim the Slab page flag by using a spare bit in PageType. We are perennially short of page flags for various purposes, and now that the original SLAB allocator has been retired, SLUB does not use the mapcount/page_type field. This lets us remove a number of special cases for ignoring mapcount on Slab pages. 
[willy@infradead.org: update vmcoreinfo] Link: https://lkml.kernel.org/r/ZgGV-O8WYQ_83kxp@casper.infradead.org Link: https://lkml.kernel.org/r/20240321142448.1645400-8-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Acked-by: David Hildenbrand Cc: Miaohe Lin Cc: Muchun Song Cc: Oscar Salvador Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- include/linux/page-flags.h | 21 +++++++++++++++++---- include/trace/events/mmflags.h | 2 +- kernel/vmcore_info.c | 3 ++- mm/memory-failure.c | 9 --------- mm/slab.h | 2 +- 5 files changed, 21 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 94eb8a11a321..73e0b17c7728 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -109,7 +109,6 @@ enum pageflags { PG_active, PG_workingset, PG_error, - PG_slab, PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/ PG_arch_1, PG_reserved, @@ -524,7 +523,6 @@ PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD) TESTCLEARFLAG(Active, active, PF_HEAD) PAGEFLAG(Workingset, workingset, PF_HEAD) TESTCLEARFLAG(Workingset, workingset, PF_HEAD) -__PAGEFLAG(Slab, slab, PF_NO_TAIL) PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */ /* Xen */ @@ -931,7 +929,7 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned) #endif /* - * For pages that are never mapped to userspace (and aren't PageSlab), + * For pages that are never mapped to userspace, * page_type may be used. Because it is initialised to -1, we invert the * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and * __ClearPageFoo *sets* the bit used for PageFoo. We reserve a few high and @@ -947,6 +945,7 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned) #define PG_table 0x00000200 #define PG_guard 0x00000400 #define PG_hugetlb 0x00000800 +#define PG_slab 0x00001000 #define PageType(page, flag) \ ((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) @@ -1041,6 +1040,20 @@ PAGE_TYPE_OPS(Table, table, pgtable) */ PAGE_TYPE_OPS(Guard, guard, guard) +FOLIO_TYPE_OPS(slab, slab) + +/** + * PageSlab - Determine if the page belongs to the slab allocator + * @page: The page to test. + * + * Context: Any context. + * Return: True for slab pages, false for any other kind of page. 
+ */ +static inline bool PageSlab(const struct page *page) +{ + return folio_test_slab(page_folio(page)); +} + #ifdef CONFIG_HUGETLB_PAGE FOLIO_TYPE_OPS(hugetlb, hugetlb) #else @@ -1121,7 +1134,7 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page) (1UL << PG_lru | 1UL << PG_locked | \ 1UL << PG_private | 1UL << PG_private_2 | \ 1UL << PG_writeback | 1UL << PG_reserved | \ - 1UL << PG_slab | 1UL << PG_active | \ + 1UL << PG_active | \ 1UL << PG_unevictable | __PG_MLOCKED | LRU_GEN_MASK) /* diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index d55e53ac91bd..e46d6e82765e 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -107,7 +107,6 @@ DEF_PAGEFLAG_NAME(lru), \ DEF_PAGEFLAG_NAME(active), \ DEF_PAGEFLAG_NAME(workingset), \ - DEF_PAGEFLAG_NAME(slab), \ DEF_PAGEFLAG_NAME(owner_priv_1), \ DEF_PAGEFLAG_NAME(arch_1), \ DEF_PAGEFLAG_NAME(reserved), \ @@ -135,6 +134,7 @@ IF_HAVE_PG_ARCH_X(arch_3) #define DEF_PAGETYPE_NAME(_name) { PG_##_name, __stringify(_name) } #define __def_pagetype_names \ + DEF_PAGETYPE_NAME(slab), \ DEF_PAGETYPE_NAME(hugetlb), \ DEF_PAGETYPE_NAME(offline), \ DEF_PAGETYPE_NAME(guard), \ diff --git a/kernel/vmcore_info.c b/kernel/vmcore_info.c index 23c125c2e243..1d5eadd9dd61 100644 --- a/kernel/vmcore_info.c +++ b/kernel/vmcore_info.c @@ -198,7 +198,8 @@ static int __init crash_save_vmcoreinfo_init(void) VMCOREINFO_NUMBER(PG_private); VMCOREINFO_NUMBER(PG_swapcache); VMCOREINFO_NUMBER(PG_swapbacked); - VMCOREINFO_NUMBER(PG_slab); +#define PAGE_SLAB_MAPCOUNT_VALUE (~PG_slab) + VMCOREINFO_NUMBER(PAGE_SLAB_MAPCOUNT_VALUE); #ifdef CONFIG_MEMORY_FAILURE VMCOREINFO_NUMBER(PG_hwpoison); #endif diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 9e62a00b46dd..0a7a8a4ba421 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -1251,7 +1251,6 @@ static int me_huge_page(struct page_state *ps, struct page *p) #define mlock (1UL << PG_mlocked) #define lru (1UL << PG_lru) #define head (1UL << PG_head) -#define slab (1UL << PG_slab) #define reserved (1UL << PG_reserved) static struct page_state error_states[] = { @@ -1261,13 +1260,6 @@ static struct page_state error_states[] = { * PG_buddy pages only make a small fraction of all free pages. */ - /* - * Could in theory check if slab page is free or if we can drop - * currently unused objects without touching them. But just - * treat it as standard kernel for now. - */ - { slab, slab, MF_MSG_SLAB, me_kernel }, - { head, head, MF_MSG_HUGE, me_huge_page }, { sc|dirty, sc|dirty, MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty }, @@ -1294,7 +1286,6 @@ static struct page_state error_states[] = { #undef mlock #undef lru #undef head -#undef slab #undef reserved static void update_per_node_mf_stats(unsigned long pfn, diff --git a/mm/slab.h b/mm/slab.h index 65db525e93af..1343bfa12cee 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -84,8 +84,8 @@ struct slab { }; struct rcu_head rcu_head; }; - unsigned int __unused; + unsigned int __page_type; atomic_t __page_refcount; #ifdef CONFIG_SLAB_OBJ_EXT unsigned long obj_exts; -- cgit From eff201b8755bbc95a65165960a6464ca2ae18587 Mon Sep 17 00:00:00 2001 From: Hao Ge Date: Thu, 21 Mar 2024 11:22:56 +0800 Subject: mm/page-flags: make __PageMovable return bool Make __PageMovable() return bool like __folio_test_movable(). 
Link: https://lkml.kernel.org/r/20240321032256.82063-1-gehao@kylinos.cn Signed-off-by: Hao Ge Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- include/linux/page-flags.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 73e0b17c7728..9c7ca28ac84c 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -707,7 +707,7 @@ static __always_inline bool __folio_test_movable(const struct folio *folio) PAGE_MAPPING_MOVABLE; } -static __always_inline int __PageMovable(const struct page *page) +static __always_inline bool __PageMovable(const struct page *page) { return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_MOVABLE; -- cgit From 64c2e895df14c642fbb05e5ac7f55044cd4913a3 Mon Sep 17 00:00:00 2001 From: Hao Ge Date: Thu, 21 Mar 2024 11:07:12 +0800 Subject: mm/page-flags: make PageMappingFlags return bool Make PageMappingFlags() return bool like folio_mapping_flags(). Link: https://lkml.kernel.org/r/20240321030712.80618-1-gehao@kylinos.cn Signed-off-by: Hao Ge Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- include/linux/page-flags.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 9c7ca28ac84c..0163d6da6283 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -686,7 +686,7 @@ static __always_inline bool folio_mapping_flags(const struct folio *folio) return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0; } -static __always_inline int PageMappingFlags(const struct page *page) +static __always_inline bool PageMappingFlags(const struct page *page) { return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0; } -- cgit From c0cd6f557b9090525d288806cccbc73440ac235a Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 20 Mar 2024 14:02:11 -0400 Subject: mm: page_alloc: fix freelist movement during block conversion Currently, page block type conversion during fallbacks, atomic reservations and isolation can strand various amounts of free pages on incorrect freelists. For example, fallback stealing moves free pages in the block to the new type's freelists, but then may not actually claim the block for that type if there aren't enough compatible pages already allocated. In all cases, free page moving might fail if the block straddles more than one zone, in which case no free pages are moved at all, but the block type is changed anyway. This is detrimental to type hygiene on the freelists. It encourages incompatible page mixing down the line (ask for one type, get another) and thus contributes to long-term fragmentation. Split the process into a proper transaction: check first if conversion will happen, then try to move the free pages, and only if that was successful convert the block to the new type. 
[baolin.wang@linux.alibaba.com: fix allocation failures with CONFIG_CMA] Link: https://lkml.kernel.org/r/a97697e0-45b0-4f71-b087-fdc7a1d43c0e@linux.alibaba.com Link: https://lkml.kernel.org/r/20240320180429.678181-7-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Signed-off-by: Baolin Wang Tested-by: "Huang, Ying" Reviewed-by: Vlastimil Babka Tested-by: Baolin Wang Cc: David Hildenbrand Cc: Mel Gorman Cc: Zi Yan Signed-off-by: Andrew Morton --- include/linux/page-isolation.h | 3 +- mm/page_alloc.c | 174 +++++++++++++++++++++++++---------------- mm/page_isolation.c | 22 ++++-- 3 files changed, 121 insertions(+), 78 deletions(-) (limited to 'include/linux') diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 4ac34392823a..8550b3c91480 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -34,8 +34,7 @@ static inline bool is_migrate_isolate(int migratetype) #define REPORT_FAILURE 0x2 void set_pageblock_migratetype(struct page *page, int migratetype); -int move_freepages_block(struct zone *zone, struct page *page, - int migratetype, int *num_movable); +int move_freepages_block(struct zone *zone, struct page *page, int migratetype); int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, int migratetype, int flags, gfp_t gfp_flags); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 0e223d5b94fa..a7cfe65e45c1 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1601,9 +1601,8 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone, * Note that start_page and end_pages are not aligned on a pageblock * boundary. If alignment is required, use move_freepages_block() */ -static int move_freepages(struct zone *zone, - unsigned long start_pfn, unsigned long end_pfn, - int migratetype, int *num_movable) +static int move_freepages(struct zone *zone, unsigned long start_pfn, + unsigned long end_pfn, int migratetype) { struct page *page; unsigned long pfn; @@ -1613,14 +1612,6 @@ static int move_freepages(struct zone *zone, for (pfn = start_pfn; pfn <= end_pfn;) { page = pfn_to_page(pfn); if (!PageBuddy(page)) { - /* - * We assume that pages that could be isolated for - * migration are movable. But we don't actually try - * isolating, as that would be expensive. - */ - if (num_movable && - (PageLRU(page) || __PageMovable(page))) - (*num_movable)++; pfn++; continue; } @@ -1638,17 +1629,16 @@ static int move_freepages(struct zone *zone, return pages_moved; } -int move_freepages_block(struct zone *zone, struct page *page, - int migratetype, int *num_movable) +static bool prep_move_freepages_block(struct zone *zone, struct page *page, + unsigned long *start_pfn, + unsigned long *end_pfn, + int *num_free, int *num_movable) { - unsigned long start_pfn, end_pfn, pfn; - - if (num_movable) - *num_movable = 0; + unsigned long pfn, start, end; pfn = page_to_pfn(page); - start_pfn = pageblock_start_pfn(pfn); - end_pfn = pageblock_end_pfn(pfn) - 1; + start = pageblock_start_pfn(pfn); + end = pageblock_end_pfn(pfn) - 1; /* * The caller only has the lock for @zone, don't touch ranges @@ -1657,13 +1647,50 @@ int move_freepages_block(struct zone *zone, struct page *page, * accompanied by other operations such as migratetype updates * which also should be locked. 
*/ - if (!zone_spans_pfn(zone, start_pfn)) - return 0; - if (!zone_spans_pfn(zone, end_pfn)) - return 0; + if (!zone_spans_pfn(zone, start)) + return false; + if (!zone_spans_pfn(zone, end)) + return false; + + *start_pfn = start; + *end_pfn = end; - return move_freepages(zone, start_pfn, end_pfn, migratetype, - num_movable); + if (num_free) { + *num_free = 0; + *num_movable = 0; + for (pfn = start; pfn <= end;) { + page = pfn_to_page(pfn); + if (PageBuddy(page)) { + int nr = 1 << buddy_order(page); + + *num_free += nr; + pfn += nr; + continue; + } + /* + * We assume that pages that could be isolated for + * migration are movable. But we don't actually try + * isolating, as that would be expensive. + */ + if (PageLRU(page) || __PageMovable(page)) + (*num_movable)++; + pfn++; + } + } + + return true; +} + +int move_freepages_block(struct zone *zone, struct page *page, + int migratetype) +{ + unsigned long start_pfn, end_pfn; + + if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn, + NULL, NULL)) + return -1; + + return move_freepages(zone, start_pfn, end_pfn, migratetype); } static void change_pageblock_range(struct page *pageblock_page, @@ -1748,33 +1775,37 @@ static inline bool boost_watermark(struct zone *zone) } /* - * This function implements actual steal behaviour. If order is large enough, - * we can steal whole pageblock. If not, we first move freepages in this - * pageblock to our migratetype and determine how many already-allocated pages - * are there in the pageblock with a compatible migratetype. If at least half - * of pages are free or compatible, we can change migratetype of the pageblock - * itself, so pages freed in the future will be put on the correct free list. + * This function implements actual steal behaviour. If order is large enough, we + * can claim the whole pageblock for the requested migratetype. If not, we check + * the pageblock for constituent pages; if at least half of the pages are free + * or compatible, we can still claim the whole block, so pages freed in the + * future will be put on the correct free list. Otherwise, we isolate exactly + * the order we need from the fallback block and leave its migratetype alone. */ -static void steal_suitable_fallback(struct zone *zone, struct page *page, - unsigned int alloc_flags, int start_type, bool whole_block) +static struct page * +steal_suitable_fallback(struct zone *zone, struct page *page, + int current_order, int order, int start_type, + unsigned int alloc_flags, bool whole_block) { - unsigned int current_order = buddy_order(page); int free_pages, movable_pages, alike_pages; - int old_block_type; + unsigned long start_pfn, end_pfn; + int block_type; - old_block_type = get_pageblock_migratetype(page); + block_type = get_pageblock_migratetype(page); /* * This can happen due to races and we want to prevent broken * highatomic accounting. 
*/ - if (is_migrate_highatomic(old_block_type)) + if (is_migrate_highatomic(block_type)) goto single_page; /* Take ownership for orders >= pageblock_order */ if (current_order >= pageblock_order) { + del_page_from_free_list(page, zone, current_order); change_pageblock_range(page, current_order, start_type); - goto single_page; + expand(zone, page, order, current_order, start_type); + return page; } /* @@ -1789,10 +1820,9 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page, if (!whole_block) goto single_page; - free_pages = move_freepages_block(zone, page, start_type, - &movable_pages); /* moving whole block can fail due to zone boundary conditions */ - if (!free_pages) + if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn, + &free_pages, &movable_pages)) goto single_page; /* @@ -1810,7 +1840,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page, * vice versa, be conservative since we can't distinguish the * exact migratetype of non-movable pages. */ - if (old_block_type == MIGRATE_MOVABLE) + if (block_type == MIGRATE_MOVABLE) alike_pages = pageblock_nr_pages - (free_pages + movable_pages); else @@ -1821,13 +1851,16 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page, * compatible migratability as our allocation, claim the whole block. */ if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || - page_group_by_mobility_disabled) + page_group_by_mobility_disabled) { + move_freepages(zone, start_pfn, end_pfn, start_type); set_pageblock_migratetype(page, start_type); - - return; + return __rmqueue_smallest(zone, order, start_type); + } single_page: - move_to_free_list(page, zone, current_order, start_type); + del_page_from_free_list(page, zone, current_order); + expand(zone, page, order, current_order, block_type); + return page; } /* @@ -1895,9 +1928,10 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone) mt = get_pageblock_migratetype(page); /* Only reserve normal pageblocks (i.e., they can merge with others) */ if (migratetype_is_mergeable(mt)) { - zone->nr_reserved_highatomic += pageblock_nr_pages; - set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); - move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); + if (move_freepages_block(zone, page, MIGRATE_HIGHATOMIC) != -1) { + set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); + zone->nr_reserved_highatomic += pageblock_nr_pages; + } } out_unlock: @@ -1922,7 +1956,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, struct zone *zone; struct page *page; int order; - bool ret; + int ret; for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, ac->nodemask) { @@ -1971,10 +2005,14 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, * of pageblocks that cannot be completely freed * may increase. */ + ret = move_freepages_block(zone, page, ac->migratetype); + /* + * Reserving this block already succeeded, so this should + * not fail on zone boundaries. + */ + WARN_ON_ONCE(ret == -1); set_pageblock_migratetype(page, ac->migratetype); - ret = move_freepages_block(zone, page, ac->migratetype, - NULL); - if (ret) { + if (ret > 0) { spin_unlock_irqrestore(&zone->lock, flags); return ret; } @@ -1995,7 +2033,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, * deviation from the rest of this file, to make the for loop * condition simpler. 
*/ -static __always_inline bool +static __always_inline struct page * __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, unsigned int alloc_flags) { @@ -2042,7 +2080,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype, goto do_steal; } - return false; + return NULL; find_smallest: for (current_order = order; current_order < NR_PAGE_ORDERS; current_order++) { @@ -2062,14 +2100,14 @@ find_smallest: do_steal: page = get_page_from_free_area(area, fallback_mt); - steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, - can_steal); + /* take off list, maybe claim block, expand remainder */ + page = steal_suitable_fallback(zone, page, current_order, order, + start_migratetype, alloc_flags, can_steal); trace_mm_page_alloc_extfrag(page, order, current_order, start_migratetype, fallback_mt); - return true; - + return page; } /* @@ -2096,15 +2134,15 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype, return page; } } -retry: + page = __rmqueue_smallest(zone, order, migratetype); if (unlikely(!page)) { if (alloc_flags & ALLOC_CMA) page = __rmqueue_cma_fallback(zone, order); - if (!page && __rmqueue_fallback(zone, order, migratetype, - alloc_flags)) - goto retry; + if (!page) + page = __rmqueue_fallback(zone, order, migratetype, + alloc_flags); } return page; } @@ -2659,12 +2697,10 @@ int __isolate_free_page(struct page *page, unsigned int order) * Only change normal pageblocks (i.e., they can merge * with others) */ - if (migratetype_is_mergeable(mt)) { - set_pageblock_migratetype(page, - MIGRATE_MOVABLE); - move_freepages_block(zone, page, - MIGRATE_MOVABLE, NULL); - } + if (migratetype_is_mergeable(mt) && + move_freepages_block(zone, page, + MIGRATE_MOVABLE) != -1) + set_pageblock_migratetype(page, MIGRATE_MOVABLE); } } diff --git a/mm/page_isolation.c b/mm/page_isolation.c index a5c8fa4c2a75..71539d7b96cf 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -178,15 +178,18 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_ unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end, migratetype, isol_flags); if (!unmovable) { - unsigned long nr_pages; + int nr_pages; int mt = get_pageblock_migratetype(page); + nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); + /* Block spans zone boundaries? */ + if (nr_pages == -1) { + spin_unlock_irqrestore(&zone->lock, flags); + return -EBUSY; + } + __mod_zone_freepage_state(zone, -nr_pages, mt); set_pageblock_migratetype(page, MIGRATE_ISOLATE); zone->nr_isolate_pageblock++; - nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE, - NULL); - - __mod_zone_freepage_state(zone, -nr_pages, mt); spin_unlock_irqrestore(&zone->lock, flags); return 0; } @@ -206,7 +209,7 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_ static void unset_migratetype_isolate(struct page *page, int migratetype) { struct zone *zone; - unsigned long flags, nr_pages; + unsigned long flags; bool isolated_page = false; unsigned int order; struct page *buddy; @@ -252,7 +255,12 @@ static void unset_migratetype_isolate(struct page *page, int migratetype) * allocation. */ if (!isolated_page) { - nr_pages = move_freepages_block(zone, page, migratetype, NULL); + int nr_pages = move_freepages_block(zone, page, migratetype); + /* + * Isolating this block already succeeded, so this + * should not fail on zone boundaries. 
+ */ + WARN_ON_ONCE(nr_pages == -1); __mod_zone_freepage_state(zone, nr_pages, migratetype); } set_pageblock_migratetype(page, migratetype); -- cgit From fd919a85cd55be5d00a6a7372071f44c8eafb825 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 20 Mar 2024 14:02:14 -0400 Subject: mm: page_isolation: prepare for hygienic freelists Page isolation currently sets MIGRATE_ISOLATE on a block, then drops zone->lock and scans the block for straddling buddies to split up. Because this happens non-atomically wrt the page allocator, it's possible for allocations to get a buddy whose first block is a regular pcp migratetype but whose tail is isolated. This means that in certain cases memory can still be allocated after isolation. It will also trigger the freelist type hygiene warnings in subsequent patches. start_isolate_page_range() isolate_single_pageblock() set_migratetype_isolate(tail) lock zone->lock move_freepages_block(tail) // nop set_pageblock_migratetype(tail) unlock zone->lock __rmqueue_smallest() del_page_from_freelist(head) expand(head, head_mt) WARN(head_mt != tail_mt) start_pfn = ALIGN_DOWN(MAX_ORDER_NR_PAGES) for (pfn = start_pfn, pfn < end_pfn) if (PageBuddy()) split_free_page(head) Introduce a variant of move_freepages_block() provided by the allocator specifically for page isolation; it moves free pages, converts the block, and handles the splitting of straddling buddies while holding zone->lock. The allocator knows that pageblocks and buddies are always naturally aligned, which means that buddies can only straddle blocks if they're actually >pageblock_order. This means the search-and-split part can be simplified compared to what page isolation used to do. Also tighten up the page isolation code around the expectations of which pages can be large, and how they are freed. Based on extensive discussions with and invaluable input from Zi Yan. 
[hannes@cmpxchg.org: work around older gcc warning] Link: https://lkml.kernel.org/r/20240321142426.GB777580@cmpxchg.org Link: https://lkml.kernel.org/r/20240320180429.678181-10-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Reviewed-by: Vlastimil Babka Tested-by: Baolin Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Mel Gorman Cc: Zi Yan Signed-off-by: Andrew Morton --- include/linux/page-isolation.h | 4 +- mm/internal.h | 4 - mm/page_alloc.c | 204 ++++++++++++++++++++++++----------------- mm/page_isolation.c | 106 +++++++-------------- 4 files changed, 155 insertions(+), 163 deletions(-) (limited to 'include/linux') diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 8550b3c91480..c16db0067090 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -34,7 +34,9 @@ static inline bool is_migrate_isolate(int migratetype) #define REPORT_FAILURE 0x2 void set_pageblock_migratetype(struct page *page, int migratetype); -int move_freepages_block(struct zone *zone, struct page *page, int migratetype); + +bool move_freepages_block_isolate(struct zone *zone, struct page *page, + int migratetype); int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, int migratetype, int flags, gfp_t gfp_flags); diff --git a/mm/internal.h b/mm/internal.h index ab8250d8a591..cbc40f07537c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -562,10 +562,6 @@ extern void *memmap_alloc(phys_addr_t size, phys_addr_t align, void memmap_init_range(unsigned long, int, unsigned long, unsigned long, unsigned long, enum meminit_context, struct vmem_altmap *, int); - -int split_free_page(struct page *free_page, - unsigned int order, unsigned long split_pfn_offset); - #if defined CONFIG_COMPACTION || defined CONFIG_CMA /* diff --git a/mm/page_alloc.c b/mm/page_alloc.c index fee52ce8ab2d..fe7853eb3ea0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -833,64 +833,6 @@ done_merging: page_reporting_notify_free(order); } -/** - * split_free_page() -- split a free page at split_pfn_offset - * @free_page: the original free page - * @order: the order of the page - * @split_pfn_offset: split offset within the page - * - * Return -ENOENT if the free page is changed, otherwise 0 - * - * It is used when the free page crosses two pageblocks with different migratetypes - * at split_pfn_offset within the page. The split free page will be put into - * separate migratetype lists afterwards. Otherwise, the function achieves - * nothing. - */ -int split_free_page(struct page *free_page, - unsigned int order, unsigned long split_pfn_offset) -{ - struct zone *zone = page_zone(free_page); - unsigned long free_page_pfn = page_to_pfn(free_page); - unsigned long pfn; - unsigned long flags; - int free_page_order; - int mt; - int ret = 0; - - if (split_pfn_offset == 0) - return ret; - - spin_lock_irqsave(&zone->lock, flags); - - if (!PageBuddy(free_page) || buddy_order(free_page) != order) { - ret = -ENOENT; - goto out; - } - - mt = get_pfnblock_migratetype(free_page, free_page_pfn); - if (likely(!is_migrate_isolate(mt))) - __mod_zone_freepage_state(zone, -(1UL << order), mt); - - del_page_from_free_list(free_page, zone, order); - for (pfn = free_page_pfn; - pfn < free_page_pfn + (1UL << order);) { - int mt = get_pfnblock_migratetype(pfn_to_page(pfn), pfn); - - free_page_order = min_t(unsigned int, - pfn ? 
__ffs(pfn) : order, - __fls(split_pfn_offset)); - __free_one_page(pfn_to_page(pfn), pfn, zone, free_page_order, - mt, FPI_NONE); - pfn += 1UL << free_page_order; - split_pfn_offset -= (1UL << free_page_order); - /* we have done the first part, now switch to second part */ - if (split_pfn_offset == 0) - split_pfn_offset = (1UL << order) - (pfn - free_page_pfn); - } -out: - spin_unlock_irqrestore(&zone->lock, flags); - return ret; -} /* * A bad page could be due to a number of fields. Instead of multiple branches, * try and check multiple fields with one check. The caller must do a detailed @@ -1674,8 +1616,8 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page, return true; } -int move_freepages_block(struct zone *zone, struct page *page, - int migratetype) +static int move_freepages_block(struct zone *zone, struct page *page, + int migratetype) { unsigned long start_pfn, end_pfn; @@ -1686,6 +1628,123 @@ int move_freepages_block(struct zone *zone, struct page *page, return move_freepages(zone, start_pfn, end_pfn, migratetype); } +#ifdef CONFIG_MEMORY_ISOLATION +/* Look for a buddy that straddles start_pfn */ +static unsigned long find_large_buddy(unsigned long start_pfn) +{ + int order = 0; + struct page *page; + unsigned long pfn = start_pfn; + + while (!PageBuddy(page = pfn_to_page(pfn))) { + /* Nothing found */ + if (++order > MAX_PAGE_ORDER) + return start_pfn; + pfn &= ~0UL << order; + } + + /* + * Found a preceding buddy, but does it straddle? + */ + if (pfn + (1 << buddy_order(page)) > start_pfn) + return pfn; + + /* Nothing found */ + return start_pfn; +} + +/* Split a multi-block free page into its individual pageblocks */ +static void split_large_buddy(struct zone *zone, struct page *page, + unsigned long pfn, int order) +{ + unsigned long end_pfn = pfn + (1 << order); + + VM_WARN_ON_ONCE(order <= pageblock_order); + VM_WARN_ON_ONCE(pfn & (pageblock_nr_pages - 1)); + + /* Caller removed page from freelist, buddy info cleared! */ + VM_WARN_ON_ONCE(PageBuddy(page)); + + while (pfn != end_pfn) { + int mt = get_pfnblock_migratetype(page, pfn); + + __free_one_page(page, pfn, zone, pageblock_order, mt, FPI_NONE); + pfn += pageblock_nr_pages; + page = pfn_to_page(pfn); + } +} + +/** + * move_freepages_block_isolate - move free pages in block for page isolation + * @zone: the zone + * @page: the pageblock page + * @migratetype: migratetype to set on the pageblock + * + * This is similar to move_freepages_block(), but handles the special + * case encountered in page isolation, where the block of interest + * might be part of a larger buddy spanning multiple pageblocks. + * + * Unlike the regular page allocator path, which moves pages while + * stealing buddies off the freelist, page isolation is interested in + * arbitrary pfn ranges that may have overlapping buddies on both ends. + * + * This function handles that. Straddling buddies are split into + * individual pageblocks. Only the block of interest is moved. + * + * Returns %true if pages could be moved, %false otherwise. 
+ */ +bool move_freepages_block_isolate(struct zone *zone, struct page *page, + int migratetype) +{ + unsigned long start_pfn, end_pfn, pfn; + int nr_moved, mt; + + if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn, + NULL, NULL)) + return false; + + /* No splits needed if buddies can't span multiple blocks */ + if (pageblock_order == MAX_PAGE_ORDER) + goto move; + + /* We're a tail block in a larger buddy */ + pfn = find_large_buddy(start_pfn); + if (pfn != start_pfn) { + struct page *buddy = pfn_to_page(pfn); + int order = buddy_order(buddy); + int mt = get_pfnblock_migratetype(buddy, pfn); + + if (!is_migrate_isolate(mt)) + __mod_zone_freepage_state(zone, -(1UL << order), mt); + del_page_from_free_list(buddy, zone, order); + set_pageblock_migratetype(page, migratetype); + split_large_buddy(zone, buddy, pfn, order); + return true; + } + + /* We're the starting block of a larger buddy */ + if (PageBuddy(page) && buddy_order(page) > pageblock_order) { + int mt = get_pfnblock_migratetype(page, pfn); + int order = buddy_order(page); + + if (!is_migrate_isolate(mt)) + __mod_zone_freepage_state(zone, -(1UL << order), mt); + del_page_from_free_list(page, zone, order); + set_pageblock_migratetype(page, migratetype); + split_large_buddy(zone, page, pfn, order); + return true; + } +move: + mt = get_pfnblock_migratetype(page, start_pfn); + nr_moved = move_freepages(zone, start_pfn, end_pfn, migratetype); + if (!is_migrate_isolate(mt)) + __mod_zone_freepage_state(zone, -nr_moved, mt); + else if (!is_migrate_isolate(migratetype)) + __mod_zone_freepage_state(zone, nr_moved, migratetype); + return true; +} +#endif /* CONFIG_MEMORY_ISOLATION */ + static void change_pageblock_range(struct page *pageblock_page, int start_order, int migratetype) { @@ -6365,7 +6424,6 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end, unsigned migratetype, gfp_t gfp_mask) { unsigned long outer_start, outer_end; - int order; int ret = 0; struct compact_control cc = { @@ -6438,29 +6496,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end, * We don't have to hold zone->lock here because the pages are * isolated thus they won't get removed from buddy. */ - - order = 0; - outer_start = start; - while (!PageBuddy(pfn_to_page(outer_start))) { - if (++order > MAX_PAGE_ORDER) { - outer_start = start; - break; - } - outer_start &= ~0UL << order; - } - - if (outer_start != start) { - order = buddy_order(pfn_to_page(outer_start)); - - /* - * outer_start page could be small order buddy page and - * it doesn't include start page. Adjust outer_start - * in this case to report failed page properly - * on tracepoint in test_pages_isolated() - */ - if (outer_start + (1UL << order) <= start) - outer_start = start; - } + outer_start = find_large_buddy(start); /* Make sure the range is really isolated. */ if (test_pages_isolated(outer_start, end, 0)) { diff --git a/mm/page_isolation.c b/mm/page_isolation.c index f84f0981b2df..042937d5abe4 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -178,16 +178,10 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_ unmovable = has_unmovable_pages(check_unmovable_start, check_unmovable_end, migratetype, isol_flags); if (!unmovable) { - int nr_pages; - int mt = get_pageblock_migratetype(page); - - nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); - /* Block spans zone boundaries? 
*/ - if (nr_pages == -1) { + if (!move_freepages_block_isolate(zone, page, MIGRATE_ISOLATE)) { spin_unlock_irqrestore(&zone->lock, flags); return -EBUSY; } - __mod_zone_freepage_state(zone, -nr_pages, mt); zone->nr_isolate_pageblock++; spin_unlock_irqrestore(&zone->lock, flags); return 0; @@ -254,13 +248,11 @@ static void unset_migratetype_isolate(struct page *page, int migratetype) * allocation. */ if (!isolated_page) { - int nr_pages = move_freepages_block(zone, page, migratetype); /* * Isolating this block already succeeded, so this * should not fail on zone boundaries. */ - WARN_ON_ONCE(nr_pages == -1); - __mod_zone_freepage_state(zone, nr_pages, migratetype); + WARN_ON_ONCE(!move_freepages_block_isolate(zone, page, migratetype)); } else { set_pageblock_migratetype(page, migratetype); __putback_isolated_page(page, order, migratetype); @@ -374,26 +366,29 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags, VM_BUG_ON(!page); pfn = page_to_pfn(page); - /* - * start_pfn is MAX_ORDER_NR_PAGES aligned, if there is any - * free pages in [start_pfn, boundary_pfn), its head page will - * always be in the range. - */ + if (PageBuddy(page)) { int order = buddy_order(page); - if (pfn + (1UL << order) > boundary_pfn) { - /* free page changed before split, check it again */ - if (split_free_page(page, order, boundary_pfn - pfn)) - continue; - } + /* move_freepages_block_isolate() handled this */ + VM_WARN_ON_ONCE(pfn + (1 << order) > boundary_pfn); pfn += 1UL << order; continue; } + /* - * migrate compound pages then let the free page handling code - * above do the rest. If migration is not possible, just fail. + * If a compound page is straddling our block, attempt + * to migrate it out of the way. + * + * We don't have to worry about this creating a large + * free page that straddles into our block: gigantic + * pages are freed as order-0 chunks, and LRU pages + * (currently) do not exceed pageblock_order. + * + * The block of interest has already been marked + * MIGRATE_ISOLATE above, so when migration is done it + * will free its pages onto the correct freelists. */ if (PageCompound(page)) { struct page *head = compound_head(page); @@ -404,16 +399,10 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags, pfn = head_pfn + nr_pages; continue; } + #if defined CONFIG_COMPACTION || defined CONFIG_CMA - /* - * hugetlb, lru compound (THP), and movable compound pages - * can be migrated. Otherwise, fail the isolation. - */ - if (PageHuge(page) || PageLRU(page) || __PageMovable(page)) { - int order; - unsigned long outer_pfn; + if (PageHuge(page)) { int page_mt = get_pageblock_migratetype(page); - bool isolate_page = !is_migrate_isolate_page(page); struct compact_control cc = { .nr_migratepages = 0, .order = -1, @@ -426,56 +415,25 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags, }; INIT_LIST_HEAD(&cc.migratepages); - /* - * XXX: mark the page as MIGRATE_ISOLATE so that - * no one else can grab the freed page after migration. - * Ideally, the page should be freed as two separate - * pages to be added into separate migratetype free - * lists. - */ - if (isolate_page) { - ret = set_migratetype_isolate(page, page_mt, - flags, head_pfn, head_pfn + nr_pages); - if (ret) - goto failed; - } - ret = __alloc_contig_migrate_range(&cc, head_pfn, head_pfn + nr_pages, page_mt); - - /* - * restore the page's migratetype so that it can - * be split into separate migratetype free lists - * later. 
- */ - if (isolate_page) - unset_migratetype_isolate(page, page_mt); - if (ret) goto failed; - /* - * reset pfn to the head of the free page, so - * that the free page handling code above can split - * the free page to the right migratetype list. - * - * head_pfn is not used here as a hugetlb page order - * can be bigger than MAX_PAGE_ORDER, but after it is - * freed, the free page order is not. Use pfn within - * the range to find the head of the free page. - */ - order = 0; - outer_pfn = pfn; - while (!PageBuddy(pfn_to_page(outer_pfn))) { - /* stop if we cannot find the free page */ - if (++order > MAX_PAGE_ORDER) - goto failed; - outer_pfn &= ~0UL << order; - } - pfn = outer_pfn; + pfn = head_pfn + nr_pages; continue; - } else + } + + /* + * These pages are movable too, but they're + * not expected to exceed pageblock_order. + * + * Let us know when they do, so we can add + * proper free and split handling for them. + */ + VM_WARN_ON_ONCE_PAGE(PageLRU(page), page); + VM_WARN_ON_ONCE_PAGE(__PageMovable(page), page); #endif - goto failed; + goto failed; } pfn++; -- cgit From e0932b6c1f942fa747258e152cdce0d0b2b5be5c Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 20 Mar 2024 14:02:15 -0400 Subject: mm: page_alloc: consolidate free page accounting Free page accounting currently happens a bit too high up the call stack, where it has to deal with guard pages, compaction capturing, block stealing and even page isolation. This is subtle and fragile, and makes it difficult to hack on the code. Now that type violations on the freelists have been fixed, push the accounting down to where pages enter and leave the freelist. [hannes@cmpxchg.org: undo unrelated drive-by line wrap] Link: https://lkml.kernel.org/r/20240327185736.GA7597@cmpxchg.org [hannes@cmpxchg.org: remove unused page parameter from account_freepages()] Link: https://lkml.kernel.org/r/20240327185831.GB7597@cmpxchg.org [baolin.wang@linux.alibaba.com: fix free page accounting] Link: https://lkml.kernel.org/r/a2a48baca69f103aa431fd201f8a06e3b95e203d.1712648441.git.baolin.wang@linux.alibaba.com [andriy.shevchenko@linux.intel.com: avoid defining unused function] Link: https://lkml.kernel.org/r/20240423161506.2637177-1-andriy.shevchenko@linux.intel.com Link: https://lkml.kernel.org/r/20240320180429.678181-11-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Signed-off-by: Andy Shevchenko Signed-off-by: Baolin Wang Reviewed-by: Vlastimil Babka Tested-by: Baolin Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Mel Gorman Cc: Zi Yan Signed-off-by: Andrew Morton --- include/linux/mm.h | 18 +++-- include/linux/vmstat.h | 8 --- mm/debug_page_alloc.c | 12 +--- mm/internal.h | 5 -- mm/page_alloc.c | 192 +++++++++++++++++++++++++++---------------------- 5 files changed, 118 insertions(+), 117 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 60eabc6c8e00..1588fe15a38e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3797,24 +3797,22 @@ static inline bool page_is_guard(struct page *page) return PageGuard(page); } -bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order, - int migratetype); +bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order); static inline bool set_page_guard(struct zone *zone, struct page *page, - unsigned int order, int migratetype) + unsigned int order) { if (!debug_guardpage_enabled()) return false; - return __set_page_guard(zone, page, order, migratetype); + return __set_page_guard(zone, page, order); } 
-void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order, - int migratetype); +void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order); static inline void clear_page_guard(struct zone *zone, struct page *page, - unsigned int order, int migratetype) + unsigned int order) { if (!debug_guardpage_enabled()) return; - __clear_page_guard(zone, page, order, migratetype); + __clear_page_guard(zone, page, order); } #else /* CONFIG_DEBUG_PAGEALLOC */ @@ -3824,9 +3822,9 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; } static inline bool debug_guardpage_enabled(void) { return false; } static inline bool page_is_guard(struct page *page) { return false; } static inline bool set_page_guard(struct zone *zone, struct page *page, - unsigned int order, int migratetype) { return false; } + unsigned int order) { return false; } static inline void clear_page_guard(struct zone *zone, struct page *page, - unsigned int order, int migratetype) {} + unsigned int order) {} #endif /* CONFIG_DEBUG_PAGEALLOC */ #ifdef __HAVE_ARCH_GATE_AREA diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 343906a98d6e..735eae6e272c 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -487,14 +487,6 @@ static inline void node_stat_sub_folio(struct folio *folio, mod_node_page_state(folio_pgdat(folio), item, -folio_nr_pages(folio)); } -static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages, - int migratetype) -{ - __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); - if (is_migrate_cma(migratetype)) - __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); -} - extern const char * const vmstat_text[]; static inline const char *zone_stat_name(enum zone_stat_item item) diff --git a/mm/debug_page_alloc.c b/mm/debug_page_alloc.c index 6755f0c9d4a3..d46acf989dde 100644 --- a/mm/debug_page_alloc.c +++ b/mm/debug_page_alloc.c @@ -32,8 +32,7 @@ static int __init debug_guardpage_minorder_setup(char *buf) } early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup); -bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order, - int migratetype) +bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order) { if (order >= debug_guardpage_minorder()) return false; @@ -41,19 +40,12 @@ bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order, __SetPageGuard(page); INIT_LIST_HEAD(&page->buddy_list); set_page_private(page, order); - /* Guard pages are not available for any usage */ - if (!is_migrate_isolate(migratetype)) - __mod_zone_freepage_state(zone, -(1 << order), migratetype); return true; } -void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order, - int migratetype) +void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order) { __ClearPageGuard(page); - set_page_private(page, 0); - if (!is_migrate_isolate(migratetype)) - __mod_zone_freepage_state(zone, (1 << order), migratetype); } diff --git a/mm/internal.h b/mm/internal.h index cbc40f07537c..fb219e31f0f0 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1039,11 +1039,6 @@ static inline bool is_migrate_highatomic(enum migratetype migratetype) return migratetype == MIGRATE_HIGHATOMIC; } -static inline bool is_migrate_highatomic_page(struct page *page) -{ - return get_pageblock_migratetype(page) == MIGRATE_HIGHATOMIC; -} - void setup_zone_pageset(struct zone *zone); struct migration_target_control { diff --git a/mm/page_alloc.c b/mm/page_alloc.c 
index fe7853eb3ea0..b3b98a91d6a7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -643,23 +643,33 @@ compaction_capture(struct capture_control *capc, struct page *page, } #endif /* CONFIG_COMPACTION */ -/* Used for pages not on another list */ -static inline void add_to_free_list(struct page *page, struct zone *zone, - unsigned int order, int migratetype) +static inline void account_freepages(struct zone *zone, int nr_pages, + int migratetype) { - struct free_area *area = &zone->free_area[order]; + if (is_migrate_isolate(migratetype)) + return; - list_add(&page->buddy_list, &area->free_list[migratetype]); - area->nr_free++; + __mod_zone_page_state(zone, NR_FREE_PAGES, nr_pages); + + if (is_migrate_cma(migratetype)) + __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, nr_pages); } /* Used for pages not on another list */ -static inline void add_to_free_list_tail(struct page *page, struct zone *zone, - unsigned int order, int migratetype) +static inline void __add_to_free_list(struct page *page, struct zone *zone, + unsigned int order, int migratetype, + bool tail) { struct free_area *area = &zone->free_area[order]; - list_add_tail(&page->buddy_list, &area->free_list[migratetype]); + VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, + "page type is %lu, passed migratetype is %d (nr=%d)\n", + get_pageblock_migratetype(page), migratetype, 1 << order); + + if (tail) + list_add_tail(&page->buddy_list, &area->free_list[migratetype]); + else + list_add(&page->buddy_list, &area->free_list[migratetype]); area->nr_free++; } @@ -669,16 +679,28 @@ static inline void add_to_free_list_tail(struct page *page, struct zone *zone, * allocation again (e.g., optimization for memory onlining). */ static inline void move_to_free_list(struct page *page, struct zone *zone, - unsigned int order, int migratetype) + unsigned int order, int old_mt, int new_mt) { struct free_area *area = &zone->free_area[order]; - list_move_tail(&page->buddy_list, &area->free_list[migratetype]); + /* Free page moving can fail, so it happens before the type update */ + VM_WARN_ONCE(get_pageblock_migratetype(page) != old_mt, + "page type is %lu, passed migratetype is %d (nr=%d)\n", + get_pageblock_migratetype(page), old_mt, 1 << order); + + list_move_tail(&page->buddy_list, &area->free_list[new_mt]); + + account_freepages(zone, -(1 << order), old_mt); + account_freepages(zone, 1 << order, new_mt); } -static inline void del_page_from_free_list(struct page *page, struct zone *zone, - unsigned int order) +static inline void __del_page_from_free_list(struct page *page, struct zone *zone, + unsigned int order, int migratetype) { + VM_WARN_ONCE(get_pageblock_migratetype(page) != migratetype, + "page type is %lu, passed migratetype is %d (nr=%d)\n", + get_pageblock_migratetype(page), migratetype, 1 << order); + /* clear reported state and update reported page count */ if (page_reported(page)) __ClearPageReported(page); @@ -689,6 +711,13 @@ static inline void del_page_from_free_list(struct page *page, struct zone *zone, zone->free_area[order].nr_free--; } +static inline void del_page_from_free_list(struct page *page, struct zone *zone, + unsigned int order, int migratetype) +{ + __del_page_from_free_list(page, zone, order, migratetype); + account_freepages(zone, -(1 << order), migratetype); +} + static inline struct page *get_page_from_free_area(struct free_area *area, int migratetype) { @@ -760,16 +789,16 @@ static inline void __free_one_page(struct page *page, VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); 
VM_BUG_ON(migratetype == -1); - if (likely(!is_migrate_isolate(migratetype))) - __mod_zone_freepage_state(zone, 1 << order, migratetype); - VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); VM_BUG_ON_PAGE(bad_range(zone, page), page); + account_freepages(zone, 1 << order, migratetype); + while (order < MAX_PAGE_ORDER) { + int buddy_mt = migratetype; + if (compaction_capture(capc, page, order, migratetype)) { - __mod_zone_freepage_state(zone, -(1 << order), - migratetype); + account_freepages(zone, -(1 << order), migratetype); return; } @@ -784,19 +813,12 @@ static inline void __free_one_page(struct page *page, * pageblock isolation could cause incorrect freepage or CMA * accounting or HIGHATOMIC accounting. */ - int buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn); + buddy_mt = get_pfnblock_migratetype(buddy, buddy_pfn); - if (migratetype != buddy_mt) { - if (!migratetype_is_mergeable(migratetype) || - !migratetype_is_mergeable(buddy_mt)) - goto done_merging; - /* - * Match buddy type. This ensures that - * an expand() down the line puts the - * sub-blocks on the right freelists. - */ - set_pageblock_migratetype(buddy, migratetype); - } + if (migratetype != buddy_mt && + (!migratetype_is_mergeable(migratetype) || + !migratetype_is_mergeable(buddy_mt))) + goto done_merging; } /* @@ -804,9 +826,19 @@ static inline void __free_one_page(struct page *page, * merge with it and move up one order. */ if (page_is_guard(buddy)) - clear_page_guard(zone, buddy, order, migratetype); + clear_page_guard(zone, buddy, order); else - del_page_from_free_list(buddy, zone, order); + __del_page_from_free_list(buddy, zone, order, buddy_mt); + + if (unlikely(buddy_mt != migratetype)) { + /* + * Match buddy type. This ensures that an + * expand() down the line puts the sub-blocks + * on the right freelists. + */ + set_pageblock_migratetype(buddy, migratetype); + } + combined_pfn = buddy_pfn & pfn; page = page + (combined_pfn - pfn); pfn = combined_pfn; @@ -823,10 +855,7 @@ done_merging: else to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); - if (to_tail) - add_to_free_list_tail(page, zone, order, migratetype); - else - add_to_free_list(page, zone, order, migratetype); + __add_to_free_list(page, zone, order, migratetype, to_tail); /* Notify page reporting subsystem of freed page */ if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY)) @@ -1318,10 +1347,10 @@ static inline void expand(struct zone *zone, struct page *page, * Corresponding page table entries will not be touched, * pages will stay not present in virtual address space */ - if (set_page_guard(zone, &page[size], high, migratetype)) + if (set_page_guard(zone, &page[size], high)) continue; - add_to_free_list(&page[size], zone, high, migratetype); + add_to_free_list(&page[size], zone, high, migratetype, false); set_buddy_order(&page[size], high); } } @@ -1492,7 +1521,7 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, page = get_page_from_free_area(area, migratetype); if (!page) continue; - del_page_from_free_list(page, zone, current_order); + del_page_from_free_list(page, zone, current_order, migratetype); expand(zone, page, order, current_order, migratetype); trace_mm_page_alloc_zone_locked(page, order, migratetype, pcp_allowed_order(order) && @@ -1532,7 +1561,7 @@ static inline struct page *__rmqueue_cma_fallback(struct zone *zone, * type's freelist. 
*/ static int move_freepages(struct zone *zone, unsigned long start_pfn, - unsigned long end_pfn, int migratetype) + unsigned long end_pfn, int old_mt, int new_mt) { struct page *page; unsigned long pfn; @@ -1554,12 +1583,14 @@ static int move_freepages(struct zone *zone, unsigned long start_pfn, VM_BUG_ON_PAGE(page_zone(page) != zone, page); order = buddy_order(page); - move_to_free_list(page, zone, order, migratetype); + + move_to_free_list(page, zone, order, old_mt, new_mt); + pfn += 1 << order; pages_moved += 1 << order; } - set_pageblock_migratetype(pfn_to_page(start_pfn), migratetype); + set_pageblock_migratetype(pfn_to_page(start_pfn), new_mt); return pages_moved; } @@ -1617,7 +1648,7 @@ static bool prep_move_freepages_block(struct zone *zone, struct page *page, } static int move_freepages_block(struct zone *zone, struct page *page, - int migratetype) + int old_mt, int new_mt) { unsigned long start_pfn, end_pfn; @@ -1625,7 +1656,7 @@ static int move_freepages_block(struct zone *zone, struct page *page, NULL, NULL)) return -1; - return move_freepages(zone, start_pfn, end_pfn, migratetype); + return move_freepages(zone, start_pfn, end_pfn, old_mt, new_mt); } #ifdef CONFIG_MEMORY_ISOLATION @@ -1697,7 +1728,6 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page, int migratetype) { unsigned long start_pfn, end_pfn, pfn; - int nr_moved, mt; if (!prep_move_freepages_block(zone, page, &start_pfn, &end_pfn, NULL, NULL)) @@ -1712,11 +1742,9 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page, if (pfn != start_pfn) { struct page *buddy = pfn_to_page(pfn); int order = buddy_order(buddy); - int mt = get_pfnblock_migratetype(buddy, pfn); - if (!is_migrate_isolate(mt)) - __mod_zone_freepage_state(zone, -(1UL << order), mt); - del_page_from_free_list(buddy, zone, order); + del_page_from_free_list(buddy, zone, order, + get_pfnblock_migratetype(buddy, pfn)); set_pageblock_migratetype(page, migratetype); split_large_buddy(zone, buddy, pfn, order); return true; @@ -1724,23 +1752,17 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page, /* We're the starting block of a larger buddy */ if (PageBuddy(page) && buddy_order(page) > pageblock_order) { - int mt = get_pfnblock_migratetype(page, pfn); int order = buddy_order(page); - if (!is_migrate_isolate(mt)) - __mod_zone_freepage_state(zone, -(1UL << order), mt); - del_page_from_free_list(page, zone, order); + del_page_from_free_list(page, zone, order, + get_pfnblock_migratetype(page, pfn)); set_pageblock_migratetype(page, migratetype); split_large_buddy(zone, page, pfn, order); return true; } move: - mt = get_pfnblock_migratetype(page, start_pfn); - nr_moved = move_freepages(zone, start_pfn, end_pfn, migratetype); - if (!is_migrate_isolate(mt)) - __mod_zone_freepage_state(zone, -nr_moved, mt); - else if (!is_migrate_isolate(migratetype)) - __mod_zone_freepage_state(zone, nr_moved, migratetype); + move_freepages(zone, start_pfn, end_pfn, + get_pfnblock_migratetype(page, start_pfn), migratetype); return true; } #endif /* CONFIG_MEMORY_ISOLATION */ @@ -1854,7 +1876,7 @@ steal_suitable_fallback(struct zone *zone, struct page *page, /* Take ownership for orders >= pageblock_order */ if (current_order >= pageblock_order) { - del_page_from_free_list(page, zone, current_order); + del_page_from_free_list(page, zone, current_order, block_type); change_pageblock_range(page, current_order, start_type); expand(zone, page, order, current_order, start_type); return page; @@ -1904,12 +1926,12 @@ 
steal_suitable_fallback(struct zone *zone, struct page *page, */ if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || page_group_by_mobility_disabled) { - move_freepages(zone, start_pfn, end_pfn, start_type); + move_freepages(zone, start_pfn, end_pfn, block_type, start_type); return __rmqueue_smallest(zone, order, start_type); } single_page: - del_page_from_free_list(page, zone, current_order); + del_page_from_free_list(page, zone, current_order, block_type); expand(zone, page, order, current_order, block_type); return page; } @@ -1979,7 +2001,7 @@ static void reserve_highatomic_pageblock(struct page *page, struct zone *zone) mt = get_pageblock_migratetype(page); /* Only reserve normal pageblocks (i.e., they can merge with others) */ if (migratetype_is_mergeable(mt)) - if (move_freepages_block(zone, page, + if (move_freepages_block(zone, page, mt, MIGRATE_HIGHATOMIC) != -1) zone->nr_reserved_highatomic += pageblock_nr_pages; @@ -2020,11 +2042,13 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, spin_lock_irqsave(&zone->lock, flags); for (order = 0; order < NR_PAGE_ORDERS; order++) { struct free_area *area = &(zone->free_area[order]); + int mt; page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); if (!page) continue; + mt = get_pageblock_migratetype(page); /* * In page freeing path, migratetype change is racy so * we can counter several free pages in a pageblock @@ -2032,7 +2056,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, * from highatomic to ac->migratetype. So we should * adjust the count once. */ - if (is_migrate_highatomic_page(page)) { + if (is_migrate_highatomic(mt)) { /* * It should never happen but changes to * locking could inadvertently allow a per-cpu @@ -2054,7 +2078,8 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac, * of pageblocks that cannot be completely freed * may increase. */ - ret = move_freepages_block(zone, page, ac->migratetype); + ret = move_freepages_block(zone, page, mt, + ac->migratetype); /* * Reserving this block already succeeded, so this should * not fail on zone boundaries. @@ -2225,12 +2250,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, * pages are ordered properly. 
*/ list_add_tail(&page->pcp_list, list); - if (is_migrate_cma(get_pageblock_migratetype(page))) - __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, - -(1 << order)); } - - __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); spin_unlock_irqrestore(&zone->lock, flags); return i; @@ -2723,11 +2743,9 @@ int __isolate_free_page(struct page *page, unsigned int order) watermark = zone->_watermark[WMARK_MIN] + (1UL << order); if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA)) return 0; - - __mod_zone_freepage_state(zone, -(1UL << order), mt); } - del_page_from_free_list(page, zone, order); + del_page_from_free_list(page, zone, order, mt); /* * Set the pageblock if the isolated page is at least half of a @@ -2742,7 +2760,7 @@ int __isolate_free_page(struct page *page, unsigned int order) * with others) */ if (migratetype_is_mergeable(mt)) - move_freepages_block(zone, page, + move_freepages_block(zone, page, mt, MIGRATE_MOVABLE); } } @@ -2827,8 +2845,6 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, return NULL; } } - __mod_zone_freepage_state(zone, -(1 << order), - get_pageblock_migratetype(page)); spin_unlock_irqrestore(&zone->lock, flags); } while (check_new_pages(page, order)); @@ -6716,8 +6732,9 @@ void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) BUG_ON(page_count(page)); BUG_ON(!PageBuddy(page)); + VM_WARN_ON(get_pageblock_migratetype(page) != MIGRATE_ISOLATE); order = buddy_order(page); - del_page_from_free_list(page, zone, order); + del_page_from_free_list(page, zone, order, MIGRATE_ISOLATE); pfn += (1 << order); } spin_unlock_irqrestore(&zone->lock, flags); @@ -6745,6 +6762,14 @@ bool is_free_buddy_page(struct page *page) EXPORT_SYMBOL(is_free_buddy_page); #ifdef CONFIG_MEMORY_FAILURE +static inline void add_to_free_list(struct page *page, struct zone *zone, + unsigned int order, int migratetype, + bool tail) +{ + __add_to_free_list(page, zone, order, migratetype, tail); + account_freepages(zone, 1 << order, migratetype); +} + /* * Break down a higher-order page in sub-pages, and keep our target out of * buddy allocator. 
@@ -6767,10 +6792,10 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page, current_buddy = page + size; } - if (set_page_guard(zone, current_buddy, high, migratetype)) + if (set_page_guard(zone, current_buddy, high)) continue; - add_to_free_list(current_buddy, zone, high, migratetype); + add_to_free_list(current_buddy, zone, high, migratetype, false); set_buddy_order(current_buddy, high); } } @@ -6796,12 +6821,11 @@ bool take_page_off_buddy(struct page *page) int migratetype = get_pfnblock_migratetype(page_head, pfn_head); - del_page_from_free_list(page_head, zone, page_order); + del_page_from_free_list(page_head, zone, page_order, + migratetype); break_down_buddy_pages(zone, page_head, page, 0, page_order, migratetype); SetPageHWPoisonTakenOff(page); - if (!is_migrate_isolate(migratetype)) - __mod_zone_freepage_state(zone, -1, migratetype); ret = true; break; } @@ -6909,7 +6933,7 @@ static bool try_to_accept_memory_one(struct zone *zone) list_del(&page->lru); last = list_empty(&zone->unaccepted_pages); - __mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); + account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); spin_unlock_irqrestore(&zone->lock, flags); @@ -6961,7 +6985,7 @@ static bool __free_unaccepted(struct page *page) spin_lock_irqsave(&zone->lock, flags); first = list_empty(&zone->unaccepted_pages); list_add_tail(&page->lru, &zone->unaccepted_pages); - __mod_zone_freepage_state(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); + account_freepages(zone, MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); __mod_zone_page_state(zone, NR_UNACCEPTED, MAX_ORDER_NR_PAGES); spin_unlock_irqrestore(&zone->lock, flags); -- cgit From 42d0c3fbb5811fbfb663d8ede1d7ffba02e7ae18 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Wed, 6 Mar 2024 18:13:27 +0800 Subject: mm: hugetlb: make the hugetlb migration strategy consistent As discussed in previous thread [1], there is an inconsistency in the handling of hugetlb migration. When handling the migration of freed hugetlb, it prevents fallback to other NUMA nodes in alloc_and_dissolve_hugetlb_folio(). However, when dealing with in-use hugetlb, it allows fallback to other NUMA nodes in alloc_hugetlb_folio_nodemask(), which can break the per-node hugetlb pool and might result in unexpected failures when node-bound workloads don't get what is assumed to be available. To make the hugetlb migration strategy clearer, we should list all the scenarios of hugetlb migration and analyze whether allocation fallback is permitted: 1) Memory offline: will call dissolve_free_huge_pages() to free the freed hugetlb, and call do_migrate_range() to migrate the in-use hugetlb. Both can break the per-node hugetlb pool, but as this is an explicit offlining operation there is no better choice, so the hugetlb allocation fallback should be allowed. 2) Memory failure: same as memory offline. Fallback should be allowed, since a different node might be the only option to handle it; otherwise the impact of poisoned memory can be amplified. 3) Longterm pinning: will call migrate_longterm_unpinnable_pages() to migrate in-use and not-longterm-pinnable hugetlb, which can break the per-node pool. But longterm pinning should fail if we cannot allocate on the current node, to avoid breaking the per-node pool. 4) Syscalls (mbind, migrate_pages, move_pages): these are explicit user operations to move pages to other nodes, so fallback to other nodes should not be prohibited. 
5) alloc_contig_range: used by CMA allocation and virtio-mem fake-offline to allocate a given range of pages. Freed hugetlb migration is currently not allowed to fall back; to keep consistency, in-use hugetlb migration should not be allowed to fall back either. 6) alloc_contig_pages: used by kfence, pgtable_debug etc. The strategy should be consistent with that of alloc_contig_range(). Based on the analysis of the various scenarios above, introduce a new helper to determine whether fallback is permitted according to the migration reason. [1] https://lore.kernel.org/all/6f26ce22d2fcd523418a085f2c588fe0776d46e7.1706794035.git.baolin.wang@linux.alibaba.com/ Link: https://lkml.kernel.org/r/3519fcd41522817307a05b40fb551e2e17e68101.1709719720.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: David Hildenbrand Cc: Miaohe Lin Cc: Michal Hocko Cc: Muchun Song Cc: Naoya Horiguchi Cc: Oscar Salvador Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 35 +++++++++++++++++++++++++++++++++-- mm/hugetlb.c | 14 ++++++++++++-- mm/mempolicy.c | 3 ++- mm/migrate.c | 3 ++- 4 files changed, 49 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 300de33c6fde..d748628efc5e 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -723,7 +723,8 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, - nodemask_t *nmask, gfp_t gfp_mask); + nodemask_t *nmask, gfp_t gfp_mask, + bool allow_alloc_fallback); int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, pgoff_t idx); void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, @@ -946,6 +947,30 @@ static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) return modified_mask; } +static inline bool htlb_allow_alloc_fallback(int reason) +{ + bool allowed_fallback = false; + + /* + * Note: the memory offline, memory failure and migration syscalls will + * be allowed to fallback to other nodes due to lack of a better chioce, + * that might break the per-node hugetlb pool. While other cases will + * set the __GFP_THISNODE to avoid breaking the per-node hugetlb pool. 
+ */ + switch (reason) { + case MR_MEMORY_HOTPLUG: + case MR_MEMORY_FAILURE: + case MR_SYSCALL: + case MR_MEMPOLICY_MBIND: + allowed_fallback = true; + break; + default: + break; + } + + return allowed_fallback; +} + static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { @@ -1041,7 +1066,8 @@ static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, static inline struct folio * alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, - nodemask_t *nmask, gfp_t gfp_mask) + nodemask_t *nmask, gfp_t gfp_mask, + bool allow_alloc_fallback) { return NULL; } @@ -1157,6 +1183,11 @@ static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) return 0; } +static inline bool htlb_allow_alloc_fallback(int reason) +{ + return false; +} + static inline spinlock_t *huge_pte_lockptr(struct hstate *h, struct mm_struct *mm, pte_t *pte) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e4dd0c802489..2e984554ec63 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2598,7 +2598,7 @@ struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h, /* folio migration callback function */ struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, - nodemask_t *nmask, gfp_t gfp_mask) + nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback) { spin_lock_irq(&hugetlb_lock); if (available_huge_pages(h)) { @@ -2613,6 +2613,10 @@ struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, } spin_unlock_irq(&hugetlb_lock); + /* We cannot fallback to other nodes, as we could break the per-node pool. */ + if (!allow_alloc_fallback) + gfp_mask |= __GFP_THISNODE; + return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask); } @@ -6636,7 +6640,13 @@ static struct folio *alloc_hugetlb_folio_vma(struct hstate *h, gfp_mask = htlb_alloc_mask(h); node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); - folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask); + /* + * This is used to allocate a temporary hugetlb to hold the copied + * content, which will then be copied again to the final hugetlb + * consuming a reservation. Set the alloc_fallback to false to indicate + * that breaking the per-node hugetlb pool is not allowed in this case. 
+ folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask, false); mpol_cond_put(mpol); return folio; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index e128e6b7bbcb..72626b2cefa9 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1228,7 +1228,8 @@ static struct folio *alloc_migration_target_by_mpol(struct folio *src, h = folio_hstate(src); gfp = htlb_alloc_mask(h); nodemask = policy_nodemask(gfp, pol, ilx, &nid); - return alloc_hugetlb_folio_nodemask(h, nid, nodemask, gfp); + return alloc_hugetlb_folio_nodemask(h, nid, nodemask, gfp, + htlb_allow_alloc_fallback(MR_MEMPOLICY_MBIND)); } if (folio_test_large(src)) diff --git a/mm/migrate.c b/mm/migrate.c index bde63010a3cf..ab9856f5931b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2022,7 +2022,8 @@ struct folio *alloc_migration_target(struct folio *src, unsigned long private) gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); return alloc_hugetlb_folio_nodemask(h, nid, - mtc->nmask, gfp_mask); + mtc->nmask, gfp_mask, + htlb_allow_alloc_fallback(mtc->reason)); } if (folio_test_large(src)) { -- cgit From f238b8c33c6738f146bbfbb09b78870ea157c2b7 Mon Sep 17 00:00:00 2001 From: Barry Song Date: Sat, 23 Mar 2024 00:41:36 +1300 Subject: arm64: mm: swap: support THP_SWAP on hardware with MTE Commit d0637c505f8a1 ("arm64: enable THP_SWAP for arm64") brings up THP_SWAP on ARM64, but it doesn't enable THP_SWAP on hardware with MTE, as the MTE code works with the assumption that tag save/restore always handles a folio with only one page. This limitation should be removed as more and more ARM64 SoCs have this feature, and the co-existence of MTE and THP_SWAP becomes more and more important. This patch makes MTE tag saving support large folios, so we no longer need to split large folios into base pages for swap-out on ARM64 SoCs with MTE. arch_prepare_to_swap() should take a folio rather than a page as its parameter because we support THP swap-out as a whole: it saves tags for all pages in a large folio. As tags are now restored per folio in arch_swap_restore(), we may incur some extra loops and early exits while refaulting a large folio that is still in the swapcache via do_swap_page(). If a large folio has nr pages, do_swap_page() only sets the PTE of the particular page causing the fault; it therefore runs nr times, and each time arch_swap_restore() loops over all nr subpages of the folio, so the algorithmic complexity is currently O(nr^2). Once we support mapping large folios in do_swap_page(), the extra loops and early exits will decrease but not disappear completely, as a large folio might be only partially tagged in corner cases such as: 1. a large folio in the swapcache can be partially unmapped, so MTE tags for the unmapped pages will be invalidated; 2. users might use mprotect() to set MTE tags on part of a large folio. arch_thp_swp_supported() is dropped since ARM64 MTE was the only user that needed it. 
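To put a rough shape on the O(nr^2) behaviour described above, the refault pattern can be sketched as follows. This is an illustration only, not actual kernel control flow; the loop structure and variable names are simplified assumptions built from the helpers this patch introduces:

	/*
	 * Illustrative sketch: while a large folio of nr pages sits in the
	 * swapcache, each subpage fault goes through do_swap_page(), which
	 * calls arch_swap_restore() on the whole folio. The restore loop
	 * itself walks all nr subpages, so the total tag-restore work is
	 * roughly nr * nr iterations.
	 */
	long nr = folio_nr_pages(folio);

	for (long fault = 0; fault < nr; fault++)	/* one fault per subpage */
		arch_swap_restore(folio_swap(entry, folio), folio); /* nr iterations each */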
Link: https://lkml.kernel.org/r/20240322114136.61386-2-21cnbao@gmail.com Signed-off-by: Barry Song Reviewed-by: Steven Price Acked-by: Chris Li Reviewed-by: Ryan Roberts Cc: Catalin Marinas Cc: Will Deacon Cc: Mark Rutland Cc: David Hildenbrand Cc: Kemeng Shi Cc: "Matthew Wilcox (Oracle)" Cc: Anshuman Khandual Cc: Peter Collingbourne Cc: Yosry Ahmed Cc: Peter Xu Cc: Lorenzo Stoakes Cc: "Mike Rapoport (IBM)" Cc: Hugh Dickins Cc: "Aneesh Kumar K.V" Cc: Rick Edgecombe Signed-off-by: Andrew Morton --- arch/arm64/include/asm/pgtable.h | 19 ++--------------- arch/arm64/mm/mteswap.c | 45 ++++++++++++++++++++++++++++++++++++++++ include/linux/huge_mm.h | 12 ----------- include/linux/pgtable.h | 2 +- mm/internal.h | 14 +++++++++++++ mm/memory.c | 2 +- mm/page_io.c | 2 +- mm/shmem.c | 2 +- mm/swap_slots.c | 2 +- mm/swapfile.c | 2 +- 10 files changed, 67 insertions(+), 35 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 6870b60158fc..9fd8613b2db2 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -49,12 +49,6 @@ __flush_tlb_range(vma, addr, end, PUD_SIZE, false, 1) #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ -static inline bool arch_thp_swp_supported(void) -{ - return !system_supports_mte(); -} -#define arch_thp_swp_supported arch_thp_swp_supported - /* * Outside of a few very special situations (e.g. hibernation), we always * use broadcast TLB invalidation instructions, therefore a spurious page @@ -1282,12 +1276,7 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma, #ifdef CONFIG_ARM64_MTE #define __HAVE_ARCH_PREPARE_TO_SWAP -static inline int arch_prepare_to_swap(struct page *page) -{ - if (system_supports_mte()) - return mte_save_tags(page); - return 0; -} +extern int arch_prepare_to_swap(struct folio *folio); #define __HAVE_ARCH_SWAP_INVALIDATE static inline void arch_swap_invalidate_page(int type, pgoff_t offset) @@ -1303,11 +1292,7 @@ static inline void arch_swap_invalidate_area(int type) } #define __HAVE_ARCH_SWAP_RESTORE -static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio) -{ - if (system_supports_mte()) - mte_restore_tags(entry, &folio->page); -} +extern void arch_swap_restore(swp_entry_t entry, struct folio *folio); #endif /* CONFIG_ARM64_MTE */ diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c index a31833e3ddc5..63e8d72f202a 100644 --- a/arch/arm64/mm/mteswap.c +++ b/arch/arm64/mm/mteswap.c @@ -68,6 +68,13 @@ void mte_invalidate_tags(int type, pgoff_t offset) mte_free_tag_storage(tags); } +static inline void __mte_invalidate_tags(struct page *page) +{ + swp_entry_t entry = page_swap_entry(page); + + mte_invalidate_tags(swp_type(entry), swp_offset(entry)); +} + void mte_invalidate_tags_area(int type) { swp_entry_t entry = swp_entry(type, 0); @@ -83,3 +90,41 @@ void mte_invalidate_tags_area(int type) } xa_unlock(&mte_pages); } + +int arch_prepare_to_swap(struct folio *folio) +{ + long i, nr; + int err; + + if (!system_supports_mte()) + return 0; + + nr = folio_nr_pages(folio); + + for (i = 0; i < nr; i++) { + err = mte_save_tags(folio_page(folio, i)); + if (err) + goto out; + } + return 0; + +out: + while (i--) + __mte_invalidate_tags(folio_page(folio, i)); + return err; +} + +void arch_swap_restore(swp_entry_t entry, struct folio *folio) +{ + long i, nr; + + if (!system_supports_mte()) + return; + + nr = folio_nr_pages(folio); + + for (i = 0; i < nr; i++) { + mte_restore_tags(entry, folio_page(folio, i)); + entry.val++; + } +} 
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 0e16451adaba..7576025db55d 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -532,16 +532,4 @@ static inline int split_folio_to_order(struct folio *folio, int new_order) #define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0) #define split_folio(f) split_folio_to_order(f, 0) -/* - * archs that select ARCH_WANTS_THP_SWAP but don't support THP_SWP due to - * limitations in the implementation like arm64 MTE can override this to - * false - */ -#ifndef arch_thp_swp_supported -static inline bool arch_thp_swp_supported(void) -{ - return true; -} -#endif - #endif /* _LINUX_HUGE_MM_H */ diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 2a1c044ae467..600e17d03659 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1050,7 +1050,7 @@ static inline int arch_unmap_one(struct mm_struct *mm, * prototypes must be defined in the arch-specific asm/pgtable.h file. */ #ifndef __HAVE_ARCH_PREPARE_TO_SWAP -static inline int arch_prepare_to_swap(struct page *page) +static inline int arch_prepare_to_swap(struct folio *folio) { return 0; } diff --git a/mm/internal.h b/mm/internal.h index 63bdac6d0413..6c8d3844b6a3 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -76,6 +76,20 @@ static inline int folio_nr_pages_mapped(struct folio *folio) return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED; } +/* + * Retrieve the first entry of a folio based on a provided entry within the + * folio. We cannot rely on folio->swap as there is no guarantee that it has + * been initialized. Used for calling arch_swap_restore() + */ +static inline swp_entry_t folio_swap(swp_entry_t entry, struct folio *folio) +{ + swp_entry_t swap = { + .val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)), + }; + + return swap; +} + static inline void *folio_raw_mapping(struct folio *folio) { unsigned long mapping = (unsigned long)folio->mapping; diff --git a/mm/memory.c b/mm/memory.c index c859a09b4f72..805cebb6fd72 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4190,7 +4190,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) * when reading from swap. This metadata may be indexed by swap entry * so this must be called before swap_free(). */ - arch_swap_restore(entry, folio); + arch_swap_restore(folio_swap(entry, folio), folio); /* * Remove the swap entry and conditionally try to free up the swapcache. diff --git a/mm/page_io.c b/mm/page_io.c index ae2b49055e43..a9a7c236aecc 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -189,7 +189,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc) * Arch code may have to preserve more data than just the page * contents, e.g. memory tags. */ - ret = arch_prepare_to_swap(&folio->page); + ret = arch_prepare_to_swap(folio); if (ret) { folio_mark_dirty(folio); folio_unlock(folio); diff --git a/mm/shmem.c b/mm/shmem.c index 94ab99b6b574..98985179f495 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1907,7 +1907,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, * Some architectures may have to restore extra metadata to the * folio after reading from swap. 
*/ - arch_swap_restore(swap, folio); + arch_swap_restore(folio_swap(swap, folio), folio); if (shmem_should_replace_folio(folio, gfp)) { error = shmem_replace_folio(&folio, gfp, info, index); diff --git a/mm/swap_slots.c b/mm/swap_slots.c index 90973ce7881d..53abeaf1371d 100644 --- a/mm/swap_slots.c +++ b/mm/swap_slots.c @@ -310,7 +310,7 @@ swp_entry_t folio_alloc_swap(struct folio *folio) entry.val = 0; if (folio_test_large(folio)) { - if (IS_ENABLED(CONFIG_THP_SWAP) && arch_thp_swp_supported()) + if (IS_ENABLED(CONFIG_THP_SWAP)) get_swap_pages(1, &entry, folio_nr_pages(folio)); goto out; } diff --git a/mm/swapfile.c b/mm/swapfile.c index 4919423cce76..5e6d2304a2a4 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1806,7 +1806,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, * when reading from swap. This metadata may be indexed by swap entry * so this must be called before swap_free(). */ - arch_swap_restore(entry, folio); + arch_swap_restore(folio_swap(entry, folio), folio); dec_mm_counter(vma->vm_mm, MM_SWAPENTS); inc_mm_counter(vma->vm_mm, MM_ANONPAGES); -- cgit From ebb34f78d72c2320620ba6d55cb22a52949047a1 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 27 Feb 2024 21:15:48 +0100 Subject: mm: convert folio_estimated_sharers() to folio_likely_mapped_shared() Callers of folio_estimated_sharers() only care about "mapped shared vs. mapped exclusively", not the exact estimate of sharers. Let's consolidate and unify the condition users are checking. While at it clarify the semantics and extend the discussion on the fuzziness. Use the "likely mapped shared" terminology to better express what the (adjusted) function actually checks. Whether a partially-mappable folio is more likely to not be partially mapped than partially mapped is debatable. In the future, we might be able to improve our estimate for partially-mappable folios, though. Note that we will now consistently detect "mapped shared" only if the first subpage is actually mapped multiple times. When the first subpage is not mapped, we will consistently detect it as "mapped exclusively". This change should currently only affect the usage in madvise_free_pte_range() and queue_folios_pte_range() for large folios: if the first page was already unmapped, we would have skipped the folio. [david@redhat.com: folio_likely_mapped_shared() kerneldoc fixup] Link: https://lkml.kernel.org/r/dd0ad9f2-2d7a-45f3-9ba3-979488c7dd27@redhat.com Link: https://lkml.kernel.org/r/20240227201548.857831-1-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Khalid Aziz Acked-by: Barry Song Reviewed-by: Vishal Moola (Oracle) Reviewed-by: Ryan Roberts Reviewed-by: Zi Yan Signed-off-by: Andrew Morton --- include/linux/mm.h | 48 ++++++++++++++++++++++++++++++++++++++---------- mm/huge_memory.c | 2 +- mm/madvise.c | 6 +++--- mm/memory.c | 2 +- mm/mempolicy.c | 14 ++++++-------- mm/migrate.c | 8 ++++---- 6 files changed, 53 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 1588fe15a38e..fe66b515aaab 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2165,21 +2165,49 @@ static inline size_t folio_size(struct folio *folio) } /** - * folio_estimated_sharers - Estimate the number of sharers of a folio. + * folio_likely_mapped_shared - Estimate if the folio is mapped into the page + * tables of more than one MM * @folio: The folio. 
* - * folio_estimated_sharers() aims to serve as a function to efficiently - * estimate the number of processes sharing a folio. This is done by - * looking at the precise mapcount of the first subpage in the folio, and - * assuming the other subpages are the same. This may not be true for large - * folios. If you want exact mapcounts for exact calculations, look at - * page_mapcount() or folio_total_mapcount(). + * This function checks if the folio is currently mapped into more than one + * MM ("mapped shared"), or if the folio is only mapped into a single MM + * ("mapped exclusively"). * - * Return: The estimated number of processes sharing a folio. + * As precise information is not easily available for all folios, this function + * estimates the number of MMs ("sharers") that are currently mapping a folio + * using the number of times the first page of the folio is currently mapped + * into page tables. + * + * For small anonymous folios (except KSM folios) and anonymous hugetlb folios, + * the return value will be exactly correct, because they can only be mapped + * at most once into an MM, and they cannot be partially mapped. + * + * For other folios, the result can be fuzzy: + * #. For partially-mappable large folios (THP), the return value can wrongly + * indicate "mapped exclusively" (false negative) when the folio is + * only partially mapped into at least one MM. + * #. For pagecache folios (including hugetlb), the return value can wrongly + * indicate "mapped shared" (false positive) when two VMAs in the same MM + * cover the same file range. + * #. For (small) KSM folios, the return value can wrongly indicate "mapped + * shared" (false negative), when the folio is mapped multiple times into + * the same MM. + * + * Further, this function only considers current page table mappings that + * are tracked using the folio mapcount(s). + * + * This function does not consider: + * #. If the folio might get mapped in the (near) future (e.g., swapcache, + * pagecache, temporary unmapping for migration). + * #. If the folio is mapped differently (VM_PFNMAP). + * #. If hugetlb page table sharing applies. Callers might want to check + * hugetlb_pmd_shared(). + * + * Return: Whether the folio is estimated to be mapped into more than one MM. */ -static inline int folio_estimated_sharers(struct folio *folio) +static inline bool folio_likely_mapped_shared(struct folio *folio) { - return page_mapcount(folio_page(folio, 0)); + return page_mapcount(folio_page(folio, 0)) > 1; } #ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 16b2c5622fb1..1170fc22ed89 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1821,7 +1821,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, * If other processes are mapping this folio, we couldn't discard * the folio unless they all do MADV_FREE so let's skip the folio. 
*/ - if (folio_estimated_sharers(folio) != 1) + if (folio_likely_mapped_shared(folio)) goto out; if (!folio_trylock(folio)) diff --git a/mm/madvise.c b/mm/madvise.c index a2dd70c4a2e6..7625830d6ae9 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -366,7 +366,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, folio = pfn_folio(pmd_pfn(orig_pmd)); /* Do not interfere with other mappings of this folio */ - if (folio_estimated_sharers(folio) != 1) + if (folio_likely_mapped_shared(folio)) goto huge_unlock; if (pageout_anon_only_filter && !folio_test_anon(folio)) @@ -453,7 +453,7 @@ restart: if (folio_test_large(folio)) { int err; - if (folio_estimated_sharers(folio) > 1) + if (folio_likely_mapped_shared(folio)) break; if (pageout_anon_only_filter && !folio_test_anon(folio)) break; @@ -677,7 +677,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, if (folio_test_large(folio)) { int err; - if (folio_estimated_sharers(folio) != 1) + if (folio_likely_mapped_shared(folio)) break; if (!folio_trylock(folio)) break; diff --git a/mm/memory.c b/mm/memory.c index 805cebb6fd72..bcd29104f5ea 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5112,7 +5112,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) * Flag if the folio is shared between multiple address spaces. This * is later used when determining whether to group tasks together */ - if (folio_estimated_sharers(folio) > 1 && (vma->vm_flags & VM_SHARED)) + if (folio_likely_mapped_shared(folio) && (vma->vm_flags & VM_SHARED)) flags |= TNF_SHARED; nid = folio_nid(folio); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 72626b2cefa9..913cff5da5a3 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -642,12 +642,11 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask, * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio. * Choosing not to migrate a shared folio is not counted as a failure. * - * To check if the folio is shared, ideally we want to make sure - * every page is mapped to the same process. Doing that is very - * expensive, so check the estimated sharers of the folio instead. + * See folio_likely_mapped_shared() on possible imprecision when we + * cannot easily detect if a folio is shared. */ if ((flags & MPOL_MF_MOVE_ALL) || - (folio_estimated_sharers(folio) == 1 && !hugetlb_pmd_shared(pte))) + (!folio_likely_mapped_shared(folio) && !hugetlb_pmd_shared(pte))) if (!isolate_hugetlb(folio, qp->pagelist)) qp->nr_failed++; unlock: @@ -1032,11 +1031,10 @@ static bool migrate_folio_add(struct folio *folio, struct list_head *foliolist, * Unless MPOL_MF_MOVE_ALL, we try to avoid migrating a shared folio. * Choosing not to migrate a shared folio is not counted as a failure. * - * To check if the folio is shared, ideally we want to make sure - * every page is mapped to the same process. Doing that is very - * expensive, so check the estimated sharers of the folio instead. + * See folio_likely_mapped_shared() on possible imprecision when we + * cannot easily detect if a folio is shared. 
*/ - if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) { + if ((flags & MPOL_MF_MOVE_ALL) || !folio_likely_mapped_shared(folio)) { if (folio_isolate_lru(folio)) { list_add_tail(&folio->lru, foliolist); node_stat_mod_folio(folio, diff --git a/mm/migrate.c b/mm/migrate.c index 63c97bb639d7..a31aa75d223d 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2593,11 +2593,11 @@ int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, /* * Don't migrate file folios that are mapped in multiple processes * with execute permissions as they are probably shared libraries. - * To check if the folio is shared, ideally we want to make sure - * every page is mapped to the same process. Doing that is very - * expensive, so check the estimated mapcount of the folio instead. + * + * See folio_likely_mapped_shared() on possible imprecision when we + * cannot easily detect if a folio is shared. */ - if (folio_estimated_sharers(folio) != 1 && folio_is_file_lru(folio) && + if (folio_likely_mapped_shared(folio) && folio_is_file_lru(folio) && (vma->vm_flags & VM_EXEC)) goto out; -- cgit From a4864671ca0bf51c8e78242951741df52c06766f Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 16 Apr 2024 01:18:55 +0800 Subject: lib/xarray: introduce a new helper xas_get_order It can be used after xas_load to check the order of loaded entries. Compared to xa_get_order, it saves an XA_STATE and avoid a rewalk. Added new test for xas_get_order, to make the test work, we have to export xas_get_order with EXPORT_SYMBOL_GPL. Also fix a sparse warning by checking the slot value with xa_entry instead of accessing it directly, as suggested by Matthew Wilcox. [kasong@tencent.com: simplify comment, sparse warning fix, per Matthew Wilcox] Link: https://lkml.kernel.org/r/20240416071722.45997-4-ryncsn@gmail.com Link: https://lkml.kernel.org/r/20240415171857.19244-4-ryncsn@gmail.com Signed-off-by: Kairui Song Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- include/linux/xarray.h | 6 ++++++ lib/test_xarray.c | 34 ++++++++++++++++++++++++++++++++++ lib/xarray.c | 49 +++++++++++++++++++++++++++++++------------------ 3 files changed, 71 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/xarray.h b/include/linux/xarray.h index cb571dfcf4b1..d9d479334c9e 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h @@ -1548,6 +1548,7 @@ void xas_create_range(struct xa_state *); #ifdef CONFIG_XARRAY_MULTI int xa_get_order(struct xarray *, unsigned long index); +int xas_get_order(struct xa_state *xas); void xas_split(struct xa_state *, void *entry, unsigned int order); void xas_split_alloc(struct xa_state *, void *entry, unsigned int order, gfp_t); #else @@ -1556,6 +1557,11 @@ static inline int xa_get_order(struct xarray *xa, unsigned long index) return 0; } +static inline int xas_get_order(struct xa_state *xas) +{ + return 0; +} + static inline void xas_split(struct xa_state *xas, void *entry, unsigned int order) { diff --git a/lib/test_xarray.c b/lib/test_xarray.c index ebe2af2e072d..0efde8f93490 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -1984,6 +1984,39 @@ static noinline void check_get_order(struct xarray *xa) } } +static noinline void check_xas_get_order(struct xarray *xa) +{ + XA_STATE(xas, xa, 0); + + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
20 : 1; + unsigned int order; + unsigned long i, j; + + for (order = 0; order < max_order; order++) { + for (i = 0; i < 10; i++) { + xas_set_order(&xas, i << order, order); + do { + xas_lock(&xas); + xas_store(&xas, xa_mk_value(i)); + xas_unlock(&xas); + } while (xas_nomem(&xas, GFP_KERNEL)); + + for (j = i << order; j < (i + 1) << order; j++) { + xas_set_order(&xas, j, 0); + rcu_read_lock(); + xas_load(&xas); + XA_BUG_ON(xa, xas_get_order(&xas) != order); + rcu_read_unlock(); + } + + xas_lock(&xas); + xas_set_order(&xas, i << order, order); + xas_store(&xas, NULL); + xas_unlock(&xas); + } + } +} + static noinline void check_destroy(struct xarray *xa) { unsigned long index; @@ -2035,6 +2068,7 @@ static int xarray_checks(void) check_multi_store(&array); check_multi_store_advanced(&array); check_get_order(&array); + check_xas_get_order(&array); check_xa_alloc(); check_find(&array); check_find_entry(&array); diff --git a/lib/xarray.c b/lib/xarray.c index 39f07bfc4dcc..da79128ad754 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1750,39 +1750,52 @@ unlock: EXPORT_SYMBOL(xa_store_range); /** - * xa_get_order() - Get the order of an entry. - * @xa: XArray. - * @index: Index of the entry. + * xas_get_order() - Get the order of an entry. + * @xas: XArray operation state. + * + * Called after xas_load, the xas should not be in an error state. * * Return: A number between 0 and 63 indicating the order of the entry. */ -int xa_get_order(struct xarray *xa, unsigned long index) +int xas_get_order(struct xa_state *xas) { - XA_STATE(xas, xa, index); - void *entry; int order = 0; - rcu_read_lock(); - entry = xas_load(&xas); - - if (!entry) - goto unlock; - - if (!xas.xa_node) - goto unlock; + if (!xas->xa_node) + return 0; for (;;) { - unsigned int slot = xas.xa_offset + (1 << order); + unsigned int slot = xas->xa_offset + (1 << order); if (slot >= XA_CHUNK_SIZE) break; - if (!xa_is_sibling(xas.xa_node->slots[slot])) + if (!xa_is_sibling(xa_entry(xas->xa, xas->xa_node, slot))) break; order++; } - order += xas.xa_node->shift; -unlock: + order += xas->xa_node->shift; + return order; +} +EXPORT_SYMBOL_GPL(xas_get_order); + +/** + * xa_get_order() - Get the order of an entry. + * @xa: XArray. + * @index: Index of the entry. + * + * Return: A number between 0 and 63 indicating the order of the entry. + */ +int xa_get_order(struct xarray *xa, unsigned long index) +{ + XA_STATE(xas, xa, index); + int order = 0; + void *entry; + + rcu_read_lock(); + entry = xas_load(&xas); + if (entry) + order = xas_get_order(&xas); rcu_read_unlock(); return order; -- cgit From 6600a6b10c3210280e7623b8ba7feac9619cc2cd Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Mon, 25 Mar 2024 22:56:42 +0800 Subject: mm/mm_init.c: remove the useless dma_reserve Now nobody calls set_dma_reserve() to set value for dma_reserve, remove set_dma_reserve(), global variable dma_reserve and the codes using it. 
Link: https://lkml.kernel.org/r/20240325145646.1044760-3-bhe@redhat.com Signed-off-by: Baoquan He Reviewed-by: Mike Rapoport (IBM) Signed-off-by: Andrew Morton --- include/linux/mm.h | 1 - mm/mm_init.c | 23 ----------------------- 2 files changed, 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index fe66b515aaab..22ae6ab621da 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3250,7 +3250,6 @@ static inline int early_pfn_to_nid(unsigned long pfn) extern int __meminit early_pfn_to_nid(unsigned long pfn); #endif -extern void set_dma_reserve(unsigned long new_dma_reserve); extern void mem_init(void); extern void __init mmap_init(void); diff --git a/mm/mm_init.c b/mm/mm_init.c index f45c2b32ba82..91bb8600c0fa 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -227,7 +227,6 @@ static unsigned long required_movablecore_percent __initdata; static unsigned long nr_kernel_pages __initdata; static unsigned long nr_all_pages __initdata; -static unsigned long dma_reserve __initdata; static bool deferred_struct_pages __meminitdata; @@ -1584,12 +1583,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat) zone_names[j], memmap_pages, freesize); } - /* Account for reserved pages */ - if (j == 0 && freesize > dma_reserve) { - freesize -= dma_reserve; - pr_debug(" %s zone: %lu pages reserved\n", zone_names[0], dma_reserve); - } - if (!is_highmem_idx(j)) nr_kernel_pages += freesize; /* Charge for highmem memmap if there are enough kernel pages */ @@ -2548,22 +2541,6 @@ void *__init alloc_large_system_hash(const char *tablename, return table; } -/** - * set_dma_reserve - set the specified number of pages reserved in the first zone - * @new_dma_reserve: The number of pages to mark reserved - * - * The per-cpu batchsize and zone watermarks are determined by managed_pages. - * In the DMA zone, a significant percentage may be consumed by kernel image - * and other unfreeable allocations which can skew the watermarks badly. This - * function may optionally be used to account for unfreeable pages in the - * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and - * smaller per-cpu batchsize. - */ -void __init set_dma_reserve(unsigned long new_dma_reserve) -{ - dma_reserve = new_dma_reserve; -} - void __init memblock_free_pages(struct page *page, unsigned long pfn, unsigned int order) { -- cgit From 0b52663f7547520fbf54e2c07f61f8bd1b5cb6eb Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Mon, 25 Mar 2024 22:56:46 +0800 Subject: mm/mm_init.c: remove arch_reserved_kernel_pages() Since the current calculation of calc_nr_kernel_pages() has taken into consideration of kernel reserved memory, no need to have arch_reserved_kernel_pages() any more. 
Link: https://lkml.kernel.org/r/20240325145646.1044760-7-bhe@redhat.com Signed-off-by: Baoquan He Reviewed-by: Mike Rapoport (IBM) Signed-off-by: Andrew Morton --- arch/powerpc/include/asm/mmu.h | 4 ---- arch/powerpc/kernel/fadump.c | 5 ----- include/linux/mm.h | 3 --- mm/mm_init.c | 12 ------------ 4 files changed, 24 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 3b72c7ed24cf..aa5c0fd5edb1 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h @@ -406,9 +406,5 @@ extern void *abatron_pteptrs[2]; #include #endif -#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP) -#define __HAVE_ARCH_RESERVED_KERNEL_PAGES -#endif - #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_MMU_H_ */ diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c index d14eda1e8589..ae8c7619e597 100644 --- a/arch/powerpc/kernel/fadump.c +++ b/arch/powerpc/kernel/fadump.c @@ -1735,8 +1735,3 @@ static void __init fadump_reserve_crash_area(u64 base) memblock_reserve(mstart, msize); } } - -unsigned long __init arch_reserved_kernel_pages(void) -{ - return memblock_reserved_size() / PAGE_SIZE; -} diff --git a/include/linux/mm.h b/include/linux/mm.h index 22ae6ab621da..dd47ba7a1c04 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3261,9 +3261,6 @@ static inline void show_mem(void) extern long si_mem_available(void); extern void si_meminfo(struct sysinfo * val); extern void si_meminfo_node(struct sysinfo *val, int nid); -#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES -extern unsigned long arch_reserved_kernel_pages(void); -#endif extern __printf(3, 4) void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...); diff --git a/mm/mm_init.c b/mm/mm_init.c index 8c261572ca6e..0d32bcc301e2 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -2374,17 +2374,6 @@ void __init page_alloc_init_late(void) page_alloc_sysctl_init(); } -#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES -/* - * Returns the number of pages that arch has reserved but - * is not known to alloc_large_system_hash(). - */ -static unsigned long __init arch_reserved_kernel_pages(void) -{ - return 0; -} -#endif - /* * Adaptive scale is meant to reduce sizes of hash tables on large memory * machines. As memory size is increased the scale is also increased but at @@ -2427,7 +2416,6 @@ void *__init alloc_large_system_hash(const char *tablename, if (!numentries) { /* round applicable memory size up to nearest megabyte */ numentries = nr_kernel_pages; - numentries -= arch_reserved_kernel_pages(); /* It isn't necessary when PAGE_SIZE >= 1MB */ if (PAGE_SIZE < SZ_1M) -- cgit From cb10c28ac82c9b7a5e9b3b1dc7157036c20c36dd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 25 Mar 2024 07:45:41 +0800 Subject: mm: remove follow_pfn Remove follow_pfn now that the last user is gone. 
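For callers that still need a PFN, the replacement is the pattern the remaining in-tree users follow: look up the PTE with follow_pte() and derive the PFN while holding the PTE lock. The sketch below is essentially the body of the function being removed (the helper name is hypothetical; the caller must also hold the mmap lock for read, as follow_pte() requires):

	/* Hypothetical replacement for a former follow_pfn() user. */
	static int lookup_pfn(struct vm_area_struct *vma, unsigned long address,
			      unsigned long *pfn)
	{
		pte_t *ptep;
		spinlock_t *ptl;
		int ret;

		if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
			return -EINVAL;

		ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
		if (ret)
			return ret;
		*pfn = pte_pfn(ptep_get(ptep));
		pte_unmap_unlock(ptep, ptl);
		return 0;
	}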
Link: https://lkml.kernel.org/r/20240324234542.2038726-3-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: David Hildenbrand Cc: Andy Lutomirski Cc: Dave Hansen Cc: Fei Li Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Nathan Chancellor Signed-off-by: Andrew Morton --- include/linux/mm.h | 2 -- mm/memory.c | 36 ++---------------------------------- mm/nommu.c | 21 --------------------- 3 files changed, 2 insertions(+), 57 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index dd47ba7a1c04..cb8e6158b577 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2424,8 +2424,6 @@ int copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, spinlock_t **ptlp); -int follow_pfn(struct vm_area_struct *vma, unsigned long address, - unsigned long *pfn); int follow_phys(struct vm_area_struct *vma, unsigned long address, unsigned int flags, unsigned long *prot, resource_size_t *phys); int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, diff --git a/mm/memory.c b/mm/memory.c index bcd29104f5ea..652370703e41 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5884,8 +5884,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) * Only IO mappings and raw PFN mappings are allowed. The mmap semaphore * should be taken for read. * - * KVM uses this function. While it is arguably less bad than ``follow_pfn``, - * it is not a good general-purpose API. + * KVM uses this function. While it is arguably less bad than the historic + * ``follow_pfn``, it is not a good general-purpose API. * * Return: zero on success, -ve otherwise. */ @@ -5927,38 +5927,6 @@ out: } EXPORT_SYMBOL_GPL(follow_pte); -/** - * follow_pfn - look up PFN at a user virtual address - * @vma: memory mapping - * @address: user virtual address - * @pfn: location to store found PFN - * - * Only IO mappings and raw PFN mappings are allowed. - * - * This function does not allow the caller to read the permissions - * of the PTE. Do not use it. - * - * Return: zero and the pfn at @pfn on success, -ve otherwise. - */ -int follow_pfn(struct vm_area_struct *vma, unsigned long address, - unsigned long *pfn) -{ - int ret = -EINVAL; - spinlock_t *ptl; - pte_t *ptep; - - if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) - return ret; - - ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); - if (ret) - return ret; - *pfn = pte_pfn(ptep_get(ptep)); - pte_unmap_unlock(ptep, ptl); - return 0; -} -EXPORT_SYMBOL(follow_pfn); - #ifdef CONFIG_HAVE_IOREMAP_PROT int follow_phys(struct vm_area_struct *vma, unsigned long address, unsigned int flags, diff --git a/mm/nommu.c b/mm/nommu.c index 88b646ea6d30..331d2f778695 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -110,27 +110,6 @@ unsigned int kobjsize(const void *objp) return page_size(page); } -/** - * follow_pfn - look up PFN at a user virtual address - * @vma: memory mapping - * @address: user virtual address - * @pfn: location to store found PFN - * - * Only IO mappings and raw PFN mappings are allowed. - * - * Returns zero and the pfn at @pfn on success, -ve otherwise. 
- */ -int follow_pfn(struct vm_area_struct *vma, unsigned long address, - unsigned long *pfn) -{ - if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) - return -EINVAL; - - *pfn = address >> PAGE_SHIFT; - return 0; -} -EXPORT_SYMBOL(follow_pfn); - void vfree(const void *addr) { kfree(addr); -- cgit From 5b34b76cb0cd8a21dee5c7677eae98480b0d05cc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 25 Mar 2024 07:45:42 +0800 Subject: mm: move follow_phys to arch/x86/mm/pat/memtype.c follow_phys is only used by two callers in arch/x86/mm/pat/memtype.c. Move it there and hardcode the two arguments that get the same values passed by both callers. [david@redhat.com: conflict resolutions] Link: https://lkml.kernel.org/r/20240403212131.929421-4-david@redhat.com Link: https://lkml.kernel.org/r/20240324234542.2038726-4-hch@lst.de Signed-off-by: Christoph Hellwig Signed-off-by: David Hildenbrand Reviewed-by: David Hildenbrand Cc: Andy Lutomirski Cc: Dave Hansen Cc: Fei Li Cc: Ingo Molnar Cc: Peter Zijlstra Cc: Nathan Chancellor Signed-off-by: Andrew Morton --- arch/x86/mm/pat/memtype.c | 29 ++++++++++++++++++++++++++++- include/linux/mm.h | 2 -- mm/memory.c | 32 -------------------------------- 3 files changed, 28 insertions(+), 35 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/mm/pat/memtype.c b/arch/x86/mm/pat/memtype.c index 36b603d0cdde..d01c3b0bd6eb 100644 --- a/arch/x86/mm/pat/memtype.c +++ b/arch/x86/mm/pat/memtype.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include @@ -947,6 +948,32 @@ static void free_pfn_range(u64 paddr, unsigned long size) memtype_free(paddr, paddr + size); } +static int follow_phys(struct vm_area_struct *vma, unsigned long *prot, + resource_size_t *phys) +{ + pte_t *ptep, pte; + spinlock_t *ptl; + + if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) + return -EINVAL; + + if (follow_pte(vma->vm_mm, vma->vm_start, &ptep, &ptl)) + return -EINVAL; + + pte = ptep_get(ptep); + + /* Never return PFNs of anon folios in COW mappings. */ + if (vm_normal_folio(vma, vma->vm_start, pte)) { + pte_unmap_unlock(ptep, ptl); + return -EINVAL; + } + + *prot = pgprot_val(pte_pgprot(pte)); + *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; + pte_unmap_unlock(ptep, ptl); + return 0; +} + static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr, pgprot_t *pgprot) { @@ -964,7 +991,7 @@ static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr, * detect the PFN. If we need the cachemode as well, we're out of luck * for now and have to fail fork(). 
*/ - if (!follow_phys(vma, vma->vm_start, 0, &prot, paddr)) { + if (!follow_phys(vma, &prot, paddr)) { if (pgprot) *pgprot = __pgprot(prot); return 0; diff --git a/include/linux/mm.h b/include/linux/mm.h index cb8e6158b577..5dc65618e386 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2424,8 +2424,6 @@ int copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp, spinlock_t **ptlp); -int follow_phys(struct vm_area_struct *vma, unsigned long address, - unsigned int flags, unsigned long *prot, resource_size_t *phys); int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); diff --git a/mm/memory.c b/mm/memory.c index 652370703e41..62ee4a15092a 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5928,38 +5928,6 @@ out: EXPORT_SYMBOL_GPL(follow_pte); #ifdef CONFIG_HAVE_IOREMAP_PROT -int follow_phys(struct vm_area_struct *vma, - unsigned long address, unsigned int flags, - unsigned long *prot, resource_size_t *phys) -{ - int ret = -EINVAL; - pte_t *ptep, pte; - spinlock_t *ptl; - - if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) - goto out; - - if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) - goto out; - pte = ptep_get(ptep); - - /* Never return PFNs of anon folios in COW mappings. */ - if (vm_normal_folio(vma, address, pte)) - goto unlock; - - if ((flags & FOLL_WRITE) && !pte_write(pte)) - goto unlock; - - *prot = pgprot_val(pte_pgprot(pte)); - *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT; - - ret = 0; -unlock: - pte_unmap_unlock(ptep, ptl); -out: - return ret; -} - /** * generic_access_phys - generic implementation for iomem mmap access * @vma: the vma to access -- cgit From f002882ca369aba3eece5006f3346ccf75ede7c5 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 26 Mar 2024 15:32:10 +0100 Subject: mm: merge folio_is_secretmem() and folio_fast_pin_allowed() into gup_fast_folio_allowed() folio_is_secretmem() is currently only used during GUP-fast. Nowadays, folio_fast_pin_allowed() performs similar checks during GUP-fast and contains a lot of careful handling -- READ_ONCE() -- , sanity checks -- lockdep_assert_irqs_disabled() -- and helpful comments on how this handling is safe and correct. So let's merge folio_is_secretmem() into folio_fast_pin_allowed(). Rename folio_fast_pin_allowed() to gup_fast_folio_allowed(), to better match the new semantics. Link: https://lkml.kernel.org/r/20240326143210.291116-4-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Mike Rapoport (IBM) Cc: David Hildenbrand Cc: Lorenzo Stoakes Cc: Miklos Szeredi Cc: xingwei lee Cc: yue sun Signed-off-by: Andrew Morton --- include/linux/secretmem.h | 21 ++------------------- mm/gup.c | 48 +++++++++++++++++++++++++++-------------------- 2 files changed, 30 insertions(+), 39 deletions(-) (limited to 'include/linux') diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h index acf7e1a3f3de..e918f96881f5 100644 --- a/include/linux/secretmem.h +++ b/include/linux/secretmem.h @@ -6,25 +6,8 @@ extern const struct address_space_operations secretmem_aops; -static inline bool folio_is_secretmem(struct folio *folio) +static inline bool secretmem_mapping(struct address_space *mapping) { - struct address_space *mapping; - - /* - * Using folio_mapping() is quite slow because of the actual call - * instruction. - * We know that secretmem pages are not compound, so we can - * save a couple of cycles here. 
- */ - if (folio_test_large(folio)) - return false; - - mapping = (struct address_space *) - ((unsigned long)folio->mapping & ~PAGE_MAPPING_FLAGS); - - if (!mapping || mapping != folio->mapping) - return false; - return mapping->a_ops == &secretmem_aops; } @@ -38,7 +21,7 @@ static inline bool vma_is_secretmem(struct vm_area_struct *vma) return false; } -static inline bool folio_is_secretmem(struct folio *folio) +static inline bool secretmem_mapping(struct address_space *mapping) { return false; } diff --git a/mm/gup.c b/mm/gup.c index 6d8d15f8c7f9..7b21fcda3a98 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2468,12 +2468,14 @@ EXPORT_SYMBOL(get_user_pages_unlocked); #ifdef CONFIG_HAVE_FAST_GUP /* - * Used in the GUP-fast path to determine whether a pin is permitted for a - * specific folio. + * Used in the GUP-fast path to determine whether GUP is permitted to work on + * a specific folio. * * This call assumes the caller has pinned the folio, that the lowest page table * level still points to this folio, and that interrupts have been disabled. * + * GUP-fast must reject all secretmem folios. + * * Writing to pinned file-backed dirty tracked folios is inherently problematic * (see comment describing the writable_file_mapping_allowed() function). We * therefore try to avoid the most egregious case of a long-term mapping doing @@ -2483,25 +2485,34 @@ EXPORT_SYMBOL(get_user_pages_unlocked); * in the fast path, so instead we whitelist known good cases and if in doubt, * fall back to the slow path. */ -static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags) +static bool gup_fast_folio_allowed(struct folio *folio, unsigned int flags) { + bool reject_file_backed = false; struct address_space *mapping; + bool check_secretmem = false; unsigned long mapping_flags; /* * If we aren't pinning then no problematic write can occur. A long term * pin is the most egregious case so this is the one we disallow. */ - if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) != + if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) == (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) - return true; + reject_file_backed = true; + + /* We hold a folio reference, so we can safely access folio fields. */ - /* The folio is pinned, so we can safely access folio fields. */ + /* secretmem folios are always order-0 folios. */ + if (IS_ENABLED(CONFIG_SECRETMEM) && !folio_test_large(folio)) + check_secretmem = true; + + if (!reject_file_backed && !check_secretmem) + return true; if (WARN_ON_ONCE(folio_test_slab(folio))) return false; - /* hugetlb mappings do not require dirty-tracking. */ + /* hugetlb neither requires dirty-tracking nor can be secretmem. */ if (folio_test_hugetlb(folio)) return true; @@ -2537,10 +2548,12 @@ static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags) /* * At this point, we know the mapping is non-null and points to an - * address_space object. The only remaining whitelisted file system is - * shmem. + * address_space object. */ - return shmem_mapping(mapping); + if (check_secretmem && secretmem_mapping(mapping)) + return false; + /* The only remaining allowed file system is shmem. 
*/ + return !reject_file_backed || shmem_mapping(mapping); } static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, @@ -2626,18 +2639,13 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, if (!folio) goto pte_unmap; - if (unlikely(folio_is_secretmem(folio))) { - gup_put_folio(folio, 1, flags); - goto pte_unmap; - } - if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { gup_put_folio(folio, 1, flags); goto pte_unmap; } - if (!folio_fast_pin_allowed(folio, flags)) { + if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, 1, flags); goto pte_unmap; } @@ -2834,7 +2842,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, return 0; } - if (!folio_fast_pin_allowed(folio, flags)) { + if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } @@ -2905,7 +2913,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, return 0; } - if (!folio_fast_pin_allowed(folio, flags)) { + if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } @@ -2949,7 +2957,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, return 0; } - if (!folio_fast_pin_allowed(folio, flags)) { + if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } @@ -2994,7 +3002,7 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, return 0; } - if (!folio_fast_pin_allowed(folio, flags)) { + if (!gup_fast_folio_allowed(folio, flags)) { gup_put_folio(folio, refs, flags); return 0; } -- cgit From 6e65aa55cdf4c7cc0490b156b4aae2035c157140 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 26 Mar 2024 17:10:25 +0000 Subject: mm: make page_ext_get() take a const argument In order to constify other functions, we need page_ext_get() to be const. This is no problem as lookup_page_ext() already takes a const argument. 
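As an illustration of the pattern this enables (a sketch only, mirroring the follow-up folio_test_idle() change; the _sketch suffix marks it as hypothetical and not part of the patch), a read-only test can now hand a const page straight to page_ext_get():

static inline bool folio_test_idle_sketch(const struct folio *folio)
{
	struct page_ext *page_ext = page_ext_get(&folio->page);
	bool idle;

	if (unlikely(!page_ext))
		return false;

	idle = test_bit(PAGE_EXT_IDLE, &page_ext->flags);
	page_ext_put(page_ext);

	return idle;
}

Without the const qualifier on page_ext_get(), the &folio->page argument above (a const struct page *) would need a cast.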
Link: https://lkml.kernel.org/r/20240326171045.410737-4-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- include/linux/page_ext.h | 4 ++-- include/linux/pgalloc_tag.h | 2 -- mm/page_ext.c | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index 07e0656898f9..e4b48a0dda24 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -77,7 +77,7 @@ static inline void page_ext_init(void) } #endif -extern struct page_ext *page_ext_get(struct page *page); +extern struct page_ext *page_ext_get(const struct page *page); extern void page_ext_put(struct page_ext *page_ext); static inline void *page_ext_data(struct page_ext *page_ext, @@ -117,7 +117,7 @@ static inline void page_ext_init_flatmem(void) { } -static inline struct page_ext *page_ext_get(struct page *page) +static inline struct page_ext *page_ext_get(const struct page *page) { return NULL; } diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index 62d8dad74b37..86ba5d33e43b 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -12,8 +12,6 @@ #include extern struct page_ext_operations page_alloc_tagging_ops; -extern struct page_ext *page_ext_get(struct page *page); -extern void page_ext_put(struct page_ext *page_ext); static inline union codetag_ref *codetag_ref_from_page_ext(struct page_ext *page_ext) { diff --git a/mm/page_ext.c b/mm/page_ext.c index e7d8f1a5589e..95dd8ffeaf81 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c @@ -514,7 +514,7 @@ void __meminit pgdat_page_ext_init(struct pglist_data *pgdat) * Context: Any context. Caller may not sleep until they have called * page_ext_put(). */ -struct page_ext *page_ext_get(struct page *page) +struct page_ext *page_ext_get(const struct page *page) { struct page_ext *page_ext; -- cgit From e3089fd0b0199a368db4039122b241139cc665f3 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 26 Mar 2024 17:10:26 +0000 Subject: mm: make folio_test_idle and folio_test_young take a const argument If these functions are defined in page-flags.h, they already take a const argument; make it true for these alternate definitions too. Link: https://lkml.kernel.org/r/20240326171045.410737-5-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- include/linux/page_idle.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h index d8f344840643..511e22ef459f 100644 --- a/include/linux/page_idle.h +++ b/include/linux/page_idle.h @@ -13,7 +13,7 @@ * If there is not enough space to store Idle and Young bits in page flags, use * page ext flags instead. 
*/ -static inline bool folio_test_young(struct folio *folio) +static inline bool folio_test_young(const struct folio *folio) { struct page_ext *page_ext = page_ext_get(&folio->page); bool page_young; @@ -52,7 +52,7 @@ static inline bool folio_test_clear_young(struct folio *folio) return page_young; } -static inline bool folio_test_idle(struct folio *folio) +static inline bool folio_test_idle(const struct folio *folio) { struct page_ext *page_ext = page_ext_get(&folio->page); bool page_idle; @@ -60,7 +60,7 @@ static inline bool folio_test_idle(struct folio *folio) if (unlikely(!page_ext)) return false; - page_idle = test_bit(PAGE_EXT_IDLE, &page_ext->flags); + page_idle = test_bit(PAGE_EXT_IDLE, &page_ext->flags); page_ext_put(page_ext); return page_idle; @@ -91,7 +91,7 @@ static inline void folio_clear_idle(struct folio *folio) #else /* !CONFIG_PAGE_IDLE_FLAG */ -static inline bool folio_test_young(struct folio *folio) +static inline bool folio_test_young(const struct folio *folio) { return false; } @@ -105,7 +105,7 @@ static inline bool folio_test_clear_young(struct folio *folio) return false; } -static inline bool folio_test_idle(struct folio *folio) +static inline bool folio_test_idle(const struct folio *folio) { return false; } -- cgit From 2ace5a670e2a0f3a6029bc6cf91dc14202074e7a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 26 Mar 2024 17:10:27 +0000 Subject: mm: make is_free_buddy_page() take a const argument This function does not modify its argument; let the callers know that so they can make better optimisation decisions. Link: https://lkml.kernel.org/r/20240326171045.410737-6-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- include/linux/page-flags.h | 2 +- mm/page_alloc.c | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 0163d6da6283..eaecf544039f 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -1088,7 +1088,7 @@ static inline bool is_page_hwpoison(const struct page *page) return folio_test_hugetlb(folio) && PageHWPoison(&folio->page); } -extern bool is_free_buddy_page(struct page *page); +bool is_free_buddy_page(const struct page *page); PAGEFLAG(Isolated, isolated, PF_ANY); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index daab8cab91cc..6a8593aef297 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6745,16 +6745,16 @@ void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) /* * This function returns a stable result only if called under zone lock. */ -bool is_free_buddy_page(struct page *page) +bool is_free_buddy_page(const struct page *page) { unsigned long pfn = page_to_pfn(page); unsigned int order; for (order = 0; order < NR_PAGE_ORDERS; order++) { - struct page *page_head = page - (pfn & ((1 << order) - 1)); + const struct page *head = page - (pfn & ((1 << order) - 1)); - if (PageBuddy(page_head) && - buddy_order_unsafe(page_head) >= order) + if (PageBuddy(head) && + buddy_order_unsafe(head) >= order) break; } -- cgit From b84fd2835c70e0149e2522fd746d3fb7049a1e19 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 26 Mar 2024 17:10:28 +0000 Subject: mm: make page_mapped() take a const argument None of the functions called by page_mapped() modify the page or folio, so mark them all as const. 
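For illustration, a minimal sketch of the kind of caller this unlocks (report_page() is hypothetical and not part of the patch); page_mapped(), folio_mapcount() and folio_nr_pages() all accept const pointers after this change:

static void report_page(const struct page *page)
{
	const struct folio *folio = page_folio(page);

	pr_info("page %p: mapped=%d mapcount=%d nr_pages=%ld\n",
		page, page_mapped(page), folio_mapcount(folio),
		folio_nr_pages(folio));
}

page_folio() preserves constness via _Generic(), so no casts are needed anywhere in the chain.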
Link: https://lkml.kernel.org/r/20240326171045.410737-7-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- include/linux/mm.h | 12 ++++++------ mm/internal.h | 7 ++++--- mm/rmap.c | 2 +- 3 files changed, 11 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 5dc65618e386..9c0e441664d4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1200,7 +1200,7 @@ static inline int is_vmalloc_or_module_addr(const void *x) * debugging purposes - it does not include PTE-mapped sub-pages; look * at folio_mapcount() or page_mapcount() instead. */ -static inline int folio_entire_mapcount(struct folio *folio) +static inline int folio_entire_mapcount(const struct folio *folio) { VM_BUG_ON_FOLIO(!folio_test_large(folio), folio); return atomic_read(&folio->_entire_mapcount) + 1; @@ -1240,7 +1240,7 @@ static inline int page_mapcount(struct page *page) return mapcount; } -int folio_total_mapcount(struct folio *folio); +int folio_total_mapcount(const struct folio *folio); /** * folio_mapcount() - Calculate the number of mappings of this folio. @@ -1253,14 +1253,14 @@ int folio_total_mapcount(struct folio *folio); * * Return: The number of times this folio is mapped. */ -static inline int folio_mapcount(struct folio *folio) +static inline int folio_mapcount(const struct folio *folio) { if (likely(!folio_test_large(folio))) return atomic_read(&folio->_mapcount) + 1; return folio_total_mapcount(folio); } -static inline bool folio_large_is_mapped(struct folio *folio) +static inline bool folio_large_is_mapped(const struct folio *folio) { /* * Reading _entire_mapcount below could be omitted if hugetlb @@ -1288,7 +1288,7 @@ static inline bool folio_mapped(struct folio *folio) * For compound page it returns true if any sub-page of compound page is mapped, * even if this particular sub-page is not itself mapped by any PTE or PMD. */ -static inline bool page_mapped(struct page *page) +static inline bool page_mapped(const struct page *page) { if (likely(!PageCompound(page))) return atomic_read(&page->_mapcount) >= 0; @@ -2070,7 +2070,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone, * * Return: A positive power of two. */ -static inline long folio_nr_pages(struct folio *folio) +static inline long folio_nr_pages(const struct folio *folio) { if (!folio_test_large(folio)) return 1; diff --git a/mm/internal.h b/mm/internal.h index 5dbfa1c12e89..8e11f7b2da21 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -71,7 +71,7 @@ void page_writeback_init(void); * How many individual pages have an elevated _mapcount. Excludes * the folio's entire_mapcount. */ -static inline int folio_nr_pages_mapped(struct folio *folio) +static inline int folio_nr_pages_mapped(const struct folio *folio) { return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED; } @@ -81,7 +81,8 @@ static inline int folio_nr_pages_mapped(struct folio *folio) * folio. We cannot rely on folio->swap as there is no guarantee that it has * been initialized. 
Used for calling arch_swap_restore() */ -static inline swp_entry_t folio_swap(swp_entry_t entry, struct folio *folio) +static inline swp_entry_t folio_swap(swp_entry_t entry, + const struct folio *folio) { swp_entry_t swap = { .val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)), @@ -90,7 +91,7 @@ static inline swp_entry_t folio_swap(swp_entry_t entry, struct folio *folio) return swap; } -static inline void *folio_raw_mapping(struct folio *folio) +static inline void *folio_raw_mapping(const struct folio *folio) { unsigned long mapping = (unsigned long)folio->mapping; diff --git a/mm/rmap.c b/mm/rmap.c index 3746a5531018..d52759aa3ff7 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1134,7 +1134,7 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, return page_vma_mkclean_one(&pvmw); } -int folio_total_mapcount(struct folio *folio) +int folio_total_mapcount(const struct folio *folio) { int mapcount = folio_entire_mapcount(folio); int nr_pages; -- cgit From 51718e25c53f58fe9fa515f219367632cce4914d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 26 Mar 2024 17:10:29 +0000 Subject: mm: convert arch_clear_hugepage_flags to take a folio All implementations that aren't no-ops just set a bit in the flags, and we want to use the folio flags rather than the page flags for that. Rename it to arch_clear_hugetlb_flags() while we're touching it so nobody thinks it's used for THP. [willy@infradead.org: fix arm64 build] Link: https://lkml.kernel.org/r/ZgQvNKGdlDkwhQEX@casper.infradead.org Link: https://lkml.kernel.org/r/20240326171045.410737-8-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- arch/arm/include/asm/hugetlb.h | 6 +++--- arch/arm64/include/asm/hugetlb.h | 6 +++--- arch/riscv/include/asm/hugetlb.h | 6 +++--- arch/s390/include/asm/hugetlb.h | 6 +++--- arch/sh/include/asm/hugetlb.h | 6 +++--- include/linux/hugetlb.h | 6 +++--- mm/hugetlb.c | 4 ++-- 7 files changed, 20 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/include/asm/hugetlb.h b/arch/arm/include/asm/hugetlb.h index a3a82b7158d4..b766c4b373f6 100644 --- a/arch/arm/include/asm/hugetlb.h +++ b/arch/arm/include/asm/hugetlb.h @@ -15,10 +15,10 @@ #include #include -static inline void arch_clear_hugepage_flags(struct page *page) +static inline void arch_clear_hugetlb_flags(struct folio *folio) { - clear_bit(PG_dcache_clean, &page->flags); + clear_bit(PG_dcache_clean, &folio->flags); } -#define arch_clear_hugepage_flags arch_clear_hugepage_flags +#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags #endif /* _ASM_ARM_HUGETLB_H */ diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h index 2ddc33d93b13..3954cbd2ff56 100644 --- a/arch/arm64/include/asm/hugetlb.h +++ b/arch/arm64/include/asm/hugetlb.h @@ -18,11 +18,11 @@ extern bool arch_hugetlb_migration_supported(struct hstate *h); #endif -static inline void arch_clear_hugepage_flags(struct page *page) +static inline void arch_clear_hugetlb_flags(struct folio *folio) { - clear_bit(PG_dcache_clean, &page->flags); + clear_bit(PG_dcache_clean, &folio->flags); } -#define arch_clear_hugepage_flags arch_clear_hugepage_flags +#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags); #define arch_make_huge_pte arch_make_huge_pte diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h index 22deb7a2a6ec..b1ce97a9dbfc 100644 
--- a/arch/riscv/include/asm/hugetlb.h +++ b/arch/riscv/include/asm/hugetlb.h @@ -5,11 +5,11 @@ #include #include -static inline void arch_clear_hugepage_flags(struct page *page) +static inline void arch_clear_hugetlb_flags(struct folio *folio) { - clear_bit(PG_dcache_clean, &page->flags); + clear_bit(PG_dcache_clean, &folio->flags); } -#define arch_clear_hugepage_flags arch_clear_hugepage_flags +#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION bool arch_hugetlb_migration_supported(struct hstate *h); diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h index deb198a61039..ce5f4fe8be4d 100644 --- a/arch/s390/include/asm/hugetlb.h +++ b/arch/s390/include/asm/hugetlb.h @@ -39,11 +39,11 @@ static inline int prepare_hugepage_range(struct file *file, return 0; } -static inline void arch_clear_hugepage_flags(struct page *page) +static inline void arch_clear_hugetlb_flags(struct folio *folio) { - clear_bit(PG_arch_1, &page->flags); + clear_bit(PG_arch_1, &folio->flags); } -#define arch_clear_hugepage_flags arch_clear_hugepage_flags +#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep, unsigned long sz) diff --git a/arch/sh/include/asm/hugetlb.h b/arch/sh/include/asm/hugetlb.h index 4d3ba39e681c..75028bd568ba 100644 --- a/arch/sh/include/asm/hugetlb.h +++ b/arch/sh/include/asm/hugetlb.h @@ -27,11 +27,11 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, return *ptep; } -static inline void arch_clear_hugepage_flags(struct page *page) +static inline void arch_clear_hugetlb_flags(struct folio *folio) { - clear_bit(PG_dcache_clean, &page->flags); + clear_bit(PG_dcache_clean, &folio->flags); } -#define arch_clear_hugepage_flags arch_clear_hugepage_flags +#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags #include diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index d748628efc5e..03fc6d625068 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -836,9 +836,9 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, #define is_hugepage_only_range is_hugepage_only_range #endif -#ifndef arch_clear_hugepage_flags -static inline void arch_clear_hugepage_flags(struct page *page) { } -#define arch_clear_hugepage_flags arch_clear_hugepage_flags +#ifndef arch_clear_hugetlb_flags +static inline void arch_clear_hugetlb_flags(struct folio *folio) { } +#define arch_clear_hugetlb_flags arch_clear_hugetlb_flags #endif #ifndef arch_make_huge_pte diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 2e984554ec63..88bb38df1701 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1726,7 +1726,7 @@ static void add_hugetlb_folio(struct hstate *h, struct folio *folio, */ return; - arch_clear_hugepage_flags(&folio->page); + arch_clear_hugetlb_flags(folio); enqueue_hugetlb_folio(h, folio); } @@ -2024,7 +2024,7 @@ void free_huge_folio(struct folio *folio) spin_unlock_irqrestore(&hugetlb_lock, flags); update_and_free_hugetlb_folio(h, folio, true); } else { - arch_clear_hugepage_flags(&folio->page); + arch_clear_hugetlb_flags(folio); enqueue_hugetlb_folio(h, folio); spin_unlock_irqrestore(&hugetlb_lock, flags); } -- cgit From dee3d0bef2b00772be430425832ead6aa9d707f9 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 26 Mar 2024 17:10:32 +0000 Subject: proc: rewrite stable_page_flags() Reduce the usage of PageFlag tests and reduce the number of compound_head() calls. 
For multi-page folios, we'll now show all pages as having the flags that apply to them, e.g. if it's dirty, all pages will have the dirty flag set instead of just the head page. The mapped flag is still per page, as is the hwpoison flag. [willy@infradead.org: fix up some bits vs masks] Link: https://lkml.kernel.org/r/20240403173112.1450721-1-willy@infradead.org [willy@infradead.org: fix warnings] Link: https://lkml.kernel.org/r/ZhBPtCYfSuFuUMEz@casper.infradead.org Link: https://lkml.kernel.org/r/20240326171045.410737-11-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Svetly Todorov Signed-off-by: Andrew Morton --- fs/proc/page.c | 69 ++++++++++++++++++++++-------------------- include/linux/huge_mm.h | 4 +-- include/linux/page-flags.h | 2 +- tools/cgroup/memcg_slabinfo.py | 5 ++- 4 files changed, 42 insertions(+), 38 deletions(-) (limited to 'include/linux') diff --git a/fs/proc/page.c b/fs/proc/page.c index 9223856c934b..05120263af2a 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -107,10 +107,13 @@ static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit) return ((kflags >> kbit) & 1) << ubit; } -u64 stable_page_flags(struct page *page) +u64 stable_page_flags(const struct page *page) { - u64 k; - u64 u; + const struct folio *folio; + unsigned long k; + unsigned long mapping; + bool is_anon; + u64 u = 0; /* * pseudo flag: KPF_NOPAGE @@ -118,49 +121,47 @@ u64 stable_page_flags(struct page *page) */ if (!page) return 1 << KPF_NOPAGE; + folio = page_folio(page); - k = page->flags; - u = 0; + k = folio->flags; + mapping = (unsigned long)folio->mapping; + is_anon = mapping & PAGE_MAPPING_ANON; /* * pseudo flags for the well known (anonymous) memory mapped pages */ if (page_mapped(page)) u |= 1 << KPF_MMAP; - if (PageAnon(page)) + if (is_anon) { u |= 1 << KPF_ANON; - if (PageKsm(page)) - u |= 1 << KPF_KSM; + if (mapping & PAGE_MAPPING_KSM) + u |= 1 << KPF_KSM; + } /* * compound pages: export both head/tail info * they together define a compound page's start/end pos and order */ - if (PageHead(page)) - u |= 1 << KPF_COMPOUND_HEAD; - if (PageTail(page)) + if (page == &folio->page) + u |= kpf_copy_bit(k, KPF_COMPOUND_HEAD, PG_head); + else u |= 1 << KPF_COMPOUND_TAIL; - if (PageHuge(page)) + if (folio_test_hugetlb(folio)) u |= 1 << KPF_HUGE; /* - * PageTransCompound can be true for non-huge compound pages (slab - * pages or pages allocated by drivers with __GFP_COMP) because it - * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon + * We need to check PageLRU/PageAnon * to make sure a given page is a thp, not a non-huge compound page. */ - else if (PageTransCompound(page)) { - struct page *head = compound_head(page); - - if (PageLRU(head) || PageAnon(head)) + else if (folio_test_large(folio)) { + if ((k & (1 << PG_lru)) || is_anon) u |= 1 << KPF_THP; - else if (is_huge_zero_page(head)) { + else if (is_huge_zero_page(&folio->page)) { u |= 1 << KPF_ZERO_PAGE; u |= 1 << KPF_THP; } } else if (is_zero_pfn(page_to_pfn(page))) u |= 1 << KPF_ZERO_PAGE; - /* * Caveats on high order pages: PG_buddy and PG_slab will only be set * on the head page. 
@@ -174,16 +175,17 @@ u64 stable_page_flags(struct page *page) u |= 1 << KPF_OFFLINE; if (PageTable(page)) u |= 1 << KPF_PGTABLE; + if (folio_test_slab(folio)) + u |= 1 << KPF_SLAB; - if (page_is_idle(page)) +#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) + u |= kpf_copy_bit(k, KPF_IDLE, PG_idle); +#else + if (folio_test_idle(folio)) u |= 1 << KPF_IDLE; +#endif u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked); - - u |= kpf_copy_bit(k, KPF_SLAB, PG_slab); - if (PageTail(page) && PageSlab(page)) - u |= 1 << KPF_SLAB; - u |= kpf_copy_bit(k, KPF_ERROR, PG_error); u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty); u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate); @@ -194,7 +196,8 @@ u64 stable_page_flags(struct page *page) u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active); u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim); - if (PageSwapCache(page)) +#define SWAPCACHE ((1 << PG_swapbacked) | (1 << PG_swapcache)) + if ((k & SWAPCACHE) == SWAPCACHE) u |= 1 << KPF_SWAPCACHE; u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked); @@ -202,7 +205,10 @@ u64 stable_page_flags(struct page *page) u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked); #ifdef CONFIG_MEMORY_FAILURE - u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison); + if (u & (1 << KPF_HUGE)) + u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison); + else + u |= kpf_copy_bit(page->flags, KPF_HWPOISON, PG_hwpoison); #endif #ifdef CONFIG_ARCH_USES_PG_UNCACHED @@ -228,7 +234,6 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf, { const unsigned long max_dump_pfn = get_max_dump_pfn(); u64 __user *out = (u64 __user *)buf; - struct page *ppage; unsigned long src = *ppos; unsigned long pfn; ssize_t ret = 0; @@ -245,9 +250,9 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf, * TODO: ZONE_DEVICE support requires to identify * memmaps that were actually initialized. */ - ppage = pfn_to_online_page(pfn); + struct page *page = pfn_to_online_page(pfn); - if (put_user(stable_page_flags(ppage), out)) { + if (put_user(stable_page_flags(page), out)) { ret = -EFAULT; break; } diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 7576025db55d..1540a1481daf 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -351,7 +351,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf); extern struct page *huge_zero_page; extern unsigned long huge_zero_pfn; -static inline bool is_huge_zero_page(struct page *page) +static inline bool is_huge_zero_page(const struct page *page) { return READ_ONCE(huge_zero_page) == page; } @@ -480,7 +480,7 @@ static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) return 0; } -static inline bool is_huge_zero_page(struct page *page) +static inline bool is_huge_zero_page(const struct page *page) { return false; } diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index eaecf544039f..888353c209c0 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -734,7 +734,7 @@ static __always_inline bool PageKsm(const struct page *page) TESTPAGEFLAG_FALSE(Ksm, ksm) #endif -u64 stable_page_flags(struct page *page); +u64 stable_page_flags(const struct page *page); /** * folio_xor_flags_has_waiters - Change some folio flags. 
diff --git a/tools/cgroup/memcg_slabinfo.py b/tools/cgroup/memcg_slabinfo.py index 1d3a90d93fe2..270c28a0d098 100644 --- a/tools/cgroup/memcg_slabinfo.py +++ b/tools/cgroup/memcg_slabinfo.py @@ -146,12 +146,11 @@ def detect_kernel_config(): def for_each_slab(prog): - PGSlab = 1 << prog.constant('PG_slab') - PGHead = 1 << prog.constant('PG_head') + PGSlab = ~prog.constant('PG_slab') for page in for_each_page(prog): try: - if page.flags.value_() & PGSlab: + if page.page_type.value_() == PGSlab: yield cast('struct slab *', page) except FaultError: pass -- cgit From b6dd94596f7fafd162da236b438e31b12d352187 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Tue, 26 Mar 2024 14:11:29 +0800 Subject: mm: make __absent_pages_in_range() as static It's only called in mm/mm_init.c now. Link: https://lkml.kernel.org/r/20240326061134.1055295-4-bhe@redhat.com Signed-off-by: Baoquan He Reviewed-by: Mike Rapoport (IBM) Cc: Mel Gorman Signed-off-by: Andrew Morton --- include/linux/mm.h | 2 -- mm/mm_init.c | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 9c0e441664d4..00a46a5d5c9d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3229,8 +3229,6 @@ static inline unsigned long get_num_physpages(void) */ void free_area_init(unsigned long *max_zone_pfn); unsigned long node_map_pfn_alignment(void); -unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn, - unsigned long end_pfn); extern unsigned long absent_pages_in_range(unsigned long start_pfn, unsigned long end_pfn); extern void get_pfn_range_for_nid(unsigned int nid, diff --git a/mm/mm_init.c b/mm/mm_init.c index 0fe466abde43..19c78b54fc8a 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1144,7 +1144,7 @@ static void __init adjust_zone_range_for_zone_movable(int nid, * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, * then all holes in the requested range will be accounted for. */ -unsigned long __init __absent_pages_in_range(int nid, +static unsigned long __init __absent_pages_in_range(int nid, unsigned long range_start_pfn, unsigned long range_end_pfn) { -- cgit From 5beaee54a324ba1fe307e341ec825d5d099f4091 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 26 Mar 2024 20:28:22 +0000 Subject: mm: add is_huge_zero_folio() This is the folio equivalent of is_huge_zero_page(). It doesn't add any efficiency, but it does prevent the caller from passing a tail page and getting confused when the predicate returns false. 
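For illustration, a sketch of the pitfall the new predicate avoids (check_zero() is hypothetical; both helpers exist at this point in the series):

static bool check_zero(struct page *page)
{
	/*
	 * Old interface: returns false for a tail page of the huge zero
	 * page, because the comparison is against the head page only.
	 */
	if (is_huge_zero_page(page))
		return true;

	/*
	 * New interface: page_folio() always resolves to the head, so the
	 * result does not depend on which subpage the caller holds.
	 */
	return is_huge_zero_folio(page_folio(page));
}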
Link: https://lkml.kernel.org/r/20240326202833.523759-3-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- fs/proc/page.c | 2 +- include/linux/huge_mm.h | 10 ++++++++++ mm/huge_memory.c | 6 +++--- mm/mempolicy.c | 2 +- mm/swap.c | 2 +- mm/swap_state.c | 2 +- mm/userfaultfd.c | 2 +- 7 files changed, 18 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/fs/proc/page.c b/fs/proc/page.c index 05120263af2a..2fb64bdb64eb 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -155,7 +155,7 @@ u64 stable_page_flags(const struct page *page) else if (folio_test_large(folio)) { if ((k & (1 << PG_lru)) || is_anon) u |= 1 << KPF_THP; - else if (is_huge_zero_page(&folio->page)) { + else if (is_huge_zero_folio(folio)) { u |= 1 << KPF_ZERO_PAGE; u |= 1 << KPF_THP; } diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 1540a1481daf..600c6008262b 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -356,6 +356,11 @@ static inline bool is_huge_zero_page(const struct page *page) return READ_ONCE(huge_zero_page) == page; } +static inline bool is_huge_zero_folio(const struct folio *folio) +{ + return READ_ONCE(huge_zero_page) == &folio->page; +} + static inline bool is_huge_zero_pmd(pmd_t pmd) { return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd); @@ -485,6 +490,11 @@ static inline bool is_huge_zero_page(const struct page *page) return false; } +static inline bool is_huge_zero_folio(const struct folio *folio) +{ + return false; +} + static inline bool is_huge_zero_pmd(pmd_t pmd) { return false; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 75ad971ca45e..5c043c7b5062 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -789,12 +789,12 @@ struct deferred_split *get_deferred_split_queue(struct folio *folio) } #endif -static inline bool is_transparent_hugepage(struct folio *folio) +static inline bool is_transparent_hugepage(const struct folio *folio) { if (!folio_test_large(folio)) return false; - return is_huge_zero_page(&folio->page) || + return is_huge_zero_folio(folio) || folio_test_large_rmappable(folio); } @@ -3085,7 +3085,7 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, } - is_hzp = is_huge_zero_page(&folio->page); + is_hzp = is_huge_zero_folio(folio); if (is_hzp) { pr_warn_ratelimited("Called split_huge_page for huge zero page\n"); return -EBUSY; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 913cff5da5a3..5743028a63a5 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -510,7 +510,7 @@ static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk) return; } folio = pfn_folio(pmd_pfn(*pmd)); - if (is_huge_zero_page(&folio->page)) { + if (is_huge_zero_folio(folio)) { walk->action = ACTION_CONTINUE; return; } diff --git a/mm/swap.c b/mm/swap.c index 500a09a48dfd..f72364e92d5f 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -985,7 +985,7 @@ void folios_put_refs(struct folio_batch *folios, unsigned int *refs) struct folio *folio = folios->folios[i]; unsigned int nr_refs = refs ? 
refs[i] : 1; - if (is_huge_zero_page(&folio->page)) + if (is_huge_zero_folio(folio)) continue; if (folio_is_zone_device(folio)) { diff --git a/mm/swap_state.c b/mm/swap_state.c index bfc7e8c58a6d..2deac23633cd 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -301,7 +301,7 @@ void free_page_and_swap_cache(struct page *page) struct folio *folio = page_folio(page); free_swap_cache(folio); - if (!is_huge_zero_page(page)) + if (!is_huge_zero_folio(folio)) folio_put(folio); } diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 3c3539c573e7..a0ec14553fbe 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -1664,7 +1664,7 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start, !pmd_none(dst_pmdval)) { struct folio *folio = pfn_folio(pmd_pfn(*src_pmd)); - if (!folio || (!is_huge_zero_page(&folio->page) && + if (!folio || (!is_huge_zero_folio(folio) && !PageAnonExclusive(&folio->page))) { spin_unlock(ptl); err = -EBUSY; -- cgit From e06d03d5590ae1c257b8aa2cfbfe6765e0755c14 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 26 Mar 2024 20:28:23 +0000 Subject: mm: add pmd_folio() Convert directly from a pmd to a folio without going through another representation first. For now this is just a slightly shorter way to write it, but it might end up being more efficient later. Link: https://lkml.kernel.org/r/20240326202833.523759-4-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- include/linux/pgtable.h | 2 ++ mm/huge_memory.c | 6 +++--- mm/madvise.c | 2 +- mm/mempolicy.c | 2 +- mm/mlock.c | 2 +- mm/userfaultfd.c | 2 +- 6 files changed, 9 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 600e17d03659..09c85c7bf9c2 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -50,6 +50,8 @@ #define pmd_pgtable(pmd) pmd_page(pmd) #endif +#define pmd_folio(pmd) page_folio(pmd_page(pmd)) + /* * A page table page can be thought of an array like this: pXd_t[PTRS_PER_PxD] * diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 5c043c7b5062..712263e3b1f6 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1816,7 +1816,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, goto out; } - folio = pfn_folio(pmd_pfn(orig_pmd)); + folio = pmd_folio(orig_pmd); /* * If other processes are mapping this folio, we couldn't discard * the folio unless they all do MADV_FREE so let's skip the folio. @@ -2086,7 +2086,7 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, if (pmd_protnone(*pmd)) goto unlock; - folio = page_folio(pmd_page(*pmd)); + folio = pmd_folio(*pmd); toptier = node_is_toptier(folio_nid(folio)); /* * Skip scanning top tier node if normal numa @@ -2663,7 +2663,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, * It's safe to call pmd_page when folio is set because it's * guaranteed that pmd is present. 
*/ - if (folio && folio != page_folio(pmd_page(*pmd))) + if (folio && folio != pmd_folio(*pmd)) goto out; __split_huge_pmd_locked(vma, pmd, range.start, freeze); } diff --git a/mm/madvise.c b/mm/madvise.c index 7625830d6ae9..1f77a51baaac 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -363,7 +363,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, goto huge_unlock; } - folio = pfn_folio(pmd_pfn(orig_pmd)); + folio = pmd_folio(orig_pmd); /* Do not interfere with other mappings of this folio */ if (folio_likely_mapped_shared(folio)) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 5743028a63a5..aec756ae5637 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -509,7 +509,7 @@ static void queue_folios_pmd(pmd_t *pmd, struct mm_walk *walk) qp->nr_failed++; return; } - folio = pfn_folio(pmd_pfn(*pmd)); + folio = pmd_folio(*pmd); if (is_huge_zero_folio(folio)) { walk->action = ACTION_CONTINUE; return; diff --git a/mm/mlock.c b/mm/mlock.c index 1ed2f2ab37cd..30b51cdea89d 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -378,7 +378,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, goto out; if (is_huge_zero_pmd(*pmd)) goto out; - folio = page_folio(pmd_page(*pmd)); + folio = pmd_folio(*pmd); if (vma->vm_flags & VM_LOCKED) mlock_folio(folio); else diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index a0ec14553fbe..b70618e8dcd2 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -1662,7 +1662,7 @@ ssize_t move_pages(struct userfaultfd_ctx *ctx, unsigned long dst_start, /* Check if we can move the pmd without splitting it. */ if (move_splits_huge_pmd(dst_addr, src_addr, src_start + len) || !pmd_none(dst_pmdval)) { - struct folio *folio = pfn_folio(pmd_pfn(*src_pmd)); + struct folio *folio = pmd_folio(*src_pmd); if (!folio || (!is_huge_zero_folio(folio) && !PageAnonExclusive(&folio->page))) { -- cgit From 5691753d73a233a97c13f7d389f15d20596d062d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 26 Mar 2024 20:28:25 +0000 Subject: mm: convert huge_zero_page to huge_zero_folio With all callers of is_huge_zero_page() converted, we can now switch the huge_zero_page itself from being a compound page to a folio. 
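For illustration, a caller-side sketch of the folio-based mm-level API (map_huge_zero() is hypothetical and not part of the patch):

static int map_huge_zero(struct mm_struct *mm)
{
	struct folio *zero_folio = mm_get_huge_zero_folio(mm);

	if (!zero_folio)
		return -ENOMEM;	/* allocating the huge zero folio failed */

	VM_WARN_ON_ONCE(!is_huge_zero_folio(zero_folio));

	/* ... install the PMD mapping here ... */
	return 0;
}

Callers that still need a struct page can take &zero_folio->page; the per-mm reference is still dropped at __mmput() time, as before.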
Link: https://lkml.kernel.org/r/20240326202833.523759-6-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 21 ++++++++------------- mm/huge_memory.c | 28 ++++++++++++++-------------- 2 files changed, 22 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 600c6008262b..7ba59ba36354 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -348,17 +348,12 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf); -extern struct page *huge_zero_page; +extern struct folio *huge_zero_folio; extern unsigned long huge_zero_pfn; -static inline bool is_huge_zero_page(const struct page *page) -{ - return READ_ONCE(huge_zero_page) == page; -} - static inline bool is_huge_zero_folio(const struct folio *folio) { - return READ_ONCE(huge_zero_page) == &folio->page; + return READ_ONCE(huge_zero_folio) == folio; } static inline bool is_huge_zero_pmd(pmd_t pmd) @@ -371,9 +366,14 @@ static inline bool is_huge_zero_pud(pud_t pud) return false; } -struct page *mm_get_huge_zero_page(struct mm_struct *mm); +struct folio *mm_get_huge_zero_folio(struct mm_struct *mm); void mm_put_huge_zero_page(struct mm_struct *mm); +static inline struct page *mm_get_huge_zero_page(struct mm_struct *mm) +{ + return &mm_get_huge_zero_folio(mm)->page; +} + #define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot)) static inline bool thp_migration_supported(void) @@ -485,11 +485,6 @@ static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) return 0; } -static inline bool is_huge_zero_page(const struct page *page) -{ - return false; -} - static inline bool is_huge_zero_folio(const struct folio *folio) { return false; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 712263e3b1f6..6d510d635b1d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -74,7 +74,7 @@ static unsigned long deferred_split_scan(struct shrinker *shrink, struct shrink_control *sc); static atomic_t huge_zero_refcount; -struct page *huge_zero_page __read_mostly; +struct folio *huge_zero_folio __read_mostly; unsigned long huge_zero_pfn __read_mostly = ~0UL; unsigned long huge_anon_orders_always __read_mostly; unsigned long huge_anon_orders_madvise __read_mostly; @@ -192,24 +192,24 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, static bool get_huge_zero_page(void) { - struct page *zero_page; + struct folio *zero_folio; retry: if (likely(atomic_inc_not_zero(&huge_zero_refcount))) return true; - zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE, + zero_folio = folio_alloc((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE, HPAGE_PMD_ORDER); - if (!zero_page) { + if (!zero_folio) { count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED); return false; } preempt_disable(); - if (cmpxchg(&huge_zero_page, NULL, zero_page)) { + if (cmpxchg(&huge_zero_folio, NULL, zero_folio)) { preempt_enable(); - __free_pages(zero_page, compound_order(zero_page)); + folio_put(zero_folio); goto retry; } - WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page)); + WRITE_ONCE(huge_zero_pfn, folio_pfn(zero_folio)); /* We take additional reference here. 
It will be put back by shrinker */ atomic_set(&huge_zero_refcount, 2); @@ -227,10 +227,10 @@ static void put_huge_zero_page(void) BUG_ON(atomic_dec_and_test(&huge_zero_refcount)); } -struct page *mm_get_huge_zero_page(struct mm_struct *mm) +struct folio *mm_get_huge_zero_folio(struct mm_struct *mm) { if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) - return READ_ONCE(huge_zero_page); + return READ_ONCE(huge_zero_folio); if (!get_huge_zero_page()) return NULL; @@ -238,7 +238,7 @@ struct page *mm_get_huge_zero_page(struct mm_struct *mm) if (test_and_set_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) put_huge_zero_page(); - return READ_ONCE(huge_zero_page); + return READ_ONCE(huge_zero_folio); } void mm_put_huge_zero_page(struct mm_struct *mm) @@ -258,10 +258,10 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink, struct shrink_control *sc) { if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) { - struct page *zero_page = xchg(&huge_zero_page, NULL); - BUG_ON(zero_page == NULL); + struct folio *zero_folio = xchg(&huge_zero_folio, NULL); + BUG_ON(zero_folio == NULL); WRITE_ONCE(huge_zero_pfn, ~0UL); - __free_pages(zero_page, compound_order(zero_page)); + folio_put(zero_folio); return HPAGE_PMD_NR; } @@ -1340,7 +1340,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, * since we already have a zero page to copy. It just takes a * reference. */ - mm_get_huge_zero_page(dst_mm); + mm_get_huge_zero_folio(dst_mm); goto out_zero_page; } -- cgit From 632230ff193954b514c908fdb45320f64ac9dc72 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 26 Mar 2024 20:28:28 +0000 Subject: mm: rename mm_put_huge_zero_page to mm_put_huge_zero_folio Also remove mm_get_huge_zero_page() now it has no users. Link: https://lkml.kernel.org/r/20240326202833.523759-9-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 9 ++------- kernel/fork.c | 2 +- mm/huge_memory.c | 2 +- 3 files changed, 4 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 7ba59ba36354..9d11e886aa9a 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -367,12 +367,7 @@ static inline bool is_huge_zero_pud(pud_t pud) } struct folio *mm_get_huge_zero_folio(struct mm_struct *mm); -void mm_put_huge_zero_page(struct mm_struct *mm); - -static inline struct page *mm_get_huge_zero_page(struct mm_struct *mm) -{ - return &mm_get_huge_zero_folio(mm)->page; -} +void mm_put_huge_zero_folio(struct mm_struct *mm); #define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot)) @@ -500,7 +495,7 @@ static inline bool is_huge_zero_pud(pud_t pud) return false; } -static inline void mm_put_huge_zero_page(struct mm_struct *mm) +static inline void mm_put_huge_zero_folio(struct mm_struct *mm) { return; } diff --git a/kernel/fork.c b/kernel/fork.c index aebb3e6c96dc..99076dbe27d8 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1344,7 +1344,7 @@ static inline void __mmput(struct mm_struct *mm) ksm_exit(mm); khugepaged_exit(mm); /* must run before exit_mmap */ exit_mmap(mm); - mm_put_huge_zero_page(mm); + mm_put_huge_zero_folio(mm); set_mm_exe_file(mm, NULL); if (!list_empty(&mm->mmlist)) { spin_lock(&mmlist_lock); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 098b8ad4bcc7..c4820cd4749e 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -241,7 +241,7 @@ struct folio *mm_get_huge_zero_folio(struct mm_struct *mm) return 
READ_ONCE(huge_zero_folio); } -void mm_put_huge_zero_page(struct mm_struct *mm) +void mm_put_huge_zero_folio(struct mm_struct *mm) { if (test_bit(MMF_HUGE_ZERO_PAGE, &mm->flags)) put_huge_zero_page(); -- cgit From 24334e78e8e3ca6c8e51dad67d779b1ed40988a6 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Wed, 27 Mar 2024 11:23:21 -0400 Subject: mm/hugetlb: declare hugetlbfs_pagecache_present() non-static It will be used outside hugetlb.c soon. Link: https://lkml.kernel.org/r/20240327152332.950956-3-peterx@redhat.com Signed-off-by: Peter Xu Tested-by: Ryan Roberts Cc: Andrea Arcangeli Cc: Andrew Jones Cc: Aneesh Kumar K.V (IBM) Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: James Houghton Cc: Jason Gunthorpe Cc: John Hubbard Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: "Mike Rapoport (IBM)" Cc: Muchun Song Cc: Rik van Riel Cc: Vlastimil Babka Cc: Yang Shi Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 9 +++++++++ mm/hugetlb.c | 4 ++-- 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 03fc6d625068..a7988c78d69f 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -174,6 +174,9 @@ u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx); pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, pud_t *pud); +bool hugetlbfs_pagecache_present(struct hstate *h, + struct vm_area_struct *vma, + unsigned long address); struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage); @@ -1228,6 +1231,12 @@ static inline void hugetlb_register_node(struct node *node) static inline void hugetlb_unregister_node(struct node *node) { } + +static inline bool hugetlbfs_pagecache_present( + struct hstate *h, struct vm_area_struct *vma, unsigned long address) +{ + return false; +} #endif /* CONFIG_HUGETLB_PAGE */ static inline spinlock_t *huge_pte_lock(struct hstate *h, diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 88bb38df1701..740435567be8 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -6113,8 +6113,8 @@ out_release_old: /* * Return whether there is a pagecache page to back given address within VMA. */ -static bool hugetlbfs_pagecache_present(struct hstate *h, - struct vm_area_struct *vma, unsigned long address) +bool hugetlbfs_pagecache_present(struct hstate *h, + struct vm_area_struct *vma, unsigned long address) { struct address_space *mapping = vma->vm_file->f_mapping; pgoff_t idx = linear_page_index(vma, address); -- cgit From b979db1611a63b207dbc47706de989ac113ea898 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Wed, 27 Mar 2024 11:23:22 -0400 Subject: mm: make HPAGE_PXD_* macros even if !THP These macros can be helpful when we plan to merge hugetlb code into generic code. Move them out and define them as long as PGTABLE_HAS_HUGE_LEAVES is selected, because there are systems that only define HUGETLB_PAGE not THP. One note here is HPAGE_PMD_SHIFT must be defined even if PMD_SHIFT is not defined (e.g. !CONFIG_MMU case); it (or in other forms, like HPAGE_PMD_NR) is already used in lots of common codes without ifdef guards. Use the old trick to let complations work. Here we only need to differenciate HPAGE_PXD_SHIFT definitions. All the rest macros will be defined based on it. When at it, move HPAGE_PMD_NR / HPAGE_PMD_ORDER over together. 
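For reference, an approximate sketch of the resulting definitions in include/linux/huge_mm.h (not a verbatim copy of the patch):

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT		PMD_SHIFT
#define HPAGE_PUD_SHIFT		PUD_SHIFT
#else
/* The "old trick": keep users compiling even when PMD_SHIFT is undefined. */
#define HPAGE_PMD_SHIFT		({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SHIFT		({ BUILD_BUG(); 0; })
#endif

#define HPAGE_PMD_ORDER		(HPAGE_PMD_SHIFT - PAGE_SHIFT)
#define HPAGE_PMD_NR		(1 << HPAGE_PMD_ORDER)
#define HPAGE_PMD_SIZE		((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK		(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SIZE		((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK		(~(HPAGE_PUD_SIZE - 1))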
Link: https://lkml.kernel.org/r/20240327152332.950956-4-peterx@redhat.com Signed-off-by: Peter Xu Tested-by: Ryan Roberts Cc: Andrea Arcangeli Cc: Andrew Jones Cc: Aneesh Kumar K.V (IBM) Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: James Houghton Cc: Jason Gunthorpe Cc: John Hubbard Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: "Mike Rapoport (IBM)" Cc: Muchun Song Cc: Rik van Riel Cc: Vlastimil Babka Cc: Yang Shi Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 9d11e886aa9a..ff72ee19a125 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -64,9 +64,6 @@ ssize_t single_hugepage_flag_show(struct kobject *kobj, enum transparent_hugepage_flag flag); extern struct kobj_attribute shmem_enabled_attr; -#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT) -#define HPAGE_PMD_NR (1< Date: Wed, 27 Mar 2024 11:23:23 -0400 Subject: mm: introduce vma_pgtable_walk_{begin|end}() Introduce per-vma begin()/end() helpers for pgtable walks. This is a preparation work to merge hugetlb pgtable walkers with generic mm. The helpers need to be called before and after a pgtable walk, will start to be needed if the pgtable walker code supports hugetlb pages. It's a hook point for any type of VMA, but for now only hugetlb uses it to stablize the pgtable pages from getting away (due to possible pmd unsharing). Link: https://lkml.kernel.org/r/20240327152332.950956-5-peterx@redhat.com Signed-off-by: Peter Xu Reviewed-by: Christoph Hellwig Reviewed-by: Muchun Song Tested-by: Ryan Roberts Cc: Andrea Arcangeli Cc: Andrew Jones Cc: Aneesh Kumar K.V (IBM) Cc: Axel Rasmussen Cc: Christophe Leroy Cc: David Hildenbrand Cc: James Houghton Cc: Jason Gunthorpe Cc: John Hubbard Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: "Mike Rapoport (IBM)" Cc: Rik van Riel Cc: Vlastimil Babka Cc: Yang Shi Signed-off-by: Andrew Morton --- include/linux/mm.h | 3 +++ mm/memory.c | 12 ++++++++++++ 2 files changed, 15 insertions(+) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 00a46a5d5c9d..3dda0dbcae14 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -4230,4 +4230,7 @@ static inline bool pfn_is_unaccepted_memory(unsigned long pfn) return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE); } +void vma_pgtable_walk_begin(struct vm_area_struct *vma); +void vma_pgtable_walk_end(struct vm_area_struct *vma); + #endif /* _LINUX_MM_H */ diff --git a/mm/memory.c b/mm/memory.c index 62ee4a15092a..0b92336bcebd 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -6378,3 +6378,15 @@ void ptlock_free(struct ptdesc *ptdesc) kmem_cache_free(page_ptl_cachep, ptdesc->ptl); } #endif + +void vma_pgtable_walk_begin(struct vm_area_struct *vma) +{ + if (is_vm_hugetlb_page(vma)) + hugetlb_vma_lock_read(vma); +} + +void vma_pgtable_walk_end(struct vm_area_struct *vma) +{ + if (is_vm_hugetlb_page(vma)) + hugetlb_vma_unlock_read(vma); +} -- cgit From 35a76f5c0863fc6b570bf02ab6dbf4cd65d1e001 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Wed, 27 Mar 2024 11:23:24 -0400 Subject: mm/arch: provide pud_pfn() fallback The comment in the code explains the reasons. We took a different approach comparing to pmd_pfn() by providing a fallback function. 
Another option is to provide some lower level config options (compare to HUGETLB_PAGE or THP) to identify which layer an arch can support for such huge mappings. However that can be an overkill. [peterx@redhat.com: fix loongson defconfig] Link: https://lkml.kernel.org/r/20240403013249.1418299-4-peterx@redhat.com Link: https://lkml.kernel.org/r/20240327152332.950956-6-peterx@redhat.com Signed-off-by: Peter Xu Reviewed-by: Jason Gunthorpe Tested-by: Ryan Roberts Cc: Mike Rapoport (IBM) Cc: Matthew Wilcox Cc: Andrea Arcangeli Cc: Andrew Jones Cc: Aneesh Kumar K.V (IBM) Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: James Houghton Cc: John Hubbard Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Michael Ellerman Cc: Muchun Song Cc: Rik van Riel Cc: Vlastimil Babka Cc: Yang Shi Signed-off-by: Andrew Morton --- arch/riscv/include/asm/pgtable.h | 1 + arch/s390/include/asm/pgtable.h | 1 + arch/sparc/include/asm/pgtable_64.h | 1 + arch/x86/include/asm/pgtable.h | 1 + include/linux/pgtable.h | 14 ++++++++++++++ 5 files changed, 18 insertions(+) (limited to 'include/linux') diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 9f8ea0e33eb1..661b2b4fe758 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -648,6 +648,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd) #define __pud_to_phys(pud) (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT) +#define pud_pfn pud_pfn static inline unsigned long pud_pfn(pud_t pud) { return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT); diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 60950e7a25f5..2cb2a2e7b34b 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1405,6 +1405,7 @@ static inline unsigned long pud_deref(pud_t pud) return (unsigned long)__va(pud_val(pud) & origin_mask); } +#define pud_pfn pud_pfn static inline unsigned long pud_pfn(pud_t pud) { return __pa(pud_deref(pud)) >> PAGE_SHIFT; diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 4d1bafaba942..26efc9bb644a 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -875,6 +875,7 @@ static inline bool pud_leaf(pud_t pud) return pte_val(pte) & _PAGE_PMD_HUGE; } +#define pud_pfn pud_pfn static inline unsigned long pud_pfn(pud_t pud) { pte_t pte = __pte(pud_val(pud)); diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index cefc7a84f7a4..273f7557218c 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -234,6 +234,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd) return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT; } +#define pud_pfn pud_pfn static inline unsigned long pud_pfn(pud_t pud) { phys_addr_t pfn = pud_val(pud); diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 09c85c7bf9c2..f108c7a3c1d9 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1819,6 +1819,20 @@ typedef unsigned int pgtbl_mod_mask; #define pte_leaf_size(x) PAGE_SIZE #endif +/* + * We always define pmd_pfn for all archs as it's used in lots of generic + * code. Now it happens too for pud_pfn (and can happen for larger + * mappings too in the future; we're not there yet). Instead of defining + * it for all archs (like pmd_pfn), provide a fallback. + * + * Note that returning 0 here means any arch that didn't define this can + * get severely wrong when it hits a real pud leaf. 
It's arch's + * responsibility to properly define it when a huge pud is possible. + */ +#ifndef pud_pfn +#define pud_pfn(x) 0 +#endif + /* * Some architectures have MMUs that are configurable or selectable at boot * time. These lead to variable PTRS_PER_x. For statically allocated arrays it -- cgit From 1b1676180246232308885c4f37fee01cf898fdb2 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Wed, 27 Mar 2024 11:23:29 -0400 Subject: mm/gup: handle huge pud for follow_pud_mask() Teach follow_pud_mask() to be able to handle normal PUD pages like hugetlb. Rename follow_devmap_pud() to follow_huge_pud() so that it can process either huge devmap or hugetlb. Move it out of TRANSPARENT_HUGEPAGE_PUD and and huge_memory.c (which relies on CONFIG_THP). Switch to pud_leaf() to detect both cases in the slow gup. In the new follow_huge_pud(), taking care of possible CoR for hugetlb if necessary. touch_pud() needs to be moved out of huge_memory.c to be accessable from gup.c even if !THP. Since at it, optimize the non-present check by adding a pud_present() early check before taking the pgtable lock, failing the follow_page() early if PUD is not present: that is required by both devmap or hugetlb. Use pud_huge() to also cover the pud_devmap() case. One more trivial thing to mention is, introduce "pud_t pud" in the code paths along the way, so the code doesn't dereference *pudp multiple time. Not only because that looks less straightforward, but also because if the dereference really happened, it's not clear whether there can be race to see different *pudp values when it's being modified at the same time. Setting ctx->page_mask properly for a PUD entry. As a side effect, this patch should also be able to optimize devmap GUP on PUD to be able to jump over the whole PUD range, but not yet verified. Hugetlb already can do so prior to this patch. Link: https://lkml.kernel.org/r/20240327152332.950956-11-peterx@redhat.com Signed-off-by: Peter Xu Reviewed-by: Jason Gunthorpe Tested-by: Ryan Roberts Cc: Andrea Arcangeli Cc: Andrew Jones Cc: Aneesh Kumar K.V (IBM) Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: James Houghton Cc: John Hubbard Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: "Mike Rapoport (IBM)" Cc: Muchun Song Cc: Rik van Riel Cc: Vlastimil Babka Cc: Yang Shi Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 8 ------ mm/gup.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++--- mm/huge_memory.c | 47 ++------------------------------- mm/internal.h | 2 ++ 4 files changed, 71 insertions(+), 56 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index ff72ee19a125..ab26d9e65ec3 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -351,8 +351,6 @@ static inline bool folio_test_pmd_mappable(struct folio *folio) struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap); -struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, - pud_t *pud, int flags, struct dev_pagemap **pgmap); vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf); @@ -507,12 +505,6 @@ static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma, return NULL; } -static inline struct page *follow_devmap_pud(struct vm_area_struct *vma, - unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap) -{ - return NULL; -} - static inline bool thp_migration_supported(void) { return false; diff --git a/mm/gup.c b/mm/gup.c index 39224b5fe62f..2b06d59f2fa3 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -525,6 +525,70 @@ static struct page *no_page_table(struct vm_area_struct *vma, return NULL; } +#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES +static struct page *follow_huge_pud(struct vm_area_struct *vma, + unsigned long addr, pud_t *pudp, + int flags, struct follow_page_context *ctx) +{ + struct mm_struct *mm = vma->vm_mm; + struct page *page; + pud_t pud = *pudp; + unsigned long pfn = pud_pfn(pud); + int ret; + + assert_spin_locked(pud_lockptr(mm, pudp)); + + if ((flags & FOLL_WRITE) && !pud_write(pud)) + return NULL; + + if (!pud_present(pud)) + return NULL; + + pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; + + if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) && + pud_devmap(pud)) { + /* + * device mapped pages can only be returned if the caller + * will manage the page reference count. 
+ * + * At least one of FOLL_GET | FOLL_PIN must be set, so + * assert that here: + */ + if (!(flags & (FOLL_GET | FOLL_PIN))) + return ERR_PTR(-EEXIST); + + if (flags & FOLL_TOUCH) + touch_pud(vma, addr, pudp, flags & FOLL_WRITE); + + ctx->pgmap = get_dev_pagemap(pfn, ctx->pgmap); + if (!ctx->pgmap) + return ERR_PTR(-EFAULT); + } + + page = pfn_to_page(pfn); + + if (!pud_devmap(pud) && !pud_write(pud) && + gup_must_unshare(vma, flags, page)) + return ERR_PTR(-EMLINK); + + ret = try_grab_page(page, flags); + if (ret) + page = ERR_PTR(ret); + else + ctx->page_mask = HPAGE_PUD_NR - 1; + + return page; +} +#else /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */ +static struct page *follow_huge_pud(struct vm_area_struct *vma, + unsigned long addr, pud_t *pudp, + int flags, struct follow_page_context *ctx) +{ + return NULL; +} +#endif /* CONFIG_PGTABLE_HAS_HUGE_LEAVES */ + static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, unsigned int flags) { @@ -760,11 +824,11 @@ static struct page *follow_pud_mask(struct vm_area_struct *vma, pudp = pud_offset(p4dp, address); pud = READ_ONCE(*pudp); - if (pud_none(pud)) + if (!pud_present(pud)) return no_page_table(vma, flags, address); - if (pud_devmap(pud)) { + if (pud_leaf(pud)) { ptl = pud_lock(mm, pudp); - page = follow_devmap_pud(vma, address, pudp, flags, &ctx->pgmap); + page = follow_huge_pud(vma, address, pudp, flags, ctx); spin_unlock(ptl); if (page) return page; diff --git a/mm/huge_memory.c b/mm/huge_memory.c index c4820cd4749e..249318f367e4 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1378,8 +1378,8 @@ out: } #ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD -static void touch_pud(struct vm_area_struct *vma, unsigned long addr, - pud_t *pud, bool write) +void touch_pud(struct vm_area_struct *vma, unsigned long addr, + pud_t *pud, bool write) { pud_t _pud; @@ -1391,49 +1391,6 @@ static void touch_pud(struct vm_area_struct *vma, unsigned long addr, update_mmu_cache_pud(vma, addr, pud); } -struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, - pud_t *pud, int flags, struct dev_pagemap **pgmap) -{ - unsigned long pfn = pud_pfn(*pud); - struct mm_struct *mm = vma->vm_mm; - struct page *page; - int ret; - - assert_spin_locked(pud_lockptr(mm, pud)); - - if (flags & FOLL_WRITE && !pud_write(*pud)) - return NULL; - - if (pud_present(*pud) && pud_devmap(*pud)) - /* pass */; - else - return NULL; - - if (flags & FOLL_TOUCH) - touch_pud(vma, addr, pud, flags & FOLL_WRITE); - - /* - * device mapped pages can only be returned if the - * caller will manage the page reference count. 
- * - * At least one of FOLL_GET | FOLL_PIN must be set, so assert that here: - */ - if (!(flags & (FOLL_GET | FOLL_PIN))) - return ERR_PTR(-EEXIST); - - pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; - *pgmap = get_dev_pagemap(pfn, *pgmap); - if (!*pgmap) - return ERR_PTR(-EFAULT); - page = pfn_to_page(pfn); - - ret = try_grab_page(page, flags); - if (ret) - page = ERR_PTR(ret); - - return page; -} - int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, unsigned long addr, struct vm_area_struct *vma) diff --git a/mm/internal.h b/mm/internal.h index 8e11f7b2da21..d06072f06a8d 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1112,6 +1112,8 @@ int __must_check try_grab_page(struct page *page, unsigned int flags); /* * mm/huge_memory.c */ +void touch_pud(struct vm_area_struct *vma, unsigned long addr, + pud_t *pud, bool write); struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags); -- cgit From 9cb28da54643ad464c47585cd5866c30b0218e67 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Wed, 27 Mar 2024 11:23:32 -0400 Subject: mm/gup: handle hugetlb in the generic follow_page_mask code Now follow_page() is ready to handle hugetlb pages in whatever form, and over all architectures. Switch to the generic code path. Time to retire hugetlb_follow_page_mask(), following the previous retirement of follow_hugetlb_page() in 4849807114b8. There may be a slight difference of how the loops run when processing slow GUP over a large hugetlb range on cont_pte/cont_pmd supported archs: each loop of __get_user_pages() will resolve one pgtable entry with the patch applied, rather than relying on the size of hugetlb hstate, the latter may cover multiple entries in one loop. A quick performance test on an aarch64 VM on M1 chip shows 15% degrade over a tight loop of slow gup after the path switched. That shouldn't be a problem because slow-gup should not be a hot path for GUP in general: when page is commonly present, fast-gup will already succeed, while when the page is indeed missing and require a follow up page fault, the slow gup degrade will probably buried in the fault paths anyway. It also explains why slow gup for THP used to be very slow before 57edfcfd3419 ("mm/gup: accelerate thp gup even for "pages != NULL"") lands, the latter not part of a performance analysis but a side benefit. If the performance will be a concern, we can consider handle CONT_PTE in follow_page(). Before that is justified to be necessary, keep everything clean and simple. Link: https://lkml.kernel.org/r/20240327152332.950956-14-peterx@redhat.com Signed-off-by: Peter Xu Reviewed-by: Jason Gunthorpe Tested-by: Ryan Roberts Cc: Andrea Arcangeli Cc: Andrew Jones Cc: Aneesh Kumar K.V (IBM) Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: James Houghton Cc: John Hubbard Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Michael Ellerman Cc: "Mike Rapoport (IBM)" Cc: Muchun Song Cc: Rik van Riel Cc: Vlastimil Babka Cc: Yang Shi Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 7 ----- mm/gup.c | 15 ++++------- mm/hugetlb.c | 71 ------------------------------------------------- 3 files changed, 5 insertions(+), 88 deletions(-) (limited to 'include/linux') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index a7988c78d69f..3f3e62880279 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -328,13 +328,6 @@ static inline void hugetlb_zap_end( { } -static inline struct page *hugetlb_follow_page_mask( - struct vm_area_struct *vma, unsigned long address, unsigned int flags, - unsigned int *page_mask) -{ - BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE*/ -} - static inline int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *dst_vma, diff --git a/mm/gup.c b/mm/gup.c index a572a1169aa7..fbf80e16bc94 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1132,18 +1132,11 @@ static struct page *follow_page_mask(struct vm_area_struct *vma, { pgd_t *pgd; struct mm_struct *mm = vma->vm_mm; + struct page *page; - ctx->page_mask = 0; - - /* - * Call hugetlb_follow_page_mask for hugetlb vmas as it will use - * special hugetlb page table walking code. This eliminates the - * need to check for hugetlb entries in the general walking code. - */ - if (is_vm_hugetlb_page(vma)) - return hugetlb_follow_page_mask(vma, address, flags, - &ctx->page_mask); + vma_pgtable_walk_begin(vma); + ctx->page_mask = 0; pgd = pgd_offset(mm, address); if (unlikely(is_hugepd(__hugepd(pgd_val(*pgd))))) @@ -1154,6 +1147,8 @@ static struct page *follow_page_mask(struct vm_area_struct *vma, else page = follow_p4d_mask(vma, address, pgd, flags, ctx); + vma_pgtable_walk_end(vma); + return page; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 740435567be8..8af2fd48f785 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -6876,77 +6876,6 @@ out_release_nounlock: } #endif /* CONFIG_USERFAULTFD */ -struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma, - unsigned long address, unsigned int flags, - unsigned int *page_mask) -{ - struct hstate *h = hstate_vma(vma); - struct mm_struct *mm = vma->vm_mm; - unsigned long haddr = address & huge_page_mask(h); - struct page *page = NULL; - spinlock_t *ptl; - pte_t *pte, entry; - int ret; - - hugetlb_vma_lock_read(vma); - pte = hugetlb_walk(vma, haddr, huge_page_size(h)); - if (!pte) - goto out_unlock; - - ptl = huge_pte_lock(h, mm, pte); - entry = huge_ptep_get(pte); - if (pte_present(entry)) { - page = pte_page(entry); - - if (!huge_pte_write(entry)) { - if (flags & FOLL_WRITE) { - page = NULL; - goto out; - } - - if (gup_must_unshare(vma, flags, page)) { - /* Tell the caller to do unsharing */ - page = ERR_PTR(-EMLINK); - goto out; - } - } - - page = nth_page(page, ((address & ~huge_page_mask(h)) >> PAGE_SHIFT)); - - /* - * Note that page may be a sub-page, and with vmemmap - * optimizations the page struct may be read only. - * try_grab_page() will increase the ref count on the - * head page, so this will be OK. - * - * try_grab_page() should always be able to get the page here, - * because we hold the ptl lock and have verified pte_present(). 
- */ - ret = try_grab_page(page, flags); - - if (WARN_ON_ONCE(ret)) { - page = ERR_PTR(ret); - goto out; - } - - *page_mask = (1U << huge_page_order(h)) - 1; - } -out: - spin_unlock(ptl); -out_unlock: - hugetlb_vma_unlock_read(vma); - - /* - * Fixup retval for dump requests: if pagecache doesn't exist, - * don't try to allocate a new page but just skip it. - */ - if (!page && (flags & FOLL_DUMP) && - !hugetlbfs_pagecache_present(h, vma, address)) - page = ERR_PTR(-EFAULT); - - return page; -} - long hugetlb_change_protection(struct vm_area_struct *vma, unsigned long address, unsigned long end, pgprot_t newprot, unsigned long cp_flags) -- cgit From c0bff412e67b781d761e330ff9578aa9ed2be79e Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 2 Apr 2024 21:32:47 -0400 Subject: mm: allow anon exclusive check over hugetlb tail pages PageAnonExclusive() used to forbid tail pages for hugetlbfs, as that used to be called mostly in hugetlb specific paths and the head page was guaranteed. As we move forward towards merging hugetlb paths into generic mm, we may start to pass in tail hugetlb pages (when with cont-pte/cont-pmd huge pages) for such check. Allow it to properly fetch the head, in which case the anon-exclusiveness of the head will always represents the tail page. There's already a sign of it when we look at the GUP-fast which already contain the hugetlb processing altogether: we used to have a specific commit 5805192c7b72 ("mm/gup: handle cont-PTE hugetlb pages correctly in gup_must_unshare() via GUP-fast") covering that area. Now with this more generic change, that can also go away. [akpm@linux-foundation.org: simplify PageAnonExclusive(), per Matthew] Link: https://lkml.kernel.org/r/Zg3u5Sh9EbbYPhaI@casper.infradead.org Link: https://lkml.kernel.org/r/20240403013249.1418299-2-peterx@redhat.com Signed-off-by: Peter Xu Acked-by: David Hildenbrand Cc: Huacai Chen Cc: Jason Gunthorpe Cc: Matthew Wilcox (Oracle) Cc: Nathan Chancellor Cc: Ryan Roberts Cc: WANG Xuerui Signed-off-by: Andrew Morton --- include/linux/page-flags.h | 7 ++++++- mm/internal.h | 10 ---------- 2 files changed, 6 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 888353c209c0..7577fe7debaf 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -1095,7 +1095,12 @@ PAGEFLAG(Isolated, isolated, PF_ANY); static __always_inline int PageAnonExclusive(const struct page *page) { VM_BUG_ON_PGFLAGS(!PageAnon(page), page); - VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); + /* + * HugeTLB stores this information on the head page; THP keeps it per + * page + */ + if (PageHuge(page)) + page = compound_head(page); return test_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); } diff --git a/mm/internal.h b/mm/internal.h index 5933a8e7d2dd..6614ba4ca9de 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1203,16 +1203,6 @@ static inline bool gup_must_unshare(struct vm_area_struct *vma, if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) smp_rmb(); - /* - * During GUP-fast we might not get called on the head page for a - * hugetlb page that is mapped using cont-PTE, because GUP-fast does - * not work with the abstracted hugetlb PTEs that always point at the - * head page. For hugetlb, PageAnonExclusive only applies on the head - * page (as it cannot be partially COW-shared), so lookup the head page. 
- */ - if (unlikely(!PageHead(page) && PageHuge(page))) - page = compound_head(page); - /* * Note that PageKsm() pages cannot be exclusive, and consequently, * cannot get pinned. -- cgit From ba168b52bf8ecb877af294769b0ab32b06bd9293 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 27 Mar 2024 19:06:59 +0000 Subject: mm: use rwsem assertion macros for mmap_lock This slightly strengthens our write assertion when lockdep is disabled. It also downgrades us from BUG_ON to WARN_ON, but I think that's an improvement. I don't think dumping the mm_struct was all that valuable; the call chain is what's important. Link: https://lkml.kernel.org/r/20240327190701.1082560-1-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- include/linux/mmap_lock.h | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index 8d38dcb6d044..de9dc20b01ba 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -60,16 +60,14 @@ static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write) #endif /* CONFIG_TRACING */ -static inline void mmap_assert_locked(struct mm_struct *mm) +static inline void mmap_assert_locked(const struct mm_struct *mm) { - lockdep_assert_held(&mm->mmap_lock); - VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); + rwsem_assert_held(&mm->mmap_lock); } -static inline void mmap_assert_write_locked(struct mm_struct *mm) +static inline void mmap_assert_write_locked(const struct mm_struct *mm) { - lockdep_assert_held_write(&mm->mmap_lock); - VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm); + rwsem_assert_held_write(&mm->mmap_lock); } #ifdef CONFIG_PER_VMA_LOCK -- cgit From 07db63a2161a66a2198bc20c7580dd95af887ff4 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 27 Mar 2024 18:54:29 +0000 Subject: filemap: remove __set_page_dirty() All callers have been converted to use folios; remove this wrapper. Link: https://lkml.kernel.org/r/20240327185447.1076689-1-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- include/linux/pagemap.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 35636e67e2e1..ec8b83928c30 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -1147,11 +1147,6 @@ void folio_end_writeback(struct folio *folio); void wait_for_stable_page(struct page *page); void folio_wait_stable(struct folio *folio); void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn); -static inline void __set_page_dirty(struct page *page, - struct address_space *mapping, int warn) -{ - __folio_mark_dirty(page_folio(page), mapping, warn); -} void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb); void __folio_cancel_dirty(struct folio *folio); static inline void folio_cancel_dirty(struct folio *folio) -- cgit From 3b612c8f069fecb84af277c8b90e14832749ceca Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 27 Mar 2024 15:35:48 +0100 Subject: mm: optimize CONFIG_PER_VMA_LOCK member placement in vm_area_struct Currently, we end up wasting some memory in each vm_area_struct. Pahole states that: [...] 
int vm_lock_seq; /* 40 4 */ /* XXX 4 bytes hole, try to pack */ struct vma_lock * vm_lock; /* 48 8 */ bool detached; /* 56 1 */ /* XXX 7 bytes hole, try to pack */ [...] Let's reduce the holes and memory wastage by moving the bool: [...] bool detached; /* 40 1 */ /* XXX 3 bytes hole, try to pack */ int vm_lock_seq; /* 44 4 */ struct vma_lock * vm_lock; /* 48 8 */ [...] Effectively shrinking the vm_area_struct with CONFIG_PER_VMA_LOCK by 8 byte. Likely, we could place "detached" in the lowest bit of vm_lock, but at least on 64bit that won't really make a difference, so keep it simple. Link: https://lkml.kernel.org/r/20240327143548.744070-1-david@redhat.com Signed-off-by: David Hildenbrand Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- include/linux/mm_types.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6fef56938a17..fee6561feb7e 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -671,6 +671,9 @@ struct vm_area_struct { }; #ifdef CONFIG_PER_VMA_LOCK + /* Flag to indicate areas detached from the mm->mm_mt tree */ + bool detached; + /* * Can only be written (using WRITE_ONCE()) while holding both: * - mmap_lock (in write mode) @@ -687,9 +690,6 @@ struct vm_area_struct { */ int vm_lock_seq; struct vma_lock *vm_lock; - - /* Flag to indicate areas detached from the mm->mm_mt tree */ - bool detached; #endif /* -- cgit From 82a616d0f33b77b1cadd0652efbe11874771320f Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 27 Mar 2024 15:33:01 +0100 Subject: mm: remove "prot" parameter from move_pte() The "prot" parameter is unused, and using it instead of what's stored in that particular PTE would very likely be wrong. Let's simply remove it. Link: https://lkml.kernel.org/r/20240327143301.741807-1-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Vishal Moola (Oracle) Cc: "David S. 
Miller" Cc: Andreas Larsson Signed-off-by: Andrew Morton --- arch/sparc/include/asm/pgtable_64.h | 2 +- include/linux/pgtable.h | 2 +- mm/mremap.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 26efc9bb644a..3fe429d73a65 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -957,7 +957,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr, #ifdef DCACHE_ALIASING_POSSIBLE #define __HAVE_ARCH_MOVE_PTE -#define move_pte(pte, prot, old_addr, new_addr) \ +#define move_pte(pte, old_addr, new_addr) \ ({ \ pte_t newpte = (pte); \ if (tlb_type != hypervisor && pte_present(pte)) { \ diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index f108c7a3c1d9..a3fc8150b047 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1079,7 +1079,7 @@ static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio) #endif #ifndef __HAVE_ARCH_MOVE_PTE -#define move_pte(pte, prot, old_addr, new_addr) (pte) +#define move_pte(pte, old_addr, new_addr) (pte) #endif #ifndef pte_accessible diff --git a/mm/mremap.c b/mm/mremap.c index 38d98465f3d8..f5aba752d35f 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -205,7 +205,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, */ if (pte_present(pte)) force_flush = true; - pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); + pte = move_pte(pte, old_addr, new_addr); pte = move_soft_dirty_pte(pte); set_pte_at(mm, new_addr, new_pte, pte); } -- cgit From 08b8247ebd2b70367ab3a87657efb237662bd8f6 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 27 Mar 2024 22:30:08 +0800 Subject: mm: remove __set_page_dirty_nobuffers() There are no more callers of __set_page_dirty_nobuffers(), remove it. 
Link: https://lkml.kernel.org/r/20240327143008.3739435-1-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: David Hildenbrand Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- include/linux/pagemap.h | 1 - mm/folio-compat.c | 6 ------ 2 files changed, 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index ec8b83928c30..65e332df4d76 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -1158,7 +1158,6 @@ static inline void folio_cancel_dirty(struct folio *folio) bool folio_clear_dirty_for_io(struct folio *folio); bool clear_page_dirty_for_io(struct page *page); void folio_invalidate(struct folio *folio, size_t offset, size_t length); -int __set_page_dirty_nobuffers(struct page *page); bool noop_dirty_folio(struct address_space *mapping, struct folio *folio); #ifdef CONFIG_MIGRATION diff --git a/mm/folio-compat.c b/mm/folio-compat.c index 50412014f16f..f31e0ce65b11 100644 --- a/mm/folio-compat.c +++ b/mm/folio-compat.c @@ -58,12 +58,6 @@ bool set_page_dirty(struct page *page) } EXPORT_SYMBOL(set_page_dirty); -int __set_page_dirty_nobuffers(struct page *page) -{ - return filemap_dirty_folio(page_mapping(page), page_folio(page)); -} -EXPORT_SYMBOL(__set_page_dirty_nobuffers); - bool clear_page_dirty_for_io(struct page *page) { return folio_clear_dirty_for_io(page_folio(page)); -- cgit From 529ce23a764f25d172198b4c6ba90f1e2ad17f93 Mon Sep 17 00:00:00 2001 From: Rick Edgecombe Date: Mon, 25 Mar 2024 19:16:44 -0700 Subject: mm: switch mm->get_unmapped_area() to a flag The mm_struct contains a function pointer *get_unmapped_area(), which is set to either arch_get_unmapped_area() or arch_get_unmapped_area_topdown() during the initialization of the mm. Since the function pointer only ever points to two functions that are named the same across all arch's, a function pointer is not really required. In addition future changes will want to add versions of the functions that take additional arguments. So to save a pointers worth of bytes in mm_struct, and prevent adding additional function pointers to mm_struct in future changes, remove it and keep the information about which get_unmapped_area() to use in a flag. Add the new flag to MMF_INIT_MASK so it doesn't get clobbered on fork by mmf_init_flags(). Most MM flags get clobbered on fork. In the pre-existing behavior mm->get_unmapped_area() would get copied to the new mm in dup_mm(), so not clobbering the flag preserves the existing behavior around inheriting the topdown-ness. Introduce a helper, mm_get_unmapped_area(), to easily convert code that refers to the old function pointer to instead select and call either arch_get_unmapped_area() or arch_get_unmapped_area_topdown() based on the flag. Then drop the mm->get_unmapped_area() function pointer. Leave the get_unmapped_area() pointer in struct file_operations alone. The main purpose of this change is to reorganize in preparation for future changes, but it also converts the calls of mm->get_unmapped_area() from indirect branches into a direct ones. The stress-ng bigheap benchmark calls realloc a lot, which calls through get_unmapped_area() in the kernel. On x86, the change yielded a ~1% improvement there on a retpoline config. In testing a few x86 configs, removing the pointer unfortunately didn't result in any actual size reductions in the compiled layout of mm_struct. 
But depending on compiler or arch alignment requirements, the change could shrink the size of mm_struct. Link: https://lkml.kernel.org/r/20240326021656.202649-3-rick.p.edgecombe@intel.com Signed-off-by: Rick Edgecombe Acked-by: Dave Hansen Acked-by: Liam R. Howlett Reviewed-by: Kirill A. Shutemov Acked-by: Alexei Starovoitov Cc: Dan Williams Cc: Andy Lutomirski Cc: Aneesh Kumar K.V Cc: Borislav Petkov (AMD) Cc: Christophe Leroy Cc: Deepak Gupta Cc: Guo Ren Cc: Helge Deller Cc: H. Peter Anvin (Intel) Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Kees Cook Cc: Mark Brown Cc: Michael Ellerman Cc: Naveen N. Rao Cc: Nicholas Piggin Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Andrew Morton --- arch/s390/mm/hugetlbpage.c | 2 +- arch/s390/mm/mmap.c | 4 ++-- arch/sparc/kernel/sys_sparc_64.c | 15 ++++++--------- arch/sparc/mm/hugetlbpage.c | 2 +- arch/x86/kernel/cpu/sgx/driver.c | 2 +- arch/x86/mm/hugetlbpage.c | 2 +- arch/x86/mm/mmap.c | 4 ++-- drivers/char/mem.c | 2 +- drivers/dax/device.c | 6 +++--- fs/hugetlbfs/inode.c | 4 ++-- fs/proc/inode.c | 3 ++- fs/ramfs/file-mmu.c | 2 +- include/linux/mm_types.h | 6 +----- include/linux/sched/coredump.h | 5 ++++- include/linux/sched/mm.h | 5 +++++ io_uring/io_uring.c | 2 +- kernel/bpf/arena.c | 2 +- kernel/bpf/syscall.c | 2 +- mm/debug.c | 6 ------ mm/huge_memory.c | 9 ++++----- mm/mmap.c | 21 ++++++++++++++++++--- mm/shmem.c | 11 +++++------ mm/util.c | 6 +++--- 23 files changed, 66 insertions(+), 57 deletions(-) (limited to 'include/linux') diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index ca43b6fce71c..7d948e243f4b 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -318,7 +318,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, goto check_asce_limit; } - if (mm->get_unmapped_area == arch_get_unmapped_area) + if (!test_bit(MMF_TOPDOWN, &mm->flags)) addr = hugetlb_get_unmapped_area_bottomup(file, addr, len, pgoff, flags); else diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index b14fc0887654..6b2e4436ad4a 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -185,10 +185,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) */ if (mmap_is_legacy(rlim_stack)) { mm->mmap_base = mmap_base_legacy(random_factor); - mm->get_unmapped_area = arch_get_unmapped_area; + clear_bit(MMF_TOPDOWN, &mm->flags); } else { mm->mmap_base = mmap_base(random_factor, rlim_stack); - mm->get_unmapped_area = arch_get_unmapped_area_topdown; + set_bit(MMF_TOPDOWN, &mm->flags); } } diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 1e9a9e016237..1dbf7211666e 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -218,14 +218,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long align_goal, addr = -ENOMEM; - unsigned long (*get_area)(struct file *, unsigned long, - unsigned long, unsigned long, unsigned long); - - get_area = current->mm->get_unmapped_area; if (flags & MAP_FIXED) { /* Ok, don't mess with it. 
*/ - return get_area(NULL, orig_addr, len, pgoff, flags); + return mm_get_unmapped_area(current->mm, NULL, orig_addr, len, pgoff, flags); } flags &= ~MAP_SHARED; @@ -238,7 +234,8 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u align_goal = (64UL * 1024); do { - addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags); + addr = mm_get_unmapped_area(current->mm, NULL, orig_addr, + len + (align_goal - PAGE_SIZE), pgoff, flags); if (!(addr & ~PAGE_MASK)) { addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL); break; @@ -256,7 +253,7 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u * be obtained. */ if (addr & ~PAGE_MASK) - addr = get_area(NULL, orig_addr, len, pgoff, flags); + addr = mm_get_unmapped_area(current->mm, NULL, orig_addr, len, pgoff, flags); return addr; } @@ -292,7 +289,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) gap == RLIM_INFINITY || sysctl_legacy_va_layout) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; - mm->get_unmapped_area = arch_get_unmapped_area; + clear_bit(MMF_TOPDOWN, &mm->flags); } else { /* We know it's 32-bit */ unsigned long task_size = STACK_TOP32; @@ -303,7 +300,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) gap = (task_size / 6 * 5); mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); - mm->get_unmapped_area = arch_get_unmapped_area_topdown; + set_bit(MMF_TOPDOWN, &mm->flags); } } diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index 8ed5bdf95d25..c23012e3a353 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -123,7 +123,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, (!vma || addr + len <= vm_start_gap(vma))) return addr; } - if (mm->get_unmapped_area == arch_get_unmapped_area) + if (!test_bit(MMF_TOPDOWN, &mm->flags)) return hugetlb_get_unmapped_area_bottomup(file, addr, len, pgoff, flags); else diff --git a/arch/x86/kernel/cpu/sgx/driver.c b/arch/x86/kernel/cpu/sgx/driver.c index 262f5fb18d74..22b65a5f5ec6 100644 --- a/arch/x86/kernel/cpu/sgx/driver.c +++ b/arch/x86/kernel/cpu/sgx/driver.c @@ -113,7 +113,7 @@ static unsigned long sgx_get_unmapped_area(struct file *file, if (flags & MAP_FIXED) return addr; - return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); + return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags); } #ifdef CONFIG_COMPAT diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c index dab6db288e5b..06ca9a60bac2 100644 --- a/arch/x86/mm/hugetlbpage.c +++ b/arch/x86/mm/hugetlbpage.c @@ -115,7 +115,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, } get_unmapped_area: - if (mm->get_unmapped_area == arch_get_unmapped_area) + if (!test_bit(MMF_TOPDOWN, &mm->flags)) return hugetlb_get_unmapped_area_bottomup(file, addr, len, pgoff, flags); else diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index c90c20904a60..a2cabb1c81e1 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -129,9 +129,9 @@ static void arch_pick_mmap_base(unsigned long *base, unsigned long *legacy_base, void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) { if (mmap_is_legacy()) - mm->get_unmapped_area = arch_get_unmapped_area; + clear_bit(MMF_TOPDOWN, &mm->flags); else - mm->get_unmapped_area = arch_get_unmapped_area_topdown; + set_bit(MMF_TOPDOWN, &mm->flags); arch_pick_mmap_base(&mm->mmap_base, &mm->mmap_legacy_base, 
arch_rnd(mmap64_rnd_bits), task_size_64bit(0), diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 3c6670cf905f..9b80e622ae80 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c @@ -544,7 +544,7 @@ static unsigned long get_unmapped_area_zero(struct file *file, } /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */ - return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); + return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags); #else return -ENOSYS; #endif diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 93ebedc5ec8c..47c126d37b59 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -329,14 +329,14 @@ static unsigned long dax_get_unmapped_area(struct file *filp, if ((off + len_align) < off) goto out; - addr_align = current->mm->get_unmapped_area(filp, addr, len_align, - pgoff, flags); + addr_align = mm_get_unmapped_area(current->mm, filp, addr, len_align, + pgoff, flags); if (!IS_ERR_VALUE(addr_align)) { addr_align += (off - addr_align) & (align - 1); return addr_align; } out: - return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); + return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags); } static const struct address_space_operations dev_dax_aops = { diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 6502c7e776d1..3dee18bf47ed 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -249,11 +249,11 @@ generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr, } /* - * Use mm->get_unmapped_area value as a hint to use topdown routine. + * Use MMF_TOPDOWN flag as a hint to use topdown routine. * If architectures have special needs, they should define their own * version of hugetlb_get_unmapped_area. */ - if (mm->get_unmapped_area == arch_get_unmapped_area_topdown) + if (test_bit(MMF_TOPDOWN, &mm->flags)) return hugetlb_get_unmapped_area_topdown(file, addr, len, pgoff, flags); return hugetlb_get_unmapped_area_bottomup(file, addr, len, diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 75396a24fd8c..d19434e2a58e 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -455,8 +455,9 @@ pde_get_unmapped_area(struct proc_dir_entry *pde, struct file *file, unsigned lo return pde->proc_ops->proc_get_unmapped_area(file, orig_addr, len, pgoff, flags); #ifdef CONFIG_MMU - return current->mm->get_unmapped_area(file, orig_addr, len, pgoff, flags); + return mm_get_unmapped_area(current->mm, file, orig_addr, len, pgoff, flags); #endif + return orig_addr; } diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c index c7a1aa3c882b..b45c7edc3225 100644 --- a/fs/ramfs/file-mmu.c +++ b/fs/ramfs/file-mmu.c @@ -35,7 +35,7 @@ static unsigned long ramfs_mmu_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { - return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); + return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags); } const struct file_operations ramfs_file_operations = { diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index fee6561feb7e..fa0d6995706f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -777,11 +777,7 @@ struct mm_struct { } ____cacheline_aligned_in_smp; struct maple_tree mm_mt; -#ifdef CONFIG_MMU - unsigned long (*get_unmapped_area) (struct file *filp, - unsigned long addr, unsigned long len, - unsigned long pgoff, unsigned long flags); -#endif + unsigned long mmap_base; /* base of mmap area */ unsigned long 
mmap_legacy_base; /* base of mmap area in bottom-up allocations */ #ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index 02f5090ffea2..e62ff805cfc9 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h @@ -92,9 +92,12 @@ static inline int get_dumpable(struct mm_struct *mm) #define MMF_VM_MERGE_ANY 30 #define MMF_VM_MERGE_ANY_MASK (1 << MMF_VM_MERGE_ANY) +#define MMF_TOPDOWN 31 /* mm searches top down by default */ +#define MMF_TOPDOWN_MASK (1 << MMF_TOPDOWN) + #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\ - MMF_VM_MERGE_ANY_MASK) + MMF_VM_MERGE_ANY_MASK | MMF_TOPDOWN_MASK) static inline unsigned long mmf_init_flags(unsigned long flags) { diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index b6543f9d78d6..ed1caa26c8be 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -8,6 +8,7 @@ #include #include #include +#include /* * Routines for handling mm_structs @@ -186,6 +187,10 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); +unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *filp, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags); + unsigned long generic_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index c170a2b8d2cf..d5edfb8444d7 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -3525,7 +3525,7 @@ static unsigned long io_uring_mmu_get_unmapped_area(struct file *filp, #else addr = 0UL; #endif - return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); + return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags); } #else /* !CONFIG_MMU */ diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c index 343c3456c8dd..16dbf4f6b77f 100644 --- a/kernel/bpf/arena.c +++ b/kernel/bpf/arena.c @@ -314,7 +314,7 @@ static unsigned long arena_get_unmapped_area(struct file *filp, unsigned long ad return -EINVAL; } - ret = current->mm->get_unmapped_area(filp, addr, len * 2, 0, flags); + ret = mm_get_unmapped_area(current->mm, filp, addr, len * 2, 0, flags); if (IS_ERR_VALUE(ret)) return ret; if ((ret >> 32) == ((ret + len - 1) >> 32)) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index c287925471f6..9cb89e875f0d 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -980,7 +980,7 @@ static unsigned long bpf_get_unmapped_area(struct file *filp, unsigned long addr if (map->ops->map_get_unmapped_area) return map->ops->map_get_unmapped_area(filp, addr, len, pgoff, flags); #ifdef CONFIG_MMU - return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); + return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags); #else return addr; #endif diff --git a/mm/debug.c b/mm/debug.c index e8a96b8b7197..b71186f1fb0b 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -177,9 +177,6 @@ EXPORT_SYMBOL(dump_vma); void dump_mm(const struct mm_struct *mm) { pr_emerg("mm %px task_size %lu\n" -#ifdef CONFIG_MMU - "get_unmapped_area %px\n" -#endif "mmap_base %lu mmap_legacy_base %lu\n" "pgd %px mm_users %d mm_count %d pgtables_bytes %lu map_count %d\n" "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" @@ -205,9 +202,6 @@ void dump_mm(const struct mm_struct *mm) "def_flags: %#lx(%pGv)\n", mm, mm->task_size, -#ifdef 
CONFIG_MMU - mm->get_unmapped_area, -#endif mm->mmap_base, mm->mmap_legacy_base, mm->pgd, atomic_read(&mm->mm_users), atomic_read(&mm->mm_count), diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 157cee64850c..7fdb2275a1e0 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -816,8 +816,8 @@ static unsigned long __thp_get_unmapped_area(struct file *filp, if (len_pad < len || (off + len_pad) < off) return 0; - ret = current->mm->get_unmapped_area(filp, addr, len_pad, - off >> PAGE_SHIFT, flags); + ret = mm_get_unmapped_area(current->mm, filp, addr, len_pad, + off >> PAGE_SHIFT, flags); /* * The failure might be due to length padding. The caller will retry @@ -835,8 +835,7 @@ static unsigned long __thp_get_unmapped_area(struct file *filp, off_sub = (off - ret) & (size - 1); - if (current->mm->get_unmapped_area == arch_get_unmapped_area_topdown && - !off_sub) + if (test_bit(MMF_TOPDOWN, ¤t->mm->flags) && !off_sub) return ret + size; ret += off_sub; @@ -853,7 +852,7 @@ unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, if (ret) return ret; - return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); + return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags); } EXPORT_SYMBOL_GPL(thp_get_unmapped_area); diff --git a/mm/mmap.c b/mm/mmap.c index 77a625e13ec1..dfb8a518e8c9 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1812,7 +1812,8 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { unsigned long (*get_area)(struct file *, unsigned long, - unsigned long, unsigned long, unsigned long); + unsigned long, unsigned long, unsigned long) + = NULL; unsigned long error = arch_mmap_check(addr, len, flags); if (error) @@ -1822,7 +1823,6 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, if (len > TASK_SIZE) return -ENOMEM; - get_area = current->mm->get_unmapped_area; if (file) { if (file->f_op->get_unmapped_area) get_area = file->f_op->get_unmapped_area; @@ -1841,7 +1841,11 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, if (!file) pgoff = 0; - addr = get_area(file, addr, len, pgoff, flags); + if (get_area) + addr = get_area(file, addr, len, pgoff, flags); + else + addr = mm_get_unmapped_area(current->mm, file, addr, len, + pgoff, flags); if (IS_ERR_VALUE(addr)) return addr; @@ -1856,6 +1860,17 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, EXPORT_SYMBOL(get_unmapped_area); +unsigned long +mm_get_unmapped_area(struct mm_struct *mm, struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + if (test_bit(MMF_TOPDOWN, &mm->flags)) + return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags); + return arch_get_unmapped_area(file, addr, len, pgoff, flags); +} +EXPORT_SYMBOL(mm_get_unmapped_area); + /** * find_vma_intersection() - Look up the first VMA which intersects the interval * @mm: The process address space. 
diff --git a/mm/shmem.c b/mm/shmem.c index 98985179f495..fa2a0ed97507 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2267,8 +2267,6 @@ unsigned long shmem_get_unmapped_area(struct file *file, unsigned long uaddr, unsigned long len, unsigned long pgoff, unsigned long flags) { - unsigned long (*get_area)(struct file *, - unsigned long, unsigned long, unsigned long, unsigned long); unsigned long addr; unsigned long offset; unsigned long inflated_len; @@ -2278,8 +2276,8 @@ unsigned long shmem_get_unmapped_area(struct file *file, if (len > TASK_SIZE) return -ENOMEM; - get_area = current->mm->get_unmapped_area; - addr = get_area(file, uaddr, len, pgoff, flags); + addr = mm_get_unmapped_area(current->mm, file, uaddr, len, pgoff, + flags); if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) return addr; @@ -2336,7 +2334,8 @@ unsigned long shmem_get_unmapped_area(struct file *file, if (inflated_len < len) return addr; - inflated_addr = get_area(NULL, uaddr, inflated_len, 0, flags); + inflated_addr = mm_get_unmapped_area(current->mm, NULL, uaddr, + inflated_len, 0, flags); if (IS_ERR_VALUE(inflated_addr)) return addr; if (inflated_addr & ~PAGE_MASK) @@ -4801,7 +4800,7 @@ unsigned long shmem_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { - return current->mm->get_unmapped_area(file, addr, len, pgoff, flags); + return mm_get_unmapped_area(current->mm, file, addr, len, pgoff, flags); } #endif diff --git a/mm/util.c b/mm/util.c index a9e911b22b99..c9e519e6811f 100644 --- a/mm/util.c +++ b/mm/util.c @@ -469,17 +469,17 @@ void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) if (mmap_is_legacy(rlim_stack)) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; - mm->get_unmapped_area = arch_get_unmapped_area; + clear_bit(MMF_TOPDOWN, &mm->flags); } else { mm->mmap_base = mmap_base(random_factor, rlim_stack); - mm->get_unmapped_area = arch_get_unmapped_area_topdown; + set_bit(MMF_TOPDOWN, &mm->flags); } } #elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT) void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) { mm->mmap_base = TASK_UNMAPPED_BASE; - mm->get_unmapped_area = arch_get_unmapped_area; + clear_bit(MMF_TOPDOWN, &mm->flags); } #endif -- cgit From 961148704acd814a4a5acaa4687a45e2315dd857 Mon Sep 17 00:00:00 2001 From: Rick Edgecombe Date: Mon, 25 Mar 2024 19:16:45 -0700 Subject: mm: introduce arch_get_unmapped_area_vmflags() When memory is being placed, mmap() will take care to respect the guard gaps of certain types of memory (VM_SHADOWSTACK, VM_GROWSUP and VM_GROWSDOWN). In order to ensure guard gaps between mappings, mmap() needs to consider two things: 1. That the new mapping isn't placed in an any existing mappings guard gaps. 2. That the new mapping isn't placed such that any existing mappings are not in *its* guard gaps. The longstanding behavior of mmap() is to ensure 1, but not take any care around 2. So for example, if there is a PAGE_SIZE free area, and a mmap() with a PAGE_SIZE size, and a type that has a guard gap is being placed, mmap() may place the shadow stack in the PAGE_SIZE free area. Then the mapping that is supposed to have a guard gap will not have a gap to the adjacent VMA. In order to take the start gap into account, the maple tree search needs to know the size of start gap the new mapping will need. The call chain from do_mmap() to the actual maple tree search looks like this: do_mmap(size, vm_flags, map_flags, ..) 
mm/mmap.c:get_unmapped_area(size, map_flags, ...) arch_get_unmapped_area(size, map_flags, ...) vm_unmapped_area(struct vm_unmapped_area_info) One option would be to add another MAP_ flag to mean a one page start gap (as is for shadow stack), but this consumes a flag unnecessarily. Another option could be to simply increase the size passed in do_mmap() by the start gap size, and adjust after the fact, but this will interfere with the alignment requirements passed in struct vm_unmapped_area_info, and unknown to mmap.c. Instead, introduce variants of arch_get_unmapped_area/_topdown() that take vm_flags. In future changes, these variants can be used in mmap.c:get_unmapped_area() to allow the vm_flags to be passed through to vm_unmapped_area(), while preserving the normal arch_get_unmapped_area/_topdown() for the existing callers. Link: https://lkml.kernel.org/r/20240326021656.202649-4-rick.p.edgecombe@intel.com Signed-off-by: Rick Edgecombe Cc: Alexei Starovoitov Cc: Andy Lutomirski Cc: Aneesh Kumar K.V Cc: Borislav Petkov (AMD) Cc: Christophe Leroy Cc: Dan Williams Cc: Dave Hansen Cc: Deepak Gupta Cc: Guo Ren Cc: Helge Deller Cc: H. Peter Anvin (Intel) Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Kees Cook Cc: Kirill A. Shutemov Cc: Liam R. Howlett Cc: Mark Brown Cc: Michael Ellerman Cc: Naveen N. Rao Cc: Nicholas Piggin Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Andrew Morton --- include/linux/sched/mm.h | 17 +++++++++++++++++ mm/mmap.c | 28 ++++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) (limited to 'include/linux') diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index ed1caa26c8be..91546493c43d 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -191,6 +191,23 @@ unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); +unsigned long +arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t vm_flags); +unsigned long +arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t); + +unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, + struct file *filp, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags, + vm_flags_t vm_flags); + unsigned long generic_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, diff --git a/mm/mmap.c b/mm/mmap.c index dfb8a518e8c9..b33055d1561b 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1807,6 +1807,34 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, } #endif +#ifndef HAVE_ARCH_UNMAPPED_AREA_VMFLAGS +unsigned long +arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) +{ + return arch_get_unmapped_area(filp, addr, len, pgoff, flags); +} + +unsigned long +arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t vm_flags) +{ + return arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags); +} +#endif + +unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags, + vm_flags_t vm_flags) +{ + if 
(test_bit(MMF_TOPDOWN, &mm->flags)) + return arch_get_unmapped_area_topdown_vmflags(filp, addr, len, pgoff, + flags, vm_flags); + return arch_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, vm_flags); +} + unsigned long get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) -- cgit From 8a0fe564bb2e744f21ca94e3eabe96d232cf5f41 Mon Sep 17 00:00:00 2001 From: Rick Edgecombe Date: Mon, 25 Mar 2024 19:16:47 -0700 Subject: mm: use get_unmapped_area_vmflags() When memory is being placed, mmap() will take care to respect the guard gaps of certain types of memory (VM_SHADOWSTACK, VM_GROWSUP and VM_GROWSDOWN). In order to ensure guard gaps between mappings, mmap() needs to consider two things: 1. That the new mapping isn't placed in an any existing mappings guard gaps. 2. That the new mapping isn't placed such that any existing mappings are not in *its* guard gaps. The long standing behavior of mmap() is to ensure 1, but not take any care around 2. So for example, if there is a PAGE_SIZE free area, and a mmap() with a PAGE_SIZE size, and a type that has a guard gap is being placed, mmap() may place the shadow stack in the PAGE_SIZE free area. Then the mapping that is supposed to have a guard gap will not have a gap to the adjacent VMA. Use mm_get_unmapped_area_vmflags() in the do_mmap() so future changes can cause shadow stack mappings to be placed with a guard gap. Also use the THP variant that takes vm_flags, such that THP shadow stack can get the same treatment. Adjust the vm_flags calculation to happen earlier so that the vm_flags can be passed into __get_unmapped_area(). Link: https://lkml.kernel.org/r/20240326021656.202649-6-rick.p.edgecombe@intel.com Signed-off-by: Rick Edgecombe Reviewed-by: Christophe Leroy Cc: Alexei Starovoitov Cc: Andy Lutomirski Cc: Aneesh Kumar K.V Cc: Borislav Petkov (AMD) Cc: Dan Williams Cc: Dave Hansen Cc: Deepak Gupta Cc: Guo Ren Cc: Helge Deller Cc: H. Peter Anvin (Intel) Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Kees Cook Cc: Kirill A. Shutemov Cc: Liam R. Howlett Cc: Mark Brown Cc: Michael Ellerman Cc: Naveen N. 
Rao Cc: Nicholas Piggin Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Andrew Morton --- include/linux/mm.h | 11 ++++++++++- mm/mmap.c | 32 ++++++++++++++++---------------- 2 files changed, 26 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 3dda0dbcae14..1b6903f4c57b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3413,7 +3413,16 @@ extern int install_special_mapping(struct mm_struct *mm, unsigned long randomize_stack_top(unsigned long stack_top); unsigned long randomize_page(unsigned long start, unsigned long range); -extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); +unsigned long +__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags); + +static inline unsigned long +get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + return __get_unmapped_area(file, addr, len, pgoff, flags, 0); +} extern unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, diff --git a/mm/mmap.c b/mm/mmap.c index 30baba087c65..a37c8dd29d64 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1255,18 +1255,6 @@ unsigned long do_mmap(struct file *file, unsigned long addr, if (mm->map_count > sysctl_max_map_count) return -ENOMEM; - /* Obtain the address to map to. we verify (or select) it and ensure - * that it represents a valid section of the address space. - */ - addr = get_unmapped_area(file, addr, len, pgoff, flags); - if (IS_ERR_VALUE(addr)) - return addr; - - if (flags & MAP_FIXED_NOREPLACE) { - if (find_vma_intersection(mm, addr, addr + len)) - return -EEXIST; - } - if (prot == PROT_EXEC) { pkey = execute_only_pkey(mm); if (pkey < 0) @@ -1280,6 +1268,18 @@ unsigned long do_mmap(struct file *file, unsigned long addr, vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; + /* Obtain the address to map to. we verify (or select) it and ensure + * that it represents a valid section of the address space. 
+ */ + addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags); + if (IS_ERR_VALUE(addr)) + return addr; + + if (flags & MAP_FIXED_NOREPLACE) { + if (find_vma_intersection(mm, addr, addr + len)) + return -EEXIST; + } + if (flags & MAP_LOCKED) if (!can_do_mlock()) return -EPERM; @@ -1836,8 +1836,8 @@ unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *fi } unsigned long -get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, - unsigned long pgoff, unsigned long flags) +__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags) { unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) @@ -1872,8 +1872,8 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, if (get_area) addr = get_area(file, addr, len, pgoff, flags); else - addr = mm_get_unmapped_area(current->mm, file, addr, len, - pgoff, flags); + addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len, + pgoff, flags, vm_flags); if (IS_ERR_VALUE(addr)) return addr; -- cgit From ed48e87c7df3651accfa3088076830727b070f54 Mon Sep 17 00:00:00 2001 From: Rick Edgecombe Date: Mon, 25 Mar 2024 19:16:48 -0700 Subject: thp: add thp_get_unmapped_area_vmflags() When memory is being placed, mmap() will take care to respect the guard gaps of certain types of memory (VM_SHADOWSTACK, VM_GROWSUP and VM_GROWSDOWN). In order to ensure guard gaps between mappings, mmap() needs to consider two things: 1. That the new mapping isn't placed in an any existing mappings guard gaps. 2. That the new mapping isn't placed such that any existing mappings are not in *its* guard gaps. The longstanding behavior of mmap() is to ensure 1, but not take any care around 2. So for example, if there is a PAGE_SIZE free area, and a mmap() with a PAGE_SIZE size, and a type that has a guard gap is being placed, mmap() may place the shadow stack in the PAGE_SIZE free area. Then the mapping that is supposed to have a guard gap will not have a gap to the adjacent VMA. Add a THP implementations of the vm_flags variant of get_unmapped_area(). Future changes will call this from mmap.c in the do_mmap() path to allow shadow stacks to be placed with consideration taken for the start guard gap. Shadow stack memory is always private and anonymous and so special guard gap logic is not needed in a lot of caseis, but it can be mapped by THP, so needs to be handled. Link: https://lkml.kernel.org/r/20240326021656.202649-7-rick.p.edgecombe@intel.com Signed-off-by: Rick Edgecombe Reviewed-by: Christophe Leroy Cc: Alexei Starovoitov Cc: Andy Lutomirski Cc: Aneesh Kumar K.V Cc: Borislav Petkov (AMD) Cc: Dan Williams Cc: Dave Hansen Cc: Deepak Gupta Cc: Guo Ren Cc: Helge Deller Cc: H. Peter Anvin (Intel) Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Kees Cook Cc: Kirill A. Shutemov Cc: Liam R. Howlett Cc: Mark Brown Cc: Michael Ellerman Cc: Naveen N. 
Rao Cc: Nicholas Piggin Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Andrew Morton --- include/linux/huge_mm.h | 11 +++++++++++ mm/huge_memory.c | 23 ++++++++++++++++------- mm/mmap.c | 12 +++++++----- 3 files changed, 34 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index ab26d9e65ec3..e896ca4760f6 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -270,6 +270,9 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); +unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags, + vm_flags_t vm_flags); bool can_split_folio(struct folio *folio, int *pextra_pins); int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, @@ -413,6 +416,14 @@ static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, #define thp_get_unmapped_area NULL +static inline unsigned long +thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t vm_flags) +{ + return 0; +} + static inline bool can_split_folio(struct folio *folio, int *pextra_pins) { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 7fdb2275a1e0..b757d9be310c 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -800,7 +800,8 @@ static inline bool is_transparent_hugepage(const struct folio *folio) static unsigned long __thp_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, - loff_t off, unsigned long flags, unsigned long size) + loff_t off, unsigned long flags, unsigned long size, + vm_flags_t vm_flags) { loff_t off_end = off + len; loff_t off_align = round_up(off, size); @@ -816,8 +817,8 @@ static unsigned long __thp_get_unmapped_area(struct file *filp, if (len_pad < len || (off + len_pad) < off) return 0; - ret = mm_get_unmapped_area(current->mm, filp, addr, len_pad, - off >> PAGE_SHIFT, flags); + ret = mm_get_unmapped_area_vmflags(current->mm, filp, addr, len_pad, + off >> PAGE_SHIFT, flags, vm_flags); /* * The failure might be due to length padding. 
The caller will retry @@ -842,17 +843,25 @@ static unsigned long __thp_get_unmapped_area(struct file *filp, return ret; } -unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, unsigned long flags) +unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags, + vm_flags_t vm_flags) { unsigned long ret; loff_t off = (loff_t)pgoff << PAGE_SHIFT; - ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE); + ret = __thp_get_unmapped_area(filp, addr, len, off, flags, PMD_SIZE, vm_flags); if (ret) return ret; - return mm_get_unmapped_area(current->mm, filp, addr, len, pgoff, flags); + return mm_get_unmapped_area_vmflags(current->mm, filp, addr, len, pgoff, flags, + vm_flags); +} + +unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + return thp_get_unmapped_area_vmflags(filp, addr, len, pgoff, flags, 0); } EXPORT_SYMBOL_GPL(thp_get_unmapped_area); diff --git a/mm/mmap.c b/mm/mmap.c index a37c8dd29d64..0ef1191e3be5 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1860,20 +1860,22 @@ __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, * so use shmem's get_unmapped_area in case it can be huge. */ get_area = shmem_get_unmapped_area; - } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { - /* Ensures that larger anonymous mappings are THP aligned. */ - get_area = thp_get_unmapped_area; } /* Always treat pgoff as zero for anonymous memory. */ if (!file) pgoff = 0; - if (get_area) + if (get_area) { addr = get_area(file, addr, len, pgoff, flags); - else + } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) { + /* Ensures that larger anonymous mappings are THP aligned. */ + addr = thp_get_unmapped_area_vmflags(file, addr, len, + pgoff, flags, vm_flags); + } else { addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len, pgoff, flags, vm_flags); + } if (IS_ERR_VALUE(addr)) return addr; -- cgit From 44bd7ace9fd6490dce3b001b21d5c6bac869171b Mon Sep 17 00:00:00 2001 From: Rick Edgecombe Date: Mon, 25 Mar 2024 19:16:53 -0700 Subject: mm: take placement mappings gap into account When memory is being placed, mmap() will take care to respect the guard gaps of certain types of memory (VM_SHADOWSTACK, VM_GROWSUP and VM_GROWSDOWN). In order to ensure guard gaps between mappings, mmap() needs to consider two things: 1. That the new mapping isn't placed in an any existing mappings guard gaps. 2. That the new mapping isn't placed such that any existing mappings are not in *its* guard gaps. The longstanding behavior of mmap() is to ensure 1, but not take any care around 2. So for example, if there is a PAGE_SIZE free area, and a mmap() with a PAGE_SIZE size, and a type that has a guard gap is being placed, mmap() may place the shadow stack in the PAGE_SIZE free area. Then the mapping that is supposed to have a guard gap will not have a gap to the adjacent VMA. For MAP_GROWSDOWN/VM_GROWSDOWN and MAP_GROWSUP/VM_GROWSUP this has not been a problem in practice because applications place these kinds of mappings very early, when there is not many mappings to find a space between. But for shadow stacks, they may be placed throughout the lifetime of the application. Use the start_gap field to find a space that includes the guard gap for the new mapping. Take care to not interfere with the alignment. 
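For illustration, a minimal sketch (not part of this patch) of how a placement helper could request such a start guard gap; example_place_with_guard_gap() is a hypothetical name, and only the start_gap assignment is specific to this change:

static unsigned long example_place_with_guard_gap(unsigned long len,
						  unsigned long low,
						  unsigned long high)
{
	struct vm_unmapped_area_info info = {};

	info.length     = len;		/* size of the new mapping */
	info.low_limit  = low;
	info.high_limit = high;
	info.start_gap  = PAGE_SIZE;	/* keep one guard page free below the mapping */

	/*
	 * The search now accounts for length + align_mask + start_gap, and
	 * the address it returns leaves start_gap bytes unused below it.
	 */
	return vm_unmapped_area(&info);
}
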
Link: https://lkml.kernel.org/r/20240326021656.202649-12-rick.p.edgecombe@intel.com Signed-off-by: Rick Edgecombe Reviewed-by: Christophe Leroy Cc: Alexei Starovoitov Cc: Andy Lutomirski Cc: Aneesh Kumar K.V Cc: Borislav Petkov (AMD) Cc: Dan Williams Cc: Dave Hansen Cc: Deepak Gupta Cc: Guo Ren Cc: Helge Deller Cc: H. Peter Anvin (Intel) Cc: Ingo Molnar Cc: "James E.J. Bottomley" Cc: Kees Cook Cc: Kirill A. Shutemov Cc: Liam R. Howlett Cc: Mark Brown Cc: Michael Ellerman Cc: Naveen N. Rao Cc: Nicholas Piggin Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Andrew Morton --- include/linux/mm.h | 1 + mm/mmap.c | 12 +++++++++--- 2 files changed, 10 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 1b6903f4c57b..2d5e492ef57f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3468,6 +3468,7 @@ struct vm_unmapped_area_info { unsigned long high_limit; unsigned long align_mask; unsigned long align_offset; + unsigned long start_gap; }; extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info); diff --git a/mm/mmap.c b/mm/mmap.c index 4ad386f3f63f..d3c2ca8efa53 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1579,7 +1579,7 @@ static unsigned long unmapped_area(struct vm_unmapped_area_info *info) VMA_ITERATOR(vmi, current->mm, 0); /* Adjust search length to account for worst case alignment overhead */ - length = info->length + info->align_mask; + length = info->length + info->align_mask + info->start_gap; if (length < info->length) return -ENOMEM; @@ -1591,7 +1591,13 @@ retry: if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length)) return -ENOMEM; - gap = vma_iter_addr(&vmi); + /* + * Adjust for the gap first so it doesn't interfere with the + * later alignment. The first step is the minimum needed to + * fulill the start gap, the next steps is the minimum to align + * that. It is the minimum needed to fulill both. + */ + gap = vma_iter_addr(&vmi) + info->start_gap; gap += (info->align_offset - gap) & info->align_mask; tmp = vma_next(&vmi); if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ @@ -1630,7 +1636,7 @@ static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) VMA_ITERATOR(vmi, current->mm, 0); /* Adjust search length to account for worst case alignment overhead */ - length = info->length + info->align_mask; + length = info->length + info->align_mask + info->start_gap; if (length < info->length) return -ENOMEM; -- cgit From 3a9e567ca45fb5280065283d10d9a11f0db61d2b Mon Sep 17 00:00:00 2001 From: Jinjiang Tu Date: Thu, 28 Mar 2024 19:10:08 +0800 Subject: mm/ksm: fix ksm exec support for prctl Patch series "mm/ksm: fix ksm exec support for prctl", v4. commit 3c6f33b7273a ("mm/ksm: support fork/exec for prctl") inherits MMF_VM_MERGE_ANY flag when a task calls execve(). However, it doesn't create the mm_slot, so ksmd will not try to scan this task. The first patch fixes the issue. The second patch refactors to prepare for the third patch. The third patch extends the selftests of ksm to verfity the deduplication really happens after fork/exec inherits ths KSM setting. This patch (of 3): commit 3c6f33b7273a ("mm/ksm: support fork/exec for prctl") inherits MMF_VM_MERGE_ANY flag when a task calls execve(). Howerver, it doesn't create the mm_slot, so ksmd will not try to scan this task. To fix it, allocate and add the mm_slot to ksm_mm_head in __bprm_mm_init() when the mm has MMF_VM_MERGE_ANY flag. 
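The scenario being fixed can be reproduced from userspace with nothing more than a prctl() followed by execve(); the sketch below is illustrative and not part of the patch (the fallback PR_SET_MEMORY_MERGE value and the exec'ed path are placeholders). Before the fix, the exec'ed image inherits MMF_VM_MERGE_ANY but is never added to ksmd's scan list, so its memory is never considered for merging.

/* Illustrative sketch: enable KSM "merge any" mode, then exec. With
 * this fix the exec'ed process is registered with ksmd as well. */
#include <stdio.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE 67	/* assumed value; prefer the uapi header */
#endif

int main(void)
{
	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
		perror("prctl(PR_SET_MEMORY_MERGE)");

	/* Placeholder path and argv; any long-running workload will do. */
	execl("/usr/bin/some-workload", "some-workload", (char *)NULL);
	perror("execl");
	return 1;
}
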
Link: https://lkml.kernel.org/r/20240328111010.1502191-1-tujinjiang@huawei.com Link: https://lkml.kernel.org/r/20240328111010.1502191-2-tujinjiang@huawei.com Fixes: 3c6f33b7273a ("mm/ksm: support fork/exec for prctl") Signed-off-by: Jinjiang Tu Reviewed-by: David Hildenbrand Cc: Johannes Weiner Cc: Kefeng Wang Cc: Nanyong Sun Cc: Rik van Riel Cc: Stefan Roesch Signed-off-by: Andrew Morton --- fs/exec.c | 11 +++++++++++ include/linux/ksm.h | 13 +++++++++++++ 2 files changed, 24 insertions(+) (limited to 'include/linux') diff --git a/fs/exec.c b/fs/exec.c index cf1df7f16e55..0c5f06d08c35 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -67,6 +67,7 @@ #include #include #include +#include #include #include @@ -267,6 +268,14 @@ static int __bprm_mm_init(struct linux_binprm *bprm) goto err_free; } + /* + * Need to be called with mmap write lock + * held, to avoid race with ksmd. + */ + err = ksm_execve(mm); + if (err) + goto err_ksm; + /* * Place the stack at the largest stack address the architecture * supports. Later, we'll move this to an appropriate place. We don't @@ -288,6 +297,8 @@ static int __bprm_mm_init(struct linux_binprm *bprm) bprm->p = vma->vm_end - sizeof(void *); return 0; err: + ksm_exit(mm); +err_ksm: mmap_write_unlock(mm); err_free: bprm->vma = NULL; diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 401348e9f92b..7e2b1de3996a 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -59,6 +59,14 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) return 0; } +static inline int ksm_execve(struct mm_struct *mm) +{ + if (test_bit(MMF_VM_MERGE_ANY, &mm->flags)) + return __ksm_enter(mm); + + return 0; +} + static inline void ksm_exit(struct mm_struct *mm) { if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) @@ -107,6 +115,11 @@ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) return 0; } +static inline int ksm_execve(struct mm_struct *mm) +{ + return 0; +} + static inline void ksm_exit(struct mm_struct *mm) { } -- cgit From ba42b524a0408b5f92bd41edaee1ea84309ab9ae Mon Sep 17 00:00:00 2001 From: York Jasper Niebuhr Date: Fri, 29 Mar 2024 15:56:05 +0100 Subject: mm: init_mlocked_on_free_v3 Implements the "init_mlocked_on_free" boot option. When this boot option is enabled, any mlock'ed pages are zeroed on free. If the pages are munlock'ed beforehand, no initialization takes place. This boot option is meant to combat the performance hit of "init_on_free" as reported in commit 6471384af2a6 ("mm: security: introduce init_on_alloc=1 and init_on_free=1 boot options"). With "init_mlocked_on_free=1" only relevant data is freed while everything else is left untouched by the kernel. Correspondingly, this patch introduces no performance hit for unmapping non-mlock'ed memory. The unmapping overhead for purely mlocked memory was measured to be approximately 13%. Realistically, most systems mlock only a fraction of the total memory so the real-world system overhead should be close to zero. Optimally, userspace programs clear any key material or other confidential memory before exit and munlock the according memory regions. If a program crashes, userspace key managers fail to do this job. Accordingly, no munlock operations are performed so the data is caught and zeroed by the kernel. Should the program not crash, all memory will ideally be munlocked so no overhead is caused. CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON can be set to enable "init_mlocked_on_free" by default. 
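The intended use case, sketched in C (illustrative only, not from this patch; the buffer size and the explicit_bzero() cleanup are assumptions): key material lives in mlock()ed memory, the clean exit path wipes and munlocks it itself, and a crash leaves the pages mlocked so a kernel booted with init_mlocked_on_free=1 zeroes them when they are freed.

/* Illustrative sketch of the protected pattern described above. */
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define KEY_BYTES 4096

int main(void)
{
	unsigned char *key = aligned_alloc(4096, KEY_BYTES);

	if (!key || mlock(key, KEY_BYTES))
		return 1;

	/* ... load and use the secret ... */

	/* Clean shutdown: wipe before the pages can be reused. */
	explicit_bzero(key, KEY_BYTES);
	munlock(key, KEY_BYTES);
	free(key);
	return 0;
}
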
Link: https://lkml.kernel.org/r/20240329145605.149917-1-yjnworkstation@gmail.com Signed-off-by: York Jasper Niebuhr Cc: Matthew Wilcox (Oracle) Cc: York Jasper Niebuhr Cc: Kees Cook Signed-off-by: Andrew Morton --- Documentation/admin-guide/kernel-parameters.txt | 6 ++++ include/linux/mm.h | 9 +++++- mm/internal.h | 1 + mm/memory.c | 6 ++++ mm/mm_init.c | 43 +++++++++++++++++++++---- mm/page_alloc.c | 2 +- security/Kconfig.hardening | 15 +++++++++ 7 files changed, 73 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 902ecd92a29f..3ff97de349da 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -2148,6 +2148,12 @@ Format: 0 | 1 Default set by CONFIG_INIT_ON_FREE_DEFAULT_ON. + init_mlocked_on_free= [MM] Fill freed userspace memory with zeroes if + it was mlock'ed and not explicitly munlock'ed + afterwards. + Format: 0 | 1 + Default set by CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON + init_pkru= [X86] Specify the default memory protection keys rights register contents for all processes. 0x55555554 by default (disallow access to all but pkey 0). Can diff --git a/include/linux/mm.h b/include/linux/mm.h index 2d5e492ef57f..4f4e460d7853 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3762,7 +3762,14 @@ DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); static inline bool want_init_on_free(void) { return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON, - &init_on_free); + &init_on_free); +} + +DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON, init_mlocked_on_free); +static inline bool want_init_mlocked_on_free(void) +{ + return static_branch_maybe(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON, + &init_mlocked_on_free); } extern bool _debug_pagealloc_enabled_early; diff --git a/mm/internal.h b/mm/internal.h index 6614ba4ca9de..cf7799e29391 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -506,6 +506,7 @@ extern void __putback_isolated_page(struct page *page, unsigned int order, extern void memblock_free_pages(struct page *page, unsigned long pfn, unsigned int order); extern void __free_pages_core(struct page *page, unsigned int order); +extern void kernel_init_pages(struct page *page, int numpages); /* * This will have no effect, other than possibly generating a warning, if the diff --git a/mm/memory.c b/mm/memory.c index 0b92336bcebd..80944acb5b4e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1506,6 +1506,12 @@ static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb, if (unlikely(page_mapcount(page) < 0)) print_bad_pte(vma, addr, ptent, page); } + + if (want_init_mlocked_on_free() && folio_test_mlocked(folio) && + !delay_rmap && folio_test_anon(folio)) { + kernel_init_pages(page, folio_nr_pages(folio)); + } + if (unlikely(__tlb_remove_folio_pages(tlb, page, nr, delay_rmap))) { *force_flush = true; *force_break = true; diff --git a/mm/mm_init.c b/mm/mm_init.c index d01912b8a597..2c8f3af4430f 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -2522,6 +2522,9 @@ EXPORT_SYMBOL(init_on_alloc); DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); EXPORT_SYMBOL(init_on_free); +DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON, init_mlocked_on_free); +EXPORT_SYMBOL(init_mlocked_on_free); + static bool _init_on_alloc_enabled_early __read_mostly = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON); static int __init 
early_init_on_alloc(char *buf) @@ -2539,6 +2542,14 @@ static int __init early_init_on_free(char *buf) } early_param("init_on_free", early_init_on_free); +static bool _init_mlocked_on_free_enabled_early __read_mostly + = IS_ENABLED(CONFIG_INIT_MLOCKED_ON_FREE_DEFAULT_ON); +static int __init early_init_mlocked_on_free(char *buf) +{ + return kstrtobool(buf, &_init_mlocked_on_free_enabled_early); +} +early_param("init_mlocked_on_free", early_init_mlocked_on_free); + DEFINE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled); /* @@ -2566,12 +2577,21 @@ static void __init mem_debugging_and_hardening_init(void) } #endif - if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) && + if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early || + _init_mlocked_on_free_enabled_early) && page_poisoning_requested) { pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " - "will take precedence over init_on_alloc and init_on_free\n"); + "will take precedence over init_on_alloc, init_on_free " + "and init_mlocked_on_free\n"); _init_on_alloc_enabled_early = false; _init_on_free_enabled_early = false; + _init_mlocked_on_free_enabled_early = false; + } + + if (_init_mlocked_on_free_enabled_early && _init_on_free_enabled_early) { + pr_info("mem auto-init: init_on_free is on, " + "will take precedence over init_mlocked_on_free\n"); + _init_mlocked_on_free_enabled_early = false; } if (_init_on_alloc_enabled_early) { @@ -2588,9 +2608,17 @@ static void __init mem_debugging_and_hardening_init(void) static_branch_disable(&init_on_free); } - if (IS_ENABLED(CONFIG_KMSAN) && - (_init_on_alloc_enabled_early || _init_on_free_enabled_early)) - pr_info("mem auto-init: please make sure init_on_alloc and init_on_free are disabled when running KMSAN\n"); + if (_init_mlocked_on_free_enabled_early) { + want_check_pages = true; + static_branch_enable(&init_mlocked_on_free); + } else { + static_branch_disable(&init_mlocked_on_free); + } + + if (IS_ENABLED(CONFIG_KMSAN) && (_init_on_alloc_enabled_early || + _init_on_free_enabled_early || _init_mlocked_on_free_enabled_early)) + pr_info("mem auto-init: please make sure init_on_alloc, init_on_free and " + "init_mlocked_on_free are disabled when running KMSAN\n"); #ifdef CONFIG_DEBUG_PAGEALLOC if (debug_pagealloc_enabled()) { @@ -2629,9 +2657,10 @@ static void __init report_meminit(void) else stack = "off"; - pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n", + pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s, mlocked free:%s\n", stack, want_init_on_alloc(GFP_KERNEL) ? "on" : "off", - want_init_on_free() ? "on" : "off"); + want_init_on_free() ? "on" : "off", + want_init_mlocked_on_free() ? "on" : "off"); if (want_init_on_free()) pr_info("mem auto-init: clearing system memory may take some time...\n"); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 47ab0297838a..e030ccf9d5bc 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1032,7 +1032,7 @@ static inline bool should_skip_kasan_poison(struct page *page) return page_kasan_tag(page) == KASAN_TAG_KERNEL; } -static void kernel_init_pages(struct page *page, int numpages) +void kernel_init_pages(struct page *page, int numpages) { int i; diff --git a/security/Kconfig.hardening b/security/Kconfig.hardening index 2cff851ebfd7..effbf5982be1 100644 --- a/security/Kconfig.hardening +++ b/security/Kconfig.hardening @@ -255,6 +255,21 @@ config INIT_ON_FREE_DEFAULT_ON touching "cold" memory areas. Most cases see 3-5% impact. Some synthetic workloads have measured as high as 8%. 
+config INIT_MLOCKED_ON_FREE_DEFAULT_ON + bool "Enable mlocked memory zeroing on free" + depends on !KMSAN + help + This config has the effect of setting "init_mlocked_on_free=1" + on the kernel command line. If it is enabled, all mlocked process + memory is zeroed when freed. This restriction to mlocked memory + improves performance over "init_on_free" but can still be used to + protect confidential data like key material from content exposures + to other processes, as well as live forensics and cold boot attacks. + Any non-mlocked memory is not cleared before it is reassigned. This + configuration can be overwritten by setting "init_mlocked_on_free=0" + on the command line. The "init_on_free" boot option takes + precedence over "init_mlocked_on_free". + config CC_HAS_ZERO_CALL_USED_REGS def_bool $(cc-option,-fzero-call-used-regs=used-gpr) # https://github.com/ClangBuiltLinux/linux/issues/1766 -- cgit From 1ade67cd22e4aeb74ed0b86bab6760bbcf9e2f06 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 2 Apr 2024 21:12:50 +0100 Subject: mm: remove page_idle and page_young wrappers All users have now been converted to the folio equivalents, so remove the page wrappers. Link: https://lkml.kernel.org/r/20240402201252.917342-4-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- include/linux/page_idle.h | 25 ------------------------- 1 file changed, 25 deletions(-) (limited to 'include/linux') diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h index 511e22ef459f..6357f1e7918a 100644 --- a/include/linux/page_idle.h +++ b/include/linux/page_idle.h @@ -119,29 +119,4 @@ static inline void folio_clear_idle(struct folio *folio) } #endif /* CONFIG_PAGE_IDLE_FLAG */ - -static inline bool page_is_young(struct page *page) -{ - return folio_test_young(page_folio(page)); -} - -static inline void set_page_young(struct page *page) -{ - folio_set_young(page_folio(page)); -} - -static inline bool test_and_clear_page_young(struct page *page) -{ - return folio_test_clear_young(page_folio(page)); -} - -static inline bool page_is_idle(struct page *page) -{ - return folio_test_idle(page_folio(page)); -} - -static inline void set_page_idle(struct page *page) -{ - folio_set_idle(page_folio(page)); -} #endif /* _LINUX_MM_PAGE_IDLE_H */ -- cgit From 8c9e8381ea4bcca4305cf0d24f87f4d39514ab35 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Tue, 2 Apr 2024 21:12:51 +0100 Subject: mm: generate PAGE_IDLE_FLAG definitions If CONFIG_PAGE_IDLE_FLAG is not set, we can use FOLIO_FLAG_FALSE() to generate these definitions. 
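Concretely, the FOLIO_FLAG_FALSE()/FOLIO_TEST_CLEAR_FLAG_FALSE() invocations added below generate the same compile-time stubs that were previously open-coded in page_idle.h (and are removed by this patch), roughly along these lines:

/* Roughly what FOLIO_FLAG_FALSE(idle) stands in for when
 * CONFIG_PAGE_IDLE_FLAG is not set. */
static inline bool folio_test_idle(const struct folio *folio)
{
	return false;
}

static inline void folio_set_idle(struct folio *folio)
{
}

static inline void folio_clear_idle(struct folio *folio)
{
}
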
Link: https://lkml.kernel.org/r/20240402201252.917342-5-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- include/linux/page-flags.h | 9 ++++++++- include/linux/page_idle.h | 37 ++----------------------------------- 2 files changed, 10 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 7577fe7debaf..f83331684e47 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -626,12 +626,19 @@ PAGEFLAG_FALSE(HWPoison, hwpoison) #define __PG_HWPOISON 0 #endif -#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) +#ifdef CONFIG_PAGE_IDLE_FLAG +#ifdef CONFIG_64BIT FOLIO_TEST_FLAG(young, FOLIO_HEAD_PAGE) FOLIO_SET_FLAG(young, FOLIO_HEAD_PAGE) FOLIO_TEST_CLEAR_FLAG(young, FOLIO_HEAD_PAGE) FOLIO_FLAG(idle, FOLIO_HEAD_PAGE) #endif +/* See page_idle.h for !64BIT workaround */ +#else /* !CONFIG_PAGE_IDLE_FLAG */ +FOLIO_FLAG_FALSE(young) +FOLIO_TEST_CLEAR_FLAG_FALSE(young) +FOLIO_FLAG_FALSE(idle) +#endif /* * PageReported() is used to track reported free pages within the Buddy diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h index 6357f1e7918a..89ca0d5dc1e7 100644 --- a/include/linux/page_idle.h +++ b/include/linux/page_idle.h @@ -6,9 +6,7 @@ #include #include -#ifdef CONFIG_PAGE_IDLE_FLAG - -#ifndef CONFIG_64BIT +#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT) /* * If there is not enough space to store Idle and Young bits in page flags, use * page ext flags instead. @@ -87,36 +85,5 @@ static inline void folio_clear_idle(struct folio *folio) clear_bit(PAGE_EXT_IDLE, &page_ext->flags); page_ext_put(page_ext); } -#endif /* !CONFIG_64BIT */ - -#else /* !CONFIG_PAGE_IDLE_FLAG */ - -static inline bool folio_test_young(const struct folio *folio) -{ - return false; -} - -static inline void folio_set_young(struct folio *folio) -{ -} - -static inline bool folio_test_clear_young(struct folio *folio) -{ - return false; -} - -static inline bool folio_test_idle(const struct folio *folio) -{ - return false; -} - -static inline void folio_set_idle(struct folio *folio) -{ -} - -static inline void folio_clear_idle(struct folio *folio) -{ -} - -#endif /* CONFIG_PAGE_IDLE_FLAG */ +#endif /* CONFIG_PAGE_IDLE_FLAG && !64BIT */ #endif /* _LINUX_MM_PAGE_IDLE_H */ -- cgit From d7d0d389ff90644546ffcb8e15ea3ccaf6138958 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 8 Apr 2024 19:39:40 +0100 Subject: mm: swap: remove CLUSTER_FLAG_HUGE from swap_cluster_info:flags Patch series "Swap-out mTHP without splitting", v7. This series adds support for swapping out multi-size THP (mTHP) without needing to first split the large folio via split_huge_page_to_list_to_order(). It closely follows the approach already used to swap-out PMD-sized THP. There are a couple of reasons for swapping out mTHP without splitting: - Performance: It is expensive to split a large folio and under extreme memory pressure some workloads regressed performance when using 64K mTHP vs 4K small folios because of this extra cost in the swap-out path. This series not only eliminates the regression but makes it faster to swap out 64K mTHP vs 4K small folios. - Memory fragmentation avoidance: If we can avoid splitting a large folio memory is less likely to become fragmented, making it easier to re-allocate a large folio in future. 
- Performance: Enables a separate series [7] to swap-in whole mTHPs, which means we won't lose the TLB-efficiency benefits of mTHP once the memory has been through a swap cycle. I've done what I thought was the smallest change possible, and as a result, this approach is only employed when the swap is backed by a non-rotating block device (just as PMD-sized THP is supported today). Discussion against the RFC concluded that this is sufficient. Performance Testing =================== I've run some swap performance tests on Ampere Altra VM (arm64) with 8 CPUs. The VM is set up with a 35G block ram device as the swap device and the test is run from inside a memcg limited to 40G memory. I've then run `usemem` from vm-scalability with 70 processes, each allocating and writing 1G of memory. I've repeated everything 6 times and taken the mean performance improvement relative to 4K page baseline: | alloc size | baseline | + this series | | | mm-unstable (~v6.9-rc1) | | |:-----------|------------------------:|------------------------:| | 4K Page | 0.0% | 1.3% | | 64K THP | -13.6% | 46.3% | | 2M THP | 91.4% | 89.6% | So with this change, the 64K swap performance goes from a 14% regression to a 46% improvement. While 2M shows a small regression I'm confident that this is just noise. [1] https://lore.kernel.org/linux-mm/20231010142111.3997780-1-ryan.roberts@arm.com/ [2] https://lore.kernel.org/linux-mm/20231017161302.2518826-1-ryan.roberts@arm.com/ [3] https://lore.kernel.org/linux-mm/20231025144546.577640-1-ryan.roberts@arm.com/ [4] https://lore.kernel.org/linux-mm/20240311150058.1122862-1-ryan.roberts@arm.com/ [5] https://lore.kernel.org/linux-mm/20240327144537.4165578-1-ryan.roberts@arm.com/ [6] https://lore.kernel.org/linux-mm/20240403114032.1162100-1-ryan.roberts@arm.com/ [7] https://lore.kernel.org/linux-mm/20240304081348.197341-1-21cnbao@gmail.com/ [8] https://lore.kernel.org/linux-mm/CAGsJ_4yMOow27WDvN2q=E4HAtDd2PJ=OQ5Pj9DG+6FLWwNuXUw@mail.gmail.com/ [9] https://lore.kernel.org/linux-mm/579d5127-c763-4001-9625-4563a9316ac3@redhat.com/ This patch (of 7): As preparation for supporting small-sized THP in the swap-out path, without first needing to split to order-0, Remove the CLUSTER_FLAG_HUGE, which, when present, always implies PMD-sized THP, which is the same as the cluster size. The only use of the flag was to determine whether a swap entry refers to a single page or a PMD-sized THP in swap_page_trans_huge_swapped(). Instead of relying on the flag, we now pass in order, which originates from the folio's order. This allows the logic to work for folios of any order. The one snag is that one of the swap_page_trans_huge_swapped() call sites does not have the folio. But it was only being called there to shortcut a call __try_to_reclaim_swap() in some cases. __try_to_reclaim_swap() gets the folio and (via some other functions) calls swap_page_trans_huge_swapped(). So I've removed the problematic call site and believe the new logic should be functionally equivalent. That said, removing the fast path means that we will take a reference and trylock a large folio much more often, which we would like to avoid. The next patch will solve this. Removing CLUSTER_FLAG_HUGE also means we can remove split_swap_cluster() which used to be called during folio splitting, since split_swap_cluster()'s only job was to remove the flag. 
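The offset arithmetic that replaces the flag is simple enough to show standalone (illustrative userspace demo, not kernel code; the numbers are made up): because a folio's swap slots are naturally aligned, rounding an entry's offset down by nr_pages = 1 << order lands on the folio's first slot, and the helper then scans nr_pages entries from there.

/* Illustrative demo of the round_down(roffset, 1 << order) step. */
#include <stdio.h>

int main(void)
{
	unsigned int order = 4;			/* e.g. a 64K folio of 4K pages */
	unsigned long nr_pages = 1UL << order;	/* 16 swap slots */
	unsigned long roffset = 0x12345;	/* any slot inside the folio */
	unsigned long offset = roffset & ~(nr_pages - 1);	/* round_down() */

	printf("first slot 0x%lx, scan %lu slots\n", offset, nr_pages);
	return 0;
}
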
Link: https://lkml.kernel.org/r/20240408183946.2991168-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20240408183946.2991168-2-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: "Huang, Ying" Acked-by: Chris Li Acked-by: David Hildenbrand Cc: Barry Song <21cnbao@gmail.com> Cc: Gao Xiang Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Yang Shi Cc: Yu Zhao Cc: Barry Song Signed-off-by: Andrew Morton --- include/linux/swap.h | 10 ---------- mm/huge_memory.c | 3 --- mm/swapfile.c | 47 ++++++++--------------------------------------- 3 files changed, 8 insertions(+), 52 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index f53d608daa01..a803de0ac24f 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -259,7 +259,6 @@ struct swap_cluster_info { }; #define CLUSTER_FLAG_FREE 1 /* This cluster is free */ #define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */ -#define CLUSTER_FLAG_HUGE 4 /* This cluster is backing a transparent huge page */ /* * We assign a cluster to each CPU, so each CPU can allocate swap entry from @@ -587,15 +586,6 @@ static inline int add_swap_extent(struct swap_info_struct *sis, } #endif /* CONFIG_SWAP */ -#ifdef CONFIG_THP_SWAP -extern int split_swap_cluster(swp_entry_t entry); -#else -static inline int split_swap_cluster(swp_entry_t entry) -{ - return 0; -} -#endif - #ifdef CONFIG_MEMCG static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 4065bf8bfcc4..14f04fbc22d9 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2844,9 +2844,6 @@ static void __split_huge_page(struct page *page, struct list_head *list, shmem_uncharge(folio->mapping->host, nr_dropped); remap_page(folio, nr); - if (folio_test_swapcache(folio)) - split_swap_cluster(folio->swap); - /* * set page to its compound_head when split to non order-0 pages, so * we can skip unlocking it below, since PG_locked is transferred to diff --git a/mm/swapfile.c b/mm/swapfile.c index 5e6d2304a2a4..1ded6d1dcab4 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -343,18 +343,6 @@ static inline void cluster_set_null(struct swap_cluster_info *info) info->data = 0; } -static inline bool cluster_is_huge(struct swap_cluster_info *info) -{ - if (IS_ENABLED(CONFIG_THP_SWAP)) - return info->flags & CLUSTER_FLAG_HUGE; - return false; -} - -static inline void cluster_clear_huge(struct swap_cluster_info *info) -{ - info->flags &= ~CLUSTER_FLAG_HUGE; -} - static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si, unsigned long offset) { @@ -1027,7 +1015,7 @@ static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot) offset = idx * SWAPFILE_CLUSTER; ci = lock_cluster(si, offset); alloc_cluster(si, idx); - cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE); + cluster_set_count(ci, SWAPFILE_CLUSTER); memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER); unlock_cluster(ci); @@ -1365,7 +1353,6 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) ci = lock_cluster_or_swap_info(si, offset); if (size == SWAPFILE_CLUSTER) { - VM_BUG_ON(!cluster_is_huge(ci)); map = si->swap_map + offset; for (i = 0; i < SWAPFILE_CLUSTER; i++) { val = map[i]; @@ -1373,7 +1360,6 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) if (val == SWAP_HAS_CACHE) free_entries++; } - cluster_clear_huge(ci); if (free_entries == SWAPFILE_CLUSTER) { unlock_cluster_or_swap_info(si, ci); 
spin_lock(&si->lock); @@ -1395,23 +1381,6 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) unlock_cluster_or_swap_info(si, ci); } -#ifdef CONFIG_THP_SWAP -int split_swap_cluster(swp_entry_t entry) -{ - struct swap_info_struct *si; - struct swap_cluster_info *ci; - unsigned long offset = swp_offset(entry); - - si = _swap_info_get(entry); - if (!si) - return -EBUSY; - ci = lock_cluster(si, offset); - cluster_clear_huge(ci); - unlock_cluster(ci); - return 0; -} -#endif - static int swp_entry_cmp(const void *ent1, const void *ent2) { const swp_entry_t *e1 = ent1, *e2 = ent2; @@ -1519,22 +1488,23 @@ out: } static bool swap_page_trans_huge_swapped(struct swap_info_struct *si, - swp_entry_t entry) + swp_entry_t entry, int order) { struct swap_cluster_info *ci; unsigned char *map = si->swap_map; + unsigned int nr_pages = 1 << order; unsigned long roffset = swp_offset(entry); - unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER); + unsigned long offset = round_down(roffset, nr_pages); int i; bool ret = false; ci = lock_cluster_or_swap_info(si, offset); - if (!ci || !cluster_is_huge(ci)) { + if (!ci || nr_pages == 1) { if (swap_count(map[roffset])) ret = true; goto unlock_out; } - for (i = 0; i < SWAPFILE_CLUSTER; i++) { + for (i = 0; i < nr_pages; i++) { if (swap_count(map[offset + i])) { ret = true; break; @@ -1556,7 +1526,7 @@ static bool folio_swapped(struct folio *folio) if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio))) return swap_swapcount(si, entry) != 0; - return swap_page_trans_huge_swapped(si, entry); + return swap_page_trans_huge_swapped(si, entry, folio_order(folio)); } /** @@ -1622,8 +1592,7 @@ int free_swap_and_cache(swp_entry_t entry) } count = __swap_entry_free(p, entry); - if (count == SWAP_HAS_CACHE && - !swap_page_trans_huge_swapped(p, entry)) + if (count == SWAP_HAS_CACHE) __try_to_reclaim_swap(p, swp_offset(entry), TTRS_UNMAPPED | TTRS_FULL); put_swap_device(p); -- cgit From a62fb92ac12ed39df4930dca599a3b427552882a Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 8 Apr 2024 19:39:41 +0100 Subject: mm: swap: free_swap_and_cache_nr() as batched free_swap_and_cache() Now that we no longer have a convenient flag in the cluster to determine if a folio is large, free_swap_and_cache() will take a reference and lock a large folio much more often, which could lead to contention and (e.g.) failure to split large folios, etc. Let's solve that problem by batch freeing swap and cache with a new function, free_swap_and_cache_nr(), to free a contiguous range of swap entries together. This allows us to first drop a reference to each swap slot before we try to release the cache folio. This means we only try to release the folio once, only taking the reference and lock once - much better than the previous 512 times for the 2M THP case. Contiguous swap entries are gathered in zap_pte_range() and madvise_free_pte_range() in a similar way to how present ptes are already gathered in zap_pte_range(). While we are at it, let's simplify by converting the return type of both functions to void. The return value was used only by zap_pte_range() to print a bad pte, and was ignored by everyone else, so the extra reporting wasn't exactly guaranteed. We will still get the warning with most of the information from get_swap_device(). With the batch version, we wouldn't know which pte was bad anyway so could print the wrong one. 
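The gathering step is easy to picture with a standalone sketch (illustrative userspace demo; the offsets are invented): swap_pte_batch() grows a batch for as long as the next PTE holds a swap entry of the same type, with matching swp pte bits and an offset exactly one higher, which is the same idea as counting consecutive values below.

/* Illustrative demo of batching consecutive swap offsets. */
#include <stdio.h>

static int batch_len(const unsigned long *offs, int max_nr)
{
	int nr = 1;

	while (nr < max_nr && offs[nr] == offs[nr - 1] + 1)
		nr++;
	return nr;
}

int main(void)
{
	unsigned long offs[] = { 100, 101, 102, 103, 200 };

	printf("batch of %d entries\n", batch_len(offs, 5));	/* prints 4 */
	return 0;
}
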
[ryan.roberts@arm.com: fix a build warning on parisc] Link: https://lkml.kernel.org/r/20240409111840.3173122-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20240408183946.2991168-3-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: David Hildenbrand Cc: Barry Song <21cnbao@gmail.com> Cc: Barry Song Cc: Chris Li Cc: Gao Xiang Cc: "Huang, Ying" Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton --- include/linux/pgtable.h | 29 +++++++++++++++ include/linux/swap.h | 12 ++++-- mm/internal.h | 64 ++++++++++++++++++++++++++++++++ mm/madvise.c | 12 ++++-- mm/memory.c | 13 ++++--- mm/swapfile.c | 97 ++++++++++++++++++++++++++++++++++++++++--------- 6 files changed, 196 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index a3fc8150b047..75096025fe52 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -708,6 +708,35 @@ static inline void pte_clear_not_present_full(struct mm_struct *mm, } #endif +#ifndef clear_not_present_full_ptes +/** + * clear_not_present_full_ptes - Clear multiple not present PTEs which are + * consecutive in the pgtable. + * @mm: Address space the ptes represent. + * @addr: Address of the first pte. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries to clear. + * @full: Whether we are clearing a full mm. + * + * May be overridden by the architecture; otherwise, implemented as a simple + * loop over pte_clear_not_present_full(). + * + * Context: The caller holds the page table lock. The PTEs are all not present. + * The PTEs are all in the same PMD. + */ +static inline void clear_not_present_full_ptes(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, unsigned int nr, int full) +{ + for (;;) { + pte_clear_not_present_full(mm, addr, ptep, full); + if (--nr == 0) + break; + ptep++; + addr += PAGE_SIZE; + } +} +#endif + #ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH extern pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address, diff --git a/include/linux/swap.h b/include/linux/swap.h index a803de0ac24f..2d8f2b950ddf 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -468,7 +468,7 @@ extern int swap_duplicate(swp_entry_t); extern int swapcache_prepare(swp_entry_t); extern void swap_free(swp_entry_t); extern void swapcache_free_entries(swp_entry_t *entries, int n); -extern int free_swap_and_cache(swp_entry_t); +extern void free_swap_and_cache_nr(swp_entry_t entry, int nr); int swap_type_of(dev_t device, sector_t offset); int find_first_swap(dev_t *device); extern unsigned int count_swap_pages(int, int); @@ -517,8 +517,9 @@ static inline void put_swap_device(struct swap_info_struct *si) #define free_pages_and_swap_cache(pages, nr) \ release_pages((pages), (nr)); -/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */ -#define free_swap_and_cache(e) is_pfn_swap_entry(e) +static inline void free_swap_and_cache_nr(swp_entry_t entry, int nr) +{ +} static inline void free_swap_cache(struct folio *folio) { @@ -586,6 +587,11 @@ static inline int add_swap_extent(struct swap_info_struct *sis, } #endif /* CONFIG_SWAP */ +static inline void free_swap_and_cache(swp_entry_t entry) +{ + free_swap_and_cache_nr(entry, 1); +} + #ifdef CONFIG_MEMCG static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) { diff --git a/mm/internal.h b/mm/internal.h index d567381b12cc..d34df04b11f6 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -11,6 +11,8 @@ 
#include #include #include +#include +#include #include struct folio_batch; @@ -189,6 +191,68 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, return min(ptep - start_ptep, max_nr); } + +/** + * pte_next_swp_offset - Increment the swap entry offset field of a swap pte. + * @pte: The initial pte state; is_swap_pte(pte) must be true and + * non_swap_entry() must be false. + * + * Increments the swap offset, while maintaining all other fields, including + * swap type, and any swp pte bits. The resulting pte is returned. + */ +static inline pte_t pte_next_swp_offset(pte_t pte) +{ + swp_entry_t entry = pte_to_swp_entry(pte); + pte_t new = __swp_entry_to_pte(__swp_entry(swp_type(entry), + (swp_offset(entry) + 1))); + + if (pte_swp_soft_dirty(pte)) + new = pte_swp_mksoft_dirty(new); + if (pte_swp_exclusive(pte)) + new = pte_swp_mkexclusive(new); + if (pte_swp_uffd_wp(pte)) + new = pte_swp_mkuffd_wp(new); + + return new; +} + +/** + * swap_pte_batch - detect a PTE batch for a set of contiguous swap entries + * @start_ptep: Page table pointer for the first entry. + * @max_nr: The maximum number of table entries to consider. + * @pte: Page table entry for the first entry. + * + * Detect a batch of contiguous swap entries: consecutive (non-present) PTEs + * containing swap entries all with consecutive offsets and targeting the same + * swap type, all with matching swp pte bits. + * + * max_nr must be at least one and must be limited by the caller so scanning + * cannot exceed a single page table. + * + * Return: the number of table entries in the batch. + */ +static inline int swap_pte_batch(pte_t *start_ptep, int max_nr, pte_t pte) +{ + pte_t expected_pte = pte_next_swp_offset(pte); + const pte_t *end_ptep = start_ptep + max_nr; + pte_t *ptep = start_ptep + 1; + + VM_WARN_ON(max_nr < 1); + VM_WARN_ON(!is_swap_pte(pte)); + VM_WARN_ON(non_swap_entry(pte_to_swp_entry(pte))); + + while (ptep < end_ptep) { + pte = ptep_get(ptep); + + if (!pte_same(pte, expected_pte)) + break; + + expected_pte = pte_next_swp_offset(expected_pte); + ptep++; + } + + return ptep - start_ptep; +} #endif /* CONFIG_MMU */ void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio, diff --git a/mm/madvise.c b/mm/madvise.c index 1f77a51baaac..5011ecb24344 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -628,6 +628,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, struct folio *folio; int nr_swap = 0; unsigned long next; + int nr, max_nr; next = pmd_addr_end(addr, end); if (pmd_trans_huge(*pmd)) @@ -640,7 +641,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, return 0; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); - for (; addr != end; pte++, addr += PAGE_SIZE) { + for (; addr != end; pte += nr, addr += PAGE_SIZE * nr) { + nr = 1; ptent = ptep_get(pte); if (pte_none(ptent)) @@ -655,9 +657,11 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, entry = pte_to_swp_entry(ptent); if (!non_swap_entry(entry)) { - nr_swap--; - free_swap_and_cache(entry); - pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); + max_nr = (end - addr) / PAGE_SIZE; + nr = swap_pte_batch(pte, max_nr, ptent); + nr_swap -= nr; + free_swap_and_cache_nr(entry, nr); + clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm); } else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) { pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); diff --git a/mm/memory.c b/mm/memory.c index 694e18837cd8..7880400370c8 100644 --- a/mm/memory.c +++ 
b/mm/memory.c @@ -1637,12 +1637,13 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, folio_remove_rmap_pte(folio, page, vma); folio_put(folio); } else if (!non_swap_entry(entry)) { - /* Genuine swap entry, hence a private anon page */ + max_nr = (end - addr) / PAGE_SIZE; + nr = swap_pte_batch(pte, max_nr, ptent); + /* Genuine swap entries, hence a private anon pages */ if (!should_zap_cows(details)) continue; - rss[MM_SWAPENTS]--; - if (unlikely(!free_swap_and_cache(entry))) - print_bad_pte(vma, addr, ptent, NULL); + rss[MM_SWAPENTS] -= nr; + free_swap_and_cache_nr(entry, nr); } else if (is_migration_entry(entry)) { folio = pfn_swap_entry_folio(entry); if (!should_zap_folio(details, folio)) @@ -1665,8 +1666,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, pr_alert("unrecognized swap entry 0x%lx\n", entry.val); WARN_ON_ONCE(1); } - pte_clear_not_present_full(mm, addr, pte, tlb->fullmm); - zap_install_uffd_wp_if_needed(vma, addr, pte, 1, details, ptent); + clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm); + zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent); } while (pte += nr, addr += PAGE_SIZE * nr, addr != end); add_mm_rss_vec(mm, rss); diff --git a/mm/swapfile.c b/mm/swapfile.c index 1ded6d1dcab4..20c45757f2b2 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -130,7 +130,11 @@ static inline unsigned char swap_count(unsigned char ent) /* Reclaim the swap entry if swap is getting full*/ #define TTRS_FULL 0x4 -/* returns 1 if swap entry is freed */ +/* + * returns number of pages in the folio that backs the swap entry. If positive, + * the folio was reclaimed. If negative, the folio was not reclaimed. If 0, no + * folio was associated with the swap entry. + */ static int __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset, unsigned long flags) { @@ -155,6 +159,7 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si, ret = folio_free_swap(folio); folio_unlock(folio); } + ret = ret ? folio_nr_pages(folio) : -folio_nr_pages(folio); folio_put(folio); return ret; } @@ -895,7 +900,7 @@ checks: swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); spin_lock(&si->lock); /* entry was freed successfully, try to use this again */ - if (swap_was_freed) + if (swap_was_freed > 0) goto checks; goto scan; /* check next one */ } @@ -1572,32 +1577,88 @@ bool folio_free_swap(struct folio *folio) return true; } -/* - * Free the swap entry like above, but also try to - * free the page cache entry if it is the last user. +/** + * free_swap_and_cache_nr() - Release reference on range of swap entries and + * reclaim their cache if no more references remain. + * @entry: First entry of range. + * @nr: Number of entries in range. + * + * For each swap entry in the contiguous range, release a reference. If any swap + * entries become free, try to reclaim their underlying folios, if present. The + * offset range is defined by [entry.offset, entry.offset + nr). 
*/ -int free_swap_and_cache(swp_entry_t entry) +void free_swap_and_cache_nr(swp_entry_t entry, int nr) { - struct swap_info_struct *p; + const unsigned long start_offset = swp_offset(entry); + const unsigned long end_offset = start_offset + nr; + unsigned int type = swp_type(entry); + struct swap_info_struct *si; + bool any_only_cache = false; + unsigned long offset; unsigned char count; if (non_swap_entry(entry)) - return 1; + return; - p = get_swap_device(entry); - if (p) { - if (WARN_ON(data_race(!p->swap_map[swp_offset(entry)]))) { - put_swap_device(p); - return 0; + si = get_swap_device(entry); + if (!si) + return; + + if (WARN_ON(end_offset > si->max)) + goto out; + + /* + * First free all entries in the range. + */ + for (offset = start_offset; offset < end_offset; offset++) { + if (data_race(si->swap_map[offset])) { + count = __swap_entry_free(si, swp_entry(type, offset)); + if (count == SWAP_HAS_CACHE) + any_only_cache = true; + } else { + WARN_ON_ONCE(1); } + } + + /* + * Short-circuit the below loop if none of the entries had their + * reference drop to zero. + */ + if (!any_only_cache) + goto out; - count = __swap_entry_free(p, entry); - if (count == SWAP_HAS_CACHE) - __try_to_reclaim_swap(p, swp_offset(entry), + /* + * Now go back over the range trying to reclaim the swap cache. This is + * more efficient for large folios because we will only try to reclaim + * the swap once per folio in the common case. If we do + * __swap_entry_free() and __try_to_reclaim_swap() in the same loop, the + * latter will get a reference and lock the folio for every individual + * page but will only succeed once the swap slot for every subpage is + * zero. + */ + for (offset = start_offset; offset < end_offset; offset += nr) { + nr = 1; + if (READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) { + /* + * Folios are always naturally aligned in swap so + * advance forward to the next boundary. Zero means no + * folio was found for the swap entry, so advance by 1 + * in this case. Negative value means folio was found + * but could not be reclaimed. Here we can still advance + * to the next boundary. + */ + nr = __try_to_reclaim_swap(si, offset, TTRS_UNMAPPED | TTRS_FULL); - put_swap_device(p); + if (nr == 0) + nr = 1; + else if (nr < 0) + nr = -nr; + nr = ALIGN(offset + 1, nr) - offset; + } } - return p != NULL; + +out: + put_swap_device(si); } #ifdef CONFIG_HIBERNATION -- cgit From 14c62da21b2b865f4fc0c49edd74ed7299927d35 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 8 Apr 2024 19:39:42 +0100 Subject: mm: swap: simplify struct percpu_cluster struct percpu_cluster stores the index of cpu's current cluster and the offset of the next entry that will be allocated for the cpu. These two pieces of information are redundant because the cluster index is just (offset / SWAPFILE_CLUSTER). The only reason for explicitly keeping the cluster index is because the structure used for it also has a flag to indicate "no cluster". However this data structure also contains a spin lock, which is never used in this context, as a side effect the code copies the spinlock_t structure, which is questionable coding practice in my view. So let's clean this up and store only the next offset, and use a sentinal value (SWAP_NEXT_INVALID) to indicate "no cluster". SWAP_NEXT_INVALID is chosen to be 0, because 0 will never be seen legitimately; The first page in the swap file is the swap header, which is always marked bad to prevent it from being allocated as an entry. 
This also prevents the cluster to which it belongs being marked free, so it will never appear on the free list. This change saves 16 bytes per cpu. And given we are shortly going to extend this mechanism to be per-cpu-AND-per-order, we will end up saving 16 * 9 = 144 bytes per cpu, which adds up if you have 256 cpus in the system. Link: https://lkml.kernel.org/r/20240408183946.2991168-4-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: "Huang, Ying" Cc: Barry Song <21cnbao@gmail.com> Cc: Barry Song Cc: Chris Li Cc: David Hildenbrand Cc: Gao Xiang Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton --- include/linux/swap.h | 9 ++++++++- mm/swapfile.c | 22 +++++++++++----------- 2 files changed, 19 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index 2d8f2b950ddf..ce35fd4abdf0 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -260,13 +260,20 @@ struct swap_cluster_info { #define CLUSTER_FLAG_FREE 1 /* This cluster is free */ #define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */ +/* + * The first page in the swap file is the swap header, which is always marked + * bad to prevent it from being allocated as an entry. This also prevents the + * cluster to which it belongs being marked free. Therefore 0 is safe to use as + * a sentinel to indicate next is not valid in percpu_cluster. + */ +#define SWAP_NEXT_INVALID 0 + /* * We assign a cluster to each CPU, so each CPU can allocate swap entry from * its own cluster and swapout sequentially. The purpose is to optimize swapout * throughput. */ struct percpu_cluster { - struct swap_cluster_info index; /* Current cluster index */ unsigned int next; /* Likely next allocation offset */ }; diff --git a/mm/swapfile.c b/mm/swapfile.c index 20c45757f2b2..e3f855475278 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -609,7 +609,7 @@ scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, return false; percpu_cluster = this_cpu_ptr(si->percpu_cluster); - cluster_set_null(&percpu_cluster->index); + percpu_cluster->next = SWAP_NEXT_INVALID; return true; } @@ -622,14 +622,14 @@ static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, { struct percpu_cluster *cluster; struct swap_cluster_info *ci; - unsigned long tmp, max; + unsigned int tmp, max; new_cluster: cluster = this_cpu_ptr(si->percpu_cluster); - if (cluster_is_null(&cluster->index)) { + tmp = cluster->next; + if (tmp == SWAP_NEXT_INVALID) { if (!cluster_list_empty(&si->free_clusters)) { - cluster->index = si->free_clusters.head; - cluster->next = cluster_next(&cluster->index) * + tmp = cluster_next(&si->free_clusters.head) * SWAPFILE_CLUSTER; } else if (!cluster_list_empty(&si->discard_clusters)) { /* @@ -649,9 +649,7 @@ new_cluster: * Other CPUs can use our cluster if they can't find a free cluster, * check if there is still free entry in the cluster */ - tmp = cluster->next; - max = min_t(unsigned long, si->max, - (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER); + max = min_t(unsigned long, si->max, ALIGN(tmp + 1, SWAPFILE_CLUSTER)); if (tmp < max) { ci = lock_cluster(si, tmp); while (tmp < max) { @@ -662,12 +660,13 @@ new_cluster: unlock_cluster(ci); } if (tmp >= max) { - cluster_set_null(&cluster->index); + cluster->next = SWAP_NEXT_INVALID; goto new_cluster; } - cluster->next = tmp + 1; *offset = tmp; *scan_base = tmp; + tmp += 1; + cluster->next = tmp < max ? 
tmp : SWAP_NEXT_INVALID; return true; } @@ -3163,8 +3162,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) } for_each_possible_cpu(cpu) { struct percpu_cluster *cluster; + cluster = per_cpu_ptr(p->percpu_cluster, cpu); - cluster_set_null(&cluster->index); + cluster->next = SWAP_NEXT_INVALID; } } else { atomic_inc(&nr_rotate_swap); -- cgit From 9faaa0f8168bfcd81469b0724b25ba3093097a08 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 8 Apr 2024 19:39:43 +0100 Subject: mm: swap: update get_swap_pages() to take folio order We are about to allow swap storage of any mTHP size. To prepare for that, let's change get_swap_pages() to take a folio order parameter instead of nr_pages. This makes the interface self-documenting; a power-of-2 number of pages must be provided. We will also need the order internally so this simplifies accessing it. Link: https://lkml.kernel.org/r/20240408183946.2991168-5-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: "Huang, Ying" Reviewed-by: David Hildenbrand Cc: Barry Song <21cnbao@gmail.com> Cc: Barry Song Cc: Chris Li Cc: Gao Xiang Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton --- include/linux/swap.h | 2 +- mm/swap_slots.c | 6 +++--- mm/swapfile.c | 13 +++++++------ 3 files changed, 11 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index ce35fd4abdf0..b69b06733247 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -468,7 +468,7 @@ swp_entry_t folio_alloc_swap(struct folio *folio); bool folio_free_swap(struct folio *folio); void put_swap_folio(struct folio *folio, swp_entry_t entry); extern swp_entry_t get_swap_page_of_type(int); -extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size); +extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order); extern int add_swap_count_continuation(swp_entry_t, gfp_t); extern void swap_shmem_alloc(swp_entry_t); extern int swap_duplicate(swp_entry_t); diff --git a/mm/swap_slots.c b/mm/swap_slots.c index 53abeaf1371d..13ab3b771409 100644 --- a/mm/swap_slots.c +++ b/mm/swap_slots.c @@ -264,7 +264,7 @@ static int refill_swap_slots_cache(struct swap_slots_cache *cache) cache->cur = 0; if (swap_slot_cache_active) cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, - cache->slots, 1); + cache->slots, 0); return cache->nr; } @@ -311,7 +311,7 @@ swp_entry_t folio_alloc_swap(struct folio *folio) if (folio_test_large(folio)) { if (IS_ENABLED(CONFIG_THP_SWAP)) - get_swap_pages(1, &entry, folio_nr_pages(folio)); + get_swap_pages(1, &entry, folio_order(folio)); goto out; } @@ -343,7 +343,7 @@ repeat: goto out; } - get_swap_pages(1, &entry, 1); + get_swap_pages(1, &entry, 0); out: if (mem_cgroup_try_charge_swap(folio, entry)) { put_swap_folio(folio, entry); diff --git a/mm/swapfile.c b/mm/swapfile.c index e3f855475278..d2e3d3cd439f 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -278,15 +278,15 @@ static void discard_swap_cluster(struct swap_info_struct *si, #ifdef CONFIG_THP_SWAP #define SWAPFILE_CLUSTER HPAGE_PMD_NR -#define swap_entry_size(size) (size) +#define swap_entry_order(order) (order) #else #define SWAPFILE_CLUSTER 256 /* - * Define swap_entry_size() as constant to let compiler to optimize + * Define swap_entry_order() as constant to let compiler to optimize * out some code if !CONFIG_THP_SWAP */ -#define swap_entry_size(size) 1 +#define swap_entry_order(order) 0 #endif #define 
LATENCY_LIMIT 256 @@ -1042,9 +1042,10 @@ static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx) swap_range_free(si, offset, SWAPFILE_CLUSTER); } -int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size) +int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) { - unsigned long size = swap_entry_size(entry_size); + int order = swap_entry_order(entry_order); + unsigned long size = 1 << order; struct swap_info_struct *si, *next; long avail_pgs; int n_ret = 0; @@ -1349,7 +1350,7 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) unsigned char *map; unsigned int i, free_entries = 0; unsigned char val; - int size = swap_entry_size(folio_nr_pages(folio)); + int size = 1 << swap_entry_order(folio_order(folio)); si = _swap_info_get(entry); if (!si) -- cgit From 845982eb264bc64b0c3242ace217fb574f56a299 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 8 Apr 2024 19:39:44 +0100 Subject: mm: swap: allow storage of all mTHP orders Multi-size THP enables performance improvements by allocating large, pte-mapped folios for anonymous memory. However I've observed that on an arm64 system running a parallel workload (e.g. kernel compilation) across many cores, under high memory pressure, the speed regresses. This is due to bottlenecking on the increased number of TLBIs added due to all the extra folio splitting when the large folios are swapped out. Therefore, solve this regression by adding support for swapping out mTHP without needing to split the folio, just like is already done for PMD-sized THP. This change only applies when CONFIG_THP_SWAP is enabled, and when the swap backing store is a non-rotating block device. These are the same constraints as for the existing PMD-sized THP swap-out support. Note that no attempt is made to swap-in (m)THP here - this is still done page-by-page, like for PMD-sized THP. But swapping-out mTHP is a prerequisite for swapping-in mTHP. The main change here is to improve the swap entry allocator so that it can allocate any power-of-2 number of contiguous entries between [1, (1 << PMD_ORDER)]. This is done by allocating a cluster for each distinct order and allocating sequentially from it until the cluster is full. This ensures that we don't need to search the map and we get no fragmentation due to alignment padding for different orders in the cluster. If there is no current cluster for a given order, we attempt to allocate a free cluster from the list. If there are no free clusters, we fail the allocation and the caller can fall back to splitting the folio and allocates individual entries (as per existing PMD-sized THP fallback). The per-order current clusters are maintained per-cpu using the existing infrastructure. This is done to avoid interleving pages from different tasks, which would prevent IO being batched. This is already done for the order-0 allocations so we follow the same pattern. As is done for order-0 per-cpu clusters, the scanner now can steal order-0 entries from any per-cpu-per-order reserved cluster. This ensures that when the swap file is getting full, space doesn't get tied up in the per-cpu reserves. This change only modifies swap to be able to accept any order mTHP. It doesn't change the callers to elide doing the actual split. That will be done in separate changes. 
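The per-order bookkeeping can be pictured with a short standalone sketch (illustrative userspace demo; cluster handling, locking and the real SWAPFILE_CLUSTER size are omitted): each order keeps its own cursor and always advances in steps of 1 << order, so allocations of one order never leave alignment padding behind for another.

/* Illustrative demo of a per-order "next slot" cursor. */
#include <stdio.h>

#define NR_ORDERS 5	/* stand-in for SWAP_NR_ORDERS */

int main(void)
{
	unsigned int next[NR_ORDERS] = { 0 };
	unsigned int order, i;

	for (order = 0; order < NR_ORDERS; order++) {
		unsigned int nr = 1u << order;

		for (i = 0; i < 3; i++) {
			printf("order %u -> slots [%u, %u)\n",
			       order, next[order], next[order] + nr);
			next[order] += nr;	/* stays naturally aligned */
		}
	}
	return 0;
}
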
Link: https://lkml.kernel.org/r/20240408183946.2991168-6-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: "Huang, Ying" Cc: Barry Song <21cnbao@gmail.com> Cc: Barry Song Cc: Chris Li Cc: David Hildenbrand Cc: Gao Xiang Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton --- include/linux/swap.h | 8 ++- mm/swapfile.c | 162 +++++++++++++++++++++++++++++---------------------- 2 files changed, 98 insertions(+), 72 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index b69b06733247..6b672a67ae5d 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -268,13 +268,19 @@ struct swap_cluster_info { */ #define SWAP_NEXT_INVALID 0 +#ifdef CONFIG_THP_SWAP +#define SWAP_NR_ORDERS (PMD_ORDER + 1) +#else +#define SWAP_NR_ORDERS 1 +#endif + /* * We assign a cluster to each CPU, so each CPU can allocate swap entry from * its own cluster and swapout sequentially. The purpose is to optimize swapout * throughput. */ struct percpu_cluster { - unsigned int next; /* Likely next allocation offset */ + unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */ }; struct swap_cluster_list { diff --git a/mm/swapfile.c b/mm/swapfile.c index d2e3d3cd439f..148ef08f19dd 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -551,10 +551,12 @@ static void free_cluster(struct swap_info_struct *si, unsigned long idx) /* * The cluster corresponding to page_nr will be used. The cluster will be - * removed from free cluster list and its usage counter will be increased. + * removed from free cluster list and its usage counter will be increased by + * count. */ -static void inc_cluster_info_page(struct swap_info_struct *p, - struct swap_cluster_info *cluster_info, unsigned long page_nr) +static void add_cluster_info_page(struct swap_info_struct *p, + struct swap_cluster_info *cluster_info, unsigned long page_nr, + unsigned long count) { unsigned long idx = page_nr / SWAPFILE_CLUSTER; @@ -563,9 +565,19 @@ static void inc_cluster_info_page(struct swap_info_struct *p, if (cluster_is_free(&cluster_info[idx])) alloc_cluster(p, idx); - VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER); + VM_BUG_ON(cluster_count(&cluster_info[idx]) + count > SWAPFILE_CLUSTER); cluster_set_count(&cluster_info[idx], - cluster_count(&cluster_info[idx]) + 1); + cluster_count(&cluster_info[idx]) + count); +} + +/* + * The cluster corresponding to page_nr will be used. The cluster will be + * removed from free cluster list and its usage counter will be increased by 1. 
+ */ +static void inc_cluster_info_page(struct swap_info_struct *p, + struct swap_cluster_info *cluster_info, unsigned long page_nr) +{ + add_cluster_info_page(p, cluster_info, page_nr, 1); } /* @@ -595,7 +607,7 @@ static void dec_cluster_info_page(struct swap_info_struct *p, */ static bool scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, - unsigned long offset) + unsigned long offset, int order) { struct percpu_cluster *percpu_cluster; bool conflict; @@ -609,24 +621,39 @@ scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si, return false; percpu_cluster = this_cpu_ptr(si->percpu_cluster); - percpu_cluster->next = SWAP_NEXT_INVALID; + percpu_cluster->next[order] = SWAP_NEXT_INVALID; + return true; +} + +static inline bool swap_range_empty(char *swap_map, unsigned int start, + unsigned int nr_pages) +{ + unsigned int i; + + for (i = 0; i < nr_pages; i++) { + if (swap_map[start + i]) + return false; + } + return true; } /* - * Try to get a swap entry from current cpu's swap entry pool (a cluster). This - * might involve allocating a new cluster for current CPU too. + * Try to get swap entries with specified order from current cpu's swap entry + * pool (a cluster). This might involve allocating a new cluster for current CPU + * too. */ static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si, - unsigned long *offset, unsigned long *scan_base) + unsigned long *offset, unsigned long *scan_base, int order) { + unsigned int nr_pages = 1 << order; struct percpu_cluster *cluster; struct swap_cluster_info *ci; unsigned int tmp, max; new_cluster: cluster = this_cpu_ptr(si->percpu_cluster); - tmp = cluster->next; + tmp = cluster->next[order]; if (tmp == SWAP_NEXT_INVALID) { if (!cluster_list_empty(&si->free_clusters)) { tmp = cluster_next(&si->free_clusters.head) * @@ -647,26 +674,27 @@ new_cluster: /* * Other CPUs can use our cluster if they can't find a free cluster, - * check if there is still free entry in the cluster + * check if there is still free entry in the cluster, maintaining + * natural alignment. */ max = min_t(unsigned long, si->max, ALIGN(tmp + 1, SWAPFILE_CLUSTER)); if (tmp < max) { ci = lock_cluster(si, tmp); while (tmp < max) { - if (!si->swap_map[tmp]) + if (swap_range_empty(si->swap_map, tmp, nr_pages)) break; - tmp++; + tmp += nr_pages; } unlock_cluster(ci); } if (tmp >= max) { - cluster->next = SWAP_NEXT_INVALID; + cluster->next[order] = SWAP_NEXT_INVALID; goto new_cluster; } *offset = tmp; *scan_base = tmp; - tmp += 1; - cluster->next = tmp < max ? tmp : SWAP_NEXT_INVALID; + tmp += nr_pages; + cluster->next[order] = tmp < max ? tmp : SWAP_NEXT_INVALID; return true; } @@ -796,13 +824,14 @@ static bool swap_offset_available_and_locked(struct swap_info_struct *si, static int scan_swap_map_slots(struct swap_info_struct *si, unsigned char usage, int nr, - swp_entry_t slots[]) + swp_entry_t slots[], int order) { struct swap_cluster_info *ci; unsigned long offset; unsigned long scan_base; unsigned long last_in_cluster = 0; int latency_ration = LATENCY_LIMIT; + unsigned int nr_pages = 1 << order; int n_ret = 0; bool scanned_many = false; @@ -817,6 +846,25 @@ static int scan_swap_map_slots(struct swap_info_struct *si, * And we let swap pages go all over an SSD partition. Hugh */ + if (order > 0) { + /* + * Should not even be attempting large allocations when huge + * page swap is disabled. Warn and fail the allocation. 
+ */ + if (!IS_ENABLED(CONFIG_THP_SWAP) || + nr_pages > SWAPFILE_CLUSTER) { + VM_WARN_ON_ONCE(1); + return 0; + } + + /* + * Swapfile is not block device or not using clusters so unable + * to allocate large entries. + */ + if (!(si->flags & SWP_BLKDEV) || !si->cluster_info) + return 0; + } + si->flags += SWP_SCANNING; /* * Use percpu scan base for SSD to reduce lock contention on @@ -831,8 +879,11 @@ static int scan_swap_map_slots(struct swap_info_struct *si, /* SSD algorithm */ if (si->cluster_info) { - if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) + if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base, order)) { + if (order > 0) + goto no_page; goto scan; + } } else if (unlikely(!si->cluster_nr--)) { if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { si->cluster_nr = SWAPFILE_CLUSTER - 1; @@ -874,13 +925,16 @@ static int scan_swap_map_slots(struct swap_info_struct *si, checks: if (si->cluster_info) { - while (scan_swap_map_ssd_cluster_conflict(si, offset)) { + while (scan_swap_map_ssd_cluster_conflict(si, offset, order)) { /* take a break if we already got some slots */ if (n_ret) goto done; if (!scan_swap_map_try_ssd_cluster(si, &offset, - &scan_base)) + &scan_base, order)) { + if (order > 0) + goto no_page; goto scan; + } } } if (!(si->flags & SWP_WRITEOK)) @@ -911,11 +965,11 @@ checks: else goto done; } - WRITE_ONCE(si->swap_map[offset], usage); - inc_cluster_info_page(si, si->cluster_info, offset); + memset(si->swap_map + offset, usage, nr_pages); + add_cluster_info_page(si, si->cluster_info, offset, nr_pages); unlock_cluster(ci); - swap_range_alloc(si, offset, 1); + swap_range_alloc(si, offset, nr_pages); slots[n_ret++] = swp_entry(si->type, offset); /* got enough slots or reach max slots? */ @@ -936,8 +990,10 @@ checks: /* try to get more slots in cluster */ if (si->cluster_info) { - if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base)) + if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base, order)) goto checks; + if (order > 0) + goto done; } else if (si->cluster_nr && !si->swap_map[++offset]) { /* non-ssd case, still more slots in cluster? */ --si->cluster_nr; @@ -964,11 +1020,13 @@ checks: } done: - set_cluster_next(si, offset + 1); + if (order == 0) + set_cluster_next(si, offset + 1); si->flags -= SWP_SCANNING; return n_ret; scan: + VM_WARN_ON(order > 0); spin_unlock(&si->lock); while (++offset <= READ_ONCE(si->highest_bit)) { if (unlikely(--latency_ration < 0)) { @@ -997,38 +1055,6 @@ no_page: return n_ret; } -static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot) -{ - unsigned long idx; - struct swap_cluster_info *ci; - unsigned long offset; - - /* - * Should not even be attempting cluster allocations when huge - * page swap is disabled. Warn and fail the allocation. 
- */ - if (!IS_ENABLED(CONFIG_THP_SWAP)) { - VM_WARN_ON_ONCE(1); - return 0; - } - - if (cluster_list_empty(&si->free_clusters)) - return 0; - - idx = cluster_list_first(&si->free_clusters); - offset = idx * SWAPFILE_CLUSTER; - ci = lock_cluster(si, offset); - alloc_cluster(si, idx); - cluster_set_count(ci, SWAPFILE_CLUSTER); - - memset(si->swap_map + offset, SWAP_HAS_CACHE, SWAPFILE_CLUSTER); - unlock_cluster(ci); - swap_range_alloc(si, offset, SWAPFILE_CLUSTER); - *slot = swp_entry(si->type, offset); - - return 1; -} - static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx) { unsigned long offset = idx * SWAPFILE_CLUSTER; @@ -1051,9 +1077,6 @@ int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) int n_ret = 0; int node; - /* Only single cluster request supported */ - WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER); - spin_lock(&swap_avail_lock); avail_pgs = atomic_long_read(&nr_swap_pages) / size; @@ -1089,14 +1112,10 @@ start_over: spin_unlock(&si->lock); goto nextsi; } - if (size == SWAPFILE_CLUSTER) { - if (si->flags & SWP_BLKDEV) - n_ret = swap_alloc_cluster(si, swp_entries); - } else - n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, - n_goal, swp_entries); + n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, + n_goal, swp_entries, order); spin_unlock(&si->lock); - if (n_ret || size == SWAPFILE_CLUSTER) + if (n_ret || size > 1) goto check_out; cond_resched(); @@ -1673,7 +1692,7 @@ swp_entry_t get_swap_page_of_type(int type) /* This is called for allocating swap entry, not cache */ spin_lock(&si->lock); - if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry)) + if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0)) atomic_long_dec(&nr_swap_pages); spin_unlock(&si->lock); fail: @@ -3127,7 +3146,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) p->flags |= SWP_SYNCHRONOUS_IO; if (p->bdev && bdev_nonrot(p->bdev)) { - int cpu; + int cpu, i; unsigned long ci, nr_cluster; p->flags |= SWP_SOLIDSTATE; @@ -3165,7 +3184,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) struct percpu_cluster *cluster; cluster = per_cpu_ptr(p->percpu_cluster, cpu); - cluster->next = SWAP_NEXT_INVALID; + for (i = 0; i < SWAP_NR_ORDERS; i++) + cluster->next[i] = SWAP_NEXT_INVALID; } } else { atomic_inc(&nr_rotate_swap); -- cgit From 3931b871c4936c00c4e27c469056d8da47a3493f Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 8 Apr 2024 19:39:46 +0100 Subject: mm: madvise: avoid split during MADV_PAGEOUT and MADV_COLD Rework madvise_cold_or_pageout_pte_range() to avoid splitting any large folio that is fully and contiguously mapped in the pageout/cold vm range. This change means that large folios will be maintained all the way to swap storage. This both improves performance during swap-out, by eliding the cost of splitting the folio, and sets us up nicely for maintaining the large folio when it is swapped back in (to be covered in a separate series). Folios that are not fully mapped in the target range are still split, but note that behavior is changed so that if the split fails for any reason (folio locked, shared, etc) we now leave it as is and move to the next pte in the range and continue work on the proceeding folios. Previously any failure of this sort would cause the entire operation to give up and no folios mapped at higher addresses were paged out or made cold. 
Given large folios are becoming more common, this old behavior would have likely lead to wasted opportunities. While we are at it, change the code that clears young from the ptes to use ptep_test_and_clear_young(), via the new mkold_ptes() batch helper function. This is more efficent than get_and_clear/modify/set, especially for contpte mappings on arm64, where the old approach would require unfolding/refolding and the new approach can be done in place. Link: https://lkml.kernel.org/r/20240408183946.2991168-8-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: Barry Song Acked-by: David Hildenbrand Cc: Barry Song <21cnbao@gmail.com> Cc: Chris Li Cc: Gao Xiang Cc: "Huang, Ying" Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox (Oracle) Cc: Michal Hocko Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton --- include/linux/pgtable.h | 30 +++++++++++++++++ mm/internal.h | 12 +++++-- mm/madvise.c | 87 ++++++++++++++++++++++++++++--------------------- mm/memory.c | 4 +-- 4 files changed, 92 insertions(+), 41 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 75096025fe52..e2f45e22a6d1 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -361,6 +361,36 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, } #endif +#ifndef mkold_ptes +/** + * mkold_ptes - Mark PTEs that map consecutive pages of the same folio as old. + * @vma: VMA the pages are mapped into. + * @addr: Address the first page is mapped at. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries to mark old. + * + * May be overridden by the architecture; otherwise, implemented as a simple + * loop over ptep_test_and_clear_young(). + * + * Note that PTE bits in the PTE range besides the PFN can differ. For example, + * some PTEs might be write-protected. + * + * Context: The caller holds the page table lock. The PTEs map consecutive + * pages that belong to the same folio. The PTEs are all in the same PMD. + */ +static inline void mkold_ptes(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep, unsigned int nr) +{ + for (;;) { + ptep_test_and_clear_young(vma, addr, ptep); + if (--nr == 0) + break; + ptep++; + addr += PAGE_SIZE; + } +} +#endif + #ifndef __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG) static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, diff --git a/mm/internal.h b/mm/internal.h index d34df04b11f6..640298a98a3c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -130,6 +130,8 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags) * @flags: Flags to modify the PTE batch semantics. * @any_writable: Optional pointer to indicate whether any entry except the * first one is writable. + * @any_young: Optional pointer to indicate whether any entry except the + * first one is young. * * Detect a PTE batch: consecutive (present) PTEs that map consecutive * pages of the same large folio. 
@@ -145,16 +147,18 @@ static inline pte_t __pte_batch_clear_ignored(pte_t pte, fpb_t flags) */ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, pte_t *start_ptep, pte_t pte, int max_nr, fpb_t flags, - bool *any_writable) + bool *any_writable, bool *any_young) { unsigned long folio_end_pfn = folio_pfn(folio) + folio_nr_pages(folio); const pte_t *end_ptep = start_ptep + max_nr; pte_t expected_pte, *ptep; - bool writable; + bool writable, young; int nr; if (any_writable) *any_writable = false; + if (any_young) + *any_young = false; VM_WARN_ON_FOLIO(!pte_present(pte), folio); VM_WARN_ON_FOLIO(!folio_test_large(folio) || max_nr < 1, folio); @@ -168,6 +172,8 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, pte = ptep_get(ptep); if (any_writable) writable = !!pte_write(pte); + if (any_young) + young = !!pte_young(pte); pte = __pte_batch_clear_ignored(pte, flags); if (!pte_same(pte, expected_pte)) @@ -183,6 +189,8 @@ static inline int folio_pte_batch(struct folio *folio, unsigned long addr, if (any_writable) *any_writable |= writable; + if (any_young) + *any_young |= young; nr = pte_batch_hint(ptep, pte); expected_pte = pte_advance_pfn(expected_pte, nr); diff --git a/mm/madvise.c b/mm/madvise.c index 5011ecb24344..f59169888b8e 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -336,6 +336,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, LIST_HEAD(folio_list); bool pageout_anon_only_filter; unsigned int batch_count = 0; + int nr; if (fatal_signal_pending(current)) return -EINTR; @@ -423,7 +424,8 @@ restart: return 0; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); - for (; addr < end; pte++, addr += PAGE_SIZE) { + for (; addr < end; pte += nr, addr += nr * PAGE_SIZE) { + nr = 1; ptent = ptep_get(pte); if (++batch_count == SWAP_CLUSTER_MAX) { @@ -447,55 +449,66 @@ restart: continue; /* - * Creating a THP page is expensive so split it only if we - * are sure it's worth. Split it if we are only owner. + * If we encounter a large folio, only split it if it is not + * fully mapped within the range we are operating on. Otherwise + * leave it as is so that it can be swapped out whole. If we + * fail to split a folio, leave it in place and advance to the + * next pte in the range. 
*/ if (folio_test_large(folio)) { - int err; - - if (folio_likely_mapped_shared(folio)) - break; - if (pageout_anon_only_filter && !folio_test_anon(folio)) - break; - if (!folio_trylock(folio)) - break; - folio_get(folio); - arch_leave_lazy_mmu_mode(); - pte_unmap_unlock(start_pte, ptl); - start_pte = NULL; - err = split_folio(folio); - folio_unlock(folio); - folio_put(folio); - if (err) - break; - start_pte = pte = - pte_offset_map_lock(mm, pmd, addr, &ptl); - if (!start_pte) - break; - arch_enter_lazy_mmu_mode(); - pte--; - addr -= PAGE_SIZE; - continue; + const fpb_t fpb_flags = FPB_IGNORE_DIRTY | + FPB_IGNORE_SOFT_DIRTY; + int max_nr = (end - addr) / PAGE_SIZE; + bool any_young; + + nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, + fpb_flags, NULL, &any_young); + if (any_young) + ptent = pte_mkyoung(ptent); + + if (nr < folio_nr_pages(folio)) { + int err; + + if (folio_likely_mapped_shared(folio)) + continue; + if (pageout_anon_only_filter && !folio_test_anon(folio)) + continue; + if (!folio_trylock(folio)) + continue; + folio_get(folio); + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(start_pte, ptl); + start_pte = NULL; + err = split_folio(folio); + folio_unlock(folio); + folio_put(folio); + start_pte = pte = + pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!start_pte) + break; + arch_enter_lazy_mmu_mode(); + if (!err) + nr = 0; + continue; + } } /* * Do not interfere with other mappings of this folio and - * non-LRU folio. + * non-LRU folio. If we have a large folio at this point, we + * know it is fully mapped so if its mapcount is the same as its + * number of pages, it must be exclusive. */ - if (!folio_test_lru(folio) || folio_mapcount(folio) != 1) + if (!folio_test_lru(folio) || + folio_mapcount(folio) != folio_nr_pages(folio)) continue; if (pageout_anon_only_filter && !folio_test_anon(folio)) continue; - VM_BUG_ON_FOLIO(folio_test_large(folio), folio); - if (!pageout && pte_young(ptent)) { - ptent = ptep_get_and_clear_full(mm, addr, pte, - tlb->fullmm); - ptent = pte_mkold(ptent); - set_pte_at(mm, addr, pte, ptent); - tlb_remove_tlb_entry(tlb, pte, addr); + mkold_ptes(vma, addr, pte, nr); + tlb_remove_tlb_entries(tlb, pte, nr, addr); } /* diff --git a/mm/memory.c b/mm/memory.c index 7880400370c8..59c05dc8b18a 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -989,7 +989,7 @@ copy_present_ptes(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma flags |= FPB_IGNORE_SOFT_DIRTY; nr = folio_pte_batch(folio, addr, src_pte, pte, max_nr, flags, - &any_writable); + &any_writable, NULL); folio_ref_add(folio, nr); if (folio_test_anon(folio)) { if (unlikely(folio_try_dup_anon_rmap_ptes(folio, page, @@ -1559,7 +1559,7 @@ static inline int zap_present_ptes(struct mmu_gather *tlb, */ if (unlikely(folio_test_large(folio) && max_nr != 1)) { nr = folio_pte_batch(folio, addr, pte, ptent, max_nr, fpb_flags, - NULL); + NULL, NULL); zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr, addr, details, rss, force_flush, -- cgit From 25176ad09ca395fcc83b1fc78adf25c8eb1bd964 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 2 Apr 2024 14:55:15 +0200 Subject: mm/treewide: rename CONFIG_HAVE_FAST_GUP to CONFIG_HAVE_GUP_FAST Nowadays, we call it "GUP-fast", the external interface includes functions like "get_user_pages_fast()", and we renamed all internal functions to reflect that as well. Let's make the config option reflect that. 
Link: https://lkml.kernel.org/r/20240402125516.223131-3-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Mike Rapoport (IBM) Reviewed-by: Jason Gunthorpe Reviewed-by: John Hubbard Cc: Peter Xu Signed-off-by: Andrew Morton --- arch/arm/Kconfig | 2 +- arch/arm64/Kconfig | 2 +- arch/loongarch/Kconfig | 2 +- arch/mips/Kconfig | 2 +- arch/powerpc/Kconfig | 2 +- arch/riscv/Kconfig | 2 +- arch/s390/Kconfig | 2 +- arch/sh/Kconfig | 2 +- arch/x86/Kconfig | 2 +- include/linux/rmap.h | 8 ++++---- kernel/events/core.c | 4 ++-- mm/Kconfig | 2 +- mm/gup.c | 10 +++++----- mm/internal.h | 2 +- 14 files changed, 22 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index b14aed3a17ab..817918f6635a 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -99,7 +99,7 @@ config ARM select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE select HAVE_EFFICIENT_UNALIGNED_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && MMU select HAVE_EXIT_THREAD - select HAVE_FAST_GUP if ARM_LPAE + select HAVE_GUP_FAST if ARM_LPAE select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL select HAVE_FUNCTION_ERROR_INJECTION select HAVE_FUNCTION_GRAPH_TRACER diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 7b11c98b3e84..de076a191e9f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -205,7 +205,7 @@ config ARM64 select HAVE_SAMPLE_FTRACE_DIRECT select HAVE_SAMPLE_FTRACE_DIRECT_MULTI select HAVE_EFFICIENT_UNALIGNED_ACCESS - select HAVE_FAST_GUP + select HAVE_GUP_FAST select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_ERROR_INJECTION diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index a5f300ec6f28..cd642eefd9e5 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -119,7 +119,7 @@ config LOONGARCH select HAVE_EBPF_JIT select HAVE_EFFICIENT_UNALIGNED_ACCESS if !ARCH_STRICT_ALIGN select HAVE_EXIT_THREAD - select HAVE_FAST_GUP + select HAVE_GUP_FAST select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_ARG_ACCESS_API select HAVE_FUNCTION_ERROR_INJECTION diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 516dc7022bd7..f1aa1bf11166 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -68,7 +68,7 @@ config MIPS select HAVE_DYNAMIC_FTRACE select HAVE_EBPF_JIT if !CPU_MICROMIPS select HAVE_EXIT_THREAD - select HAVE_FAST_GUP + select HAVE_GUP_FAST select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 1c4be3373686..e42cc8cd415f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -236,7 +236,7 @@ config PPC select HAVE_DYNAMIC_FTRACE_WITH_REGS if ARCH_USING_PATCHABLE_FUNCTION_ENTRY || MPROFILE_KERNEL || PPC32 select HAVE_EBPF_JIT select HAVE_EFFICIENT_UNALIGNED_ACCESS - select HAVE_FAST_GUP + select HAVE_GUP_FAST select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_ARG_ACCESS_API select HAVE_FUNCTION_DESCRIPTORS if PPC64_ELF_ABI_V1 diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index be09c8836d56..3ee60ddef93e 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -132,7 +132,7 @@ config RISCV select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !PREEMPTION select HAVE_EBPF_JIT if MMU - select HAVE_FAST_GUP if MMU + select HAVE_GUP_FAST if MMU select HAVE_FUNCTION_ARG_ACCESS_API select HAVE_FUNCTION_ERROR_INJECTION select HAVE_GCC_PLUGINS diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 
8f01ada6845e..d9aed4c93ee6 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -174,7 +174,7 @@ config S390 select HAVE_DYNAMIC_FTRACE_WITH_REGS select HAVE_EBPF_JIT if HAVE_MARCH_Z196_FEATURES select HAVE_EFFICIENT_UNALIGNED_ACCESS - select HAVE_FAST_GUP + select HAVE_GUP_FAST select HAVE_FENTRY select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_ARG_ACCESS_API diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 2ad3e29f0ebe..7292542f75e8 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -38,7 +38,7 @@ config SUPERH select HAVE_DEBUG_BUGVERBOSE select HAVE_DEBUG_KMEMLEAK select HAVE_DYNAMIC_FTRACE - select HAVE_FAST_GUP if MMU + select HAVE_GUP_FAST if MMU select HAVE_FUNCTION_GRAPH_TRACER select HAVE_FUNCTION_TRACER select HAVE_FTRACE_MCOUNT_RECORD diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 4474bf32d0a4..d95de5eab213 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -221,7 +221,7 @@ config X86 select HAVE_EFFICIENT_UNALIGNED_ACCESS select HAVE_EISA select HAVE_EXIT_THREAD - select HAVE_FAST_GUP + select HAVE_GUP_FAST select HAVE_FENTRY if X86_64 || DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_GRAPH_RETVAL if HAVE_FUNCTION_GRAPH_TRACER diff --git a/include/linux/rmap.h b/include/linux/rmap.h index b7944a833668..9bf9324214fc 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -284,7 +284,7 @@ static inline int hugetlb_try_share_anon_rmap(struct folio *folio) VM_WARN_ON_FOLIO(!PageAnonExclusive(&folio->page), folio); /* Paired with the memory barrier in try_grab_folio(). */ - if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) + if (IS_ENABLED(CONFIG_HAVE_GUP_FAST)) smp_mb(); if (unlikely(folio_maybe_dma_pinned(folio))) @@ -295,7 +295,7 @@ static inline int hugetlb_try_share_anon_rmap(struct folio *folio) * This is conceptually a smp_wmb() paired with the smp_rmb() in * gup_must_unshare(). */ - if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) + if (IS_ENABLED(CONFIG_HAVE_GUP_FAST)) smp_mb__after_atomic(); return 0; } @@ -541,7 +541,7 @@ static __always_inline int __folio_try_share_anon_rmap(struct folio *folio, */ /* Paired with the memory barrier in try_grab_folio(). */ - if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) + if (IS_ENABLED(CONFIG_HAVE_GUP_FAST)) smp_mb(); if (unlikely(folio_maybe_dma_pinned(folio))) @@ -552,7 +552,7 @@ static __always_inline int __folio_try_share_anon_rmap(struct folio *folio, * This is conceptually a smp_wmb() paired with the smp_rmb() in * gup_must_unshare(). 
*/ - if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) + if (IS_ENABLED(CONFIG_HAVE_GUP_FAST)) smp_mb__after_atomic(); return 0; } diff --git a/kernel/events/core.c b/kernel/events/core.c index 724e6d7e128f..c5a0dc1f135f 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7539,7 +7539,7 @@ static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr) { u64 size = 0; -#ifdef CONFIG_HAVE_FAST_GUP +#ifdef CONFIG_HAVE_GUP_FAST pgd_t *pgdp, pgd; p4d_t *p4dp, p4d; pud_t *pudp, pud; @@ -7587,7 +7587,7 @@ again: if (pte_present(pte)) size = pte_leaf_size(pte); pte_unmap(ptep); -#endif /* CONFIG_HAVE_FAST_GUP */ +#endif /* CONFIG_HAVE_GUP_FAST */ return size; } diff --git a/mm/Kconfig b/mm/Kconfig index f0ed3168db00..50df323eaece 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -473,7 +473,7 @@ config ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP config HAVE_MEMBLOCK_PHYS_MAP bool -config HAVE_FAST_GUP +config HAVE_GUP_FAST depends on MMU bool diff --git a/mm/gup.c b/mm/gup.c index 7609efc55f1c..8dcbeae714e2 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -501,7 +501,7 @@ static inline void mm_set_has_pinned_flag(unsigned long *mm_flags) #ifdef CONFIG_MMU -#if defined(CONFIG_ARCH_HAS_HUGEPD) || defined(CONFIG_HAVE_FAST_GUP) +#if defined(CONFIG_ARCH_HAS_HUGEPD) || defined(CONFIG_HAVE_GUP_FAST) static int record_subpages(struct page *page, unsigned long sz, unsigned long addr, unsigned long end, struct page **pages) @@ -515,7 +515,7 @@ static int record_subpages(struct page *page, unsigned long sz, return nr; } -#endif /* CONFIG_ARCH_HAS_HUGEPD || CONFIG_HAVE_FAST_GUP */ +#endif /* CONFIG_ARCH_HAS_HUGEPD || CONFIG_HAVE_GUP_FAST */ #ifdef CONFIG_ARCH_HAS_HUGEPD static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end, @@ -2782,7 +2782,7 @@ EXPORT_SYMBOL(get_user_pages_unlocked); * * This code is based heavily on the PowerPC implementation by Nick Piggin. */ -#ifdef CONFIG_HAVE_FAST_GUP +#ifdef CONFIG_HAVE_GUP_FAST /* * Used in the GUP-fast path to determine whether GUP is permitted to work on @@ -3361,7 +3361,7 @@ static inline void gup_fast_pgd_range(unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { } -#endif /* CONFIG_HAVE_FAST_GUP */ +#endif /* CONFIG_HAVE_GUP_FAST */ #ifndef gup_fast_permitted /* @@ -3381,7 +3381,7 @@ static unsigned long gup_fast(unsigned long start, unsigned long end, int nr_pinned = 0; unsigned seq; - if (!IS_ENABLED(CONFIG_HAVE_FAST_GUP) || + if (!IS_ENABLED(CONFIG_HAVE_GUP_FAST) || !gup_fast_permitted(start, end)) return 0; diff --git a/mm/internal.h b/mm/internal.h index 640298a98a3c..8fd41f889a95 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1265,7 +1265,7 @@ static inline bool gup_must_unshare(struct vm_area_struct *vma, } /* Paired with a memory barrier in folio_try_share_anon_rmap_*(). */ - if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) + if (IS_ENABLED(CONFIG_HAVE_GUP_FAST)) smp_rmb(); /* -- cgit From 7edea4c6fdf23754c77582a0377791e1aa9d2700 Mon Sep 17 00:00:00 2001 From: Jinjiang Tu Date: Tue, 2 Apr 2024 10:49:34 +0800 Subject: mm/ksm: remove redundant code in ksm_fork Since commit 3c6f33b7273a ("mm/ksm: support fork/exec for prctl"), when a child process is forked, the MMF_VM_MERGE_ANY flag will be inherited in mm_init(). So, it's unnecessary to set the flag in ksm_fork(). 
Link: https://lkml.kernel.org/r/20240402024934.1093361-1-tujinjiang@huawei.com Signed-off-by: Jinjiang Tu Reviewed-by: David Hildenbrand Cc: Johannes Weiner Cc: Kefeng Wang Cc: Nanyong Sun Cc: Rik van Riel Cc: Stefan Roesch Signed-off-by: Andrew Morton --- include/linux/ksm.h | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 7e2b1de3996a..358803cfd4d5 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -45,16 +45,8 @@ static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte) static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) { - int ret; - - if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) { - ret = __ksm_enter(mm); - if (ret) - return ret; - } - - if (test_bit(MMF_VM_MERGE_ANY, &oldmm->flags)) - set_bit(MMF_VM_MERGE_ANY, &mm->flags); + if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) + return __ksm_enter(mm); return 0; } -- cgit From 2542b1ac9a46ac58f9565de0048457956898d481 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Fri, 5 Apr 2024 16:32:25 +0100 Subject: mm: inline destroy_large_folio() into __folio_put_large() destroy_large_folio() has only one caller, move its contents there. Link: https://lkml.kernel.org/r/20240405153228.2563754-4-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Zi Yan Signed-off-by: Andrew Morton --- include/linux/mm.h | 2 -- mm/page_alloc.c | 14 -------------- mm/swap.c | 13 ++++++++++--- 3 files changed, 10 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mm.h b/include/linux/mm.h index 4f4e460d7853..cc9410d772dd 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1318,8 +1318,6 @@ void folio_copy(struct folio *dst, struct folio *src); unsigned long nr_free_buffer_pages(void); -void destroy_large_folio(struct folio *folio); - /* Returns the number of bytes in this potentially compound page. */ static inline unsigned long page_size(struct page *page) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c01757251de2..22e8b9f1d710 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -565,20 +565,6 @@ void prep_compound_page(struct page *page, unsigned int order) prep_compound_head(page, order); } -void destroy_large_folio(struct folio *folio) -{ - if (folio_test_hugetlb(folio)) { - free_huge_folio(folio); - return; - } - - if (folio_test_large_rmappable(folio)) - folio_undo_large_rmappable(folio); - - mem_cgroup_uncharge(folio); - free_unref_page(&folio->page, folio_order(folio)); -} - static inline void set_buddy_order(struct page *page, unsigned int order) { set_page_private(page, order); diff --git a/mm/swap.c b/mm/swap.c index d7db3cd4e80a..6e3bd03673ea 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -127,9 +127,16 @@ static void __folio_put_large(struct folio *folio) * (it's never listed to any LRU lists) and no memcg routines should * be called for hugetlb (it has a separate hugetlb_cgroup.) 
*/ - if (!folio_test_hugetlb(folio)) - page_cache_release(folio); - destroy_large_folio(folio); + if (folio_test_hugetlb(folio)) { + free_huge_folio(folio); + return; + } + + page_cache_release(folio); + if (folio_test_large_rmappable(folio)) + folio_undo_large_rmappable(folio); + mem_cgroup_uncharge(folio); + free_unref_page(&folio->page, folio_order(folio)); } void __folio_put(struct folio *folio) -- cgit From 72801513b2bfb6bf571956a604d38f98ce9aacd9 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 5 Apr 2024 20:24:16 +0800 Subject: mm: set pageblock_order to HPAGE_PMD_ORDER in case with !CONFIG_HUGETLB_PAGE but THP enabled As Vlastimil suggested in previous discussion[1], it doesn't make sense to set pageblock_order as MAX_PAGE_ORDER when hugetlbfs is not enabled and THP is enabled. Instead, it should be set to HPAGE_PMD_ORDER. [1] https://lore.kernel.org/all/76457ec5-d789-449b-b8ca-dcb6ceb12445@suse.cz/ Link: https://lkml.kernel.org/r/3d57d253070035bdc0f6d6e5681ce1ed0e1934f7.1712286863.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Suggested-by: Vlastimil Babka Acked-by: Vlastimil Babka Reviewed-by: Zi Yan Acked-by: David Hildenbrand Cc: Mel Gorman Signed-off-by: Andrew Morton --- include/linux/pageblock-flags.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index 3f2409b968ec..547e82cdc89a 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -28,7 +28,7 @@ enum pageblock_bits { NR_PAGEBLOCK_BITS }; -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE @@ -45,7 +45,11 @@ extern unsigned int pageblock_order; #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ -#else /* CONFIG_HUGETLB_PAGE */ +#elif defined(CONFIG_TRANSPARENT_HUGEPAGE) + +#define pageblock_order min_t(unsigned int, HPAGE_PMD_ORDER, MAX_PAGE_ORDER) + +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ /* If huge pages are not used, group by MAX_ORDER_NR_PAGES */ #define pageblock_order MAX_PAGE_ORDER -- cgit From 3429055f0451cd3a281f8ed6691335ead626b136 Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Wed, 20 Mar 2024 11:18:49 +0100 Subject: mm: kmsan: implement kmsan_memmove() Provide a hook that can be used by custom memcpy implementations to tell KMSAN that the metadata needs to be copied. Without that, false positive reports are possible in the cases where KMSAN fails to intercept memory initialization. Link: https://lore.kernel.org/all/3b7dbd88-0861-4638-b2d2-911c97a4cadf@I-love.SAKURA.ne.jp/ Link: https://lkml.kernel.org/r/20240320101851.2589698-1-glider@google.com Signed-off-by: Alexander Potapenko Suggested-by: Tetsuo Handa Reviewed-by: Marco Elver Cc: Dmitry Vyukov Cc: Linus Torvalds Cc: Tetsuo Handa Cc: Thomas Gleixner Signed-off-by: Andrew Morton --- include/linux/kmsan-checks.h | 15 +++++++++++++++ mm/kmsan/hooks.c | 11 +++++++++++ 2 files changed, 26 insertions(+) (limited to 'include/linux') diff --git a/include/linux/kmsan-checks.h b/include/linux/kmsan-checks.h index c4cae333deec..e1082dc40abc 100644 --- a/include/linux/kmsan-checks.h +++ b/include/linux/kmsan-checks.h @@ -61,6 +61,17 @@ void kmsan_check_memory(const void *address, size_t size); void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy, size_t left); +/** + * kmsan_memmove() - Notify KMSAN about a data copy within kernel. + * @to: destination address in the kernel. + * @from: source address in the kernel. 
+ * @size: number of bytes to copy. + * + * Invoked after non-instrumented version (e.g. implemented using assembly + * code) of memmove()/memcpy() is called, in order to copy KMSAN's metadata. + */ +void kmsan_memmove(void *to, const void *from, size_t to_copy); + #else static inline void kmsan_poison_memory(const void *address, size_t size, @@ -78,6 +89,10 @@ static inline void kmsan_copy_to_user(void __user *to, const void *from, { } +static inline void kmsan_memmove(void *to, const void *from, size_t to_copy) +{ +} + #endif #endif /* _LINUX_KMSAN_CHECKS_H */ diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c index 0b09daa188ef..22e8657800ef 100644 --- a/mm/kmsan/hooks.c +++ b/mm/kmsan/hooks.c @@ -285,6 +285,17 @@ void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy, } EXPORT_SYMBOL(kmsan_copy_to_user); +void kmsan_memmove(void *to, const void *from, size_t size) +{ + if (!kmsan_enabled || kmsan_in_runtime()) + return; + + kmsan_enter_runtime(); + kmsan_internal_memmove_metadata(to, (void *)from, size); + kmsan_leave_runtime(); +} +EXPORT_SYMBOL(kmsan_memmove); + /* Helper function to check an URB. */ void kmsan_handle_urb(const struct urb *urb, bool is_out) { -- cgit From 922621a6828430ea3119b869336157d253489334 Mon Sep 17 00:00:00 2001 From: Alexander Potapenko Date: Wed, 20 Mar 2024 11:18:50 +0100 Subject: instrumented.h: add instrument_memcpy_before, instrument_memcpy_after Bug detection tools based on compiler instrumentation may miss memory accesses in custom memcpy implementations (such as copy_mc_to_kernel). Provide instrumentation hooks that tell KASAN, KCSAN, and KMSAN about such accesses. Link: https://lore.kernel.org/all/3b7dbd88-0861-4638-b2d2-911c97a4cadf@I-love.SAKURA.ne.jp/ Link: https://lkml.kernel.org/r/20240320101851.2589698-2-glider@google.com Signed-off-by: Alexander Potapenko Reviewed-by: Marco Elver Cc: Dmitry Vyukov Cc: Tetsuo Handa Cc: Linus Torvalds Cc: Thomas Gleixner Signed-off-by: Andrew Morton --- include/linux/instrumented.h | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) (limited to 'include/linux') diff --git a/include/linux/instrumented.h b/include/linux/instrumented.h index 1b608e00290a..711a1f0d1a73 100644 --- a/include/linux/instrumented.h +++ b/include/linux/instrumented.h @@ -147,6 +147,41 @@ instrument_copy_from_user_after(const void *to, const void __user *from, kmsan_unpoison_memory(to, n - left); } +/** + * instrument_memcpy_before - add instrumentation before non-instrumented memcpy + * @to: destination address + * @from: source address + * @n: number of bytes to copy + * + * Instrument memory accesses that happen in custom memcpy implementations. The + * instrumentation should be inserted before the memcpy call. + */ +static __always_inline void instrument_memcpy_before(void *to, const void *from, + unsigned long n) +{ + kasan_check_write(to, n); + kasan_check_read(from, n); + kcsan_check_write(to, n); + kcsan_check_read(from, n); +} + +/** + * instrument_memcpy_after - add instrumentation after non-instrumented memcpy + * @to: destination address + * @from: source address + * @n: number of bytes to copy + * @left: number of bytes not copied (if known) + * + * Instrument memory accesses that happen in custom memcpy implementations. The + * instrumentation should be inserted after the memcpy call. 
+ */ +static __always_inline void instrument_memcpy_after(void *to, const void *from, + unsigned long n, + unsigned long left) +{ + kmsan_memmove(to, from, n - left); +} + /** * instrument_get_user() - add instrumentation to get_user()-like macros * @to: destination variable, may not be address-taken -- cgit From 58fbfecab965014b6e3cc956a76b4a96265a1add Mon Sep 17 00:00:00 2001 From: Paul Davey Date: Tue, 23 Apr 2024 18:00:24 +1200 Subject: xfrm: Preserve vlan tags for transport mode software GRO The software GRO path for esp transport mode uses skb_mac_header_rebuild prior to re-injecting the packet via the xfrm_napi_dev. This only copies skb->mac_len bytes of header which may not be sufficient if the packet contains 802.1Q tags or other VLAN tags. Worse copying only the initial header will leave a packet marked as being VLAN tagged but without the corresponding tag leading to mangling when it is later untagged. The VLAN tags are important when receiving the decrypted esp transport mode packet after GRO processing to ensure it is received on the correct interface. Therefore record the full mac header length in xfrm*_transport_input for later use in corresponding xfrm*_transport_finish to copy the entire mac header when rebuilding the mac header for GRO. The skb->data pointer is left pointing skb->mac_header bytes after the start of the mac header as is expected by the network stack and network and transport header offsets reset to this location. Fixes: 7785bba299a8 ("esp: Add a software GRO codepath") Signed-off-by: Paul Davey Signed-off-by: Steffen Klassert --- include/linux/skbuff.h | 15 +++++++++++++++ include/net/xfrm.h | 3 +++ net/ipv4/xfrm4_input.c | 6 +++++- net/ipv6/xfrm6_input.c | 6 +++++- net/xfrm/xfrm_input.c | 8 ++++++++ 5 files changed, 36 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 9d24aec064e8..4ff48eda3f64 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3031,6 +3031,21 @@ static inline void skb_mac_header_rebuild(struct sk_buff *skb) } } +/* Move the full mac header up to current network_header. + * Leaves skb->data pointing at offset skb->mac_len into the mac_header. + * Must be provided the complete mac header length. 
+ */ +static inline void skb_mac_header_rebuild_full(struct sk_buff *skb, u32 full_mac_len) +{ + if (skb_mac_header_was_set(skb)) { + const unsigned char *old_mac = skb_mac_header(skb); + + skb_set_mac_header(skb, -full_mac_len); + memmove(skb_mac_header(skb), old_mac, full_mac_len); + __skb_push(skb, full_mac_len - skb->mac_len); + } +} + static inline int skb_checksum_start_offset(const struct sk_buff *skb) { return skb->csum_start - skb_headroom(skb); diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 57c743b7e4fe..cb4841a9fffd 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -1049,6 +1049,9 @@ struct xfrm_offload { #define CRYPTO_INVALID_PACKET_SYNTAX 64 #define CRYPTO_INVALID_PROTOCOL 128 + /* Used to keep whole l2 header for transport mode GRO */ + __u32 orig_mac_len; + __u8 proto; __u8 inner_ipproto; }; diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index dae35101d189..86382e08140e 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c @@ -63,7 +63,11 @@ int xfrm4_transport_finish(struct sk_buff *skb, int async) ip_send_check(iph); if (xo && (xo->flags & XFRM_GRO)) { - skb_mac_header_rebuild(skb); + /* The full l2 header needs to be preserved so that re-injecting the packet at l2 + * works correctly in the presence of vlan tags. + */ + skb_mac_header_rebuild_full(skb, xo->orig_mac_len); + skb_reset_network_header(skb); skb_reset_transport_header(skb); return 0; } diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index a17d783dc7c0..c6b8e132e10a 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c @@ -58,7 +58,11 @@ int xfrm6_transport_finish(struct sk_buff *skb, int async) skb_postpush_rcsum(skb, skb_network_header(skb), nhlen); if (xo && (xo->flags & XFRM_GRO)) { - skb_mac_header_rebuild(skb); + /* The full l2 header needs to be preserved so that re-injecting the packet at l2 + * works correctly in the presence of vlan tags. + */ + skb_mac_header_rebuild_full(skb, xo->orig_mac_len); + skb_reset_network_header(skb); skb_reset_transport_header(skb); return 0; } diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 161f535c8b94..3a2982a72a6b 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -389,11 +389,15 @@ static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb) */ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) { + struct xfrm_offload *xo = xfrm_offload(skb); int ihl = skb->data - skb_transport_header(skb); if (skb->transport_header != skb->network_header) { memmove(skb_transport_header(skb), skb_network_header(skb), ihl); + if (xo) + xo->orig_mac_len = + skb_mac_header_was_set(skb) ? skb_mac_header_len(skb) : 0; skb->network_header = skb->transport_header; } ip_hdr(skb)->tot_len = htons(skb->len + ihl); @@ -404,11 +408,15 @@ static int xfrm4_transport_input(struct xfrm_state *x, struct sk_buff *skb) static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) { #if IS_ENABLED(CONFIG_IPV6) + struct xfrm_offload *xo = xfrm_offload(skb); int ihl = skb->data - skb_transport_header(skb); if (skb->transport_header != skb->network_header) { memmove(skb_transport_header(skb), skb_network_header(skb), ihl); + if (xo) + xo->orig_mac_len = + skb_mac_header_was_set(skb) ? 
skb_mac_header_len(skb) : 0; skb->network_header = skb->transport_header; } ipv6_hdr(skb)->payload_len = htons(skb->len + ihl - -- cgit From 63a7cd660246aa36af263b85c33ecc6601bf04be Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 10 Apr 2024 21:16:34 +0200 Subject: mmc: core: Add mmc_gpiod_set_cd_config() function Some mmc host drivers may need to fixup a card-detection GPIO's config to e.g. enable the GPIO controllers builtin pull-up resistor on devices where the firmware description of the GPIO is broken (e.g. GpioInt with PullNone instead of PullUp in ACPI DSDT). Since this is the exception rather then the rule adding a config parameter to mmc_gpiod_request_cd() seems undesirable, so instead add a new mmc_gpiod_set_cd_config() function. This is simply a wrapper to call gpiod_set_config() on the card-detect GPIO acquired through mmc_gpiod_request_cd(). Reviewed-by: Andy Shevchenko Signed-off-by: Hans de Goede Acked-by: Adrian Hunter Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20240410191639.526324-2-hdegoede@redhat.com Signed-off-by: Ulf Hansson --- drivers/mmc/core/slot-gpio.c | 20 ++++++++++++++++++++ include/linux/mmc/slot-gpio.h | 1 + 2 files changed, 21 insertions(+) (limited to 'include/linux') diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index 39f45c2b6de8..8791656e9e20 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -221,6 +221,26 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, } EXPORT_SYMBOL(mmc_gpiod_request_cd); +/** + * mmc_gpiod_set_cd_config - set config for card-detection GPIO + * @host: mmc host + * @config: Generic pinconf config (from pinconf_to_config_packed()) + * + * This can be used by mmc host drivers to fixup a card-detection GPIO's config + * (e.g. set PIN_CONFIG_BIAS_PULL_UP) after acquiring the GPIO descriptor + * through mmc_gpiod_request_cd(). + * + * Returns: + * 0 on success, or a negative errno value on error. + */ +int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config) +{ + struct mmc_gpio *ctx = host->slot.handler_priv; + + return gpiod_set_config(ctx->cd_gpio, config); +} +EXPORT_SYMBOL(mmc_gpiod_set_cd_config); + bool mmc_can_gpio_cd(struct mmc_host *host) { struct mmc_gpio *ctx = host->slot.handler_priv; diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h index 5d3d15e97868..66272fdce43d 100644 --- a/include/linux/mmc/slot-gpio.h +++ b/include/linux/mmc/slot-gpio.h @@ -21,6 +21,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, unsigned int debounce); int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, unsigned int idx, unsigned int debounce); +int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config); void mmc_gpio_set_cd_isr(struct mmc_host *host, irqreturn_t (*isr)(int irq, void *dev_id)); int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on); -- cgit From 6faaea2c70a5fbb05e460473698848f449bdb9ee Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 10 Apr 2024 22:56:18 +0300 Subject: mmc: slot-gpio: Use irq_handler_t type The irq_handler_t is already defined globally, let's use it in slot-gpio code. 
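For reference, a hypothetical host-driver fragment showing how the mmc_gpiod_set_cd_config() helper added above is intended to be used (the function name and the pull-up argument value are illustrative; the call is expected to follow mmc_gpiod_request_cd(), which acquires the card-detect GPIO descriptor):

#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/pinctrl/pinconf-generic.h>

static int example_fixup_cd_pull_up(struct mmc_host *host)
{
	/*
	 * Firmware quirk: the ACPI GpioInt for card detect says PullNone,
	 * so enable the GPIO controller's internal pull-up instead.
	 */
	unsigned long config = pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP, 1);

	return mmc_gpiod_set_cd_config(host, config);
}

Packing the value with pinconf_to_config_packed() keeps the helper generic: any pinconf parameter the underlying GPIO controller supports can be applied, not just a bias pull-up.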
Signed-off-by: Andy Shevchenko Reviewed-by: Alexander Stein Link: https://lore.kernel.org/r/20240410195618.1632778-1-andriy.shevchenko@linux.intel.com Signed-off-by: Ulf Hansson --- drivers/mmc/core/slot-gpio.c | 5 ++--- include/linux/mmc/slot-gpio.h | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index 8791656e9e20..12247219e1c2 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -19,7 +19,7 @@ struct mmc_gpio { struct gpio_desc *ro_gpio; struct gpio_desc *cd_gpio; - irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id); + irq_handler_t cd_gpio_isr; char *ro_label; char *cd_label; u32 cd_debounce_delay_ms; @@ -162,8 +162,7 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_wake); /* Register an alternate interrupt service routine for * the card-detect GPIO. */ -void mmc_gpio_set_cd_isr(struct mmc_host *host, - irqreturn_t (*isr)(int irq, void *dev_id)) +void mmc_gpio_set_cd_isr(struct mmc_host *host, irq_handler_t isr) { struct mmc_gpio *ctx = host->slot.handler_priv; diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h index 66272fdce43d..274a2767ea49 100644 --- a/include/linux/mmc/slot-gpio.h +++ b/include/linux/mmc/slot-gpio.h @@ -8,8 +8,8 @@ #ifndef MMC_SLOT_GPIO_H #define MMC_SLOT_GPIO_H +#include #include -#include struct mmc_host; @@ -22,8 +22,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, unsigned int idx, unsigned int debounce); int mmc_gpiod_set_cd_config(struct mmc_host *host, unsigned long config); -void mmc_gpio_set_cd_isr(struct mmc_host *host, - irqreturn_t (*isr)(int irq, void *dev_id)); +void mmc_gpio_set_cd_isr(struct mmc_host *host, irq_handler_t isr); int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on); void mmc_gpiod_request_cd_irq(struct mmc_host *host); bool mmc_can_gpio_cd(struct mmc_host *host); -- cgit From a8625217a054ca058cf74742e58c2c88460eb202 Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Sat, 20 Apr 2024 08:17:25 -0700 Subject: drivers/perf: riscv: Implement SBI PMU snapshot function SBI v2.0 SBI introduced PMU snapshot feature which adds the following features. 1. Read counter values directly from the shared memory instead of csr read. 2. Start multiple counters with initial values with one SBI call. These functionalities optimizes the number of traps to the higher privilege mode. If the kernel is in VS mode while the hypervisor deploy trap & emulate method, this would minimize all the hpmcounter CSR read traps. If the kernel is running in S-mode, the benefits reduced to CSR latency vs DRAM/cache latency as there is no trap involved while accessing the hpmcounter CSRs. In both modes, it does saves the number of ecalls while starting multiple counter together with an initial values. This is a likely scenario if multiple counters overflow at the same time. 
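A simplified sketch of the snapshot read path described above (the structure layout follows the SBI v2.0 specification and the names used in this series, but the read helper is illustrative, not the driver code):

#include <linux/types.h>

/* Per-CPU 4 KiB shared memory area written by the SBI implementation. */
struct riscv_pmu_snapshot_data {
	u64 ctr_overflow_mask;	/* logical indices of overflowed counters */
	u64 ctr_values[64];	/* latest values, updated on TAKE_SNAPSHOT */
	u64 reserved[447];	/* pads the area to a full 4 KiB page */
};

/*
 * Once a counter has been stopped with SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT,
 * its value can be read straight from the shared page, avoiding an
 * hpmcounter CSR access (and a trap when running virtualized).
 */
static u64 snapshot_ctr_read(struct riscv_pmu_snapshot_data *sdata, int idx,
			     bool stopped)
{
	if (stopped)
		return sdata->ctr_values[idx];

	return 0;	/* running counter: use the CSR/ecall path instead */
}

The same shared area is what lets the overflow handler consult ctr_overflow_mask instead of the overflow CSR, and lets the overflow restart path reload initial values for several counters with a single SBI start call.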
Acked-by: Palmer Dabbelt Reviewed-by: Anup Patel Reviewed-by: Conor Dooley Reviewed-by: Andrew Jones Reviewed-by: Samuel Holland Signed-off-by: Atish Patra Link: https://lore.kernel.org/r/20240420151741.962500-10-atishp@rivosinc.com Signed-off-by: Anup Patel --- drivers/perf/riscv_pmu.c | 3 +- drivers/perf/riscv_pmu_sbi.c | 274 ++++++++++++++++++++++++++++++++++++++--- include/linux/perf/riscv_pmu.h | 8 ++ 3 files changed, 264 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/drivers/perf/riscv_pmu.c b/drivers/perf/riscv_pmu.c index b4efdddb2ad9..78c490e0505a 100644 --- a/drivers/perf/riscv_pmu.c +++ b/drivers/perf/riscv_pmu.c @@ -191,8 +191,6 @@ void riscv_pmu_stop(struct perf_event *event, int flags) struct hw_perf_event *hwc = &event->hw; struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); - WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); - if (!(hwc->state & PERF_HES_STOPPED)) { if (rvpmu->ctr_stop) { rvpmu->ctr_stop(event, 0); @@ -408,6 +406,7 @@ struct riscv_pmu *riscv_pmu_alloc(void) cpuc->n_events = 0; for (i = 0; i < RISCV_MAX_COUNTERS; i++) cpuc->events[i] = NULL; + cpuc->snapshot_addr = NULL; } pmu->pmu = (struct pmu) { .event_init = riscv_pmu_event_init, diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 4eacd89141a9..5d699b06dcb6 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -58,6 +58,9 @@ PMU_FORMAT_ATTR(event, "config:0-47"); PMU_FORMAT_ATTR(firmware, "config:63"); static bool sbi_v2_available; +static DEFINE_STATIC_KEY_FALSE(sbi_pmu_snapshot_available); +#define sbi_pmu_snapshot_available() \ + static_branch_unlikely(&sbi_pmu_snapshot_available) static struct attribute *riscv_arch_formats_attr[] = { &format_attr_event.attr, @@ -508,14 +511,106 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig) return ret; } +static void pmu_sbi_snapshot_free(struct riscv_pmu *pmu) +{ + int cpu; + + for_each_possible_cpu(cpu) { + struct cpu_hw_events *cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu); + + if (!cpu_hw_evt->snapshot_addr) + continue; + + free_page((unsigned long)cpu_hw_evt->snapshot_addr); + cpu_hw_evt->snapshot_addr = NULL; + cpu_hw_evt->snapshot_addr_phys = 0; + } +} + +static int pmu_sbi_snapshot_alloc(struct riscv_pmu *pmu) +{ + int cpu; + struct page *snapshot_page; + + for_each_possible_cpu(cpu) { + struct cpu_hw_events *cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu); + + snapshot_page = alloc_page(GFP_ATOMIC | __GFP_ZERO); + if (!snapshot_page) { + pmu_sbi_snapshot_free(pmu); + return -ENOMEM; + } + cpu_hw_evt->snapshot_addr = page_to_virt(snapshot_page); + cpu_hw_evt->snapshot_addr_phys = page_to_phys(snapshot_page); + } + + return 0; +} + +static int pmu_sbi_snapshot_disable(void) +{ + struct sbiret ret; + + ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM, SBI_SHMEM_DISABLE, + SBI_SHMEM_DISABLE, 0, 0, 0, 0); + if (ret.error) { + pr_warn("failed to disable snapshot shared memory\n"); + return sbi_err_map_linux_errno(ret.error); + } + + return 0; +} + +static int pmu_sbi_snapshot_setup(struct riscv_pmu *pmu, int cpu) +{ + struct cpu_hw_events *cpu_hw_evt; + struct sbiret ret = {0}; + + cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu); + if (!cpu_hw_evt->snapshot_addr_phys) + return -EINVAL; + + if (cpu_hw_evt->snapshot_set_done) + return 0; + + if (IS_ENABLED(CONFIG_32BIT)) + ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_SNAPSHOT_SET_SHMEM, + cpu_hw_evt->snapshot_addr_phys, + (u64)(cpu_hw_evt->snapshot_addr_phys) >> 32, 0, 0, 0, 0); + else + ret = sbi_ecall(SBI_EXT_PMU, 
SBI_EXT_PMU_SNAPSHOT_SET_SHMEM, + cpu_hw_evt->snapshot_addr_phys, 0, 0, 0, 0, 0); + + /* Free up the snapshot area memory and fall back to SBI PMU calls without snapshot */ + if (ret.error) { + if (ret.error != SBI_ERR_NOT_SUPPORTED) + pr_warn("pmu snapshot setup failed with error %ld\n", ret.error); + return sbi_err_map_linux_errno(ret.error); + } + + memset(cpu_hw_evt->snapshot_cval_shcopy, 0, sizeof(u64) * RISCV_MAX_COUNTERS); + cpu_hw_evt->snapshot_set_done = true; + + return 0; +} + static u64 pmu_sbi_ctr_read(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; struct sbiret ret; u64 val = 0; + struct riscv_pmu *pmu = to_riscv_pmu(event->pmu); + struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); + struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr; union sbi_pmu_ctr_info info = pmu_ctr_list[idx]; + /* Read the value from the shared memory directly only if counter is stopped */ + if (sbi_pmu_snapshot_available() && (hwc->state & PERF_HES_STOPPED)) { + val = sdata->ctr_values[idx]; + return val; + } + if (pmu_sbi_is_fw_event(event)) { ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_FW_READ, hwc->idx, 0, 0, 0, 0, 0); @@ -565,6 +660,7 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival) struct hw_perf_event *hwc = &event->hw; unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE; + /* There is no benefit setting SNAPSHOT FLAG for a single counter */ #if defined(CONFIG_32BIT) ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx, 1, flag, ival, ival >> 32, 0); @@ -585,16 +681,36 @@ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag) { struct sbiret ret; struct hw_perf_event *hwc = &event->hw; + struct riscv_pmu *pmu = to_riscv_pmu(event->pmu); + struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); + struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr; if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) && (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) pmu_sbi_reset_scounteren((void *)event); + if (sbi_pmu_snapshot_available()) + flag |= SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT; + ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0); - if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) && - flag != SBI_PMU_STOP_FLAG_RESET) + if (!ret.error && sbi_pmu_snapshot_available()) { + /* + * The counter snapshot is based on the index base specified by hwc->idx. + * The actual counter value is updated in shared memory at index 0 when counter + * mask is 0x01. To ensure accurate counter values, it's necessary to transfer + * the counter value to shared memory. However, if hwc->idx is zero, the counter + * value is already correctly updated in shared memory, requiring no further + * adjustment. 
+ */ + if (hwc->idx > 0) { + sdata->ctr_values[hwc->idx] = sdata->ctr_values[0]; + sdata->ctr_values[0] = 0; + } + } else if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) && + flag != SBI_PMU_STOP_FLAG_RESET) { pr_err("Stopping counter idx %d failed with error %d\n", hwc->idx, sbi_err_map_linux_errno(ret.error)); + } } static int pmu_sbi_find_num_ctrs(void) @@ -652,12 +768,39 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu) static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu) { struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); - int i; + struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr; + unsigned long flag = 0; + int i, idx; + struct sbiret ret; + u64 temp_ctr_overflow_mask = 0; - for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) + if (sbi_pmu_snapshot_available()) + flag = SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT; + + /* Reset the shadow copy to avoid save/restore any value from previous overflow */ + memset(cpu_hw_evt->snapshot_cval_shcopy, 0, sizeof(u64) * RISCV_MAX_COUNTERS); + + for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) { /* No need to check the error here as we can't do anything about the error */ - sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, i * BITS_PER_LONG, - cpu_hw_evt->used_hw_ctrs[i], 0, 0, 0, 0); + ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, i * BITS_PER_LONG, + cpu_hw_evt->used_hw_ctrs[i], flag, 0, 0, 0); + if (!ret.error && sbi_pmu_snapshot_available()) { + /* Save the counter values to avoid clobbering */ + for_each_set_bit(idx, &cpu_hw_evt->used_hw_ctrs[i], BITS_PER_LONG) + cpu_hw_evt->snapshot_cval_shcopy[i * BITS_PER_LONG + idx] = + sdata->ctr_values[idx]; + /* Save the overflow mask to avoid clobbering */ + temp_ctr_overflow_mask |= sdata->ctr_overflow_mask << (i * BITS_PER_LONG); + } + } + + /* Restore the counter values to the shared memory for used hw counters */ + if (sbi_pmu_snapshot_available()) { + for_each_set_bit(idx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) + sdata->ctr_values[idx] = cpu_hw_evt->snapshot_cval_shcopy[idx]; + if (temp_ctr_overflow_mask) + sdata->ctr_overflow_mask = temp_ctr_overflow_mask; + } } /* @@ -666,11 +809,10 @@ static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu) * while the overflowed counters need to be started with updated initialization * value. 
*/ -static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu, - unsigned long ctr_ovf_mask) +static inline void pmu_sbi_start_ovf_ctrs_sbi(struct cpu_hw_events *cpu_hw_evt, + u64 ctr_ovf_mask) { int idx = 0, i; - struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); struct perf_event *event; unsigned long flag = SBI_PMU_START_FLAG_SET_INIT_VALUE; unsigned long ctr_start_mask = 0; @@ -706,6 +848,52 @@ static inline void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu, } } +static inline void pmu_sbi_start_ovf_ctrs_snapshot(struct cpu_hw_events *cpu_hw_evt, + u64 ctr_ovf_mask) +{ + int i, idx = 0; + struct perf_event *event; + unsigned long flag = SBI_PMU_START_FLAG_INIT_SNAPSHOT; + u64 max_period, init_val = 0; + struct hw_perf_event *hwc; + struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr; + + for_each_set_bit(idx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) { + if (ctr_ovf_mask & BIT(idx)) { + event = cpu_hw_evt->events[idx]; + hwc = &event->hw; + max_period = riscv_pmu_ctr_get_width_mask(event); + init_val = local64_read(&hwc->prev_count) & max_period; + cpu_hw_evt->snapshot_cval_shcopy[idx] = init_val; + } + /* + * We do not need to update the non-overflow counters the previous + * value should have been there already. + */ + } + + for (i = 0; i < BITS_TO_LONGS(RISCV_MAX_COUNTERS); i++) { + /* Restore the counter values to relative indices for used hw counters */ + for_each_set_bit(idx, &cpu_hw_evt->used_hw_ctrs[i], BITS_PER_LONG) + sdata->ctr_values[idx] = + cpu_hw_evt->snapshot_cval_shcopy[idx + i * BITS_PER_LONG]; + /* Start all the counters in a single shot */ + sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, idx * BITS_PER_LONG, + cpu_hw_evt->used_hw_ctrs[i], flag, 0, 0, 0); + } +} + +static void pmu_sbi_start_overflow_mask(struct riscv_pmu *pmu, + u64 ctr_ovf_mask) +{ + struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); + + if (sbi_pmu_snapshot_available()) + pmu_sbi_start_ovf_ctrs_snapshot(cpu_hw_evt, ctr_ovf_mask); + else + pmu_sbi_start_ovf_ctrs_sbi(cpu_hw_evt, ctr_ovf_mask); +} + static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) { struct perf_sample_data data; @@ -715,10 +903,11 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) int lidx, hidx, fidx; struct riscv_pmu *pmu; struct perf_event *event; - unsigned long overflow; - unsigned long overflowed_ctrs = 0; + u64 overflow; + u64 overflowed_ctrs = 0; struct cpu_hw_events *cpu_hw_evt = dev; u64 start_clock = sched_clock(); + struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr; if (WARN_ON_ONCE(!cpu_hw_evt)) return IRQ_NONE; @@ -740,7 +929,10 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) pmu_sbi_stop_hw_ctrs(pmu); /* Overflow status register should only be read after counter are stopped */ - ALT_SBI_PMU_OVERFLOW(overflow); + if (sbi_pmu_snapshot_available()) + overflow = sdata->ctr_overflow_mask; + else + ALT_SBI_PMU_OVERFLOW(overflow); /* * Overflow interrupt pending bit should only be cleared after stopping @@ -766,9 +958,14 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) if (!info || info->type != SBI_PMU_CTR_TYPE_HW) continue; - /* compute hardware counter index */ - hidx = info->csr - CSR_CYCLE; - /* check if the corresponding bit is set in sscountovf */ + if (sbi_pmu_snapshot_available()) + /* SBI implementation already updated the logical indicies */ + hidx = lidx; + else + /* compute hardware counter index */ + hidx = info->csr - CSR_CYCLE; + + /* check if the corresponding bit is set in 
sscountovf or overflow mask in shmem */ if (!(overflow & BIT(hidx))) continue; @@ -778,7 +975,10 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) */ overflowed_ctrs |= BIT(lidx); hw_evt = &event->hw; + /* Update the event states here so that we know the state while reading */ + hw_evt->state |= PERF_HES_STOPPED; riscv_pmu_event_update(event); + hw_evt->state |= PERF_HES_UPTODATE; perf_sample_data_init(&data, 0, hw_evt->last_period); if (riscv_pmu_event_set_period(event)) { /* @@ -791,6 +991,8 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) */ perf_event_overflow(event, &data, regs); } + /* Reset the state as we are going to start the counter after the loop */ + hw_evt->state = 0; } pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs); @@ -822,6 +1024,9 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node) enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE); } + if (sbi_pmu_snapshot_available()) + return pmu_sbi_snapshot_setup(pmu, cpu); + return 0; } @@ -834,6 +1039,9 @@ static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node) /* Disable all counters access for user mode now */ csr_write(CSR_SCOUNTEREN, 0x0); + if (sbi_pmu_snapshot_available()) + return pmu_sbi_snapshot_disable(); + return 0; } @@ -942,6 +1150,12 @@ static inline void riscv_pm_pmu_unregister(struct riscv_pmu *pmu) { } static void riscv_pmu_destroy(struct riscv_pmu *pmu) { + if (sbi_v2_available) { + if (sbi_pmu_snapshot_available()) { + pmu_sbi_snapshot_disable(); + pmu_sbi_snapshot_free(pmu); + } + } riscv_pm_pmu_unregister(pmu); cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); } @@ -1109,10 +1323,6 @@ static int pmu_sbi_device_probe(struct platform_device *pdev) pmu->event_unmapped = pmu_sbi_event_unmapped; pmu->csr_index = pmu_sbi_csr_index; - ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); - if (ret) - return ret; - ret = riscv_pm_pmu_register(pmu); if (ret) goto out_unregister; @@ -1121,8 +1331,34 @@ static int pmu_sbi_device_probe(struct platform_device *pdev) if (ret) goto out_unregister; + /* SBI PMU Snapsphot is only available in SBI v2.0 */ + if (sbi_v2_available) { + ret = pmu_sbi_snapshot_alloc(pmu); + if (ret) + goto out_unregister; + + ret = pmu_sbi_snapshot_setup(pmu, smp_processor_id()); + if (ret) { + /* Snapshot is an optional feature. Continue if not available */ + pmu_sbi_snapshot_free(pmu); + } else { + pr_info("SBI PMU snapshot detected\n"); + /* + * We enable it once here for the boot cpu. If snapshot shmem setup + * fails during cpu hotplug process, it will fail to start the cpu + * as we can not handle hetergenous PMUs with different snapshot + * capability. 
+ */ + static_branch_enable(&sbi_pmu_snapshot_available); + } + } + register_sysctl("kernel", sbi_pmu_sysctl_table); + ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); + if (ret) + goto out_unregister; + return 0; out_unregister: diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h index 43282e22ebe1..701974639ff2 100644 --- a/include/linux/perf/riscv_pmu.h +++ b/include/linux/perf/riscv_pmu.h @@ -39,6 +39,14 @@ struct cpu_hw_events { DECLARE_BITMAP(used_hw_ctrs, RISCV_MAX_COUNTERS); /* currently enabled firmware counters */ DECLARE_BITMAP(used_fw_ctrs, RISCV_MAX_COUNTERS); + /* The virtual address of the shared memory where counter snapshot will be taken */ + void *snapshot_addr; + /* The physical address of the shared memory where counter snapshot will be taken */ + phys_addr_t snapshot_addr_phys; + /* Boolean flag to indicate setup is already done */ + bool snapshot_set_done; + /* A shadow copy of the counter values to avoid clobbering during multiple SBI calls */ + u64 snapshot_cval_shcopy[RISCV_MAX_COUNTERS]; }; struct riscv_pmu { -- cgit From 55394d29c9e164f2e1991352f1dc8f973c4f1589 Mon Sep 17 00:00:00 2001 From: Dawid Osuchowski Date: Fri, 26 Apr 2024 09:58:54 +0200 Subject: fs: Create anon_inode_getfile_fmode() Creates an anon_inode_getfile_fmode() function that works similarly to anon_inode_getfile() with the addition of being able to set the fmode member. Signed-off-by: Dawid Osuchowski Link: https://lore.kernel.org/r/20240426075854.4723-1-linux@osuchow.ski Signed-off-by: Christian Brauner --- fs/anon_inodes.c | 33 +++++++++++++++++++++++++++++++++ include/linux/anon_inodes.h | 5 +++++ 2 files changed, 38 insertions(+) (limited to 'include/linux') diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 0496cb5b6eab..42bd1cb7c9cd 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c @@ -148,6 +148,38 @@ struct file *anon_inode_getfile(const char *name, } EXPORT_SYMBOL_GPL(anon_inode_getfile); +/** + * anon_inode_getfile_fmode - creates a new file instance by hooking it up to an + * anonymous inode, and a dentry that describe the "class" + * of the file + * + * @name: [in] name of the "class" of the new file + * @fops: [in] file operations for the new file + * @priv: [in] private data for the new file (will be file's private_data) + * @flags: [in] flags + * @f_mode: [in] fmode + * + * Creates a new file by hooking it on a single inode. This is useful for files + * that do not need to have a full-fledged inode in order to operate correctly. + * All the files created with anon_inode_getfile() will share a single inode, + * hence saving memory and avoiding code duplication for the file/inode/dentry + * setup. Allows setting the fmode. Returns the newly created file* or an error + * pointer. 
+ */ +struct file *anon_inode_getfile_fmode(const char *name, + const struct file_operations *fops, + void *priv, int flags, fmode_t f_mode) +{ + struct file *file; + + file = __anon_inode_getfile(name, fops, priv, flags, NULL, false); + if (!IS_ERR(file)) + file->f_mode |= f_mode; + + return file; +} +EXPORT_SYMBOL_GPL(anon_inode_getfile_fmode); + /** * anon_inode_create_getfile - Like anon_inode_getfile(), but creates a new * !S_PRIVATE anon inode rather than reuse the @@ -271,6 +303,7 @@ int anon_inode_create_getfd(const char *name, const struct file_operations *fops return __anon_inode_getfd(name, fops, priv, flags, context_inode, true); } + static int __init anon_inode_init(void) { anon_inode_mnt = kern_mount(&anon_inode_fs_type); diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h index 93a5f16d03f3..edef565c2a1a 100644 --- a/include/linux/anon_inodes.h +++ b/include/linux/anon_inodes.h @@ -9,12 +9,17 @@ #ifndef _LINUX_ANON_INODES_H #define _LINUX_ANON_INODES_H +#include + struct file_operations; struct inode; struct file *anon_inode_getfile(const char *name, const struct file_operations *fops, void *priv, int flags); +struct file *anon_inode_getfile_fmode(const char *name, + const struct file_operations *fops, + void *priv, int flags, fmode_t f_mode); struct file *anon_inode_create_getfile(const char *name, const struct file_operations *fops, void *priv, int flags, -- cgit From d74169ceb0d2e32438946a2f1f9fc8c803304bd6 Mon Sep 17 00:00:00 2001 From: Dimitri Sivanich Date: Wed, 24 Apr 2024 15:16:29 +0800 Subject: iommu/vt-d: Allocate DMAR fault interrupts locally The Intel IOMMU code currently tries to allocate all DMAR fault interrupt vectors on the boot cpu. On large systems with high DMAR counts this results in vector exhaustion, and most of the vectors are not initially allocated socket local. Instead, have a cpu on each node do the vector allocation for the DMARs on that node. The boot cpu still does the allocation for its node during its boot sequence. 
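For illustration, the flow this change sets up is roughly the following; this is a condensed sketch of the hunks below, with declarations and error handling elided:

    /* Boot CPU: register a hotplug callback, then cover its own node. */
    cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dmar:enable_fault_handling",
                      remap_ops->enable_faulting, NULL);
    remap_ops->enable_faulting(smp_processor_id());

    /* Callback: a CPU coming online only wires up DMARs on its own node. */
    int enable_drhd_fault_handling(unsigned int cpu)
    {
            for_each_iommu(iommu, drhd) {
                    if (iommu->irq || iommu->node != cpu_to_node(cpu))
                            continue;       /* already set up, or remote node */
                    dmar_set_interrupt(iommu);
            }
            return 0;
    }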
Signed-off-by: Dimitri Sivanich Reviewed-by: Kevin Tian Link: https://lore.kernel.org/r/Zfydpp2Hm+as16TY@hpe.com Signed-off-by: Lu Baolu Signed-off-by: Joerg Roedel --- drivers/iommu/amd/amd_iommu.h | 2 +- drivers/iommu/amd/init.c | 2 +- drivers/iommu/intel/dmar.c | 9 +++++++-- drivers/iommu/irq_remapping.c | 5 ++++- drivers/iommu/irq_remapping.h | 2 +- include/linux/dmar.h | 2 +- 6 files changed, 15 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index f482aab420f7..410c360e7e24 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -33,7 +33,7 @@ int amd_iommu_prepare(void); int amd_iommu_enable(void); void amd_iommu_disable(void); int amd_iommu_reenable(int mode); -int amd_iommu_enable_faulting(void); +int amd_iommu_enable_faulting(unsigned int cpu); extern int amd_iommu_guest_ir; extern enum io_pgtable_fmt amd_iommu_pgtable; extern int amd_iommu_gpt_level; diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index ac6754a85f35..8085e13e0100 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -3392,7 +3392,7 @@ int amd_iommu_reenable(int mode) return 0; } -int __init amd_iommu_enable_faulting(void) +int __init amd_iommu_enable_faulting(unsigned int cpu) { /* We enable MSI later when PCI is initialized */ return 0; diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 351be9455214..932e0c10c0fe 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -2121,7 +2121,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu) return ret; } -int __init enable_drhd_fault_handling(void) +int enable_drhd_fault_handling(unsigned int cpu) { struct dmar_drhd_unit *drhd; struct intel_iommu *iommu; @@ -2131,7 +2131,12 @@ int __init enable_drhd_fault_handling(void) */ for_each_iommu(iommu, drhd) { u32 fault_status; - int ret = dmar_set_interrupt(iommu); + int ret; + + if (iommu->irq || iommu->node != cpu_to_node(cpu)) + continue; + + ret = dmar_set_interrupt(iommu); if (ret) { pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n", diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c index ee59647c2050..2f7281ccc05f 100644 --- a/drivers/iommu/irq_remapping.c +++ b/drivers/iommu/irq_remapping.c @@ -151,7 +151,10 @@ int __init irq_remap_enable_fault_handling(void) if (!remap_ops->enable_faulting) return -ENODEV; - return remap_ops->enable_faulting(); + cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dmar:enable_fault_handling", + remap_ops->enable_faulting, NULL); + + return remap_ops->enable_faulting(smp_processor_id()); } void panic_if_irq_remap(const char *msg) diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h index 8c89cb947cdb..0d6f140b5e01 100644 --- a/drivers/iommu/irq_remapping.h +++ b/drivers/iommu/irq_remapping.h @@ -41,7 +41,7 @@ struct irq_remap_ops { int (*reenable)(int); /* Enable fault handling */ - int (*enable_faulting)(void); + int (*enable_faulting)(unsigned int); }; extern struct irq_remap_ops intel_irq_remap_ops; diff --git a/include/linux/dmar.h b/include/linux/dmar.h index e34b601b71fd..499bb2c63483 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -117,7 +117,7 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, int count); /* Intel IOMMU detection */ void detect_intel_iommu(void); -extern int enable_drhd_fault_handling(void); +extern int enable_drhd_fault_handling(unsigned int cpu); extern int dmar_device_add(acpi_handle 
handle); extern int dmar_device_remove(acpi_handle handle); -- cgit From 621b7e54f288c5e4e32d1dd81a926b8ecb547c60 Mon Sep 17 00:00:00 2001 From: Jingqi Liu Date: Wed, 24 Apr 2024 15:16:31 +0800 Subject: iommu/vt-d: Remove private data use in fault message According to Intel VT-d specification revision 4.0, "Private Data" field has been removed from Page Request/Response. Since the private data field is not used in fault message, remove the related definitions in page request descriptor and remove the related code in page request/response handler, as Intel hasn't shipped any products which support private data in the page request message. Signed-off-by: Jingqi Liu Link: https://lore.kernel.org/r/20240308103811.76744-3-Jingqi.liu@intel.com Signed-off-by: Lu Baolu Signed-off-by: Joerg Roedel --- drivers/iommu/intel/iommu.h | 1 - drivers/iommu/intel/svm.c | 75 +++++++++------------------------------------ include/linux/iommu.h | 3 +- 3 files changed, 16 insertions(+), 63 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h index 404d2476a877..9ee326f7bf62 100644 --- a/drivers/iommu/intel/iommu.h +++ b/drivers/iommu/intel/iommu.h @@ -455,7 +455,6 @@ enum { /* Page group response descriptor QW0 */ #define QI_PGRP_PASID_P(p) (((u64)(p)) << 4) -#define QI_PGRP_PDP(p) (((u64)(p)) << 5) #define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12) #define QI_PGRP_DID(rid) (((u64)(rid)) << 16) #define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32) diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index e014350db354..e05c6c4cb8c3 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -418,8 +418,7 @@ struct page_req_dsc { struct { u64 type:8; u64 pasid_present:1; - u64 priv_data_present:1; - u64 rsvd:6; + u64 rsvd:7; u64 rid:16; u64 pasid:20; u64 exe_req:1; @@ -438,7 +437,8 @@ struct page_req_dsc { }; u64 qw_1; }; - u64 priv_data[2]; + u64 qw_2; + u64 qw_3; }; static bool is_canonical_address(u64 addr) @@ -572,18 +572,6 @@ static void intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev, event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID; } - if (desc->priv_data_present) { - /* - * Set last page in group bit if private data is present, - * page response is required as it does for LPIG. - * iommu_report_device_fault() doesn't understand this vendor - * specific requirement thus we set last_page as a workaround. - */ - event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; - event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA; - event.fault.prm.private_data[0] = desc->priv_data[0]; - event.fault.prm.private_data[1] = desc->priv_data[1]; - } iommu_report_device_fault(dev, &event); } @@ -591,39 +579,23 @@ static void intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev, static void handle_bad_prq_event(struct intel_iommu *iommu, struct page_req_dsc *req, int result) { - struct qi_desc desc; + struct qi_desc desc = { }; pr_err("%s: Invalid page request: %08llx %08llx\n", iommu->name, ((unsigned long long *)req)[0], ((unsigned long long *)req)[1]); - /* - * Per VT-d spec. v3.0 ch7.7, system software must - * respond with page group response if private data - * is present (PDP) or last page in group (LPIG) bit - * is set. This is an additional VT-d feature beyond - * PCI ATS spec. 
- */ - if (!req->lpig && !req->priv_data_present) + if (!req->lpig) return; desc.qw0 = QI_PGRP_PASID(req->pasid) | QI_PGRP_DID(req->rid) | QI_PGRP_PASID_P(req->pasid_present) | - QI_PGRP_PDP(req->priv_data_present) | QI_PGRP_RESP_CODE(result) | QI_PGRP_RESP_TYPE; desc.qw1 = QI_PGRP_IDX(req->prg_index) | QI_PGRP_LPIG(req->lpig); - if (req->priv_data_present) { - desc.qw2 = req->priv_data[0]; - desc.qw3 = req->priv_data[1]; - } else { - desc.qw2 = 0; - desc.qw3 = 0; - } - qi_submit_sync(iommu, &desc, 1, 0); } @@ -691,7 +663,7 @@ bad_req: intel_svm_prq_report(iommu, dev, req); trace_prq_report(iommu, dev, req->qw_0, req->qw_1, - req->priv_data[0], req->priv_data[1], + req->qw_2, req->qw_3, iommu->prq_seq_number++); mutex_unlock(&iommu->iopf_lock); prq_advance: @@ -730,7 +702,7 @@ void intel_svm_page_response(struct device *dev, struct iopf_fault *evt, struct intel_iommu *iommu = info->iommu; u8 bus = info->bus, devfn = info->devfn; struct iommu_fault_page_request *prm; - bool private_present; + struct qi_desc desc; bool pasid_present; bool last_page; u16 sid; @@ -738,34 +710,17 @@ void intel_svm_page_response(struct device *dev, struct iopf_fault *evt, prm = &evt->fault.prm; sid = PCI_DEVID(bus, devfn); pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; - private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA; last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; - /* - * Per VT-d spec. v3.0 ch7.7, system software must respond - * with page group response if private data is present (PDP) - * or last page in group (LPIG) bit is set. This is an - * additional VT-d requirement beyond PCI ATS spec. - */ - if (last_page || private_present) { - struct qi_desc desc; - - desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) | - QI_PGRP_PASID_P(pasid_present) | - QI_PGRP_PDP(private_present) | - QI_PGRP_RESP_CODE(msg->code) | - QI_PGRP_RESP_TYPE; - desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page); - desc.qw2 = 0; - desc.qw3 = 0; - - if (private_present) { - desc.qw2 = prm->private_data[0]; - desc.qw3 = prm->private_data[1]; - } + desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) | + QI_PGRP_PASID_P(pasid_present) | + QI_PGRP_RESP_CODE(msg->code) | + QI_PGRP_RESP_TYPE; + desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page); + desc.qw2 = 0; + desc.qw3 = 0; - qi_submit_sync(iommu, &desc, 1, 0); - } + qi_submit_sync(iommu, &desc, 1, 0); } static void intel_svm_domain_free(struct iommu_domain *domain) diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 2e925b5eba53..e6549bdfaed9 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -69,8 +69,7 @@ enum iommu_fault_type { struct iommu_fault_page_request { #define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID (1 << 0) #define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1) -#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA (1 << 2) -#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 3) +#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 2) u32 flags; u32 pasid; u32 grpid; -- cgit From 65442507026a79e2de8014880e6ba376b8b44584 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Wed, 24 Apr 2024 15:16:43 +0800 Subject: iommu: Add ops->domain_alloc_sva() Make a new op that receives the device and the mm_struct that the SVA domain should be created for. Unlike domain_alloc_paging() the dev argument is never NULL here. This allows drivers to fully initialize the SVA domain and allocate the mmu_notifier during allocation. It allows the notifier lifetime to follow the lifetime of the iommu_domain. 
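As a rough illustration, a driver implementing the new hook might look like the sketch below; all names here are hypothetical, and the ERR_PTR return convention is the one introduced in the hunks that follow:

    static struct iommu_domain *my_iommu_domain_alloc_sva(struct device *dev,
                                                           struct mm_struct *mm)
    {
            struct my_sva_domain *sva;      /* hypothetical driver-private type */

            sva = kzalloc(sizeof(*sva), GFP_KERNEL);
            if (!sva)
                    return ERR_PTR(-ENOMEM);

            /*
             * dev is never NULL here, so the mmu_notifier and any per-device
             * state can be set up at allocation time and torn down together
             * with the domain.
             */
            return &sva->domain;
    }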
Since we have only one call site, upgrade the new op to return ERR_PTR instead of NULL. Signed-off-by: Jason Gunthorpe Signed-off-by: Vasant Hegde Reviewed-by: Tina Zhang Link: https://lore.kernel.org/r/20240311090843.133455-15-vasant.hegde@amd.com Signed-off-by: Lu Baolu Reviewed-by: Kevin Tian Link: https://lore.kernel.org/r/20240416080656.60968-12-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu-sva.c | 16 +++++++++++----- include/linux/iommu.h | 3 +++ 2 files changed, 14 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c index 640acc804e8c..18a35e798b72 100644 --- a/drivers/iommu/iommu-sva.c +++ b/drivers/iommu/iommu-sva.c @@ -108,8 +108,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm /* Allocate a new domain and set it on device pasid. */ domain = iommu_sva_domain_alloc(dev, mm); - if (!domain) { - ret = -ENOMEM; + if (IS_ERR(domain)) { + ret = PTR_ERR(domain); goto out_free_handle; } @@ -283,9 +283,15 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, const struct iommu_ops *ops = dev_iommu_ops(dev); struct iommu_domain *domain; - domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); - if (!domain) - return NULL; + if (ops->domain_alloc_sva) { + domain = ops->domain_alloc_sva(dev, mm); + if (IS_ERR(domain)) + return domain; + } else { + domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); + if (!domain) + return ERR_PTR(-ENOMEM); + } domain->type = IOMMU_DOMAIN_SVA; mmgrab(mm); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index e6549bdfaed9..f4cdffa985e4 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -517,6 +517,7 @@ static inline int __iommu_copy_struct_from_user_array( * Upon failure, ERR_PTR must be returned. * @domain_alloc_paging: Allocate an iommu_domain that can be used for * UNMANAGED, DMA, and DMA_FQ domain types. + * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing. * @probe_device: Add device to iommu driver handling * @release_device: Remove device from iommu driver handling * @probe_finalize: Do final setup work after the device is added to an IOMMU @@ -557,6 +558,8 @@ struct iommu_ops { struct device *dev, u32 flags, struct iommu_domain *parent, const struct iommu_user_data *user_data); struct iommu_domain *(*domain_alloc_paging)(struct device *dev); + struct iommu_domain *(*domain_alloc_sva)(struct device *dev, + struct mm_struct *mm); struct iommu_device *(*probe_device)(struct device *dev); void (*release_device)(struct device *dev); -- cgit From 91cfd679f9e8b9a7bf2f26adf66eff99dbe2026b Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 19 Apr 2024 17:54:42 +0100 Subject: ACPI/IORT: Handle memory address size limits as limits Return the Root Complex/Named Component memory address size limit as an inclusive limit value, rather than an exclusive size. This saves having to fudge an off-by-one for the 64-bit case, and simplifies our caller. 
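Concretely, the off-by-one the message refers to (values shown purely for illustration):

    /*
     * memory_address_limit == 32:
     *   exclusive size:   1ULL << 32       == 0x100000000
     *   inclusive limit: (1ULL << 32) - 1  == 0xffffffff
     *
     * memory_address_limit >= 64:
     *   the true size (1 << 64) does not fit in a u64, so the old code
     *   fudged it to U64_MAX; as an inclusive limit, U64_MAX is exact.
     */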
Acked-by: Hanjun Guo Reviewed-by: Jason Gunthorpe Tested-by: Hanjun Guo Signed-off-by: Robin Murphy Link: https://lore.kernel.org/r/284ae9fbadb12f2e3b5a30cd4d037d0e6843a8f4.1713523152.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel --- drivers/acpi/arm64/dma.c | 9 +++------ drivers/acpi/arm64/iort.c | 20 ++++++++++---------- include/linux/acpi_iort.h | 4 ++-- 3 files changed, 15 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c index 93d796531af3..b98a149f8d50 100644 --- a/drivers/acpi/arm64/dma.c +++ b/drivers/acpi/arm64/dma.c @@ -8,7 +8,6 @@ void acpi_arch_dma_setup(struct device *dev) { int ret; u64 end, mask; - u64 size = 0; const struct bus_dma_region *map = NULL; /* @@ -23,9 +22,9 @@ void acpi_arch_dma_setup(struct device *dev) } if (dev->coherent_dma_mask) - size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1); + end = dev->coherent_dma_mask; else - size = 1ULL << 32; + end = (1ULL << 32) - 1; ret = acpi_dma_get_range(dev, &map); if (!ret && map) { @@ -36,18 +35,16 @@ void acpi_arch_dma_setup(struct device *dev) end = r->dma_start + r->size - 1; } - size = end + 1; dev->dma_range_map = map; } if (ret == -ENODEV) - ret = iort_dma_get_ranges(dev, &size); + ret = iort_dma_get_ranges(dev, &end); if (!ret) { /* * Limit coherent and dma mask based on size retrieved from * firmware. */ - end = size - 1; mask = DMA_BIT_MASK(ilog2(end) + 1); dev->bus_dma_limit = end; dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask); diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 6496ff5a6ba2..c0b1c2c19444 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1367,7 +1367,7 @@ int iort_iommu_configure_id(struct device *dev, const u32 *input_id) { return -ENODEV; } #endif -static int nc_dma_get_range(struct device *dev, u64 *size) +static int nc_dma_get_range(struct device *dev, u64 *limit) { struct acpi_iort_node *node; struct acpi_iort_named_component *ncomp; @@ -1384,13 +1384,13 @@ static int nc_dma_get_range(struct device *dev, u64 *size) return -EINVAL; } - *size = ncomp->memory_address_limit >= 64 ? U64_MAX : - 1ULL<memory_address_limit; + *limit = ncomp->memory_address_limit >= 64 ? U64_MAX : + (1ULL << ncomp->memory_address_limit) - 1; return 0; } -static int rc_dma_get_range(struct device *dev, u64 *size) +static int rc_dma_get_range(struct device *dev, u64 *limit) { struct acpi_iort_node *node; struct acpi_iort_root_complex *rc; @@ -1408,8 +1408,8 @@ static int rc_dma_get_range(struct device *dev, u64 *size) return -EINVAL; } - *size = rc->memory_address_limit >= 64 ? U64_MAX : - 1ULL<memory_address_limit; + *limit = rc->memory_address_limit >= 64 ? U64_MAX : + (1ULL << rc->memory_address_limit) - 1; return 0; } @@ -1417,16 +1417,16 @@ static int rc_dma_get_range(struct device *dev, u64 *size) /** * iort_dma_get_ranges() - Look up DMA addressing limit for the device * @dev: device to lookup - * @size: DMA range size result pointer + * @limit: DMA limit result pointer * * Return: 0 on success, an error otherwise. 
*/ -int iort_dma_get_ranges(struct device *dev, u64 *size) +int iort_dma_get_ranges(struct device *dev, u64 *limit) { if (dev_is_pci(dev)) - return rc_dma_get_range(dev, size); + return rc_dma_get_range(dev, limit); else - return nc_dma_get_range(dev, size); + return nc_dma_get_range(dev, limit); } static void __init acpi_iort_register_irq(int hwirq, const char *name, diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h index 1cb65592c95d..d4ed5622cf2b 100644 --- a/include/linux/acpi_iort.h +++ b/include/linux/acpi_iort.h @@ -39,7 +39,7 @@ void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head); /* IOMMU interface */ -int iort_dma_get_ranges(struct device *dev, u64 *size); +int iort_dma_get_ranges(struct device *dev, u64 *limit); int iort_iommu_configure_id(struct device *dev, const u32 *id_in); void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head); phys_addr_t acpi_iort_dma_get_max_cpu_address(void); @@ -55,7 +55,7 @@ void iort_get_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *hea static inline void iort_put_rmr_sids(struct fwnode_handle *iommu_fwnode, struct list_head *head) { } /* IOMMU interface */ -static inline int iort_dma_get_ranges(struct device *dev, u64 *size) +static inline int iort_dma_get_ranges(struct device *dev, u64 *limit) { return -ENODEV; } static inline int iort_iommu_configure_id(struct device *dev, const u32 *id_in) { return -ENODEV; } -- cgit From fece6530bf4b59b01a476a12851e07751e73d69f Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 19 Apr 2024 17:54:43 +0100 Subject: dma-mapping: Add helpers for dma_range_map bounds Several places want to compute the lower and/or upper bounds of a dma_range_map, so let's factor that out into reusable helpers. 
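A condensed sketch of the caller pattern this enables (cf. the hunks below; error handling elided):

    const struct bus_dma_region *map;
    u64 start, end;

    ret = acpi_dma_get_range(dev, &map);
    if (!ret && map) {
            start = dma_range_map_min(map); /* lowest dma_start of any region */
            end   = dma_range_map_max(map); /* highest inclusive end address  */
            dev->bus_dma_limit = end;
            dev->dma_range_map = map;
    }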
Acked-by: Rob Herring Reviewed-by: Christoph Hellwig Reviewed-by: Hanjun Guo # For arm64 Reviewed-by: Jason Gunthorpe Tested-by: Hanjun Guo Signed-off-by: Robin Murphy Link: https://lore.kernel.org/r/45ec52f033ec4dfb364e23f48abaf787f612fa53.1713523152.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel --- arch/loongarch/kernel/dma.c | 9 ++------- drivers/acpi/arm64/dma.c | 8 +------- drivers/of/device.c | 11 ++--------- include/linux/dma-direct.h | 18 ++++++++++++++++++ 4 files changed, 23 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/arch/loongarch/kernel/dma.c b/arch/loongarch/kernel/dma.c index 7a9c6a9dd2d0..429555fb4e13 100644 --- a/arch/loongarch/kernel/dma.c +++ b/arch/loongarch/kernel/dma.c @@ -8,17 +8,12 @@ void acpi_arch_dma_setup(struct device *dev) { int ret; - u64 mask, end = 0; + u64 mask, end; const struct bus_dma_region *map = NULL; ret = acpi_dma_get_range(dev, &map); if (!ret && map) { - const struct bus_dma_region *r = map; - - for (end = 0; r->size; r++) { - if (r->dma_start + r->size - 1 > end) - end = r->dma_start + r->size - 1; - } + end = dma_range_map_max(map); mask = DMA_BIT_MASK(ilog2(end) + 1); dev->bus_dma_limit = end; diff --git a/drivers/acpi/arm64/dma.c b/drivers/acpi/arm64/dma.c index b98a149f8d50..52b2abf88689 100644 --- a/drivers/acpi/arm64/dma.c +++ b/drivers/acpi/arm64/dma.c @@ -28,13 +28,7 @@ void acpi_arch_dma_setup(struct device *dev) ret = acpi_dma_get_range(dev, &map); if (!ret && map) { - const struct bus_dma_region *r = map; - - for (end = 0; r->size; r++) { - if (r->dma_start + r->size - 1 > end) - end = r->dma_start + r->size - 1; - } - + end = dma_range_map_max(map); dev->dma_range_map = map; } diff --git a/drivers/of/device.c b/drivers/of/device.c index 841ccd3a19d1..9e7963972fa7 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -117,16 +117,9 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, if (!force_dma) return ret == -ENODEV ? 
0 : ret; } else { - const struct bus_dma_region *r = map; - /* Determine the overall bounds of all DMA regions */ - for (dma_start = ~0; r->size; r++) { - /* Take lower and upper limits */ - if (r->dma_start < dma_start) - dma_start = r->dma_start; - if (r->dma_start + r->size > end) - end = r->dma_start + r->size; - } + dma_start = dma_range_map_min(map); + end = dma_range_map_max(map); } /* diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index 3eb3589ff43e..edbe13d00776 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -54,6 +54,24 @@ static inline phys_addr_t translate_dma_to_phys(struct device *dev, return (phys_addr_t)-1; } +static inline dma_addr_t dma_range_map_min(const struct bus_dma_region *map) +{ + dma_addr_t ret = (dma_addr_t)U64_MAX; + + for (; map->size; map++) + ret = min(ret, map->dma_start); + return ret; +} + +static inline dma_addr_t dma_range_map_max(const struct bus_dma_region *map) +{ + dma_addr_t ret = 0; + + for (; map->size; map++) + ret = max(ret, map->dma_start + map->size - 1); + return ret; +} + #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA #include #ifndef phys_to_dma_unencrypted -- cgit From b67483b3c44eaef2f771fa4c712e13f452675a67 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 19 Apr 2024 17:54:45 +0100 Subject: iommu/dma: Centralise iommu_setup_dma_ops() It's somewhat hard to see, but arm64's arch_setup_dma_ops() should only ever call iommu_setup_dma_ops() after a successful iommu_probe_device(), which means there should be no harm in achieving the same order of operations by running it off the back of iommu_probe_device() itself. This then puts it in line with the x86 and s390 .probe_finalize bodges, letting us pull it all into the main flow properly. As a bonus this lets us fold in and de-scope the PCI workaround setup as well. At this point we can also then pull the call up inside the group mutex, and avoid having to think about whether iommu_group_store_type() could theoretically race and free the domain if iommu_setup_dma_ops() ran just *before* iommu_device_use_default_domain() claims it... Furthermore we replace one .probe_finalize call completely, since the only remaining implementations are now one which only needs to run once for the initial boot-time probe, and two which themselves render that path unreachable. This leaves us a big step closer to realistically being able to unpick the variety of different things that iommu_setup_dma_ops() has been muddling together, and further streamline iommu-dma into core API flows in future. 
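In rough terms, the resulting probe-time ordering is the following; this is a simplified view of the iommu.c hunk below:

    /* __iommu_probe_device(), simplified view of the new ordering: */
    mutex_lock(&group->mutex);
    /* ... group and default-domain setup as before ... */
    if (group->default_domain)
            iommu_setup_dma_ops(dev);       /* runs off the back of a successful
                                               probe, under group->mutex */
    mutex_unlock(&group->mutex);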
Reviewed-by: Lu Baolu # For Intel IOMMU Reviewed-by: Jason Gunthorpe Tested-by: Hanjun Guo Signed-off-by: Robin Murphy Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/bebea331c1d688b34d9862eefd5ede47503961b8.1713523152.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel --- arch/arm64/mm/dma-mapping.c | 2 -- drivers/iommu/amd/iommu.c | 8 -------- drivers/iommu/dma-iommu.c | 18 ++++++------------ drivers/iommu/dma-iommu.h | 14 ++++++-------- drivers/iommu/intel/iommu.c | 7 ------- drivers/iommu/iommu.c | 20 +++++++------------- drivers/iommu/s390-iommu.c | 6 ------ drivers/iommu/virtio-iommu.c | 10 ---------- include/linux/iommu.h | 7 ------- 9 files changed, 19 insertions(+), 73 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 61886e43e3a1..313d8938a2f0 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -58,8 +58,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, ARCH_DMA_MINALIGN, cls); dev->dma_coherent = coherent; - if (device_iommu_mapped(dev)) - iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1); xen_setup_dma_ops(dev); } diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index d35c1b8c8e65..085abf098fa9 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2175,13 +2175,6 @@ static struct iommu_device *amd_iommu_probe_device(struct device *dev) return iommu_dev; } -static void amd_iommu_probe_finalize(struct device *dev) -{ - /* Domains are initialized for this device - have a look what we ended up with */ - set_dma_ops(dev, NULL); - iommu_setup_dma_ops(dev, 0, U64_MAX); -} - static void amd_iommu_release_device(struct device *dev) { struct amd_iommu *iommu; @@ -2784,7 +2777,6 @@ const struct iommu_ops amd_iommu_ops = { .domain_alloc_user = amd_iommu_domain_alloc_user, .probe_device = amd_iommu_probe_device, .release_device = amd_iommu_release_device, - .probe_finalize = amd_iommu_probe_finalize, .device_group = amd_iommu_device_group, .get_resv_regions = amd_iommu_get_resv_regions, .is_attach_deferred = amd_iommu_is_attach_deferred, diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index c827ed690f1f..bc1490bd4f7a 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -1741,25 +1741,20 @@ static const struct dma_map_ops iommu_dma_ops = { .max_mapping_size = iommu_dma_max_mapping_size, }; -/* - * The IOMMU core code allocates the default DMA domain, which the underlying - * IOMMU driver needs to support via the dma-iommu layer. - */ -void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit) +void iommu_setup_dma_ops(struct device *dev) { struct iommu_domain *domain = iommu_get_domain_for_dev(dev); - if (!domain) - goto out_err; + if (dev_is_pci(dev)) + dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac; - /* - * The IOMMU core code allocates the default DMA domain, which the - * underlying IOMMU driver needs to support via the dma-iommu layer. 
- */ if (iommu_is_dma_domain(domain)) { if (iommu_dma_init_domain(domain, dev)) goto out_err; dev->dma_ops = &iommu_dma_ops; + } else if (dev->dma_ops == &iommu_dma_ops) { + /* Clean up if we've switched *from* a DMA domain */ + dev->dma_ops = NULL; } return; @@ -1767,7 +1762,6 @@ out_err: pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n", dev_name(dev)); } -EXPORT_SYMBOL_GPL(iommu_setup_dma_ops); static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, phys_addr_t msi_addr, struct iommu_domain *domain) diff --git a/drivers/iommu/dma-iommu.h b/drivers/iommu/dma-iommu.h index c829f1f82a99..c12d63457c76 100644 --- a/drivers/iommu/dma-iommu.h +++ b/drivers/iommu/dma-iommu.h @@ -9,6 +9,8 @@ #ifdef CONFIG_IOMMU_DMA +void iommu_setup_dma_ops(struct device *dev); + int iommu_get_dma_cookie(struct iommu_domain *domain); void iommu_put_dma_cookie(struct iommu_domain *domain); @@ -17,13 +19,13 @@ int iommu_dma_init_fq(struct iommu_domain *domain); void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); extern bool iommu_dma_forcedac; -static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev) -{ - dev->iommu->pci_32bit_workaround = !iommu_dma_forcedac; -} #else /* CONFIG_IOMMU_DMA */ +static inline void iommu_setup_dma_ops(struct device *dev) +{ +} + static inline int iommu_dma_init_fq(struct iommu_domain *domain) { return -EINVAL; @@ -42,9 +44,5 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he { } -static inline void iommu_dma_set_pci_32bit_workaround(struct device *dev) -{ -} - #endif /* CONFIG_IOMMU_DMA */ #endif /* __DMA_IOMMU_H */ diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 45c75a8a0ef5..64adfdadeb49 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4349,12 +4349,6 @@ static void intel_iommu_release_device(struct device *dev) set_dma_ops(dev, NULL); } -static void intel_iommu_probe_finalize(struct device *dev) -{ - set_dma_ops(dev, NULL); - iommu_setup_dma_ops(dev, 0, U64_MAX); -} - static void intel_iommu_get_resv_regions(struct device *device, struct list_head *head) { @@ -4834,7 +4828,6 @@ const struct iommu_ops intel_iommu_ops = { .domain_alloc = intel_iommu_domain_alloc, .domain_alloc_user = intel_iommu_domain_alloc_user, .probe_device = intel_iommu_probe_device, - .probe_finalize = intel_iommu_probe_finalize, .release_device = intel_iommu_release_device, .get_resv_regions = intel_iommu_get_resv_regions, .device_group = intel_iommu_device_group, diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 3183b0ed4cdb..9df7cc75c1bc 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -581,10 +581,11 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list if (list_empty(&group->entry)) list_add_tail(&group->entry, group_list); } - mutex_unlock(&group->mutex); - if (dev_is_pci(dev)) - iommu_dma_set_pci_32bit_workaround(dev); + if (group->default_domain) + iommu_setup_dma_ops(dev); + + mutex_unlock(&group->mutex); return 0; @@ -1828,6 +1829,8 @@ int bus_iommu_probe(const struct bus_type *bus) mutex_unlock(&group->mutex); return ret; } + for_each_group_device(group, gdev) + iommu_setup_dma_ops(gdev->dev); mutex_unlock(&group->mutex); /* @@ -3066,18 +3069,9 @@ static ssize_t iommu_group_store_type(struct iommu_group *group, if (ret) goto out_unlock; - /* - * Release the mutex here because ops->probe_finalize() call-back of - * some vendor IOMMU drivers calls 
arm_iommu_attach_device() which - * in-turn might call back into IOMMU core code, where it tries to take - * group->mutex, resulting in a deadlock. - */ - mutex_unlock(&group->mutex); - /* Make sure dma_ops is appropriatley set */ for_each_group_device(group, gdev) - iommu_group_do_probe_finalize(gdev->dev); - return count; + iommu_setup_dma_ops(gdev->dev); out_unlock: mutex_unlock(&group->mutex); diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index 9a5196f523de..d8eaa7ea380b 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -695,11 +695,6 @@ static size_t s390_iommu_unmap_pages(struct iommu_domain *domain, return size; } -static void s390_iommu_probe_finalize(struct device *dev) -{ - iommu_setup_dma_ops(dev, 0, U64_MAX); -} - struct zpci_iommu_ctrs *zpci_get_iommu_ctrs(struct zpci_dev *zdev) { if (!zdev || !zdev->s390_domain) @@ -785,7 +780,6 @@ static const struct iommu_ops s390_iommu_ops = { .capable = s390_iommu_capable, .domain_alloc_paging = s390_domain_alloc_paging, .probe_device = s390_iommu_probe_device, - .probe_finalize = s390_iommu_probe_finalize, .release_device = s390_iommu_release_device, .device_group = generic_device_group, .pgsize_bitmap = SZ_4K, diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index 04048f64a2c0..8e776f6c6e35 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -1025,15 +1025,6 @@ err_free_dev: return ERR_PTR(ret); } -static void viommu_probe_finalize(struct device *dev) -{ -#ifndef CONFIG_ARCH_HAS_SETUP_DMA_OPS - /* First clear the DMA ops in case we're switching from a DMA domain */ - set_dma_ops(dev, NULL); - iommu_setup_dma_ops(dev, 0, U64_MAX); -#endif -} - static void viommu_release_device(struct device *dev) { struct viommu_endpoint *vdev = dev_iommu_priv_get(dev); @@ -1073,7 +1064,6 @@ static struct iommu_ops viommu_ops = { .capable = viommu_capable, .domain_alloc = viommu_domain_alloc, .probe_device = viommu_probe_device, - .probe_finalize = viommu_probe_finalize, .release_device = viommu_release_device, .device_group = viommu_device_group, .get_resv_regions = viommu_get_resv_regions, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 40dd439307e8..86f0bff3cc95 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -1446,9 +1446,6 @@ static inline void iommu_debugfs_setup(void) {} #ifdef CONFIG_IOMMU_DMA #include -/* Setup call for arch DMA mapping code */ -void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit); - int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base); int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr); @@ -1459,10 +1456,6 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc, struct msi_msg *msg); struct msi_desc; struct msi_msg; -static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit) -{ -} - static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base) { return -ENODEV; -- cgit From f091e93306e0429ebb7589b9874590b6a9705e64 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Fri, 19 Apr 2024 17:54:46 +0100 Subject: dma-mapping: Simplify arch_setup_dma_ops() The dma_base, size and iommu arguments are only used by ARM, and can now easily be deduced from the device itself, so there's no need to pass them through the callchain as well. 
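For reference, the signature change, with ARM (the only user of the range) now deriving the bounds from dev->dma_range_map itself, as in the arm/mm hunk below:

    /* before */
    void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                            bool coherent);
    /* after  */
    void arch_setup_dma_ops(struct device *dev, bool coherent);

    /* ARM computes the bounds locally when a range map exists: */
    u64 dma_base = 0, size = 1ULL << 32;

    if (dev->dma_range_map) {
            dma_base = dma_range_map_min(dev->dma_range_map);
            size = dma_range_map_max(dev->dma_range_map) - dma_base;
    }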
Acked-by: Rob Herring Reviewed-by: Christoph Hellwig Reviewed-by: Michael Kelley # For Hyper-V Reviewed-by: Jason Gunthorpe Tested-by: Hanjun Guo Signed-off-by: Robin Murphy Acked-by: Catalin Marinas Link: https://lore.kernel.org/r/5291c2326eab405b1aa7693aa964e8d3cb7193de.1713523152.git.robin.murphy@arm.com Signed-off-by: Joerg Roedel --- arch/arc/mm/dma.c | 3 +-- arch/arm/mm/dma-mapping-nommu.c | 3 +-- arch/arm/mm/dma-mapping.c | 16 +++++++++------- arch/arm64/mm/dma-mapping.c | 3 +-- arch/mips/mm/dma-noncoherent.c | 3 +-- arch/riscv/mm/dma-noncoherent.c | 3 +-- drivers/acpi/scan.c | 7 +------ drivers/hv/hv_common.c | 6 +----- drivers/of/device.c | 4 +--- include/linux/dma-map-ops.h | 6 ++---- 10 files changed, 19 insertions(+), 35 deletions(-) (limited to 'include/linux') diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index 197707bc7658..6b85e94f3275 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c @@ -90,8 +90,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, /* * Plug in direct dma map ops. */ -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent) +void arch_setup_dma_ops(struct device *dev, bool coherent) { /* * IOC hardware snoops all DMA traffic keeping the caches consistent diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c index b94850b57995..97db5397c320 100644 --- a/arch/arm/mm/dma-mapping-nommu.c +++ b/arch/arm/mm/dma-mapping-nommu.c @@ -33,8 +33,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, } } -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent) +void arch_setup_dma_ops(struct device *dev, bool coherent) { if (IS_ENABLED(CONFIG_CPU_V7M)) { /* diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index f68db05eba29..5adf1769eee4 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1709,11 +1709,15 @@ void arm_iommu_detach_device(struct device *dev) } EXPORT_SYMBOL_GPL(arm_iommu_detach_device); -static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent) +static void arm_setup_iommu_dma_ops(struct device *dev) { struct dma_iommu_mapping *mapping; + u64 dma_base = 0, size = 1ULL << 32; + if (dev->dma_range_map) { + dma_base = dma_range_map_min(dev->dma_range_map); + size = dma_range_map_max(dev->dma_range_map) - dma_base; + } mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); if (IS_ERR(mapping)) { pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n", @@ -1744,8 +1748,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) #else -static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent) +static void arm_setup_iommu_dma_ops(struct device *dev) { } @@ -1753,8 +1756,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { } #endif /* CONFIG_ARM_DMA_USE_IOMMU */ -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent) +void arch_setup_dma_ops(struct device *dev, bool coherent) { /* * Due to legacy code that sets the ->dma_coherent flag from a bus @@ -1774,7 +1776,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, return; if (device_iommu_mapped(dev)) - arm_setup_iommu_dma_ops(dev, dma_base, size, coherent); + arm_setup_iommu_dma_ops(dev); xen_setup_dma_ops(dev); dev->archdata.dma_ops_setup = true; diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 313d8938a2f0..0b320a25a471 100644 --- a/arch/arm64/mm/dma-mapping.c +++ 
b/arch/arm64/mm/dma-mapping.c @@ -46,8 +46,7 @@ void arch_teardown_dma_ops(struct device *dev) } #endif -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent) +void arch_setup_dma_ops(struct device *dev, bool coherent) { int cls = cache_line_size_of_cpu(); diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index 0f3cec663a12..ab4f2a75a7d0 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c @@ -137,8 +137,7 @@ void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, #endif #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent) +void arch_setup_dma_ops(struct device *dev, bool coherent) { dev->dma_coherent = coherent; } diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c index 843107f834b2..cb89d7e0ba88 100644 --- a/arch/riscv/mm/dma-noncoherent.c +++ b/arch/riscv/mm/dma-noncoherent.c @@ -128,8 +128,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size) ALT_CMO_OP(FLUSH, flush_addr, size, riscv_cbom_block_size); } -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent) +void arch_setup_dma_ops(struct device *dev, bool coherent) { WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN, TAINT_CPU_OUT_OF_SPEC, diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 7c157bf92695..b1a88992c1a9 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1675,12 +1675,7 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr, if (ret == -EPROBE_DEFER) return -EPROBE_DEFER; - /* - * Historically this routine doesn't fail driver probing due to errors - * in acpi_iommu_configure_id() - */ - - arch_setup_dma_ops(dev, 0, U64_MAX, attr == DEV_DMA_COHERENT); + arch_setup_dma_ops(dev, attr == DEV_DMA_COHERENT); return 0; } diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c index dde3f9b6871a..9c452bfbd571 100644 --- a/drivers/hv/hv_common.c +++ b/drivers/hv/hv_common.c @@ -561,11 +561,7 @@ EXPORT_SYMBOL_GPL(hv_query_ext_cap); void hv_setup_dma_ops(struct device *dev, bool coherent) { - /* - * Hyper-V does not offer a vIOMMU in the guest - * VM, so pass 0/NULL for the IOMMU settings - */ - arch_setup_dma_ops(dev, 0, 0, coherent); + arch_setup_dma_ops(dev, coherent); } EXPORT_SYMBOL_GPL(hv_setup_dma_ops); diff --git a/drivers/of/device.c b/drivers/of/device.c index 9e7963972fa7..312c63361211 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -95,7 +95,6 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, { const struct bus_dma_region *map = NULL; struct device_node *bus_np; - u64 dma_start = 0; u64 mask, end = 0; bool coherent; int iommu_ret; @@ -118,7 +117,6 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, return ret == -ENODEV ? 
0 : ret; } else { /* Determine the overall bounds of all DMA regions */ - dma_start = dma_range_map_min(map); end = dma_range_map_max(map); } @@ -175,7 +173,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, } else dev_dbg(dev, "device is behind an iommu\n"); - arch_setup_dma_ops(dev, dma_start, end - dma_start + 1, coherent); + arch_setup_dma_ops(dev, coherent); if (iommu_ret) of_dma_set_restricted_buffer(dev, np); diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 4abc60f04209..ed89e1ce0114 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -426,11 +426,9 @@ bool arch_dma_unmap_sg_direct(struct device *dev, struct scatterlist *sg, #endif #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS -void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - bool coherent); +void arch_setup_dma_ops(struct device *dev, bool coherent); #else -static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, - u64 size, bool coherent) +static inline void arch_setup_dma_ops(struct device *dev, bool coherent) { } #endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */ -- cgit From 80af5a45202422db957549a241e00bf4d4e0ce89 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 18 Apr 2024 10:33:59 +0000 Subject: iommu: Add ops->domain_alloc_sva() Make a new op that receives the device and the mm_struct that the SVA domain should be created for. Unlike domain_alloc_paging() the dev argument is never NULL here. This allows drivers to fully initialize the SVA domain and allocate the mmu_notifier during allocation. It allows the notifier lifetime to follow the lifetime of the iommu_domain. Since we have only one call site, upgrade the new op to return ERR_PTR instead of NULL. Signed-off-by: Jason Gunthorpe [Removed smmu3 related changes - Vasant] Signed-off-by: Vasant Hegde Reviewed-by: Tina Zhang Link: https://lore.kernel.org/r/20240418103400.6229-15-vasant.hegde@amd.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu-sva.c | 16 +++++++++++----- include/linux/iommu.h | 3 +++ 2 files changed, 14 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c index 640acc804e8c..18a35e798b72 100644 --- a/drivers/iommu/iommu-sva.c +++ b/drivers/iommu/iommu-sva.c @@ -108,8 +108,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm /* Allocate a new domain and set it on device pasid. */ domain = iommu_sva_domain_alloc(dev, mm); - if (!domain) { - ret = -ENOMEM; + if (IS_ERR(domain)) { + ret = PTR_ERR(domain); goto out_free_handle; } @@ -283,9 +283,15 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev, const struct iommu_ops *ops = dev_iommu_ops(dev); struct iommu_domain *domain; - domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); - if (!domain) - return NULL; + if (ops->domain_alloc_sva) { + domain = ops->domain_alloc_sva(dev, mm); + if (IS_ERR(domain)) + return domain; + } else { + domain = ops->domain_alloc(IOMMU_DOMAIN_SVA); + if (!domain) + return ERR_PTR(-ENOMEM); + } domain->type = IOMMU_DOMAIN_SVA; mmgrab(mm); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 2e925b5eba53..8aabe83af8f2 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -518,6 +518,7 @@ static inline int __iommu_copy_struct_from_user_array( * Upon failure, ERR_PTR must be returned. * @domain_alloc_paging: Allocate an iommu_domain that can be used for * UNMANAGED, DMA, and DMA_FQ domain types. 
+ * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing. * @probe_device: Add device to iommu driver handling * @release_device: Remove device from iommu driver handling * @probe_finalize: Do final setup work after the device is added to an IOMMU @@ -558,6 +559,8 @@ struct iommu_ops { struct device *dev, u32 flags, struct iommu_domain *parent, const struct iommu_user_data *user_data); struct iommu_domain *(*domain_alloc_paging)(struct device *dev); + struct iommu_domain *(*domain_alloc_sva)(struct device *dev, + struct mm_struct *mm); struct iommu_device *(*probe_device)(struct device *dev); void (*release_device)(struct device *dev); -- cgit From 96a8326d69fffd7e8c2eb737dc060d8009a33b39 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Wed, 24 Apr 2024 12:40:45 +0200 Subject: virtio: add debugfs infrastructure to allow to debug virtio features Currently there is no way for user to set what features the driver should obey or not, it is hard wired in the code. In order to be able to debug the device behavior in case some feature is disabled, introduce a debugfs infrastructure with couple of files allowing user to see what features the device advertises and to set filter for features used by driver. Example: $cat /sys/bus/virtio/devices/virtio0/features 1110010111111111111101010000110010000000100000000000000000000000 $ echo "5" >/sys/kernel/debug/virtio/virtio0/filter_feature_add $ cat /sys/kernel/debug/virtio/virtio0/filter_features 5 $ echo "virtio0" > /sys/bus/virtio/drivers/virtio_net/unbind $ echo "virtio0" > /sys/bus/virtio/drivers/virtio_net/bind $ cat /sys/bus/virtio/devices/virtio0/features 1110000111111111111101010000110010000000100000000000000000000000 Note that sysfs "features" now already exists, this patch does not touch it. Signed-off-by: Jiri Pirko Acked-by: Michael S. Tsirkin Signed-off-by: Paolo Abeni --- drivers/virtio/Kconfig | 10 ++++ drivers/virtio/Makefile | 1 + drivers/virtio/virtio.c | 8 +++ drivers/virtio/virtio_debug.c | 114 ++++++++++++++++++++++++++++++++++++++++++ include/linux/virtio.h | 35 +++++++++++++ 5 files changed, 168 insertions(+) create mode 100644 drivers/virtio/virtio_debug.c (limited to 'include/linux') diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index c17193544268..6284538a8184 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -178,4 +178,14 @@ config VIRTIO_DMA_SHARED_BUFFER This option adds a flavor of dma buffers that are backed by virtio resources. +config VIRTIO_DEBUG + bool "Debug facilities" + depends on VIRTIO + help + Enable this to expose debug facilities over debugfs. + This allows to debug features, to see what features the device + advertises and to set filter for features used by driver. + + If unsure, say N. 
+ endif # VIRTIO_MENU diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile index 73ace62af440..58b2b0489fc9 100644 --- a/drivers/virtio/Makefile +++ b/drivers/virtio/Makefile @@ -13,3 +13,4 @@ obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o obj-$(CONFIG_VIRTIO_MEM) += virtio_mem.o obj-$(CONFIG_VIRTIO_DMA_SHARED_BUFFER) += virtio_dma_buf.o +obj-$(CONFIG_VIRTIO_DEBUG) += virtio_debug.o diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index 9510c551dce8..b968b2aa5f4d 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -274,6 +274,9 @@ static int virtio_dev_probe(struct device *_d) else dev->features = driver_features_legacy & device_features; + /* When debugging, user may filter some features by hand. */ + virtio_debug_device_filter_features(dev); + /* Transport features always preserved to pass to finalize_features. */ for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) if (device_features & (1ULL << i)) @@ -465,6 +468,8 @@ int register_virtio_device(struct virtio_device *dev) /* Acknowledge that we've seen the device. */ virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); + virtio_debug_device_init(dev); + /* * device_add() causes the bus infrastructure to look for a matching * driver. @@ -496,6 +501,7 @@ void unregister_virtio_device(struct virtio_device *dev) int index = dev->index; /* save for after device release */ device_unregister(&dev->dev); + virtio_debug_device_exit(dev); ida_free(&virtio_index_ida, index); } EXPORT_SYMBOL_GPL(unregister_virtio_device); @@ -590,11 +596,13 @@ static int virtio_init(void) { if (bus_register(&virtio_bus) != 0) panic("virtio bus registration failed"); + virtio_debug_init(); return 0; } static void __exit virtio_exit(void) { + virtio_debug_exit(); bus_unregister(&virtio_bus); ida_destroy(&virtio_index_ida); } diff --git a/drivers/virtio/virtio_debug.c b/drivers/virtio/virtio_debug.c new file mode 100644 index 000000000000..95c8fc7705bb --- /dev/null +++ b/drivers/virtio/virtio_debug.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include +#include + +static struct dentry *virtio_debugfs_dir; + +static int virtio_debug_device_features_show(struct seq_file *s, void *data) +{ + struct virtio_device *dev = s->private; + u64 device_features; + unsigned int i; + + device_features = dev->config->get_features(dev); + for (i = 0; i < BITS_PER_LONG_LONG; i++) { + if (device_features & (1ULL << i)) + seq_printf(s, "%u\n", i); + } + return 0; +} +DEFINE_SHOW_ATTRIBUTE(virtio_debug_device_features); + +static int virtio_debug_filter_features_show(struct seq_file *s, void *data) +{ + struct virtio_device *dev = s->private; + unsigned int i; + + for (i = 0; i < BITS_PER_LONG_LONG; i++) { + if (dev->debugfs_filter_features & (1ULL << i)) + seq_printf(s, "%u\n", i); + } + return 0; +} +DEFINE_SHOW_ATTRIBUTE(virtio_debug_filter_features); + +static int virtio_debug_filter_features_clear(void *data, u64 val) +{ + struct virtio_device *dev = data; + + if (val == 1) + dev->debugfs_filter_features = 0; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(virtio_debug_filter_features_clear_fops, NULL, + virtio_debug_filter_features_clear, "%llu\n"); + +static int virtio_debug_filter_feature_add(void *data, u64 val) +{ + struct virtio_device *dev = data; + + if (val >= BITS_PER_LONG_LONG) + return -EINVAL; + dev->debugfs_filter_features |= BIT_ULL_MASK(val); + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(virtio_debug_filter_feature_add_fops, NULL, + 
virtio_debug_filter_feature_add, "%llu\n"); + +static int virtio_debug_filter_feature_del(void *data, u64 val) +{ + struct virtio_device *dev = data; + + if (val >= BITS_PER_LONG_LONG) + return -EINVAL; + dev->debugfs_filter_features &= ~BIT_ULL_MASK(val); + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(virtio_debug_filter_feature_del_fops, NULL, + virtio_debug_filter_feature_del, "%llu\n"); + +void virtio_debug_device_init(struct virtio_device *dev) +{ + dev->debugfs_dir = debugfs_create_dir(dev_name(&dev->dev), + virtio_debugfs_dir); + debugfs_create_file("device_features", 0400, dev->debugfs_dir, dev, + &virtio_debug_device_features_fops); + debugfs_create_file("filter_features", 0400, dev->debugfs_dir, dev, + &virtio_debug_filter_features_fops); + debugfs_create_file("filter_features_clear", 0200, dev->debugfs_dir, dev, + &virtio_debug_filter_features_clear_fops); + debugfs_create_file("filter_feature_add", 0200, dev->debugfs_dir, dev, + &virtio_debug_filter_feature_add_fops); + debugfs_create_file("filter_feature_del", 0200, dev->debugfs_dir, dev, + &virtio_debug_filter_feature_del_fops); +} +EXPORT_SYMBOL_GPL(virtio_debug_device_init); + +void virtio_debug_device_filter_features(struct virtio_device *dev) +{ + dev->features &= ~dev->debugfs_filter_features; +} +EXPORT_SYMBOL_GPL(virtio_debug_device_filter_features); + +void virtio_debug_device_exit(struct virtio_device *dev) +{ + debugfs_remove_recursive(dev->debugfs_dir); +} +EXPORT_SYMBOL_GPL(virtio_debug_device_exit); + +void virtio_debug_init(void) +{ + virtio_debugfs_dir = debugfs_create_dir("virtio", NULL); +} +EXPORT_SYMBOL_GPL(virtio_debug_init); + +void virtio_debug_exit(void) +{ + debugfs_remove_recursive(virtio_debugfs_dir); +} +EXPORT_SYMBOL_GPL(virtio_debug_exit); diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 26c4325aa373..96fea920873b 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -126,6 +126,8 @@ struct virtio_admin_cmd { * @vqs: the list of virtqueues for this device. * @features: the features supported by both driver and device. * @priv: private pointer for the driver's use. + * @debugfs_dir: debugfs directory entry. + * @debugfs_filter_features: features to be filtered set by debugfs. 
*/ struct virtio_device { int index; @@ -141,6 +143,10 @@ struct virtio_device { struct list_head vqs; u64 features; void *priv; +#ifdef CONFIG_VIRTIO_DEBUG + struct dentry *debugfs_dir; + u64 debugfs_filter_features; +#endif }; #define dev_to_virtio(_dev) container_of_const(_dev, struct virtio_device, dev) @@ -237,4 +243,33 @@ void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t a void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr, unsigned long offset, size_t size, enum dma_data_direction dir); + +#ifdef CONFIG_VIRTIO_DEBUG +void virtio_debug_device_init(struct virtio_device *dev); +void virtio_debug_device_exit(struct virtio_device *dev); +void virtio_debug_device_filter_features(struct virtio_device *dev); +void virtio_debug_init(void); +void virtio_debug_exit(void); +#else +static inline void virtio_debug_device_init(struct virtio_device *dev) +{ +} + +static inline void virtio_debug_device_exit(struct virtio_device *dev) +{ +} + +static inline void virtio_debug_device_filter_features(struct virtio_device *dev) +{ +} + +static inline void virtio_debug_init(void) +{ +} + +static inline void virtio_debug_exit(void) +{ +} +#endif + #endif /* _LINUX_VIRTIO_H */ -- cgit From 66e13b615a0ce76b785d780ecc9776ba71983629 Mon Sep 17 00:00:00 2001 From: Puranjay Mohan Date: Wed, 24 Apr 2024 10:02:08 +0000 Subject: bpf: verifier: prevent userspace memory access With BPF_PROBE_MEM, BPF allows de-referencing an untrusted pointer. To thwart invalid memory accesses, the JITs add an exception table entry for all such accesses. But in case the src_reg + offset is a userspace address, the BPF program might read that memory if the user has mapped it. Make the verifier add guard instructions around such memory accesses and skip the load if the address falls into the userspace region. The JITs need to implement bpf_arch_uaddress_limit() to define where the userspace addresses end for that architecture or TASK_SIZE is taken as default. The implementation is as follows: REG_AX = SRC_REG if(offset) REG_AX += offset; REG_AX >>= 32; if (REG_AX <= (uaddress_limit >> 32)) DST_REG = 0; else DST_REG = *(size *)(SRC_REG + offset); Comparing just the upper 32 bits of the load address with the upper 32 bits of uaddress_limit implies that the values are being aligned down to a 4GB boundary before comparison. The above means that all loads with address <= uaddress_limit + 4GB are skipped. This is acceptable because there is a large hole (much larger than 4GB) between userspace and kernel space memory, therefore a correctly functioning BPF program should not access this 4GB memory above the userspace. Let's analyze what this patch does to the following fentry program dereferencing an untrusted pointer: SEC("fentry/tcp_v4_connect") int BPF_PROG(fentry_tcp_v4_connect, struct sock *sk) { *(volatile long *)sk; return 0; } BPF Program before | BPF Program after ------------------ | ----------------- 0: (79) r1 = *(u64 *)(r1 +0) 0: (79) r1 = *(u64 *)(r1 +0) ----------------------------------------------------------------------- 1: (79) r1 = *(u64 *)(r1 +0) --\ 1: (bf) r11 = r1 ----------------------------\ \ 2: (77) r11 >>= 32 2: (b7) r0 = 0 \ \ 3: (b5) if r11 <= 0x8000 goto pc+2 3: (95) exit \ \-> 4: (79) r1 = *(u64 *)(r1 +0) \ 5: (05) goto pc+1 \ 6: (b7) r1 = 0 \-------------------------------------- 7: (b7) r0 = 0 8: (95) exit As you can see from above, in the best case (off=0), 5 extra instructions are emitted. 
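Expressed as a hedged, plain-C sketch (illustrative only, not verifier or JIT code; bpf_probe_mem_load() and the uaddress_limit parameter are made-up names, the latter standing in for the value returned by bpf_arch_uaddress_limit()), the guard computes:

#include <stdint.h>

static uint64_t bpf_probe_mem_load(uint64_t src_reg, int32_t off,
                                   uint64_t uaddress_limit)
{
        uint64_t ax = src_reg;                  /* REG_AX = SRC_REG */

        if (off)
                ax += off;                      /* REG_AX += offset */

        /*
         * Only the upper 32 bits are compared, which rounds both values
         * down to a 4GB boundary: every address up to uaddress_limit + 4GB
         * is treated as userspace and the load is skipped.
         */
        if ((ax >> 32) <= (uaddress_limit >> 32))
                return 0;                       /* DST_REG = 0 */

        return *(volatile uint64_t *)(src_reg + off);   /* DST_REG = *(size *)(SRC_REG + offset) */
}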
Now, we analyze the same program after it has gone through the JITs of ARM64 and RISC-V architectures. We follow the single load instruction that has the untrusted pointer and see what instrumentation has been added around it. x86-64 JIT ========== JIT's Instrumentation (upstream) --------------------- 0: nopl 0x0(%rax,%rax,1) 5: xchg %ax,%ax 7: push %rbp 8: mov %rsp,%rbp b: mov 0x0(%rdi),%rdi --------------------------------- f: movabs $0x800000000000,%r11 19: cmp %r11,%rdi 1c: jb 0x000000000000002a 1e: mov %rdi,%r11 21: add $0x0,%r11 28: jae 0x000000000000002e 2a: xor %edi,%edi 2c: jmp 0x0000000000000032 2e: mov 0x0(%rdi),%rdi --------------------------------- 32: xor %eax,%eax 34: leave 35: ret The x86-64 JIT already emits some instructions to protect against user memory access. This patch doesn't make any changes for the x86-64 JIT. ARM64 JIT ========= No Intrumentation Verifier's Instrumentation (upstream) (This patch) ----------------- -------------------------- 0: add x9, x30, #0x0 0: add x9, x30, #0x0 4: nop 4: nop 8: paciasp 8: paciasp c: stp x29, x30, [sp, #-16]! c: stp x29, x30, [sp, #-16]! 10: mov x29, sp 10: mov x29, sp 14: stp x19, x20, [sp, #-16]! 14: stp x19, x20, [sp, #-16]! 18: stp x21, x22, [sp, #-16]! 18: stp x21, x22, [sp, #-16]! 1c: stp x25, x26, [sp, #-16]! 1c: stp x25, x26, [sp, #-16]! 20: stp x27, x28, [sp, #-16]! 20: stp x27, x28, [sp, #-16]! 24: mov x25, sp 24: mov x25, sp 28: mov x26, #0x0 28: mov x26, #0x0 2c: sub x27, x25, #0x0 2c: sub x27, x25, #0x0 30: sub sp, sp, #0x0 30: sub sp, sp, #0x0 34: ldr x0, [x0] 34: ldr x0, [x0] -------------------------------------------------------------------------------- 38: ldr x0, [x0] ----------\ 38: add x9, x0, #0x0 -----------------------------------\\ 3c: lsr x9, x9, #32 3c: mov x7, #0x0 \\ 40: cmp x9, #0x10, lsl #12 40: mov sp, sp \\ 44: b.ls 0x0000000000000050 44: ldp x27, x28, [sp], #16 \\--> 48: ldr x0, [x0] 48: ldp x25, x26, [sp], #16 \ 4c: b 0x0000000000000054 4c: ldp x21, x22, [sp], #16 \ 50: mov x0, #0x0 50: ldp x19, x20, [sp], #16 \--------------------------------------- 54: ldp x29, x30, [sp], #16 54: mov x7, #0x0 58: add x0, x7, #0x0 58: mov sp, sp 5c: autiasp 5c: ldp x27, x28, [sp], #16 60: ret 60: ldp x25, x26, [sp], #16 64: nop 64: ldp x21, x22, [sp], #16 68: ldr x10, 0x0000000000000070 68: ldp x19, x20, [sp], #16 6c: br x10 6c: ldp x29, x30, [sp], #16 70: add x0, x7, #0x0 74: autiasp 78: ret 7c: nop 80: ldr x10, 0x0000000000000088 84: br x10 There are 6 extra instructions added in ARM64 in the best case. This will become 7 in the worst case (off != 0). RISC-V JIT (RISCV_ISA_C Disabled) ========== No Intrumentation Verifier's Instrumentation (upstream) (This patch) ----------------- -------------------------- 0: nop 0: nop 4: nop 4: nop 8: li a6, 33 8: li a6, 33 c: addi sp, sp, -16 c: addi sp, sp, -16 10: sd s0, 8(sp) 10: sd s0, 8(sp) 14: addi s0, sp, 16 14: addi s0, sp, 16 18: ld a0, 0(a0) 18: ld a0, 0(a0) --------------------------------------------------------------- 1c: ld a0, 0(a0) --\ 1c: mv t0, a0 --------------------------\ \ 20: srli t0, t0, 32 20: li a5, 0 \ \ 24: lui t1, 4096 24: ld s0, 8(sp) \ \ 28: sext.w t1, t1 28: addi sp, sp, 16 \ \ 2c: bgeu t1, t0, 12 2c: sext.w a0, a5 \ \--> 30: ld a0, 0(a0) 30: ret \ 34: j 8 \ 38: li a0, 0 \------------------------------ 3c: li a5, 0 40: ld s0, 8(sp) 44: addi sp, sp, 16 48: sext.w a0, a5 4c: ret There are 7 extra instructions added in RISC-V. 
Fixes: 800834285361 ("bpf, arm64: Add BPF exception tables") Reported-by: Breno Leitao Suggested-by: Alexei Starovoitov Acked-by: Ilya Leoshkevich Signed-off-by: Puranjay Mohan Link: https://lore.kernel.org/r/20240424100210.11982-2-puranjay@kernel.org Signed-off-by: Alexei Starovoitov --- arch/x86/net/bpf_jit_comp.c | 6 ++++++ include/linux/filter.h | 1 + kernel/bpf/core.c | 9 +++++++++ kernel/bpf/verifier.c | 30 ++++++++++++++++++++++++++++++ 4 files changed, 46 insertions(+) (limited to 'include/linux') diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index df5fac428408..b520b66ad14b 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -3473,3 +3473,9 @@ bool bpf_jit_supports_ptr_xchg(void) { return true; } + +/* x86-64 JIT emits its own code to filter user addresses so return 0 here */ +u64 bpf_arch_uaddress_limit(void) +{ + return 0; +} diff --git a/include/linux/filter.h b/include/linux/filter.h index c99bc3df2d28..219ee7a76874 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -963,6 +963,7 @@ bool bpf_jit_supports_far_kfunc_call(void); bool bpf_jit_supports_exceptions(void); bool bpf_jit_supports_ptr_xchg(void); bool bpf_jit_supports_arena(void); +u64 bpf_arch_uaddress_limit(void); void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie); bool bpf_helper_changes_pkt_data(void *func); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 696bc55de8e8..1ea5ce5bb599 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -2942,6 +2942,15 @@ bool __weak bpf_jit_supports_arena(void) return false; } +u64 __weak bpf_arch_uaddress_limit(void) +{ +#if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE) + return TASK_SIZE; +#else + return 0; +#endif +} + /* Return TRUE if the JIT backend satisfies the following two conditions: * 1) JIT backend supports atomic_xchg() on pointer-sized words. * 2) Under the specific arch, the implementation of xchg() is the same diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index aa24beb65393..cb7ad1f795e1 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -19675,6 +19675,36 @@ static int do_misc_fixups(struct bpf_verifier_env *env) goto next_insn; } + /* Make it impossible to de-reference a userspace address */ + if (BPF_CLASS(insn->code) == BPF_LDX && + (BPF_MODE(insn->code) == BPF_PROBE_MEM || + BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) { + struct bpf_insn *patch = &insn_buf[0]; + u64 uaddress_limit = bpf_arch_uaddress_limit(); + + if (!uaddress_limit) + goto next_insn; + + *patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg); + if (insn->off) + *patch++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, insn->off); + *patch++ = BPF_ALU64_IMM(BPF_RSH, BPF_REG_AX, 32); + *patch++ = BPF_JMP_IMM(BPF_JLE, BPF_REG_AX, uaddress_limit >> 32, 2); + *patch++ = *insn; + *patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); + *patch++ = BPF_MOV64_IMM(insn->dst_reg, 0); + + cnt = patch - insn_buf; + new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); + if (!new_prog) + return -ENOMEM; + + delta += cnt - 1; + env->prog = prog = new_prog; + insn = new_prog->insnsi + i + delta; + goto next_insn; + } + /* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */ if (BPF_CLASS(insn->code) == BPF_LD && (BPF_MODE(insn->code) == BPF_ABS || -- cgit From b37ef7210e51b1e996ca03b03227d93f7470784b Mon Sep 17 00:00:00 2001 From: "Gautham R. 
Shenoy" Date: Thu, 25 Apr 2024 16:07:51 +0800 Subject: cpufreq: amd-pstate: Document *_limit_* fields in struct amd_cpudata The four fields of struct cpudata namely min_limit_perf, max_limit_perf, min_limit_freq, max_limit_freq introduced in the commit febab20caeba("cpufreq/amd-pstate: Fix scaling_min_freq and scaling_max_freq update") are currently undocumented Add comments describing these fields Acked-by: Huang Rui Fixes: febab20caeba("cpufreq/amd-pstate: Fix scaling_min_freq and scaling_max_freq update") Reviewed-by: Li Meng Tested-by: Dhananjay Ugwekar Signed-off-by: Gautham R. Shenoy Signed-off-by: Perry Yuan Signed-off-by: Rafael J. Wysocki --- include/linux/amd-pstate.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include/linux') diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h index d21838835abd..83fb3fc647fc 100644 --- a/include/linux/amd-pstate.h +++ b/include/linux/amd-pstate.h @@ -49,6 +49,10 @@ struct amd_aperf_mperf { * @lowest_perf: the absolute lowest performance level of the processor * @prefcore_ranking: the preferred core ranking, the higher value indicates a higher * priority. + * @min_limit_perf: Cached value of the performance corresponding to policy->min + * @max_limit_perf: Cached value of the performance corresponding to policy->max + * @min_limit_freq: Cached value of policy->min + * @max_limit_freq: Cached value of policy->max * @max_freq: the frequency that mapped to highest_perf * @min_freq: the frequency that mapped to lowest_perf * @nominal_freq: the frequency that mapped to nominal_perf -- cgit From 4fcfd1954ad305e331b6b4b62de2874fbae61394 Mon Sep 17 00:00:00 2001 From: "Gautham R. Shenoy" Date: Thu, 25 Apr 2024 16:07:52 +0800 Subject: cpufreq: amd-pstate: Document the units for freq variables in amd_cpudata The min_limit_freq, max_limit_freq, min_freq, max_freq, nominal_freq and the lowest_nominal_freq members of struct cpudata store the frequency value in khz to be consistent with the cpufreq core. Update the comment to document this. Reviewed-by: Li Meng Tested-by: Dhananjay Ugwekar Signed-off-by: Gautham R. Shenoy Signed-off-by: Perry Yuan Acked-by: Huang Rui Signed-off-by: Rafael J. Wysocki --- include/linux/amd-pstate.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'include/linux') diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h index 83fb3fc647fc..ec0b0fa3e9bb 100644 --- a/include/linux/amd-pstate.h +++ b/include/linux/amd-pstate.h @@ -51,15 +51,15 @@ struct amd_aperf_mperf { * priority. 
* @min_limit_perf: Cached value of the performance corresponding to policy->min * @max_limit_perf: Cached value of the performance corresponding to policy->max - * @min_limit_freq: Cached value of policy->min - * @max_limit_freq: Cached value of policy->max - * @max_freq: the frequency that mapped to highest_perf - * @min_freq: the frequency that mapped to lowest_perf - * @nominal_freq: the frequency that mapped to nominal_perf - * @lowest_nonlinear_freq: the frequency that mapped to lowest_nonlinear_perf + * @min_limit_freq: Cached value of policy->min (in khz) + * @max_limit_freq: Cached value of policy->max (in khz) + * @max_freq: the frequency (in khz) that mapped to highest_perf + * @min_freq: the frequency (in khz) that mapped to lowest_perf + * @nominal_freq: the frequency (in khz) that mapped to nominal_perf + * @lowest_nonlinear_freq: the frequency (in khz) that mapped to lowest_nonlinear_perf * @cur: Difference of Aperf/Mperf/tsc count between last and current sample * @prev: Last Aperf/Mperf/tsc count value read from register - * @freq: current cpu frequency value + * @freq: current cpu frequency value (in khz) * @boost_supported: check whether the Processor or SBIOS supports boost mode * @hw_prefcore: check whether HW supports preferred core featue. * Only when hw_prefcore and early prefcore param are true, -- cgit From eb8b6c36820214df96e7e86d8614d93f6b028f28 Mon Sep 17 00:00:00 2001 From: Perry Yuan Date: Thu, 25 Apr 2024 16:07:58 +0800 Subject: cpufreq: amd-pstate: Add quirk for the pstate CPPC capabilities missing Add a quirks table to fix CPPC capability issues by providing correct perf or frequency values during driver loading. If CPPC capabilities are not defined in the ACPI tables, or are wrongly defined by the platform firmware, a quirk is needed to fix those issues with correct workaround values so that the pstate driver can be loaded despite the CPPC capability errors. The workaround matches the broken BIOS that lacks the CPPC nominal_freq and lowest_freq definitions in the ACPI table. $ cat /sys/devices/system/cpu/cpu0/acpi_cppc/lowest_freq 0 $ cat /sys/devices/system/cpu/cpu0/acpi_cppc/nominal_freq 0 Acked-by: Huang Rui Reviewed-by: Mario Limonciello Reviewed-by: Gautham R. Shenoy Tested-by: Dhananjay Ugwekar Signed-off-by: Perry Yuan Signed-off-by: Rafael J.
Wysocki --- drivers/cpufreq/amd-pstate.c | 53 ++++++++++++++++++++++++++++++++++++++++++-- include/linux/amd-pstate.h | 6 +++++ 2 files changed, 57 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c index 510b5aec42ea..83a29b257794 100644 --- a/drivers/cpufreq/amd-pstate.c +++ b/drivers/cpufreq/amd-pstate.c @@ -67,6 +67,7 @@ static struct cpufreq_driver amd_pstate_epp_driver; static int cppc_state = AMD_PSTATE_UNDEFINED; static bool cppc_enabled; static bool amd_pstate_prefcore = true; +static struct quirk_entry *quirks; /* * AMD Energy Preference Performance (EPP) @@ -111,6 +112,41 @@ static unsigned int epp_values[] = { typedef int (*cppc_mode_transition_fn)(int); +static struct quirk_entry quirk_amd_7k62 = { + .nominal_freq = 2600, + .lowest_freq = 550, +}; + +static int __init dmi_matched_7k62_bios_bug(const struct dmi_system_id *dmi) +{ + /** + * match the broken bios for family 17h processor support CPPC V2 + * broken BIOS lack of nominal_freq and lowest_freq capabilities + * definition in ACPI tables + */ + if (boot_cpu_has(X86_FEATURE_ZEN2)) { + quirks = dmi->driver_data; + pr_info("Overriding nominal and lowest frequencies for %s\n", dmi->ident); + return 1; + } + + return 0; +} + +static const struct dmi_system_id amd_pstate_quirks_table[] __initconst = { + { + .callback = dmi_matched_7k62_bios_bug, + .ident = "AMD EPYC 7K62", + .matches = { + DMI_MATCH(DMI_BIOS_VERSION, "5.14"), + DMI_MATCH(DMI_BIOS_RELEASE, "12/12/2019"), + }, + .driver_data = &quirk_amd_7k62, + }, + {} +}; +MODULE_DEVICE_TABLE(dmi, amd_pstate_quirks_table); + static inline int get_mode_idx_from_str(const char *str, size_t size) { int i; @@ -812,8 +848,16 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata) if (ret) return ret; - min_freq = cppc_perf.lowest_freq * 1000; - nominal_freq = cppc_perf.nominal_freq; + if (quirks && quirks->lowest_freq) + min_freq = quirks->lowest_freq * 1000; + else + min_freq = cppc_perf.lowest_freq * 1000; + + if (quirks && quirks->nominal_freq) + nominal_freq = quirks->nominal_freq ; + else + nominal_freq = cppc_perf.nominal_freq; + nominal_perf = READ_ONCE(cpudata->nominal_perf); highest_perf = READ_ONCE(cpudata->highest_perf); @@ -1662,6 +1706,11 @@ static int __init amd_pstate_init(void) if (cpufreq_get_current_driver()) return -EEXIST; + quirks = NULL; + + /* check if this machine need CPPC quirks */ + dmi_check_system(amd_pstate_quirks_table); + switch (cppc_state) { case AMD_PSTATE_UNDEFINED: /* Disable on the following configs by default: diff --git a/include/linux/amd-pstate.h b/include/linux/amd-pstate.h index ec0b0fa3e9bb..d58fc022ec46 100644 --- a/include/linux/amd-pstate.h +++ b/include/linux/amd-pstate.h @@ -128,4 +128,10 @@ static const char * const amd_pstate_mode_string[] = { [AMD_PSTATE_GUIDED] = "guided", NULL, }; + +struct quirk_entry { + u32 nominal_freq; + u32 lowest_freq; +}; + #endif /* _LINUX_AMD_PSTATE_H */ -- cgit From 05947224ff469bf17b3791fd009bc27ce5151997 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Wed, 14 Feb 2024 10:33:28 +0100 Subject: pwm: Ensure that pwm_chips are allocated using pwmchip_alloc() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Memory holding a struct device must not be freed before the reference count drops to zero. So a struct pwm_chip must not live in memory freed by a driver on unbind. 
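As a hedged illustration of the allocation pattern this enforces (not part of the patch; the foo_* names are hypothetical, only the devm_pwmchip_alloc()/devm_pwmchip_add() and pwm_ops calls are framework API), a converted driver lets the PWM core own the chip's memory:

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>

static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
                         const struct pwm_state *state)
{
        return 0;       /* a real driver would program the hardware here */
}

static const struct pwm_ops foo_pwm_ops = {
        .apply = foo_pwm_apply,
};

static int foo_pwm_probe(struct platform_device *pdev)
{
        struct pwm_chip *chip;

        /*
         * Never a static or driver-kfree()d struct pwm_chip: the core
         * allocates it (optionally together with sizeof_priv bytes of
         * driver data reachable via pwmchip_get_drvdata()) and controls
         * its lifetime.
         */
        chip = devm_pwmchip_alloc(&pdev->dev, 4, 0);
        if (IS_ERR(chip))
                return PTR_ERR(chip);

        chip->ops = &foo_pwm_ops;

        return devm_pwmchip_add(&pdev->dev, chip);
}

A chip embedded in a driver-private structure, or one freed from the driver's own remove path, is exactly what the new check rejects.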
All in-tree drivers were fixed accordingly, but out-of-tree drivers that were not adapted still compile fine, so catch these in pwmchip_add(). Link: https://lore.kernel.org/r/35f5b229c98f78b2f6ce2397259a4a936be477c0.1707900770.git.u.kleine-koenig@pengutronix.de Signed-off-by: Uwe Kleine-König --- drivers/pwm/core.c | 10 ++++++++++ include/linux/pwm.h | 2 ++ 2 files changed, 12 insertions(+) (limited to 'include/linux') diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 403525cc1783..d3f475eca97f 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -481,6 +481,7 @@ struct pwm_chip *pwmchip_alloc(struct device *parent, unsigned int npwm, size_t chip->dev = parent; chip->npwm = npwm; + chip->uses_pwmchip_alloc = true; pwmchip_set_drvdata(chip, pwmchip_priv(chip)); @@ -561,6 +562,15 @@ int __pwmchip_add(struct pwm_chip *chip, struct module *owner) if (!chip || !pwmchip_parent(chip) || !chip->ops || !chip->npwm) return -EINVAL; + /* + * a struct pwm_chip must be allocated using (devm_)pwmchip_alloc, + * otherwise the embedded struct device might disappear too early + * resulting in memory corruption. + * Catch drivers that were not converted appropriately. + */ + if (!chip->uses_pwmchip_alloc) + return -EINVAL; + if (!pwm_ops_check(chip)) return -EINVAL; diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 4a6568dfdf3f..94a642a88817 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -272,6 +272,7 @@ struct pwm_ops { * @npwm: number of PWMs controlled by this chip * @of_xlate: request a PWM device given a device tree PWM specifier * @atomic: can the driver's ->apply() be called in atomic context + * @uses_pwmchip_alloc: signals if pwmchip_allow was used to allocate this chip * @driver_data: Private pointer for driver specific info * @pwms: array of PWM devices allocated by the framework */ @@ -287,6 +288,7 @@ struct pwm_chip { bool atomic; /* only used internally by the PWM framework */ + bool uses_pwmchip_alloc; void *driver_data; struct pwm_device *pwms; }; -- cgit From e9cc807f87ffd1ccc919731e8f624982935af3e0 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Sun, 17 Mar 2024 11:40:34 +0100 Subject: pwm: Move contents of sysfs.c into core.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With the upcoming restructuring, having everything in a single file simplifies things a bit. The relevant and somewhat visible changes are: - Some prototypes were dropped from include/linux/pwm.h; they were only needed so that core.c had a declaration of the symbols defined in sysfs.c. The respective functions are static now. - The pwm class now also exists if CONFIG_SYSFS isn't enabled. Having CONFIG_SYSFS is not very relevant today, but even without it the class and device stuff still provides lifetime tracking. - Both files had an initcall; these are merged into a single one now. Instead of a big #ifdef block for CONFIG_DEBUG_FS, a single if (IS_ENABLED(CONFIG_DEBUG_FS)) is used now (a small illustrative sketch follows below). This increases compile coverage a bit and is a tad nicer on the eyes.
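The sketch referenced in the last bullet point, using a hypothetical foo example rather than the pwm code itself, shows why the IS_ENABLED() form is preferred:

#include <linux/debugfs.h>
#include <linux/init.h>

static int __init foo_init(void)
{
        /*
         * The old style would hide this call behind #ifdef CONFIG_DEBUG_FS.
         * With IS_ENABLED() the call is always compiled (so it cannot
         * silently bit-rot) and the branch is folded away by the compiler
         * when debugfs is disabled; the debugfs stubs keep the build working.
         */
        if (IS_ENABLED(CONFIG_DEBUG_FS))
                debugfs_create_dir("foo", NULL);

        return 0;
}
subsys_initcall(foo_init);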
Link: https://lore.kernel.org/r/9e2d39a5280d7dda5bfc6682a8aef510148635b2.1710670958.git.u.kleine-koenig@pengutronix.de Signed-off-by: Uwe Kleine-König --- drivers/pwm/Kconfig | 4 - drivers/pwm/Makefile | 1 - drivers/pwm/core.c | 540 +++++++++++++++++++++++++++++++++++++++++++++++++- drivers/pwm/sysfs.c | 550 --------------------------------------------------- include/linux/pwm.h | 13 -- 5 files changed, 534 insertions(+), 574 deletions(-) delete mode 100644 drivers/pwm/sysfs.c (limited to 'include/linux') diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 4b956d661755..1dd7921194f5 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -29,10 +29,6 @@ menuconfig PWM if PWM -config PWM_SYSFS - bool - default y if SYSFS - config PWM_DEBUG bool "PWM lowlevel drivers additional checks and debug messages" depends on DEBUG_KERNEL diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index c5ec9e168ee7..90913519f11a 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -1,6 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_PWM) += core.o -obj-$(CONFIG_PWM_SYSFS) += sysfs.o obj-$(CONFIG_PWM_AB8500) += pwm-ab8500.o obj-$(CONFIG_PWM_APPLE) += pwm-apple.o obj-$(CONFIG_PWM_ATMEL) += pwm-atmel.o diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index d3f475eca97f..faa0f885e057 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -454,6 +454,535 @@ of_pwm_single_xlate(struct pwm_chip *chip, const struct of_phandle_args *args) } EXPORT_SYMBOL_GPL(of_pwm_single_xlate); +struct pwm_export { + struct device pwm_dev; + struct pwm_device *pwm; + struct mutex lock; + struct pwm_state suspend; +}; + +static inline struct pwm_chip *pwmchip_from_dev(struct device *pwmchip_dev) +{ + return dev_get_drvdata(pwmchip_dev); +} + +static inline struct pwm_export *pwmexport_from_dev(struct device *pwm_dev) +{ + return container_of(pwm_dev, struct pwm_export, pwm_dev); +} + +static inline struct pwm_device *pwm_from_dev(struct device *pwm_dev) +{ + struct pwm_export *export = pwmexport_from_dev(pwm_dev); + + return export->pwm; +} + +static ssize_t period_show(struct device *pwm_dev, + struct device_attribute *attr, + char *buf) +{ + const struct pwm_device *pwm = pwm_from_dev(pwm_dev); + struct pwm_state state; + + pwm_get_state(pwm, &state); + + return sysfs_emit(buf, "%llu\n", state.period); +} + +static ssize_t period_store(struct device *pwm_dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct pwm_export *export = pwmexport_from_dev(pwm_dev); + struct pwm_device *pwm = export->pwm; + struct pwm_state state; + u64 val; + int ret; + + ret = kstrtou64(buf, 0, &val); + if (ret) + return ret; + + mutex_lock(&export->lock); + pwm_get_state(pwm, &state); + state.period = val; + ret = pwm_apply_might_sleep(pwm, &state); + mutex_unlock(&export->lock); + + return ret ? 
: size; +} + +static ssize_t duty_cycle_show(struct device *pwm_dev, + struct device_attribute *attr, + char *buf) +{ + const struct pwm_device *pwm = pwm_from_dev(pwm_dev); + struct pwm_state state; + + pwm_get_state(pwm, &state); + + return sysfs_emit(buf, "%llu\n", state.duty_cycle); +} + +static ssize_t duty_cycle_store(struct device *pwm_dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct pwm_export *export = pwmexport_from_dev(pwm_dev); + struct pwm_device *pwm = export->pwm; + struct pwm_state state; + u64 val; + int ret; + + ret = kstrtou64(buf, 0, &val); + if (ret) + return ret; + + mutex_lock(&export->lock); + pwm_get_state(pwm, &state); + state.duty_cycle = val; + ret = pwm_apply_might_sleep(pwm, &state); + mutex_unlock(&export->lock); + + return ret ? : size; +} + +static ssize_t enable_show(struct device *pwm_dev, + struct device_attribute *attr, + char *buf) +{ + const struct pwm_device *pwm = pwm_from_dev(pwm_dev); + struct pwm_state state; + + pwm_get_state(pwm, &state); + + return sysfs_emit(buf, "%d\n", state.enabled); +} + +static ssize_t enable_store(struct device *pwm_dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct pwm_export *export = pwmexport_from_dev(pwm_dev); + struct pwm_device *pwm = export->pwm; + struct pwm_state state; + int val, ret; + + ret = kstrtoint(buf, 0, &val); + if (ret) + return ret; + + mutex_lock(&export->lock); + + pwm_get_state(pwm, &state); + + switch (val) { + case 0: + state.enabled = false; + break; + case 1: + state.enabled = true; + break; + default: + ret = -EINVAL; + goto unlock; + } + + ret = pwm_apply_might_sleep(pwm, &state); + +unlock: + mutex_unlock(&export->lock); + return ret ? : size; +} + +static ssize_t polarity_show(struct device *pwm_dev, + struct device_attribute *attr, + char *buf) +{ + const struct pwm_device *pwm = pwm_from_dev(pwm_dev); + const char *polarity = "unknown"; + struct pwm_state state; + + pwm_get_state(pwm, &state); + + switch (state.polarity) { + case PWM_POLARITY_NORMAL: + polarity = "normal"; + break; + + case PWM_POLARITY_INVERSED: + polarity = "inversed"; + break; + } + + return sysfs_emit(buf, "%s\n", polarity); +} + +static ssize_t polarity_store(struct device *pwm_dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct pwm_export *export = pwmexport_from_dev(pwm_dev); + struct pwm_device *pwm = export->pwm; + enum pwm_polarity polarity; + struct pwm_state state; + int ret; + + if (sysfs_streq(buf, "normal")) + polarity = PWM_POLARITY_NORMAL; + else if (sysfs_streq(buf, "inversed")) + polarity = PWM_POLARITY_INVERSED; + else + return -EINVAL; + + mutex_lock(&export->lock); + pwm_get_state(pwm, &state); + state.polarity = polarity; + ret = pwm_apply_might_sleep(pwm, &state); + mutex_unlock(&export->lock); + + return ret ? 
: size; +} + +static ssize_t capture_show(struct device *pwm_dev, + struct device_attribute *attr, + char *buf) +{ + struct pwm_device *pwm = pwm_from_dev(pwm_dev); + struct pwm_capture result; + int ret; + + ret = pwm_capture(pwm, &result, jiffies_to_msecs(HZ)); + if (ret) + return ret; + + return sysfs_emit(buf, "%u %u\n", result.period, result.duty_cycle); +} + +static DEVICE_ATTR_RW(period); +static DEVICE_ATTR_RW(duty_cycle); +static DEVICE_ATTR_RW(enable); +static DEVICE_ATTR_RW(polarity); +static DEVICE_ATTR_RO(capture); + +static struct attribute *pwm_attrs[] = { + &dev_attr_period.attr, + &dev_attr_duty_cycle.attr, + &dev_attr_enable.attr, + &dev_attr_polarity.attr, + &dev_attr_capture.attr, + NULL +}; +ATTRIBUTE_GROUPS(pwm); + +static void pwm_export_release(struct device *pwm_dev) +{ + struct pwm_export *export = pwmexport_from_dev(pwm_dev); + + kfree(export); +} + +static int pwm_export_child(struct device *pwmchip_dev, struct pwm_device *pwm) +{ + struct pwm_export *export; + char *pwm_prop[2]; + int ret; + + if (test_and_set_bit(PWMF_EXPORTED, &pwm->flags)) + return -EBUSY; + + export = kzalloc(sizeof(*export), GFP_KERNEL); + if (!export) { + clear_bit(PWMF_EXPORTED, &pwm->flags); + return -ENOMEM; + } + + export->pwm = pwm; + mutex_init(&export->lock); + + export->pwm_dev.release = pwm_export_release; + export->pwm_dev.parent = pwmchip_dev; + export->pwm_dev.devt = MKDEV(0, 0); + export->pwm_dev.groups = pwm_groups; + dev_set_name(&export->pwm_dev, "pwm%u", pwm->hwpwm); + + ret = device_register(&export->pwm_dev); + if (ret) { + clear_bit(PWMF_EXPORTED, &pwm->flags); + put_device(&export->pwm_dev); + export = NULL; + return ret; + } + pwm_prop[0] = kasprintf(GFP_KERNEL, "EXPORT=pwm%u", pwm->hwpwm); + pwm_prop[1] = NULL; + kobject_uevent_env(&pwmchip_dev->kobj, KOBJ_CHANGE, pwm_prop); + kfree(pwm_prop[0]); + + return 0; +} + +static int pwm_unexport_match(struct device *pwm_dev, void *data) +{ + return pwm_from_dev(pwm_dev) == data; +} + +static int pwm_unexport_child(struct device *pwmchip_dev, struct pwm_device *pwm) +{ + struct device *pwm_dev; + char *pwm_prop[2]; + + if (!test_and_clear_bit(PWMF_EXPORTED, &pwm->flags)) + return -ENODEV; + + pwm_dev = device_find_child(pwmchip_dev, pwm, pwm_unexport_match); + if (!pwm_dev) + return -ENODEV; + + pwm_prop[0] = kasprintf(GFP_KERNEL, "UNEXPORT=pwm%u", pwm->hwpwm); + pwm_prop[1] = NULL; + kobject_uevent_env(&pwmchip_dev->kobj, KOBJ_CHANGE, pwm_prop); + kfree(pwm_prop[0]); + + /* for device_find_child() */ + put_device(pwm_dev); + device_unregister(pwm_dev); + pwm_put(pwm); + + return 0; +} + +static ssize_t export_store(struct device *pwmchip_dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct pwm_chip *chip = pwmchip_from_dev(pwmchip_dev); + struct pwm_device *pwm; + unsigned int hwpwm; + int ret; + + ret = kstrtouint(buf, 0, &hwpwm); + if (ret < 0) + return ret; + + if (hwpwm >= chip->npwm) + return -ENODEV; + + pwm = pwm_request_from_chip(chip, hwpwm, "sysfs"); + if (IS_ERR(pwm)) + return PTR_ERR(pwm); + + ret = pwm_export_child(pwmchip_dev, pwm); + if (ret < 0) + pwm_put(pwm); + + return ret ? 
: len; +} +static DEVICE_ATTR_WO(export); + +static ssize_t unexport_store(struct device *pwmchip_dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct pwm_chip *chip = pwmchip_from_dev(pwmchip_dev); + unsigned int hwpwm; + int ret; + + ret = kstrtouint(buf, 0, &hwpwm); + if (ret < 0) + return ret; + + if (hwpwm >= chip->npwm) + return -ENODEV; + + ret = pwm_unexport_child(pwmchip_dev, &chip->pwms[hwpwm]); + + return ret ? : len; +} +static DEVICE_ATTR_WO(unexport); + +static ssize_t npwm_show(struct device *pwmchip_dev, struct device_attribute *attr, + char *buf) +{ + const struct pwm_chip *chip = pwmchip_from_dev(pwmchip_dev); + + return sysfs_emit(buf, "%u\n", chip->npwm); +} +static DEVICE_ATTR_RO(npwm); + +static struct attribute *pwm_chip_attrs[] = { + &dev_attr_export.attr, + &dev_attr_unexport.attr, + &dev_attr_npwm.attr, + NULL, +}; +ATTRIBUTE_GROUPS(pwm_chip); + +/* takes export->lock on success */ +static struct pwm_export *pwm_class_get_state(struct device *pwmchip_dev, + struct pwm_device *pwm, + struct pwm_state *state) +{ + struct device *pwm_dev; + struct pwm_export *export; + + if (!test_bit(PWMF_EXPORTED, &pwm->flags)) + return NULL; + + pwm_dev = device_find_child(pwmchip_dev, pwm, pwm_unexport_match); + if (!pwm_dev) + return NULL; + + export = pwmexport_from_dev(pwm_dev); + put_device(pwm_dev); /* for device_find_child() */ + + mutex_lock(&export->lock); + pwm_get_state(pwm, state); + + return export; +} + +static int pwm_class_apply_state(struct pwm_export *export, + struct pwm_device *pwm, + struct pwm_state *state) +{ + int ret = pwm_apply_might_sleep(pwm, state); + + /* release lock taken in pwm_class_get_state */ + mutex_unlock(&export->lock); + + return ret; +} + +static int pwm_class_resume_npwm(struct device *pwmchip_dev, unsigned int npwm) +{ + struct pwm_chip *chip = pwmchip_from_dev(pwmchip_dev); + unsigned int i; + int ret = 0; + + for (i = 0; i < npwm; i++) { + struct pwm_device *pwm = &chip->pwms[i]; + struct pwm_state state; + struct pwm_export *export; + + export = pwm_class_get_state(pwmchip_dev, pwm, &state); + if (!export) + continue; + + /* If pwmchip was not enabled before suspend, do nothing. */ + if (!export->suspend.enabled) { + /* release lock taken in pwm_class_get_state */ + mutex_unlock(&export->lock); + continue; + } + + state.enabled = export->suspend.enabled; + ret = pwm_class_apply_state(export, pwm, &state); + if (ret < 0) + break; + } + + return ret; +} + +static int pwm_class_suspend(struct device *pwmchip_dev) +{ + struct pwm_chip *chip = pwmchip_from_dev(pwmchip_dev); + unsigned int i; + int ret = 0; + + for (i = 0; i < chip->npwm; i++) { + struct pwm_device *pwm = &chip->pwms[i]; + struct pwm_state state; + struct pwm_export *export; + + export = pwm_class_get_state(pwmchip_dev, pwm, &state); + if (!export) + continue; + + /* + * If pwmchip was not enabled before suspend, save + * state for resume time and do nothing else. + */ + export->suspend = state; + if (!state.enabled) { + /* release lock taken in pwm_class_get_state */ + mutex_unlock(&export->lock); + continue; + } + + state.enabled = false; + ret = pwm_class_apply_state(export, pwm, &state); + if (ret < 0) { + /* + * roll back the PWM devices that were disabled by + * this suspend function. 
+ */ + pwm_class_resume_npwm(pwmchip_dev, i); + break; + } + } + + return ret; +} + +static int pwm_class_resume(struct device *pwmchip_dev) +{ + struct pwm_chip *chip = pwmchip_from_dev(pwmchip_dev); + + return pwm_class_resume_npwm(pwmchip_dev, chip->npwm); +} + +static DEFINE_SIMPLE_DEV_PM_OPS(pwm_class_pm_ops, pwm_class_suspend, pwm_class_resume); + +static struct class pwm_class = { + .name = "pwm", + .dev_groups = pwm_chip_groups, + .pm = pm_sleep_ptr(&pwm_class_pm_ops), +}; + +static int pwmchip_sysfs_match(struct device *pwmchip_dev, const void *data) +{ + return pwmchip_from_dev(pwmchip_dev) == data; +} + +static void pwmchip_sysfs_export(struct pwm_chip *chip) +{ + struct device *pwmchip_dev; + + /* + * If device_create() fails the pwm_chip is still usable by + * the kernel it's just not exported. + */ + pwmchip_dev = device_create(&pwm_class, pwmchip_parent(chip), MKDEV(0, 0), chip, + "pwmchip%d", chip->id); + if (IS_ERR(pwmchip_dev)) { + dev_warn(pwmchip_parent(chip), + "device_create failed for pwm_chip sysfs export\n"); + } +} + +static void pwmchip_sysfs_unexport(struct pwm_chip *chip) +{ + struct device *pwmchip_dev; + unsigned int i; + + pwmchip_dev = class_find_device(&pwm_class, NULL, chip, + pwmchip_sysfs_match); + if (!pwmchip_dev) + return; + + for (i = 0; i < chip->npwm; i++) { + struct pwm_device *pwm = &chip->pwms[i]; + + if (test_bit(PWMF_EXPORTED, &pwm->flags)) + pwm_unexport_child(pwmchip_dev, pwm); + } + + put_device(pwmchip_dev); + device_unregister(pwmchip_dev); +} + #define PWMCHIP_ALIGN ARCH_DMA_MINALIGN static void *pwmchip_priv(struct pwm_chip *chip) @@ -1086,7 +1615,6 @@ struct pwm_device *devm_fwnode_pwm_get(struct device *dev, } EXPORT_SYMBOL_GPL(devm_fwnode_pwm_get); -#ifdef CONFIG_DEBUG_FS static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s) { unsigned int i; @@ -1171,11 +1699,11 @@ static const struct seq_operations pwm_debugfs_sops = { DEFINE_SEQ_ATTRIBUTE(pwm_debugfs); -static int __init pwm_debugfs_init(void) +static int __init pwm_init(void) { - debugfs_create_file("pwm", 0444, NULL, NULL, &pwm_debugfs_fops); + if (IS_ENABLED(CONFIG_DEBUG_FS)) + debugfs_create_file("pwm", 0444, NULL, NULL, &pwm_debugfs_fops); - return 0; + return class_register(&pwm_class); } -subsys_initcall(pwm_debugfs_init); -#endif /* CONFIG_DEBUG_FS */ +subsys_initcall(pwm_init); diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c deleted file mode 100644 index 1a1b1f6dd98f..000000000000 --- a/drivers/pwm/sysfs.c +++ /dev/null @@ -1,550 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * A simple sysfs interface for the generic PWM framework - * - * Copyright (C) 2013 H Hartley Sweeten - * - * Based on previous work by Lars Poeschel - */ - -#include -#include -#include -#include -#include -#include - -struct pwm_export { - struct device pwm_dev; - struct pwm_device *pwm; - struct mutex lock; - struct pwm_state suspend; -}; - -static inline struct pwm_chip *pwmchip_from_dev(struct device *pwmchip_dev) -{ - return dev_get_drvdata(pwmchip_dev); -} - -static inline struct pwm_export *pwmexport_from_dev(struct device *pwm_dev) -{ - return container_of(pwm_dev, struct pwm_export, pwm_dev); -} - -static inline struct pwm_device *pwm_from_dev(struct device *pwm_dev) -{ - struct pwm_export *export = pwmexport_from_dev(pwm_dev); - - return export->pwm; -} - -static ssize_t period_show(struct device *pwm_dev, - struct device_attribute *attr, - char *buf) -{ - const struct pwm_device *pwm = pwm_from_dev(pwm_dev); - struct pwm_state state; - - 
pwm_get_state(pwm, &state); - - return sysfs_emit(buf, "%llu\n", state.period); -} - -static ssize_t period_store(struct device *pwm_dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct pwm_export *export = pwmexport_from_dev(pwm_dev); - struct pwm_device *pwm = export->pwm; - struct pwm_state state; - u64 val; - int ret; - - ret = kstrtou64(buf, 0, &val); - if (ret) - return ret; - - mutex_lock(&export->lock); - pwm_get_state(pwm, &state); - state.period = val; - ret = pwm_apply_might_sleep(pwm, &state); - mutex_unlock(&export->lock); - - return ret ? : size; -} - -static ssize_t duty_cycle_show(struct device *pwm_dev, - struct device_attribute *attr, - char *buf) -{ - const struct pwm_device *pwm = pwm_from_dev(pwm_dev); - struct pwm_state state; - - pwm_get_state(pwm, &state); - - return sysfs_emit(buf, "%llu\n", state.duty_cycle); -} - -static ssize_t duty_cycle_store(struct device *pwm_dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct pwm_export *export = pwmexport_from_dev(pwm_dev); - struct pwm_device *pwm = export->pwm; - struct pwm_state state; - u64 val; - int ret; - - ret = kstrtou64(buf, 0, &val); - if (ret) - return ret; - - mutex_lock(&export->lock); - pwm_get_state(pwm, &state); - state.duty_cycle = val; - ret = pwm_apply_might_sleep(pwm, &state); - mutex_unlock(&export->lock); - - return ret ? : size; -} - -static ssize_t enable_show(struct device *pwm_dev, - struct device_attribute *attr, - char *buf) -{ - const struct pwm_device *pwm = pwm_from_dev(pwm_dev); - struct pwm_state state; - - pwm_get_state(pwm, &state); - - return sysfs_emit(buf, "%d\n", state.enabled); -} - -static ssize_t enable_store(struct device *pwm_dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct pwm_export *export = pwmexport_from_dev(pwm_dev); - struct pwm_device *pwm = export->pwm; - struct pwm_state state; - int val, ret; - - ret = kstrtoint(buf, 0, &val); - if (ret) - return ret; - - mutex_lock(&export->lock); - - pwm_get_state(pwm, &state); - - switch (val) { - case 0: - state.enabled = false; - break; - case 1: - state.enabled = true; - break; - default: - ret = -EINVAL; - goto unlock; - } - - ret = pwm_apply_might_sleep(pwm, &state); - -unlock: - mutex_unlock(&export->lock); - return ret ? : size; -} - -static ssize_t polarity_show(struct device *pwm_dev, - struct device_attribute *attr, - char *buf) -{ - const struct pwm_device *pwm = pwm_from_dev(pwm_dev); - const char *polarity = "unknown"; - struct pwm_state state; - - pwm_get_state(pwm, &state); - - switch (state.polarity) { - case PWM_POLARITY_NORMAL: - polarity = "normal"; - break; - - case PWM_POLARITY_INVERSED: - polarity = "inversed"; - break; - } - - return sysfs_emit(buf, "%s\n", polarity); -} - -static ssize_t polarity_store(struct device *pwm_dev, - struct device_attribute *attr, - const char *buf, size_t size) -{ - struct pwm_export *export = pwmexport_from_dev(pwm_dev); - struct pwm_device *pwm = export->pwm; - enum pwm_polarity polarity; - struct pwm_state state; - int ret; - - if (sysfs_streq(buf, "normal")) - polarity = PWM_POLARITY_NORMAL; - else if (sysfs_streq(buf, "inversed")) - polarity = PWM_POLARITY_INVERSED; - else - return -EINVAL; - - mutex_lock(&export->lock); - pwm_get_state(pwm, &state); - state.polarity = polarity; - ret = pwm_apply_might_sleep(pwm, &state); - mutex_unlock(&export->lock); - - return ret ? 
: size; -} - -static ssize_t capture_show(struct device *pwm_dev, - struct device_attribute *attr, - char *buf) -{ - struct pwm_device *pwm = pwm_from_dev(pwm_dev); - struct pwm_capture result; - int ret; - - ret = pwm_capture(pwm, &result, jiffies_to_msecs(HZ)); - if (ret) - return ret; - - return sysfs_emit(buf, "%u %u\n", result.period, result.duty_cycle); -} - -static DEVICE_ATTR_RW(period); -static DEVICE_ATTR_RW(duty_cycle); -static DEVICE_ATTR_RW(enable); -static DEVICE_ATTR_RW(polarity); -static DEVICE_ATTR_RO(capture); - -static struct attribute *pwm_attrs[] = { - &dev_attr_period.attr, - &dev_attr_duty_cycle.attr, - &dev_attr_enable.attr, - &dev_attr_polarity.attr, - &dev_attr_capture.attr, - NULL -}; -ATTRIBUTE_GROUPS(pwm); - -static void pwm_export_release(struct device *pwm_dev) -{ - struct pwm_export *export = pwmexport_from_dev(pwm_dev); - - kfree(export); -} - -static int pwm_export_child(struct device *pwmchip_dev, struct pwm_device *pwm) -{ - struct pwm_export *export; - char *pwm_prop[2]; - int ret; - - if (test_and_set_bit(PWMF_EXPORTED, &pwm->flags)) - return -EBUSY; - - export = kzalloc(sizeof(*export), GFP_KERNEL); - if (!export) { - clear_bit(PWMF_EXPORTED, &pwm->flags); - return -ENOMEM; - } - - export->pwm = pwm; - mutex_init(&export->lock); - - export->pwm_dev.release = pwm_export_release; - export->pwm_dev.parent = pwmchip_dev; - export->pwm_dev.devt = MKDEV(0, 0); - export->pwm_dev.groups = pwm_groups; - dev_set_name(&export->pwm_dev, "pwm%u", pwm->hwpwm); - - ret = device_register(&export->pwm_dev); - if (ret) { - clear_bit(PWMF_EXPORTED, &pwm->flags); - put_device(&export->pwm_dev); - export = NULL; - return ret; - } - pwm_prop[0] = kasprintf(GFP_KERNEL, "EXPORT=pwm%u", pwm->hwpwm); - pwm_prop[1] = NULL; - kobject_uevent_env(&pwmchip_dev->kobj, KOBJ_CHANGE, pwm_prop); - kfree(pwm_prop[0]); - - return 0; -} - -static int pwm_unexport_match(struct device *pwm_dev, void *data) -{ - return pwm_from_dev(pwm_dev) == data; -} - -static int pwm_unexport_child(struct device *pwmchip_dev, struct pwm_device *pwm) -{ - struct device *pwm_dev; - char *pwm_prop[2]; - - if (!test_and_clear_bit(PWMF_EXPORTED, &pwm->flags)) - return -ENODEV; - - pwm_dev = device_find_child(pwmchip_dev, pwm, pwm_unexport_match); - if (!pwm_dev) - return -ENODEV; - - pwm_prop[0] = kasprintf(GFP_KERNEL, "UNEXPORT=pwm%u", pwm->hwpwm); - pwm_prop[1] = NULL; - kobject_uevent_env(&pwmchip_dev->kobj, KOBJ_CHANGE, pwm_prop); - kfree(pwm_prop[0]); - - /* for device_find_child() */ - put_device(pwm_dev); - device_unregister(pwm_dev); - pwm_put(pwm); - - return 0; -} - -static ssize_t export_store(struct device *pwmchip_dev, - struct device_attribute *attr, - const char *buf, size_t len) -{ - struct pwm_chip *chip = pwmchip_from_dev(pwmchip_dev); - struct pwm_device *pwm; - unsigned int hwpwm; - int ret; - - ret = kstrtouint(buf, 0, &hwpwm); - if (ret < 0) - return ret; - - if (hwpwm >= chip->npwm) - return -ENODEV; - - pwm = pwm_request_from_chip(chip, hwpwm, "sysfs"); - if (IS_ERR(pwm)) - return PTR_ERR(pwm); - - ret = pwm_export_child(pwmchip_dev, pwm); - if (ret < 0) - pwm_put(pwm); - - return ret ? 
: len; -} -static DEVICE_ATTR_WO(export); - -static ssize_t unexport_store(struct device *pwmchip_dev, - struct device_attribute *attr, - const char *buf, size_t len) -{ - struct pwm_chip *chip = pwmchip_from_dev(pwmchip_dev); - unsigned int hwpwm; - int ret; - - ret = kstrtouint(buf, 0, &hwpwm); - if (ret < 0) - return ret; - - if (hwpwm >= chip->npwm) - return -ENODEV; - - ret = pwm_unexport_child(pwmchip_dev, &chip->pwms[hwpwm]); - - return ret ? : len; -} -static DEVICE_ATTR_WO(unexport); - -static ssize_t npwm_show(struct device *pwmchip_dev, struct device_attribute *attr, - char *buf) -{ - const struct pwm_chip *chip = pwmchip_from_dev(pwmchip_dev); - - return sysfs_emit(buf, "%u\n", chip->npwm); -} -static DEVICE_ATTR_RO(npwm); - -static struct attribute *pwm_chip_attrs[] = { - &dev_attr_export.attr, - &dev_attr_unexport.attr, - &dev_attr_npwm.attr, - NULL, -}; -ATTRIBUTE_GROUPS(pwm_chip); - -/* takes export->lock on success */ -static struct pwm_export *pwm_class_get_state(struct device *pwmchip_dev, - struct pwm_device *pwm, - struct pwm_state *state) -{ - struct device *pwm_dev; - struct pwm_export *export; - - if (!test_bit(PWMF_EXPORTED, &pwm->flags)) - return NULL; - - pwm_dev = device_find_child(pwmchip_dev, pwm, pwm_unexport_match); - if (!pwm_dev) - return NULL; - - export = pwmexport_from_dev(pwm_dev); - put_device(pwm_dev); /* for device_find_child() */ - - mutex_lock(&export->lock); - pwm_get_state(pwm, state); - - return export; -} - -static int pwm_class_apply_state(struct pwm_export *export, - struct pwm_device *pwm, - struct pwm_state *state) -{ - int ret = pwm_apply_might_sleep(pwm, state); - - /* release lock taken in pwm_class_get_state */ - mutex_unlock(&export->lock); - - return ret; -} - -static int pwm_class_resume_npwm(struct device *pwmchip_dev, unsigned int npwm) -{ - struct pwm_chip *chip = pwmchip_from_dev(pwmchip_dev); - unsigned int i; - int ret = 0; - - for (i = 0; i < npwm; i++) { - struct pwm_device *pwm = &chip->pwms[i]; - struct pwm_state state; - struct pwm_export *export; - - export = pwm_class_get_state(pwmchip_dev, pwm, &state); - if (!export) - continue; - - /* If pwmchip was not enabled before suspend, do nothing. */ - if (!export->suspend.enabled) { - /* release lock taken in pwm_class_get_state */ - mutex_unlock(&export->lock); - continue; - } - - state.enabled = export->suspend.enabled; - ret = pwm_class_apply_state(export, pwm, &state); - if (ret < 0) - break; - } - - return ret; -} - -static int pwm_class_suspend(struct device *pwmchip_dev) -{ - struct pwm_chip *chip = pwmchip_from_dev(pwmchip_dev); - unsigned int i; - int ret = 0; - - for (i = 0; i < chip->npwm; i++) { - struct pwm_device *pwm = &chip->pwms[i]; - struct pwm_state state; - struct pwm_export *export; - - export = pwm_class_get_state(pwmchip_dev, pwm, &state); - if (!export) - continue; - - /* - * If pwmchip was not enabled before suspend, save - * state for resume time and do nothing else. - */ - export->suspend = state; - if (!state.enabled) { - /* release lock taken in pwm_class_get_state */ - mutex_unlock(&export->lock); - continue; - } - - state.enabled = false; - ret = pwm_class_apply_state(export, pwm, &state); - if (ret < 0) { - /* - * roll back the PWM devices that were disabled by - * this suspend function. 
- */ - pwm_class_resume_npwm(pwmchip_dev, i); - break; - } - } - - return ret; -} - -static int pwm_class_resume(struct device *pwmchip_dev) -{ - struct pwm_chip *chip = pwmchip_from_dev(pwmchip_dev); - - return pwm_class_resume_npwm(pwmchip_dev, chip->npwm); -} - -static DEFINE_SIMPLE_DEV_PM_OPS(pwm_class_pm_ops, pwm_class_suspend, pwm_class_resume); - -static struct class pwm_class = { - .name = "pwm", - .dev_groups = pwm_chip_groups, - .pm = pm_sleep_ptr(&pwm_class_pm_ops), -}; - -static int pwmchip_sysfs_match(struct device *pwmchip_dev, const void *data) -{ - return pwmchip_from_dev(pwmchip_dev) == data; -} - -void pwmchip_sysfs_export(struct pwm_chip *chip) -{ - struct device *pwmchip_dev; - - /* - * If device_create() fails the pwm_chip is still usable by - * the kernel it's just not exported. - */ - pwmchip_dev = device_create(&pwm_class, pwmchip_parent(chip), MKDEV(0, 0), chip, - "pwmchip%d", chip->id); - if (IS_ERR(pwmchip_dev)) { - dev_warn(pwmchip_parent(chip), - "device_create failed for pwm_chip sysfs export\n"); - } -} - -void pwmchip_sysfs_unexport(struct pwm_chip *chip) -{ - struct device *pwmchip_dev; - unsigned int i; - - pwmchip_dev = class_find_device(&pwm_class, NULL, chip, - pwmchip_sysfs_match); - if (!pwmchip_dev) - return; - - for (i = 0; i < chip->npwm; i++) { - struct pwm_device *pwm = &chip->pwms[i]; - - if (test_bit(PWMF_EXPORTED, &pwm->flags)) - pwm_unexport_child(pwmchip_dev, pwm); - } - - put_device(pwmchip_dev); - device_unregister(pwmchip_dev); -} - -static int __init pwm_sysfs_init(void) -{ - return class_register(&pwm_class); -} -subsys_initcall(pwm_sysfs_init); diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 94a642a88817..17e45d8413ed 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -630,17 +630,4 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num) } #endif -#ifdef CONFIG_PWM_SYSFS -void pwmchip_sysfs_export(struct pwm_chip *chip); -void pwmchip_sysfs_unexport(struct pwm_chip *chip); -#else -static inline void pwmchip_sysfs_export(struct pwm_chip *chip) -{ -} - -static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip) -{ -} -#endif /* CONFIG_PWM_SYSFS */ - #endif /* __LINUX_PWM_H */ -- cgit From ee37bf50749f06b29394d7ba8a85b47f023b61e2 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Sun, 17 Mar 2024 11:40:35 +0100 Subject: pwm: Ensure a struct pwm has the same lifetime as its pwm_chip MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It's required to not free the memory underlying a requested PWM while a consumer still has a reference to it. While currently a pwm_chip doesn't live long enough in all cases, linking the struct pwm to the pwm_chip results in the right lifetime as soon as the pwmchip is living long enough. This happens with the following commits. Note this is a breaking change for all pwm drivers that don't use pwmchip_alloc(). Reviewed-by: Gustavo A. R. 
Silva # for struct_size() and __counted_by() Link: https://lore.kernel.org/r/7e9e958841f049026c0023b309cc9deecf0ab61d.1710670958.git.u.kleine-koenig@pengutronix.de Signed-off-by: Uwe Kleine-König --- drivers/pwm/core.c | 27 ++++++++++----------------- include/linux/pwm.h | 2 +- 2 files changed, 11 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index faa0f885e057..19386452e6c8 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -987,7 +987,7 @@ static void pwmchip_sysfs_unexport(struct pwm_chip *chip) static void *pwmchip_priv(struct pwm_chip *chip) { - return (void *)chip + ALIGN(sizeof(*chip), PWMCHIP_ALIGN); + return (void *)chip + ALIGN(struct_size(chip, pwms, chip->npwm), PWMCHIP_ALIGN); } /* This is the counterpart to pwmchip_alloc() */ @@ -1001,8 +1001,10 @@ struct pwm_chip *pwmchip_alloc(struct device *parent, unsigned int npwm, size_t { struct pwm_chip *chip; size_t alloc_size; + unsigned int i; - alloc_size = size_add(ALIGN(sizeof(*chip), PWMCHIP_ALIGN), sizeof_priv); + alloc_size = size_add(ALIGN(struct_size(chip, pwms, npwm), PWMCHIP_ALIGN), + sizeof_priv); chip = kzalloc(alloc_size, GFP_KERNEL); if (!chip) @@ -1014,6 +1016,12 @@ struct pwm_chip *pwmchip_alloc(struct device *parent, unsigned int npwm, size_t pwmchip_set_drvdata(chip, pwmchip_priv(chip)); + for (i = 0; i < chip->npwm; i++) { + struct pwm_device *pwm = &chip->pwms[i]; + pwm->chip = chip; + pwm->hwpwm = i; + } + return chip; } EXPORT_SYMBOL_GPL(pwmchip_alloc); @@ -1085,7 +1093,6 @@ static bool pwm_ops_check(const struct pwm_chip *chip) */ int __pwmchip_add(struct pwm_chip *chip, struct module *owner) { - unsigned int i; int ret; if (!chip || !pwmchip_parent(chip) || !chip->ops || !chip->npwm) @@ -1105,28 +1112,16 @@ int __pwmchip_add(struct pwm_chip *chip, struct module *owner) chip->owner = owner; - chip->pwms = kcalloc(chip->npwm, sizeof(*chip->pwms), GFP_KERNEL); - if (!chip->pwms) - return -ENOMEM; - mutex_lock(&pwm_lock); ret = idr_alloc(&pwm_chips, chip, 0, 0, GFP_KERNEL); if (ret < 0) { mutex_unlock(&pwm_lock); - kfree(chip->pwms); return ret; } chip->id = ret; - for (i = 0; i < chip->npwm; i++) { - struct pwm_device *pwm = &chip->pwms[i]; - - pwm->chip = chip; - pwm->hwpwm = i; - } - mutex_unlock(&pwm_lock); if (IS_ENABLED(CONFIG_OF)) @@ -1156,8 +1151,6 @@ void pwmchip_remove(struct pwm_chip *chip) idr_remove(&pwm_chips, chip->id); mutex_unlock(&pwm_lock); - - kfree(chip->pwms); } EXPORT_SYMBOL_GPL(pwmchip_remove); diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 17e45d8413ed..78b9061572ff 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -290,7 +290,7 @@ struct pwm_chip { /* only used internally by the PWM framework */ bool uses_pwmchip_alloc; void *driver_data; - struct pwm_device *pwms; + struct pwm_device pwms[] __counted_by(npwm); }; static inline struct device *pwmchip_parent(const struct pwm_chip *chip) -- cgit From 4c56b1434b814899c42a9d9f43d8265371282cd0 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Sun, 17 Mar 2024 11:40:36 +0100 Subject: pwm: Add a struct device to struct pwm_chip MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This replaces the formerly dynamically allocated struct device. This allows to additionally use it to track the lifetime of the struct pwm_chip. Otherwise the new struct device provides the same sysfs API as was provided by the dynamic device before. 
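A hedged sketch of the lifetime pattern an embedded struct device provides (hypothetical foo_chip code, not the patch itself): the containing structure is only freed from the device's ->release() callback, after the last reference is dropped, which is what pwm_device_request() and pwm_put() rely on in the diff that follows:

#include <linux/device.h>
#include <linux/slab.h>

struct foo_chip {
        struct device dev;      /* embedded: its refcount rules the chip's lifetime */
        unsigned int nlines;
};

static void foo_chip_release(struct device *dev)
{
        struct foo_chip *chip = container_of(dev, struct foo_chip, dev);

        kfree(chip);            /* runs only after the final put_device() */
}

static struct foo_chip *foo_chip_alloc(struct device *parent)
{
        struct foo_chip *chip = kzalloc(sizeof(*chip), GFP_KERNEL);

        if (!chip)
                return NULL;

        device_initialize(&chip->dev);          /* refcount starts at 1 */
        chip->dev.parent = parent;
        chip->dev.release = foo_chip_release;

        return chip;
}

/*
 * Consumers pin the chip with get_device(&chip->dev) while they hold one of
 * its PWMs and drop the reference with put_device(&chip->dev) when done.
 */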
Link: https://lore.kernel.org/r/35c65ea7f6de789a568ff39d7b6b4ce80de4b7dc.1710670958.git.u.kleine-koenig@pengutronix.de Signed-off-by: Uwe Kleine-König --- drivers/pwm/core.c | 93 +++++++++++++++++++++++++++++------------------------ include/linux/pwm.h | 5 +-- 2 files changed, 54 insertions(+), 44 deletions(-) (limited to 'include/linux') diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 19386452e6c8..32871687c0a4 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -343,9 +343,16 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label) if (!try_module_get(chip->owner)) return -ENODEV; + if (!get_device(&chip->dev)) { + err = -ENODEV; + goto err_get_device; + } + if (ops->request) { err = ops->request(chip, pwm); if (err) { + put_device(&chip->dev); +err_get_device: module_put(chip->owner); return err; } @@ -463,7 +470,7 @@ struct pwm_export { static inline struct pwm_chip *pwmchip_from_dev(struct device *pwmchip_dev) { - return dev_get_drvdata(pwmchip_dev); + return container_of(pwmchip_dev, struct pwm_chip, dev); } static inline struct pwm_export *pwmexport_from_dev(struct device *pwm_dev) @@ -941,46 +948,16 @@ static struct class pwm_class = { .pm = pm_sleep_ptr(&pwm_class_pm_ops), }; -static int pwmchip_sysfs_match(struct device *pwmchip_dev, const void *data) -{ - return pwmchip_from_dev(pwmchip_dev) == data; -} - -static void pwmchip_sysfs_export(struct pwm_chip *chip) -{ - struct device *pwmchip_dev; - - /* - * If device_create() fails the pwm_chip is still usable by - * the kernel it's just not exported. - */ - pwmchip_dev = device_create(&pwm_class, pwmchip_parent(chip), MKDEV(0, 0), chip, - "pwmchip%d", chip->id); - if (IS_ERR(pwmchip_dev)) { - dev_warn(pwmchip_parent(chip), - "device_create failed for pwm_chip sysfs export\n"); - } -} - static void pwmchip_sysfs_unexport(struct pwm_chip *chip) { - struct device *pwmchip_dev; unsigned int i; - pwmchip_dev = class_find_device(&pwm_class, NULL, chip, - pwmchip_sysfs_match); - if (!pwmchip_dev) - return; - for (i = 0; i < chip->npwm; i++) { struct pwm_device *pwm = &chip->pwms[i]; if (test_bit(PWMF_EXPORTED, &pwm->flags)) - pwm_unexport_child(pwmchip_dev, pwm); + pwm_unexport_child(&chip->dev, pwm); } - - put_device(pwmchip_dev); - device_unregister(pwmchip_dev); } #define PWMCHIP_ALIGN ARCH_DMA_MINALIGN @@ -993,13 +970,21 @@ static void *pwmchip_priv(struct pwm_chip *chip) /* This is the counterpart to pwmchip_alloc() */ void pwmchip_put(struct pwm_chip *chip) { - kfree(chip); + put_device(&chip->dev); } EXPORT_SYMBOL_GPL(pwmchip_put); +static void pwmchip_release(struct device *pwmchip_dev) +{ + struct pwm_chip *chip = pwmchip_from_dev(pwmchip_dev); + + kfree(chip); +} + struct pwm_chip *pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv) { struct pwm_chip *chip; + struct device *pwmchip_dev; size_t alloc_size; unsigned int i; @@ -1010,10 +995,15 @@ struct pwm_chip *pwmchip_alloc(struct device *parent, unsigned int npwm, size_t if (!chip) return ERR_PTR(-ENOMEM); - chip->dev = parent; chip->npwm = npwm; chip->uses_pwmchip_alloc = true; + pwmchip_dev = &chip->dev; + device_initialize(pwmchip_dev); + pwmchip_dev->class = &pwm_class; + pwmchip_dev->parent = parent; + pwmchip_dev->release = pwmchip_release; + pwmchip_set_drvdata(chip, pwmchip_priv(chip)); for (i = 0; i < chip->npwm; i++) { @@ -1115,21 +1105,34 @@ int __pwmchip_add(struct pwm_chip *chip, struct module *owner) mutex_lock(&pwm_lock); ret = idr_alloc(&pwm_chips, chip, 0, 0, GFP_KERNEL); - if (ret < 0) { - 
mutex_unlock(&pwm_lock); - return ret; - } + if (ret < 0) + goto err_idr_alloc; chip->id = ret; - mutex_unlock(&pwm_lock); + dev_set_name(&chip->dev, "pwmchip%u", chip->id); if (IS_ENABLED(CONFIG_OF)) of_pwmchip_add(chip); - pwmchip_sysfs_export(chip); + ret = device_add(&chip->dev); + if (ret) + goto err_device_add; + + mutex_unlock(&pwm_lock); return 0; + +err_device_add: + if (IS_ENABLED(CONFIG_OF)) + of_pwmchip_remove(chip); + + idr_remove(&pwm_chips, chip->id); +err_idr_alloc: + + mutex_unlock(&pwm_lock); + + return ret; } EXPORT_SYMBOL_GPL(__pwmchip_add); @@ -1151,6 +1154,8 @@ void pwmchip_remove(struct pwm_chip *chip) idr_remove(&pwm_chips, chip->id); mutex_unlock(&pwm_lock); + + device_del(&chip->dev); } EXPORT_SYMBOL_GPL(pwmchip_remove); @@ -1520,6 +1525,8 @@ EXPORT_SYMBOL_GPL(pwm_get); */ void pwm_put(struct pwm_device *pwm) { + struct pwm_chip *chip = pwm->chip; + if (!pwm) return; @@ -1530,12 +1537,14 @@ void pwm_put(struct pwm_device *pwm) goto out; } - if (pwm->chip->ops->free) + if (chip->ops->free) pwm->chip->ops->free(pwm->chip, pwm); pwm->label = NULL; - module_put(pwm->chip->owner); + put_device(&chip->dev); + + module_put(chip->owner); out: mutex_unlock(&pwm_lock); } diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 78b9061572ff..495e23761f34 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -2,6 +2,7 @@ #ifndef __LINUX_PWM_H #define __LINUX_PWM_H +#include #include #include #include @@ -277,7 +278,7 @@ struct pwm_ops { * @pwms: array of PWM devices allocated by the framework */ struct pwm_chip { - struct device *dev; + struct device dev; const struct pwm_ops *ops; struct module *owner; unsigned int id; @@ -295,7 +296,7 @@ struct pwm_chip { static inline struct device *pwmchip_parent(const struct pwm_chip *chip) { - return chip->dev; + return chip->dev.parent; } static inline void *pwmchip_get_drvdata(struct pwm_chip *chip) -- cgit From 1192534407d0f8ab9a0503052777260ae74968c3 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 23 Apr 2024 13:46:12 -0700 Subject: platform/x86/intel/tpmi: Align comments in kernel-doc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Align comments in kernel-doc for the struct intel_tpmi_plat_info. Signed-off-by: Srinivas Pandruvada Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20240423204619.3946901-4-srinivas.pandruvada@linux.intel.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede --- include/linux/intel_tpmi.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h index a3529b962be6..685a41dddf82 100644 --- a/include/linux/intel_tpmi.h +++ b/include/linux/intel_tpmi.h @@ -27,9 +27,9 @@ enum intel_tpmi_id { /** * struct intel_tpmi_plat_info - Platform information for a TPMI device instance - * @package_id: CPU Package id - * @bus_number: PCI bus number - * @device_number: PCI device number + * @package_id: CPU Package id + * @bus_number: PCI bus number + * @device_number: PCI device number * @function_number: PCI function number * * Structure to store platform data for a TPMI device instance. 
This -- cgit From c8405cc815151a8b2fa6f7510ede8256228e45da Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 23 Apr 2024 13:46:13 -0700 Subject: platform/x86/intel/tpmi: Add additional TPMI header fields MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The TPMI information header added additional fields in version 2. Some of the reserved fields in version 1 are used to define new fields. Parse the new fields and export them as part of the platform data. These fields include: - PCI segment ID - Partition ID of the package: If a package is represented by more than one PCI device, then the partition ID, along with cdie_mask, describes the scope. For example, to get/set properties for a compute die, one of the PCI MMIO regions is selected based on the partition ID. - cdie_mask: Mask of all compute dies in this partition. Signed-off-by: Srinivas Pandruvada Reviewed-by: Andy Shevchenko Reviewed-by: Zhang Rui Reviewed-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20240423204619.3946901-5-srinivas.pandruvada@linux.intel.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede --- drivers/platform/x86/intel/tpmi.c | 15 ++++++++++++++- include/linux/intel_tpmi.h | 6 ++++++ 2 files changed, 20 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/platform/x86/intel/tpmi.c b/drivers/platform/x86/intel/tpmi.c index a5bcb77bcb35..6c0cbccd80bb 100644 --- a/drivers/platform/x86/intel/tpmi.c +++ b/drivers/platform/x86/intel/tpmi.c @@ -128,6 +128,9 @@ struct intel_tpmi_info { * @dev: PCI device number * @bus: PCI bus number * @pkg: CPU Package id + * @segment: PCI segment id + * @partition: Package Partition id + * @cdie_mask: Bitmap of compute dies in the current partition * @reserved: Reserved for future use * @lock: When set to 1 the register is locked and becomes read-only until next reset. Not for use by the OS driver. 
@@ -139,7 +142,10 @@ struct tpmi_info_header { u64 dev:5; u64 bus:8; u64 pkg:8; - u64 reserved:39; + u64 segment:8; + u64 partition:2; + u64 cdie_mask:16; + u64 reserved:13; u64 lock:1; } __packed; @@ -667,6 +673,7 @@ static int tpmi_create_devices(struct intel_tpmi_info *tpmi_info) #define TPMI_INFO_BUS_INFO_OFFSET 0x08 #define TPMI_INFO_MAJOR_VERSION 0x00 +#define TPMI_INFO_MINOR_VERSION 0x02 static int tpmi_process_info(struct intel_tpmi_info *tpmi_info, struct intel_tpmi_pm_feature *pfs) @@ -693,6 +700,12 @@ static int tpmi_process_info(struct intel_tpmi_info *tpmi_info, tpmi_info->plat_info.device_number = header.dev; tpmi_info->plat_info.function_number = header.fn; + if (TPMI_MINOR_VERSION(feature_header) >= TPMI_INFO_MINOR_VERSION) { + tpmi_info->plat_info.cdie_mask = header.cdie_mask; + tpmi_info->plat_info.partition = header.partition; + tpmi_info->plat_info.segment = header.segment; + } + error_info_header: iounmap(info_mem); diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h index 685a41dddf82..1e880cb0f454 100644 --- a/include/linux/intel_tpmi.h +++ b/include/linux/intel_tpmi.h @@ -27,7 +27,10 @@ enum intel_tpmi_id { /** * struct intel_tpmi_plat_info - Platform information for a TPMI device instance + * @cdie_mask: Mask of all compute dies in the partition * @package_id: CPU Package id + * @partition: Package partition id when multiple VSEC PCI devices per package + * @segment: PCI segment ID * @bus_number: PCI bus number * @device_number: PCI device number * @function_number: PCI function number @@ -36,7 +39,10 @@ enum intel_tpmi_id { * struct is used to return data via tpmi_get_platform_data(). */ struct intel_tpmi_plat_info { + u16 cdie_mask; u8 package_id; + u8 partition; + u8 segment; u8 bus_number; u8 device_number; u8 function_number; -- cgit From 2ff1e97587f4d398686f52c07afde3faf3da4e5c Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 19 Mar 2024 10:00:09 +0000 Subject: netfs: Replace PG_fscache by setting folio->private and marking dirty When dirty data is being written to the cache, setting/waiting on/clearing the fscache flag is always done in tandem with setting/waiting on/clearing the writeback flag. The netfslib buffered write routines wait on and set both flags and the write request cleanup clears both flags, so the fscache flag is almost superfluous. The reason it isn't superfluous is because the fscache flag is also used to indicate that data just read from the server is being written to the cache. The flag is used to prevent a race involving overlapping direct-I/O writes to the cache. Change this to indicate that a page is in need of being copied to the cache by placing a magic value in folio->private and marking the folios dirty. Then when the writeback code sees a folio marked in this way, it only writes it to the cache and not to the server. If a folio that has this magic value set is modified, the value is just replaced and the folio will then be uplodaded too. With this, PG_fscache is no longer required by the netfslib core, 9p and afs. Ceph and nfs, however, still need to use the old PG_fscache-based tracking. To deal with this, a flag, NETFS_ICTX_USE_PGPRIV2, now has to be set on the flags in the netfs_inode struct for those filesystems. This reenables the use of PG_fscache in that inode. 9p and afs use the netfslib write helpers so get switched over; cifs, for the moment, does page-by-page manual access to the cache, so doesn't use PG_fscache and is unaffected. 
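The core of the new marking scheme can be condensed into two small helpers. This is a simplified sketch; the helper names are hypothetical, and only the calls and the NETFS_FOLIO_COPY_TO_CACHE value come from the patch below:

#include <linux/pagemap.h>
#include <linux/netfs.h>

/* Mark a freshly read folio as needing a copy to the cache, not an upload. */
static void mark_folio_copy_to_cache(struct folio *folio)
{
	if (!folio_get_private(folio)) {
		folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
		filemap_dirty_folio(folio->mapping, folio);
	}
}

/* Writeback then writes such folios to the cache only, skipping the server. */
static bool folio_is_copy_to_cache_only(struct folio *folio)
{
	return folio_get_private(folio) == NETFS_FOLIO_COPY_TO_CACHE;
}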
Signed-off-by: David Howells Reviewed-by: Jeff Layton cc: Matthew Wilcox (Oracle) cc: Eric Van Hensbergen cc: Latchesar Ionkov cc: Dominique Martinet cc: Christian Schoenebeck cc: Marc Dionne cc: Ilya Dryomov cc: Xiubo Li cc: Steve French cc: Paulo Alcantara cc: Ronnie Sahlberg cc: Shyam Prasad N cc: Tom Talpey cc: Bharath SM cc: Trond Myklebust cc: Anna Schumaker cc: netfs@lists.linux.dev cc: v9fs@lists.linux.dev cc: linux-afs@lists.infradead.org cc: ceph-devel@vger.kernel.org cc: linux-cifs@vger.kernel.org cc: linux-nfs@vger.kernel.org cc: linux-fsdevel@vger.kernel.org cc: linux-mm@kvack.org --- fs/ceph/addr.c | 2 +- fs/ceph/inode.c | 2 + fs/netfs/buffered_read.c | 36 ++++++++++++----- fs/netfs/buffered_write.c | 93 +++++++++++++++++++++----------------------- fs/netfs/fscache_io.c | 12 ++++-- fs/netfs/internal.h | 10 +++-- fs/netfs/io.c | 18 +++++---- fs/netfs/main.c | 1 + fs/netfs/misc.c | 10 +---- fs/netfs/objects.c | 6 ++- fs/nfs/fscache.h | 2 + include/linux/fscache.h | 22 +++++++---- include/linux/netfs.h | 20 ++++++++-- include/trace/events/netfs.h | 6 ++- 14 files changed, 143 insertions(+), 97 deletions(-) (limited to 'include/linux') diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index ee9caf7916fb..28ae4976a4f9 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -517,7 +517,7 @@ static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, b struct fscache_cookie *cookie = ceph_fscache_cookie(ci); fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode), - ceph_fscache_write_terminated, inode, caching); + ceph_fscache_write_terminated, inode, true, caching); } #else static inline void ceph_set_page_fscache(struct page *page) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 7b2e77517f23..99561fddcb38 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -577,6 +577,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb) /* Set parameters for the netfs library */ netfs_inode_init(&ci->netfs, &ceph_netfs_ops, false); + /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */ + __set_bit(NETFS_ICTX_USE_PGPRIV2, &ci->netfs.flags); spin_lock_init(&ci->i_ceph_lock); diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c index 3298c29b5548..6d49319c82c6 100644 --- a/fs/netfs/buffered_read.c +++ b/fs/netfs/buffered_read.c @@ -10,8 +10,11 @@ #include "internal.h" /* - * Unlock the folios in a read operation. We need to set PG_fscache on any + * Unlock the folios in a read operation. We need to set PG_writeback on any * folios we're going to write back before we unlock them. + * + * Note that if the deprecated NETFS_RREQ_USE_PGPRIV2 is set then we use + * PG_private_2 and do a direct write to the cache from here instead. 
*/ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq) { @@ -48,14 +51,14 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq) xas_for_each(&xas, folio, last_page) { loff_t pg_end; bool pg_failed = false; - bool folio_started; + bool wback_to_cache = false; + bool folio_started = false; if (xas_retry(&xas, folio)) continue; pg_end = folio_pos(folio) + folio_size(folio) - 1; - folio_started = false; for (;;) { loff_t sreq_end; @@ -63,10 +66,16 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq) pg_failed = true; break; } - if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) { - trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache); - folio_start_fscache(folio); - folio_started = true; + if (test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) { + if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, + &subreq->flags)) { + trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache); + folio_start_fscache(folio); + folio_started = true; + } + } else { + wback_to_cache |= + test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags); } pg_failed |= subreq_failed; sreq_end = subreq->start + subreq->len - 1; @@ -98,6 +107,11 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq) kfree(finfo); } folio_mark_uptodate(folio); + if (wback_to_cache && !WARN_ON_ONCE(folio_get_private(folio) != NULL)) { + trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache); + folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE); + filemap_dirty_folio(folio->mapping, folio); + } } if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) { @@ -491,9 +505,11 @@ retry: netfs_put_request(rreq, false, netfs_rreq_trace_put_return); have_folio: - ret = folio_wait_fscache_killable(folio); - if (ret < 0) - goto error; + if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags)) { + ret = folio_wait_fscache_killable(folio); + if (ret < 0) + goto error; + } have_folio_no_wait: *_folio = folio; _leave(" = 0"); diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c index f7455a579f21..57c6eab01261 100644 --- a/fs/netfs/buffered_write.c +++ b/fs/netfs/buffered_write.c @@ -30,21 +30,13 @@ static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq); static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group) { - if (netfs_group && !folio_get_private(folio)) - folio_attach_private(folio, netfs_get_group(netfs_group)); -} + void *priv = folio_get_private(folio); -#if IS_ENABLED(CONFIG_FSCACHE) -static void netfs_folio_start_fscache(bool caching, struct folio *folio) -{ - if (caching) - folio_start_fscache(folio); -} -#else -static void netfs_folio_start_fscache(bool caching, struct folio *folio) -{ + if (netfs_group && (!priv || priv == NETFS_FOLIO_COPY_TO_CACHE)) + folio_attach_private(folio, netfs_get_group(netfs_group)); + else if (!netfs_group && priv == NETFS_FOLIO_COPY_TO_CACHE) + folio_detach_private(folio); } -#endif /* * Decide how we should modify a folio. 
We might be attempting to do @@ -63,11 +55,12 @@ static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx, bool maybe_trouble) { struct netfs_folio *finfo = netfs_folio_info(folio); + struct netfs_group *group = netfs_folio_group(folio); loff_t pos = folio_file_pos(folio); _enter(""); - if (netfs_folio_group(folio) != netfs_group) + if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) return NETFS_FLUSH_CONTENT; if (folio_test_uptodate(folio)) @@ -396,9 +389,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter, folio_clear_dirty_for_io(folio); /* We make multiple writes to the folio... */ if (!folio_test_writeback(folio)) { - folio_wait_fscache(folio); folio_start_writeback(folio); - folio_start_fscache(folio); if (wreq->iter.count == 0) trace_netfs_folio(folio, netfs_folio_trace_wthru); else @@ -528,6 +519,7 @@ EXPORT_SYMBOL(netfs_file_write_iter); */ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group) { + struct netfs_group *group; struct folio *folio = page_folio(vmf->page); struct file *file = vmf->vma->vm_file; struct inode *inode = file_inode(file); @@ -550,7 +542,8 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr goto out; } - if (netfs_folio_group(folio) != netfs_group) { + group = netfs_folio_group(folio); + if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) { folio_unlock(folio); err = filemap_fdatawait_range(inode->i_mapping, folio_pos(folio), @@ -606,8 +599,6 @@ static void netfs_kill_pages(struct address_space *mapping, trace_netfs_folio(folio, netfs_folio_trace_kill); folio_clear_uptodate(folio); - if (folio_test_fscache(folio)) - folio_end_fscache(folio); folio_end_writeback(folio); folio_lock(folio); generic_error_remove_folio(mapping, folio); @@ -643,8 +634,6 @@ static void netfs_redirty_pages(struct address_space *mapping, next = folio_next_index(folio); trace_netfs_folio(folio, netfs_folio_trace_redirty); filemap_dirty_folio(mapping, folio); - if (folio_test_fscache(folio)) - folio_end_fscache(folio); folio_end_writeback(folio); folio_put(folio); } while (index = next, index <= last); @@ -700,7 +689,11 @@ static void netfs_pages_written_back(struct netfs_io_request *wreq) if (!folio_test_dirty(folio)) { folio_detach_private(folio); gcount++; - trace_netfs_folio(folio, netfs_folio_trace_clear_g); + if (group == NETFS_FOLIO_COPY_TO_CACHE) + trace_netfs_folio(folio, + netfs_folio_trace_end_copy); + else + trace_netfs_folio(folio, netfs_folio_trace_clear_g); } else { trace_netfs_folio(folio, netfs_folio_trace_redirtied); } @@ -724,8 +717,6 @@ static void netfs_pages_written_back(struct netfs_io_request *wreq) trace_netfs_folio(folio, netfs_folio_trace_clear); } end_wb: - if (folio_test_fscache(folio)) - folio_end_fscache(folio); xas_advance(&xas, folio_next_index(folio) - 1); folio_end_writeback(folio); } @@ -795,7 +786,6 @@ static void netfs_extend_writeback(struct address_space *mapping, long *_count, loff_t start, loff_t max_len, - bool caching, size_t *_len, size_t *_top) { @@ -846,8 +836,7 @@ static void netfs_extend_writeback(struct address_space *mapping, break; } if (!folio_test_dirty(folio) || - folio_test_writeback(folio) || - folio_test_fscache(folio)) { + folio_test_writeback(folio)) { folio_unlock(folio); folio_put(folio); xas_reset(xas); @@ -860,7 +849,8 @@ static void netfs_extend_writeback(struct address_space *mapping, if ((const struct netfs_group *)priv != group) { stop = true; finfo = netfs_folio_info(folio); - if 
(finfo->netfs_group != group || + if (!finfo || + finfo->netfs_group != group || finfo->dirty_offset > 0) { folio_unlock(folio); folio_put(folio); @@ -894,12 +884,14 @@ static void netfs_extend_writeback(struct address_space *mapping, for (i = 0; i < folio_batch_count(&fbatch); i++) { folio = fbatch.folios[i]; - trace_netfs_folio(folio, netfs_folio_trace_store_plus); + if (group == NETFS_FOLIO_COPY_TO_CACHE) + trace_netfs_folio(folio, netfs_folio_trace_copy_plus); + else + trace_netfs_folio(folio, netfs_folio_trace_store_plus); if (!folio_clear_dirty_for_io(folio)) BUG(); folio_start_writeback(folio); - netfs_folio_start_fscache(caching, folio); folio_unlock(folio); } @@ -925,14 +917,14 @@ static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping, struct netfs_inode *ctx = netfs_inode(mapping->host); unsigned long long i_size = i_size_read(&ctx->inode); size_t len, max_len; - bool caching = netfs_is_cache_enabled(ctx); long count = wbc->nr_to_write; int ret; - _enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching); + _enter(",%lx,%llx-%llx", folio->index, start, end); wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio), - NETFS_WRITEBACK); + group == NETFS_FOLIO_COPY_TO_CACHE ? + NETFS_COPY_TO_CACHE : NETFS_WRITEBACK); if (IS_ERR(wreq)) { folio_unlock(folio); return PTR_ERR(wreq); @@ -941,7 +933,6 @@ static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping, if (!folio_clear_dirty_for_io(folio)) BUG(); folio_start_writeback(folio); - netfs_folio_start_fscache(caching, folio); count -= folio_nr_pages(folio); @@ -950,7 +941,10 @@ static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping, * immediately lockable, is not dirty or is missing, or we reach the * end of the range. */ - trace_netfs_folio(folio, netfs_folio_trace_store); + if (group == NETFS_FOLIO_COPY_TO_CACHE) + trace_netfs_folio(folio, netfs_folio_trace_copy); + else + trace_netfs_folio(folio, netfs_folio_trace_store); len = wreq->len; finfo = netfs_folio_info(folio); @@ -973,7 +967,7 @@ static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping, if (len < max_len) netfs_extend_writeback(mapping, group, xas, &count, start, - max_len, caching, &len, &wreq->upper_len); + max_len, &len, &wreq->upper_len); } cant_expand: @@ -997,15 +991,18 @@ cant_expand: iov_iter_xarray(&wreq->iter, ITER_SOURCE, &mapping->i_pages, start, wreq->upper_len); - __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags); - ret = netfs_begin_write(wreq, true, netfs_write_trace_writeback); + if (group != NETFS_FOLIO_COPY_TO_CACHE) { + __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags); + ret = netfs_begin_write(wreq, true, netfs_write_trace_writeback); + } else { + ret = netfs_begin_write(wreq, true, netfs_write_trace_copy_to_cache); + } if (ret == 0 || ret == -EIOCBQUEUED) wbc->nr_to_write -= len / PAGE_SIZE; } else { _debug("write discard %zx @%llx [%llx]", len, start, i_size); /* The dirty region was entirely beyond the EOF. */ - fscache_clear_page_bits(mapping, start, len, caching); netfs_pages_written_back(wreq); ret = 0; } @@ -1058,9 +1055,11 @@ search_again: /* Skip any dirty folio that's not in the group of interest. 
*/ priv = folio_get_private(folio); - if ((const struct netfs_group *)priv != group) { - finfo = netfs_folio_info(folio); - if (finfo->netfs_group != group) { + if ((const struct netfs_group *)priv == NETFS_FOLIO_COPY_TO_CACHE) { + group = NETFS_FOLIO_COPY_TO_CACHE; + } else if ((const struct netfs_group *)priv != group) { + finfo = __netfs_folio_info(priv); + if (!finfo || finfo->netfs_group != group) { folio_put(folio); continue; } @@ -1099,14 +1098,10 @@ lock_again: goto search_again; } - if (folio_test_writeback(folio) || - folio_test_fscache(folio)) { + if (folio_test_writeback(folio)) { folio_unlock(folio); if (wbc->sync_mode != WB_SYNC_NONE) { folio_wait_writeback(folio); -#ifdef CONFIG_FSCACHE - folio_wait_fscache(folio); -#endif goto lock_again; } @@ -1265,7 +1260,8 @@ int netfs_launder_folio(struct folio *folio) bvec_set_folio(&bvec, folio, len, offset); iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len); - __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags); + if (group != NETFS_FOLIO_COPY_TO_CACHE) + __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags); ret = netfs_begin_write(wreq, true, netfs_write_trace_launder); out_put: @@ -1274,7 +1270,6 @@ out_put: kfree(finfo); netfs_put_request(wreq, false, netfs_rreq_trace_put_return); out: - folio_wait_fscache(folio); _leave(" = %d", ret); return ret; } diff --git a/fs/netfs/fscache_io.c b/fs/netfs/fscache_io.c index 43a651ed8264..5028f2ae30da 100644 --- a/fs/netfs/fscache_io.c +++ b/fs/netfs/fscache_io.c @@ -166,6 +166,7 @@ struct fscache_write_request { loff_t start; size_t len; bool set_bits; + bool using_pgpriv2; netfs_io_terminated_t term_func; void *term_func_priv; }; @@ -197,8 +198,9 @@ static void fscache_wreq_done(void *priv, ssize_t transferred_or_error, { struct fscache_write_request *wreq = priv; - fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len, - wreq->set_bits); + if (wreq->using_pgpriv2) + fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len, + wreq->set_bits); if (wreq->term_func) wreq->term_func(wreq->term_func_priv, transferred_or_error, @@ -212,7 +214,7 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie, loff_t start, size_t len, loff_t i_size, netfs_io_terminated_t term_func, void *term_func_priv, - bool cond) + bool using_pgpriv2, bool cond) { struct fscache_write_request *wreq; struct netfs_cache_resources *cres; @@ -230,6 +232,7 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie, wreq->mapping = mapping; wreq->start = start; wreq->len = len; + wreq->using_pgpriv2 = using_pgpriv2; wreq->set_bits = cond; wreq->term_func = term_func; wreq->term_func_priv = term_func_priv; @@ -257,7 +260,8 @@ abandon_end: abandon_free: kfree(wreq); abandon: - fscache_clear_page_bits(mapping, start, len, cond); + if (using_pgpriv2) + fscache_clear_page_bits(mapping, start, len, cond); if (term_func) term_func(term_func_priv, ret, false); } diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h index ec7045d24400..156ab138e224 100644 --- a/fs/netfs/internal.h +++ b/fs/netfs/internal.h @@ -168,7 +168,7 @@ static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx) */ static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group) { - if (netfs_group) + if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE) refcount_inc(&netfs_group->ref); return netfs_group; } @@ -178,7 +178,9 @@ static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_grou */ static inline void netfs_put_group(struct netfs_group *netfs_group) { - if (netfs_group && 
refcount_dec_and_test(&netfs_group->ref)) + if (netfs_group && + netfs_group != NETFS_FOLIO_COPY_TO_CACHE && + refcount_dec_and_test(&netfs_group->ref)) netfs_group->free(netfs_group); } @@ -187,7 +189,9 @@ static inline void netfs_put_group(struct netfs_group *netfs_group) */ static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr) { - if (netfs_group && refcount_sub_and_test(nr, &netfs_group->ref)) + if (netfs_group && + netfs_group != NETFS_FOLIO_COPY_TO_CACHE && + refcount_sub_and_test(nr, &netfs_group->ref)) netfs_group->free(netfs_group); } diff --git a/fs/netfs/io.c b/fs/netfs/io.c index 4261ad6c55b6..b3b9827a9709 100644 --- a/fs/netfs/io.c +++ b/fs/netfs/io.c @@ -99,8 +99,9 @@ static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async) } /* - * Deal with the completion of writing the data to the cache. We have to clear - * the PG_fscache bits on the folios involved and release the caller's ref. + * [DEPRECATED] Deal with the completion of writing the data to the cache. We + * have to clear the PG_fscache bits on the folios involved and release the + * caller's ref. * * May be called in softirq mode and we inherit a ref from the caller. */ @@ -138,7 +139,7 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq, } static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error, - bool was_async) + bool was_async) /* [DEPRECATED] */ { struct netfs_io_subrequest *subreq = priv; struct netfs_io_request *rreq = subreq->rreq; @@ -161,8 +162,8 @@ static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error, } /* - * Perform any outstanding writes to the cache. We inherit a ref from the - * caller. + * [DEPRECATED] Perform any outstanding writes to the cache. We inherit a ref + * from the caller. 
*/ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq) { @@ -222,7 +223,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq) netfs_rreq_unmark_after_write(rreq, false); } -static void netfs_rreq_write_to_cache_work(struct work_struct *work) +static void netfs_rreq_write_to_cache_work(struct work_struct *work) /* [DEPRECATED] */ { struct netfs_io_request *rreq = container_of(work, struct netfs_io_request, work); @@ -230,7 +231,7 @@ static void netfs_rreq_write_to_cache_work(struct work_struct *work) netfs_rreq_do_write_to_cache(rreq); } -static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) +static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) /* [DEPRECATED] */ { rreq->work.func = netfs_rreq_write_to_cache_work; if (!queue_work(system_unbound_wq, &rreq->work)) @@ -409,7 +410,8 @@ again: clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags); wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS); - if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags)) + if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags) && + test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) return netfs_rreq_write_to_cache(rreq); netfs_rreq_completed(rreq, was_async); diff --git a/fs/netfs/main.c b/fs/netfs/main.c index 5e77618a7940..c5a73c9ed126 100644 --- a/fs/netfs/main.c +++ b/fs/netfs/main.c @@ -31,6 +31,7 @@ static const char *netfs_origins[nr__netfs_io_origin] = { [NETFS_READAHEAD] = "RA", [NETFS_READPAGE] = "RP", [NETFS_READ_FOR_WRITE] = "RW", + [NETFS_COPY_TO_CACHE] = "CC", [NETFS_WRITEBACK] = "WB", [NETFS_WRITETHROUGH] = "WT", [NETFS_LAUNDER_WRITE] = "LW", diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c index 90051ced8e2a..bc1fc54fb724 100644 --- a/fs/netfs/misc.c +++ b/fs/netfs/misc.c @@ -177,13 +177,11 @@ EXPORT_SYMBOL(netfs_clear_inode_writeback); */ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length) { - struct netfs_folio *finfo = NULL; + struct netfs_folio *finfo; size_t flen = folio_size(folio); _enter("{%lx},%zx,%zx", folio->index, offset, length); - folio_wait_fscache(folio); - if (!folio_test_private(folio)) return; @@ -248,12 +246,6 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp) if (folio_test_private(folio)) return false; - if (folio_test_fscache(folio)) { - if (current_is_kswapd() || !(gfp & __GFP_FS)) - return false; - folio_wait_fscache(folio); - } - fscache_note_page_release(netfs_i_cookie(ctx)); return true; } diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c index 610ceb5bd86c..72b52f070270 100644 --- a/fs/netfs/objects.c +++ b/fs/netfs/objects.c @@ -45,8 +45,12 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping, refcount_set(&rreq->ref, 1); __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags); - if (cached) + if (cached) { __set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags); + if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags)) + /* Filesystem uses deprecated PG_private_2 marking. 
*/ + __set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags); + } if (file && file->f_flags & O_NONBLOCK) __set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags); if (rreq->netfs_ops->init_request) { diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h index e3cb4923316b..814363d1d7c7 100644 --- a/fs/nfs/fscache.h +++ b/fs/nfs/fscache.h @@ -81,6 +81,8 @@ static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs) static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi) { netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops, false); + /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */ + __set_bit(NETFS_ICTX_USE_PGPRIV2, &nfsi->netfs.flags); } extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr); extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr); diff --git a/include/linux/fscache.h b/include/linux/fscache.h index 6e8562cbcc43..9de27643607f 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -172,9 +172,12 @@ extern void __fscache_invalidate(struct fscache_cookie *, const void *, loff_t, extern int __fscache_begin_read_operation(struct netfs_cache_resources *, struct fscache_cookie *); extern int __fscache_begin_write_operation(struct netfs_cache_resources *, struct fscache_cookie *); -extern void __fscache_write_to_cache(struct fscache_cookie *, struct address_space *, - loff_t, size_t, loff_t, netfs_io_terminated_t, void *, - bool); +void __fscache_write_to_cache(struct fscache_cookie *cookie, + struct address_space *mapping, + loff_t start, size_t len, loff_t i_size, + netfs_io_terminated_t term_func, + void *term_func_priv, + bool using_pgpriv2, bool cond); extern void __fscache_clear_page_bits(struct address_space *, loff_t, size_t); /** @@ -597,7 +600,8 @@ static inline void fscache_clear_page_bits(struct address_space *mapping, * @i_size: The new size of the inode * @term_func: The function to call upon completion * @term_func_priv: The private data for @term_func - * @caching: If PG_fscache has been set + * @using_pgpriv2: If we're using PG_private_2 to mark in-progress write + * @caching: If we actually want to do the caching * * Helper function for a netfs to write dirty data from an inode into the cache * object that's backing it. @@ -608,19 +612,21 @@ static inline void fscache_clear_page_bits(struct address_space *mapping, * marked with PG_fscache. * * If given, @term_func will be called upon completion and supplied with - * @term_func_priv. Note that the PG_fscache flags will have been cleared by - * this point, so the netfs must retain its own pin on the mapping. + * @term_func_priv. Note that if @using_pgpriv2 is set, the PG_private_2 flags + * will have been cleared by this point, so the netfs must retain its own pin + * on the mapping. 
*/ static inline void fscache_write_to_cache(struct fscache_cookie *cookie, struct address_space *mapping, loff_t start, size_t len, loff_t i_size, netfs_io_terminated_t term_func, void *term_func_priv, - bool caching) + bool using_pgpriv2, bool caching) { if (caching) __fscache_write_to_cache(cookie, mapping, start, len, i_size, - term_func, term_func_priv, caching); + term_func, term_func_priv, + using_pgpriv2, caching); else if (term_func) term_func(term_func_priv, -ENOBUFS, false); diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 100cbb261269..f5e9c5f84a0c 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -143,6 +143,8 @@ struct netfs_inode { #define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */ #define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */ #define NETFS_ICTX_NO_WRITE_STREAMING 3 /* Don't engage in write-streaming */ +#define NETFS_ICTX_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark + * write to cache on read */ }; /* @@ -165,16 +167,25 @@ struct netfs_folio { unsigned int dirty_len; /* Write-streaming dirty data length */ }; #define NETFS_FOLIO_INFO 0x1UL /* OR'd with folio->private. */ +#define NETFS_FOLIO_COPY_TO_CACHE ((struct netfs_group *)0x356UL) /* Write to the cache only */ -static inline struct netfs_folio *netfs_folio_info(struct folio *folio) +static inline bool netfs_is_folio_info(const void *priv) { - void *priv = folio_get_private(folio); + return (unsigned long)priv & NETFS_FOLIO_INFO; +} - if ((unsigned long)priv & NETFS_FOLIO_INFO) +static inline struct netfs_folio *__netfs_folio_info(const void *priv) +{ + if (netfs_is_folio_info(priv)) return (struct netfs_folio *)((unsigned long)priv & ~NETFS_FOLIO_INFO); return NULL; } +static inline struct netfs_folio *netfs_folio_info(struct folio *folio) +{ + return __netfs_folio_info(folio_get_private(folio)); +} + static inline struct netfs_group *netfs_folio_group(struct folio *folio) { struct netfs_folio *finfo; @@ -230,6 +241,7 @@ enum netfs_io_origin { NETFS_READAHEAD, /* This read was triggered by readahead */ NETFS_READPAGE, /* This read is a synchronous read */ NETFS_READ_FOR_WRITE, /* This read is to prepare a write */ + NETFS_COPY_TO_CACHE, /* This write is to copy a read to the cache */ NETFS_WRITEBACK, /* This write was triggered by writepages */ NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */ NETFS_LAUNDER_WRITE, /* This is triggered by ->launder_folio() */ @@ -287,6 +299,8 @@ struct netfs_io_request { #define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */ #define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */ #define NETFS_RREQ_BLOCKED 10 /* We blocked */ +#define NETFS_RREQ_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark + * write to cache on read */ const struct netfs_request_ops *netfs_ops; void (*cleanup)(struct netfs_io_request *req); }; diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index 447a8c21cf57..e03fafb0c1e3 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -24,6 +24,7 @@ E_(netfs_read_trace_write_begin, "WRITEBEGN") #define netfs_write_traces \ + EM(netfs_write_trace_copy_to_cache, "COPY2CACH") \ EM(netfs_write_trace_dio_write, "DIO-WRITE") \ EM(netfs_write_trace_launder, "LAUNDER ") \ EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \ @@ -34,6 +35,7 @@ EM(NETFS_READAHEAD, "RA") \ EM(NETFS_READPAGE, "RP") \ EM(NETFS_READ_FOR_WRITE, "RW") \ + EM(NETFS_COPY_TO_CACHE, "CC") \ EM(NETFS_WRITEBACK, "WB") \ 
EM(NETFS_WRITETHROUGH, "WT") \ EM(NETFS_LAUNDER_WRITE, "LW") \ @@ -127,7 +129,9 @@ EM(netfs_folio_trace_clear, "clear") \ EM(netfs_folio_trace_clear_s, "clear-s") \ EM(netfs_folio_trace_clear_g, "clear-g") \ - EM(netfs_folio_trace_copy_to_cache, "copy") \ + EM(netfs_folio_trace_copy, "copy") \ + EM(netfs_folio_trace_copy_plus, "copy+") \ + EM(netfs_folio_trace_copy_to_cache, "mark-copy") \ EM(netfs_folio_trace_end_copy, "end-copy") \ EM(netfs_folio_trace_filled_gaps, "filled-gaps") \ EM(netfs_folio_trace_kill, "kill") \ -- cgit From 2e9d7e4b984a61823c41ba65e1b58b98ca9912bb Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 19 Mar 2024 11:13:26 +0000 Subject: mm: Remove the PG_fscache alias for PG_private_2 Remove the PG_fscache alias for PG_private_2 and use the latter directly. Use of this flag for marking pages undergoing writing to the cache should be considered deprecated and the folios should be marked dirty instead and the write done in ->writepages(). Note that PG_private_2 itself should be considered deprecated and up for future removal by the MM folks too. Signed-off-by: David Howells Reviewed-by: Jeff Layton cc: Matthew Wilcox (Oracle) cc: Ilya Dryomov cc: Xiubo Li cc: Steve French cc: Paulo Alcantara cc: Ronnie Sahlberg cc: Shyam Prasad N cc: Tom Talpey cc: Bharath SM cc: Trond Myklebust cc: Anna Schumaker cc: netfs@lists.linux.dev cc: ceph-devel@vger.kernel.org cc: linux-cifs@vger.kernel.org cc: linux-nfs@vger.kernel.org cc: linux-fsdevel@vger.kernel.org cc: linux-mm@kvack.org --- fs/ceph/addr.c | 11 ++++--- fs/netfs/buffered_read.c | 4 +-- fs/netfs/fscache_io.c | 2 +- fs/netfs/io.c | 2 +- fs/nfs/file.c | 8 ++--- fs/nfs/fscache.h | 4 +-- fs/nfs/write.c | 4 +-- fs/smb/client/file.c | 16 +++++----- include/linux/netfs.h | 80 +++--------------------------------------------- mm/filemap.c | 6 ++-- 10 files changed, 33 insertions(+), 104 deletions(-) (limited to 'include/linux') diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 28ae4976a4f9..18ddacb00511 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -500,7 +500,7 @@ const struct netfs_request_ops ceph_netfs_ops = { #ifdef CONFIG_CEPH_FSCACHE static void ceph_set_page_fscache(struct page *page) { - set_page_fscache(page); + folio_start_private_2(page_folio(page)); /* [DEPRECATED] */ } static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async) @@ -800,7 +800,7 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc) return AOP_WRITEPAGE_ACTIVATE; } - wait_on_page_fscache(page); + folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */ err = writepage_nounlock(page, wbc); if (err == -ERESTARTSYS) { @@ -1075,7 +1075,8 @@ get_more_pages: unlock_page(page); break; } - if (PageWriteback(page) || PageFsCache(page)) { + if (PageWriteback(page) || + PagePrivate2(page) /* [DEPRECATED] */) { if (wbc->sync_mode == WB_SYNC_NONE) { doutc(cl, "%p under writeback\n", page); unlock_page(page); @@ -1083,7 +1084,7 @@ get_more_pages: } doutc(cl, "waiting on writeback %p\n", page); wait_on_page_writeback(page); - wait_on_page_fscache(page); + folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */ } if (!clear_page_dirty_for_io(page)) { @@ -1513,7 +1514,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping, if (r < 0) return r; - folio_wait_fscache(folio); + folio_wait_private_2(folio); /* [DEPRECATED] */ WARN_ON_ONCE(!folio_test_locked(folio)); *pagep = &folio->page; return 0; diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c index 
6d49319c82c6..b3fd6e1fa322 100644 --- a/fs/netfs/buffered_read.c +++ b/fs/netfs/buffered_read.c @@ -70,7 +70,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq) if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) { trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache); - folio_start_fscache(folio); + folio_start_private_2(folio); folio_started = true; } } else { @@ -506,7 +506,7 @@ retry: have_folio: if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags)) { - ret = folio_wait_fscache_killable(folio); + ret = folio_wait_private_2_killable(folio); if (ret < 0) goto error; } diff --git a/fs/netfs/fscache_io.c b/fs/netfs/fscache_io.c index 5028f2ae30da..38637e5c9b57 100644 --- a/fs/netfs/fscache_io.c +++ b/fs/netfs/fscache_io.c @@ -183,7 +183,7 @@ void __fscache_clear_page_bits(struct address_space *mapping, rcu_read_lock(); xas_for_each(&xas, page, last) { - end_page_fscache(page); + folio_end_private_2(page_folio(page)); } rcu_read_unlock(); } diff --git a/fs/netfs/io.c b/fs/netfs/io.c index b3b9827a9709..60a19f96e0ce 100644 --- a/fs/netfs/io.c +++ b/fs/netfs/io.c @@ -129,7 +129,7 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq, continue; unlocked = folio_next_index(folio) - 1; trace_netfs_folio(folio, netfs_folio_trace_end_copy); - folio_end_fscache(folio); + folio_end_private_2(folio); have_unlocked = true; } } diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 407c6e15afe2..6bd127e6683d 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -433,7 +433,7 @@ static void nfs_invalidate_folio(struct folio *folio, size_t offset, return; /* Cancel any unstarted writes on this page */ nfs_wb_folio_cancel(inode, folio); - folio_wait_fscache(folio); + folio_wait_private_2(folio); /* [DEPRECATED] */ trace_nfs_invalidate_folio(inode, folio); } @@ -500,7 +500,7 @@ static int nfs_launder_folio(struct folio *folio) dfprintk(PAGECACHE, "NFS: launder_folio(%ld, %llu)\n", inode->i_ino, folio_pos(folio)); - folio_wait_fscache(folio); + folio_wait_private_2(folio); /* [DEPRECATED] */ ret = nfs_wb_folio(inode, folio); trace_nfs_launder_folio_done(inode, folio, ret); return ret; @@ -593,8 +593,8 @@ static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf) sb_start_pagefault(inode->i_sb); /* make sure the cache has finished storing the page */ - if (folio_test_fscache(folio) && - folio_wait_fscache_killable(folio) < 0) { + if (folio_test_private_2(folio) && /* [DEPRECATED] */ + folio_wait_private_2_killable(folio) < 0) { ret = VM_FAULT_RETRY; goto out; } diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h index 814363d1d7c7..fbed0027996f 100644 --- a/fs/nfs/fscache.h +++ b/fs/nfs/fscache.h @@ -103,10 +103,10 @@ extern int nfs_netfs_read_folio(struct file *file, struct folio *folio); static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp) { - if (folio_test_fscache(folio)) { + if (folio_test_private_2(folio)) { /* [DEPRECATED] */ if (current_is_kswapd() || !(gfp & __GFP_FS)) return false; - folio_wait_fscache(folio); + folio_wait_private_2(folio); } fscache_note_page_release(netfs_i_cookie(netfs_inode(folio->mapping->host))); return true; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 5de85d725fb9..2329cbb0e446 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -2120,10 +2120,10 @@ int nfs_migrate_folio(struct address_space *mapping, struct folio *dst, if (folio_test_private(src)) return -EBUSY; - if (folio_test_fscache(src)) { + if (folio_test_private_2(src)) { /* [DEPRECATED] */ if (mode == MIGRATE_ASYNC) return -EBUSY; - 
folio_wait_fscache(src); + folio_wait_private_2(src); } return migrate_folio(mapping, dst, src, mode); diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index 9be37d0fe724..b39caae652f6 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -3032,12 +3032,12 @@ lock_again: } if (folio_test_writeback(folio) || - folio_test_fscache(folio)) { + folio_test_private_2(folio)) { /* [DEPRECATED] */ folio_unlock(folio); if (wbc->sync_mode != WB_SYNC_NONE) { folio_wait_writeback(folio); #ifdef CONFIG_CIFS_FSCACHE - folio_wait_fscache(folio); + folio_wait_private_2(folio); #endif goto lock_again; } @@ -4510,8 +4510,8 @@ static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf) * be modified. We then assume the entire folio will need writing back. */ #ifdef CONFIG_CIFS_FSCACHE - if (folio_test_fscache(folio) && - folio_wait_fscache_killable(folio) < 0) + if (folio_test_private_2(folio) && /* [DEPRECATED] */ + folio_wait_private_2_killable(folio) < 0) return VM_FAULT_RETRY; #endif @@ -4977,10 +4977,10 @@ static bool cifs_release_folio(struct folio *folio, gfp_t gfp) { if (folio_test_private(folio)) return 0; - if (folio_test_fscache(folio)) { + if (folio_test_private_2(folio)) { /* [DEPRECATED] */ if (current_is_kswapd() || !(gfp & __GFP_FS)) return false; - folio_wait_fscache(folio); + folio_wait_private_2(folio); } fscache_note_page_release(cifs_inode_cookie(folio->mapping->host)); return true; @@ -4989,7 +4989,7 @@ static bool cifs_release_folio(struct folio *folio, gfp_t gfp) static void cifs_invalidate_folio(struct folio *folio, size_t offset, size_t length) { - folio_wait_fscache(folio); + folio_wait_private_2(folio); /* [DEPRECATED] */ } static int cifs_launder_folio(struct folio *folio) @@ -5009,7 +5009,7 @@ static int cifs_launder_folio(struct folio *folio) if (folio_clear_dirty_for_io(folio)) rc = cifs_writepage_locked(&folio->page, &wbc); - folio_wait_fscache(folio); + folio_wait_private_2(folio); /* [DEPRECATED] */ return rc; } diff --git a/include/linux/netfs.h b/include/linux/netfs.h index f5e9c5f84a0c..f36a6d8163d1 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -21,94 +21,22 @@ enum netfs_sreq_ref_trace; -/* - * Overload PG_private_2 to give us PG_fscache - this is used to indicate that - * a page is currently backed by a local disk cache - */ -#define folio_test_fscache(folio) folio_test_private_2(folio) -#define PageFsCache(page) PagePrivate2((page)) -#define SetPageFsCache(page) SetPagePrivate2((page)) -#define ClearPageFsCache(page) ClearPagePrivate2((page)) -#define TestSetPageFsCache(page) TestSetPagePrivate2((page)) -#define TestClearPageFsCache(page) TestClearPagePrivate2((page)) - /** - * folio_start_fscache - Start an fscache write on a folio. + * folio_start_private_2 - Start an fscache write on a folio. [DEPRECATED] * @folio: The folio. * * Call this function before writing a folio to a local cache. Starting a * second write before the first one finishes is not allowed. + * + * Note that this should no longer be used. */ -static inline void folio_start_fscache(struct folio *folio) +static inline void folio_start_private_2(struct folio *folio) { VM_BUG_ON_FOLIO(folio_test_private_2(folio), folio); folio_get(folio); folio_set_private_2(folio); } -/** - * folio_end_fscache - End an fscache write on a folio. - * @folio: The folio. - * - * Call this function after the folio has been written to the local cache. - * This will wake any sleepers waiting on this folio. 
- */ -static inline void folio_end_fscache(struct folio *folio) -{ - folio_end_private_2(folio); -} - -/** - * folio_wait_fscache - Wait for an fscache write on this folio to end. - * @folio: The folio. - * - * If this folio is currently being written to a local cache, wait for - * the write to finish. Another write may start after this one finishes, - * unless the caller holds the folio lock. - */ -static inline void folio_wait_fscache(struct folio *folio) -{ - folio_wait_private_2(folio); -} - -/** - * folio_wait_fscache_killable - Wait for an fscache write on this folio to end. - * @folio: The folio. - * - * If this folio is currently being written to a local cache, wait - * for the write to finish or for a fatal signal to be received. - * Another write may start after this one finishes, unless the caller - * holds the folio lock. - * - * Return: - * - 0 if successful. - * - -EINTR if a fatal signal was encountered. - */ -static inline int folio_wait_fscache_killable(struct folio *folio) -{ - return folio_wait_private_2_killable(folio); -} - -static inline void set_page_fscache(struct page *page) -{ - folio_start_fscache(page_folio(page)); -} - -static inline void end_page_fscache(struct page *page) -{ - folio_end_private_2(page_folio(page)); -} - -static inline void wait_on_page_fscache(struct page *page) -{ - folio_wait_private_2(page_folio(page)); -} - -static inline int wait_on_page_fscache_killable(struct page *page) -{ - return folio_wait_private_2_killable(page_folio(page)); -} - /* Marks used on xarray-based buffers */ #define NETFS_BUF_PUT_MARK XA_MARK_0 /* - Page needs putting */ #define NETFS_BUF_PAGECACHE_MARK XA_MARK_1 /* - Page needs wb/dirty flag wrangling */ diff --git a/mm/filemap.c b/mm/filemap.c index 30de18c4fd28..9a2e28bf298a 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1540,7 +1540,7 @@ EXPORT_SYMBOL(folio_end_private_2); * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio. * @folio: The folio to wait on. * - * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio. + * Wait for PG_private_2 to be cleared on a folio. */ void folio_wait_private_2(struct folio *folio) { @@ -1553,8 +1553,8 @@ EXPORT_SYMBOL(folio_wait_private_2); * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio. * @folio: The folio to wait on. * - * Wait for PG_private_2 (aka PG_fscache) to be cleared on a folio or until a - * fatal signal is received by the calling task. + * Wait for PG_private_2 to be cleared on a folio or until a fatal signal is + * received by the calling task. * * Return: * - 0 if successful. -- cgit From 93bf1cc0096fa1e02244078db3334ca7fa1d88c1 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 18 Mar 2024 17:15:57 +0000 Subject: netfs: Make netfs_io_request::subreq_counter an atomic_t Make the netfs_io_request::subreq_counter, used to generate values for netfs_io_subrequest::debug_index, into an atomic_t so that it can be called from the retry thread at the same time as the app thread issuing writes. 
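The race being closed here is easy to see in a minimal sketch (the structure and function names below are made up; only atomic_inc_return() is the real primitive). With a plain "counter++", the application thread and the retry thread can both read the old value and hand out the same debug index; the atomic version gives each caller a unique value:

#include <linux/atomic.h>

struct example_request {
	atomic_t subreq_counter;	/* next subrequest debug index */
};

static unsigned int example_next_debug_index(struct example_request *rreq)
{
	/* Safe even when called concurrently from several threads. */
	return atomic_inc_return(&rreq->subreq_counter);
}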
Signed-off-by: David Howells Reviewed-by: Jeff Layton cc: netfs@lists.linux.dev cc: linux-fsdevel@vger.kernel.org --- fs/netfs/output.c | 2 +- include/linux/netfs.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/fs/netfs/output.c b/fs/netfs/output.c index 625eb68f3e5a..fbdbb4f78234 100644 --- a/fs/netfs/output.c +++ b/fs/netfs/output.c @@ -37,7 +37,7 @@ struct netfs_io_subrequest *netfs_create_write_request(struct netfs_io_request * subreq->source = dest; subreq->start = start; subreq->len = len; - subreq->debug_index = wreq->subreq_counter++; + subreq->debug_index = atomic_inc_return(&wreq->subreq_counter); switch (subreq->source) { case NETFS_UPLOAD_TO_SERVER: diff --git a/include/linux/netfs.h b/include/linux/netfs.h index f36a6d8163d1..ddafc6ebff42 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -202,7 +202,7 @@ struct netfs_io_request { unsigned int debug_id; unsigned int rsize; /* Maximum read size (0 for none) */ unsigned int wsize; /* Maximum write size (0 for none) */ - unsigned int subreq_counter; /* Next subreq->debug_index */ + atomic_t subreq_counter; /* Next subreq->debug_index */ atomic_t nr_outstanding; /* Number of ops in progress */ atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */ size_t submitted; /* Amount submitted for I/O so far */ -- cgit From 02eae0bb9538dc7dcb5a6bc2c3066bd6ca682969 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Thu, 25 Apr 2024 14:57:51 +0200 Subject: iio: core: Add iio_read_acpi_mount_matrix() helper function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The ACPI "ROTM" rotation matrix parsing code atm is already duplicated between bmc150-accel-core.c and kxcjk-1013.c and a third user of this is coming. Add an iio_read_acpi_mount_matrix() helper function for this. The 2 existing copies of the code are identical, except that the kxcjk-1013.c has slightly better error logging. To new helper is a 1:1 copy of the kxcjk-1013.c version, the only change is the addition of a "char *acpi_method" parameter since some bmc150 dual-accel setups (360° hinges with 1 accel in kbd/base + 1 in display) declare both accels in a single ACPI device with 2 different method names for the 2 matrices. This new acpi_method parameter is not "const char *" because the pathname parameter to acpi_evaluate_object() is not const. The 2 existing copies of this function will be removed in further patches in this series. Acked-by: Rafael J. 
Wysocki Signed-off-by: Hans de Goede Link: https://lore.kernel.org/r/20240425125754.76010-2-hdegoede@redhat.com Signed-off-by: Jonathan Cameron --- drivers/iio/Makefile | 1 + drivers/iio/industrialio-acpi.c | 85 +++++++++++++++++++++++++++++++++++++++++ include/linux/iio/iio.h | 13 +++++++ 3 files changed, 99 insertions(+) create mode 100644 drivers/iio/industrialio-acpi.c (limited to 'include/linux') diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile index 0ba0e1521ba4..cb80ef837e84 100644 --- a/drivers/iio/Makefile +++ b/drivers/iio/Makefile @@ -7,6 +7,7 @@ obj-$(CONFIG_IIO) += industrialio.o industrialio-y := industrialio-core.o industrialio-event.o inkern.o industrialio-$(CONFIG_IIO_BUFFER) += industrialio-buffer.o industrialio-$(CONFIG_IIO_TRIGGER) += industrialio-trigger.o +industrialio-$(CONFIG_ACPI) += industrialio-acpi.o obj-$(CONFIG_IIO_CONFIGFS) += industrialio-configfs.o obj-$(CONFIG_IIO_GTS_HELPER) += industrialio-gts-helper.o diff --git a/drivers/iio/industrialio-acpi.c b/drivers/iio/industrialio-acpi.c new file mode 100644 index 000000000000..981b75d40780 --- /dev/null +++ b/drivers/iio/industrialio-acpi.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* IIO ACPI helper functions */ + +#include +#include +#include +#include + +/** + * iio_read_acpi_mount_matrix() - Read accelerometer mount matrix info from ACPI + * @dev: Device structure + * @orientation: iio_mount_matrix struct to fill + * @acpi_method: ACPI method name to read the matrix from, usually "ROTM" + * + * Try to read the mount-matrix by calling the specified method on the device's + * ACPI firmware-node. If the device has no ACPI firmware-node; or the method + * does not exist then this will fail silently. This expects the method to + * return data in the ACPI "ROTM" format defined by Microsoft: + * https://learn.microsoft.com/en-us/windows-hardware/drivers/sensors/sensors-acpi-entries + * This is a Microsoft extension and not part of the official ACPI spec. + * The method name is configurable because some dual-accel setups define 2 mount + * matrices in a single ACPI device using separate "ROMK" and "ROMS" methods. + * + * Returns: true if the matrix was successfully, false otherwise. 
+ */ +bool iio_read_acpi_mount_matrix(struct device *dev, + struct iio_mount_matrix *orientation, + char *acpi_method) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + struct acpi_device *adev = ACPI_COMPANION(dev); + char *str; + union acpi_object *obj, *elements; + acpi_status status; + int i, j, val[3]; + bool ret = false; + + if (!adev || !acpi_has_method(adev->handle, acpi_method)) + return false; + + status = acpi_evaluate_object(adev->handle, acpi_method, NULL, &buffer); + if (ACPI_FAILURE(status)) { + dev_err(dev, "Failed to get ACPI mount matrix: %d\n", status); + return false; + } + + obj = buffer.pointer; + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) { + dev_err(dev, "Unknown ACPI mount matrix package format\n"); + goto out_free_buffer; + } + + elements = obj->package.elements; + for (i = 0; i < 3; i++) { + if (elements[i].type != ACPI_TYPE_STRING) { + dev_err(dev, "Unknown ACPI mount matrix element format\n"); + goto out_free_buffer; + } + + str = elements[i].string.pointer; + if (sscanf(str, "%d %d %d", &val[0], &val[1], &val[2]) != 3) { + dev_err(dev, "Incorrect ACPI mount matrix string format\n"); + goto out_free_buffer; + } + + for (j = 0; j < 3; j++) { + switch (val[j]) { + case -1: str = "-1"; break; + case 0: str = "0"; break; + case 1: str = "1"; break; + default: + dev_err(dev, "Invalid value in ACPI mount matrix: %d\n", val[j]); + goto out_free_buffer; + } + orientation->rotation[i * 3 + j] = str; + } + } + + ret = true; + +out_free_buffer: + kfree(buffer.pointer); + return ret; +} +EXPORT_SYMBOL_GPL(iio_read_acpi_mount_matrix); diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index e370a7bb3300..55e2b22086a1 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -788,6 +788,19 @@ static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) } #endif +#ifdef CONFIG_ACPI +bool iio_read_acpi_mount_matrix(struct device *dev, + struct iio_mount_matrix *orientation, + char *acpi_method); +#else +static inline bool iio_read_acpi_mount_matrix(struct device *dev, + struct iio_mount_matrix *orientation, + char *acpi_method) +{ + return false; +} +#endif + ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals); int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer, -- cgit From 09415814cd1d0b90b898f81d6ad6a1c0a2e22d32 Mon Sep 17 00:00:00 2001 From: Nuno Sa Date: Fri, 26 Apr 2024 17:42:10 +0200 Subject: iio: backend: change docs padding Using tabs and maintaining the start of the docs aligned is a pain and may lead to lot's of unrelated changes when adding new members. Hence, let#s change things now and just have a simple space after the member name. 
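Going back to the iio_read_acpi_mount_matrix() helper introduced above, a typical caller would use it from its probe path. The driver below is hypothetical and meant purely as a usage sketch; only the helper call itself is the new API:

#include <linux/device.h>
#include <linux/iio/iio.h>

struct my_accel_data {
	struct iio_mount_matrix orientation;
};

static int my_accel_probe(struct device *dev, struct my_accel_data *data)
{
	/*
	 * Try the ACPI "ROTM" rotation matrix first.  The helper fails
	 * silently when the device has no ACPI firmware node or the method
	 * is missing, so an absent matrix is not treated as an error here.
	 */
	if (!iio_read_acpi_mount_matrix(dev, &data->orientation, "ROTM"))
		dev_dbg(dev, "no ACPI mount matrix, keeping default orientation\n");

	return 0;
}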
Signed-off-by: Nuno Sa Link: https://lore.kernel.org/r/20240426-ad9467-new-features-v2-1-6361fc3ba1cc@analog.com Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-backend.c | 76 +++++++++++++++++++------------------- include/linux/iio/backend.h | 38 +++++++++---------- 2 files changed, 57 insertions(+), 57 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c index f08ed6d70ae5..c27243e88462 100644 --- a/drivers/iio/industrialio-backend.c +++ b/drivers/iio/industrialio-backend.c @@ -115,8 +115,8 @@ static DEFINE_MUTEX(iio_back_lock); /** * iio_backend_chan_enable - Enable a backend channel - * @back: Backend device - * @chan: Channel number + * @back: Backend device + * @chan: Channel number * * RETURNS: * 0 on success, negative error number on failure. @@ -129,8 +129,8 @@ EXPORT_SYMBOL_NS_GPL(iio_backend_chan_enable, IIO_BACKEND); /** * iio_backend_chan_disable - Disable a backend channel - * @back: Backend device - * @chan: Channel number + * @back: Backend device + * @chan: Channel number * * RETURNS: * 0 on success, negative error number on failure. @@ -148,8 +148,8 @@ static void __iio_backend_disable(void *back) /** * devm_iio_backend_enable - Device managed backend enable - * @dev: Consumer device for the backend - * @back: Backend device + * @dev: Consumer device for the backend + * @back: Backend device * * RETURNS: * 0 on success, negative error number on failure. @@ -168,9 +168,9 @@ EXPORT_SYMBOL_NS_GPL(devm_iio_backend_enable, IIO_BACKEND); /** * iio_backend_data_format_set - Configure the channel data format - * @back: Backend device - * @chan: Channel number - * @data: Data format + * @back: Backend device + * @chan: Channel number + * @data: Data format * * Properly configure a channel with respect to the expected data format. A * @struct iio_backend_data_fmt must be passed with the settings. @@ -190,9 +190,9 @@ EXPORT_SYMBOL_NS_GPL(iio_backend_data_format_set, IIO_BACKEND); /** * iio_backend_data_source_set - Select data source - * @back: Backend device - * @chan: Channel number - * @data: Data source + * @back: Backend device + * @chan: Channel number + * @data: Data source * * A given backend may have different sources to stream/sync data. This allows * to choose that source. @@ -212,9 +212,9 @@ EXPORT_SYMBOL_NS_GPL(iio_backend_data_source_set, IIO_BACKEND); /** * iio_backend_set_sampling_freq - Set channel sampling rate - * @back: Backend device - * @chan: Channel number - * @sample_rate_hz: Sample rate + * @back: Backend device + * @chan: Channel number + * @sample_rate_hz: Sample rate * * RETURNS: * 0 on success, negative error number on failure. @@ -235,9 +235,9 @@ static void iio_backend_free_buffer(void *arg) /** * devm_iio_backend_request_buffer - Device managed buffer request - * @dev: Consumer device for the backend - * @back: Backend device - * @indio_dev: IIO device + * @dev: Consumer device for the backend + * @back: Backend device + * @indio_dev: IIO device * * Request an IIO buffer from the backend. The type of the buffer (typically * INDIO_BUFFER_HARDWARE) is up to the backend to decide. 
This is because, @@ -300,10 +300,10 @@ static struct iio_backend *iio_backend_from_indio_dev_parent(const struct device /** * iio_backend_ext_info_get - IIO ext_info read callback - * @indio_dev: IIO device - * @private: Data private to the driver - * @chan: IIO channel - * @buf: Buffer where to place the attribute data + * @indio_dev: IIO device + * @private: Data private to the driver + * @chan: IIO channel + * @buf: Buffer where to place the attribute data * * This helper is intended to be used by backends that extend an IIO channel * (through iio_backend_extend_chan_spec()) with extended info. In that case, @@ -335,11 +335,11 @@ EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_get, IIO_BACKEND); /** * iio_backend_ext_info_set - IIO ext_info write callback - * @indio_dev: IIO device - * @private: Data private to the driver - * @chan: IIO channel - * @buf: Buffer holding the sysfs attribute - * @len: Buffer length + * @indio_dev: IIO device + * @private: Data private to the driver + * @chan: IIO channel + * @buf: Buffer holding the sysfs attribute + * @len: Buffer length * * This helper is intended to be used by backends that extend an IIO channel * (trough iio_backend_extend_chan_spec()) with extended info. In that case, @@ -365,9 +365,9 @@ EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_set, IIO_BACKEND); /** * iio_backend_extend_chan_spec - Extend an IIO channel - * @indio_dev: IIO device - * @back: Backend device - * @chan: IIO channel + * @indio_dev: IIO device + * @back: Backend device + * @chan: IIO channel * * Some backends may have their own functionalities and hence capable of * extending a frontend's channel. @@ -449,8 +449,8 @@ static int __devm_iio_backend_get(struct device *dev, struct iio_backend *back) /** * devm_iio_backend_get - Device managed backend device get - * @dev: Consumer device for the backend - * @name: Backend name + * @dev: Consumer device for the backend + * @name: Backend name * * Get's the backend associated with @dev. * @@ -501,8 +501,8 @@ EXPORT_SYMBOL_NS_GPL(devm_iio_backend_get, IIO_BACKEND); /** * __devm_iio_backend_get_from_fwnode_lookup - Device managed fwnode backend device get - * @dev: Consumer device for the backend - * @fwnode: Firmware node of the backend device + * @dev: Consumer device for the backend + * @fwnode: Firmware node of the backend device * * Search the backend list for a device matching @fwnode. * This API should not be used and it's only present for preventing the first @@ -536,7 +536,7 @@ EXPORT_SYMBOL_NS_GPL(__devm_iio_backend_get_from_fwnode_lookup, IIO_BACKEND); /** * iio_backend_get_priv - Get driver private data - * @back: Backend device + * @back: Backend device */ void *iio_backend_get_priv(const struct iio_backend *back) { @@ -554,9 +554,9 @@ static void iio_backend_unregister(void *arg) /** * devm_iio_backend_register - Device managed backend device register - * @dev: Backend device being registered - * @ops: Backend ops - * @priv: Device private data + * @dev: Backend device being registered + * @ops: Backend ops + * @priv: Device private data * * @ops is mandatory. Not providing it results in -EINVAL. 
* diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h index 9d144631134d..e3e62f65db14 100644 --- a/include/linux/iio/backend.h +++ b/include/linux/iio/backend.h @@ -24,9 +24,9 @@ enum iio_backend_data_source { /** * IIO_BACKEND_EX_INFO - Helper for an IIO extended channel attribute - * @_name: Attribute name - * @_shared: Whether the attribute is shared between all channels - * @_what: Data private to the driver + * @_name: Attribute name + * @_shared: Whether the attribute is shared between all channels + * @_what: Data private to the driver */ #define IIO_BACKEND_EX_INFO(_name, _shared, _what) { \ .name = (_name), \ @@ -38,10 +38,10 @@ enum iio_backend_data_source { /** * struct iio_backend_data_fmt - Backend data format - * @type: Data type. - * @sign_extend: Bool to tell if the data is sign extended. - * @enable: Enable/Disable the data format module. If disabled, - * not formatting will happen. + * @type: Data type. + * @sign_extend: Bool to tell if the data is sign extended. + * @enable: Enable/Disable the data format module. If disabled, + * not formatting will happen. */ struct iio_backend_data_fmt { enum iio_backend_data_type type; @@ -51,18 +51,18 @@ struct iio_backend_data_fmt { /** * struct iio_backend_ops - operations structure for an iio_backend - * @enable: Enable backend. - * @disable: Disable backend. - * @chan_enable: Enable one channel. - * @chan_disable: Disable one channel. - * @data_format_set: Configure the data format for a specific channel. - * @data_source_set: Configure the data source for a specific channel. - * @set_sample_rate: Configure the sampling rate for a specific channel. - * @request_buffer: Request an IIO buffer. - * @free_buffer: Free an IIO buffer. - * @extend_chan_spec: Extend an IIO channel. - * @ext_info_set: Extended info setter. - * @ext_info_get: Extended info getter. + * @enable: Enable backend. + * @disable: Disable backend. + * @chan_enable: Enable one channel. + * @chan_disable: Disable one channel. + * @data_format_set: Configure the data format for a specific channel. + * @data_source_set: Configure the data source for a specific channel. + * @set_sample_rate: Configure the sampling rate for a specific channel. + * @request_buffer: Request an IIO buffer. + * @free_buffer: Free an IIO buffer. + * @extend_chan_spec: Extend an IIO channel. + * @ext_info_set: Extended info setter. + * @ext_info_get: Extended info getter. **/ struct iio_backend_ops { int (*enable)(struct iio_backend *back); -- cgit From c66eabcc1ca64dbf20d0758ce210a85fa83f4b21 Mon Sep 17 00:00:00 2001 From: Nuno Sa Date: Fri, 26 Apr 2024 17:42:11 +0200 Subject: iio: backend: add API for interface tuning This is in preparation for supporting interface tuning in one of the devices using the axi-adc backend. The newly added interfaces are all needed for that calibration: * iio_backend_test_pattern_set(); * iio_backend_chan_status(); * iio_backend_iodelay_set(); * iio_backend_data_sample_trigger(). Interface tuning is the process of going through a set of known points (typically by the frontend), changing some clk or data delays (or both) and sending/receiving a known signal (so-called test patterns in this change). The receiving end (either frontend or the backend) is responsible for validating the signal and seeing whether it's good or not. The goal for all of this is to come up with ideal delays at the data interface level so we can have a proper, more reliable data transfer.
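As a rough sketch of how a frontend could drive this calibration with the new calls (illustrative only; the lane/tap limits, the result bookkeeping and the function name below are made up and not part of this patch):

#include <linux/iio/backend.h>

/* Hypothetical, device-specific limits */
#define EXAMPLE_NUM_LANES       8
#define EXAMPLE_MAX_TAPS        32

static int example_tune_interface(struct iio_backend *back, unsigned int chan)
{
        unsigned int lane, taps;
        bool error;
        int ret;

        /*
         * Tell the backend which known pattern to expect while the delays are
         * swept. The frontend would also configure its own test pattern
         * output at this point.
         */
        ret = iio_backend_test_pattern_set(back, chan, IIO_BACKEND_ADI_PRBS_9A);
        if (ret)
                return ret;

        for (lane = 0; lane < EXAMPLE_NUM_LANES; lane++) {
                for (taps = 0; taps < EXAMPLE_MAX_TAPS; taps++) {
                        ret = iio_backend_iodelay_set(back, lane, taps);
                        if (ret)
                                return ret;

                        /* Did the pattern still arrive cleanly at this delay? */
                        ret = iio_backend_chan_status(back, chan, &error);
                        if (ret)
                                return ret;

                        /* ... record 'error' for this (lane, taps) point ... */
                }
        }

        /*
         * Pick the best tap value per lane from the recorded points, then
         * return to normal data.
         */
        return iio_backend_test_pattern_set(back, chan, IIO_BACKEND_NO_TEST_PATTERN);
}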
Also note that for some devices we can change the sampling rate (which typically means changing some reference clock) and that can affect the data interface. In that case, it's import to run the tuning algorithm again as the values we had before may no longer be the best (or even valid) ones. Signed-off-by: Nuno Sa Link: https://lore.kernel.org/r/20240426-ad9467-new-features-v2-2-6361fc3ba1cc@analog.com Signed-off-by: Jonathan Cameron --- drivers/iio/industrialio-backend.c | 86 ++++++++++++++++++++++++++++++++++++++ include/linux/iio/backend.h | 36 ++++++++++++++++ 2 files changed, 122 insertions(+) (limited to 'include/linux') diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c index c27243e88462..929aff4040ed 100644 --- a/drivers/iio/industrialio-backend.c +++ b/drivers/iio/industrialio-backend.c @@ -226,6 +226,92 @@ int iio_backend_set_sampling_freq(struct iio_backend *back, unsigned int chan, } EXPORT_SYMBOL_NS_GPL(iio_backend_set_sampling_freq, IIO_BACKEND); +/** + * iio_backend_test_pattern_set - Configure a test pattern + * @back: Backend device + * @chan: Channel number + * @pattern: Test pattern + * + * Configure a test pattern on the backend. This is typically used for + * calibrating the timings on the data digital interface. + * + * RETURNS: + * 0 on success, negative error number on failure. + */ +int iio_backend_test_pattern_set(struct iio_backend *back, + unsigned int chan, + enum iio_backend_test_pattern pattern) +{ + if (pattern >= IIO_BACKEND_TEST_PATTERN_MAX) + return -EINVAL; + + return iio_backend_op_call(back, test_pattern_set, chan, pattern); +} +EXPORT_SYMBOL_NS_GPL(iio_backend_test_pattern_set, IIO_BACKEND); + +/** + * iio_backend_chan_status - Get the channel status + * @back: Backend device + * @chan: Channel number + * @error: Error indication + * + * Get the current state of the backend channel. Typically used to check if + * there were any errors sending/receiving data. + * + * RETURNS: + * 0 on success, negative error number on failure. + */ +int iio_backend_chan_status(struct iio_backend *back, unsigned int chan, + bool *error) +{ + return iio_backend_op_call(back, chan_status, chan, error); +} +EXPORT_SYMBOL_NS_GPL(iio_backend_chan_status, IIO_BACKEND); + +/** + * iio_backend_iodelay_set - Set digital I/O delay + * @back: Backend device + * @lane: Lane number + * @taps: Number of taps + * + * Controls delays on sending/receiving data. One usecase for this is to + * calibrate the data digital interface so we get the best results when + * transferring data. Note that @taps has no unit since the actual delay per tap + * is very backend specific. Hence, frontend devices typically should go through + * an array of @taps (the size of that array should typically match the size of + * calibration points on the frontend device) and call this API. + * + * RETURNS: + * 0 on success, negative error number on failure. + */ +int iio_backend_iodelay_set(struct iio_backend *back, unsigned int lane, + unsigned int taps) +{ + return iio_backend_op_call(back, iodelay_set, lane, taps); +} +EXPORT_SYMBOL_NS_GPL(iio_backend_iodelay_set, IIO_BACKEND); + +/** + * iio_backend_data_sample_trigger - Control when to sample data + * @back: Backend device + * @trigger: Data trigger + * + * Mostly useful for input backends. Configures the backend for when to sample + * data (eg: rising vs falling edge). + * + * RETURNS: + * 0 on success, negative error number on failure. 
+ */ +int iio_backend_data_sample_trigger(struct iio_backend *back, + enum iio_backend_sample_trigger trigger) +{ + if (trigger >= IIO_BACKEND_SAMPLE_TRIGGER_MAX) + return -EINVAL; + + return iio_backend_op_call(back, data_sample_trigger, trigger); +} +EXPORT_SYMBOL_NS_GPL(iio_backend_data_sample_trigger, IIO_BACKEND); + static void iio_backend_free_buffer(void *arg) { struct iio_backend_buffer_pair *pair = arg; diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h index e3e62f65db14..8099759d7242 100644 --- a/include/linux/iio/backend.h +++ b/include/linux/iio/backend.h @@ -49,6 +49,20 @@ struct iio_backend_data_fmt { bool enable; }; +/* vendor specific from 32 */ +enum iio_backend_test_pattern { + IIO_BACKEND_NO_TEST_PATTERN, + /* modified prbs9 */ + IIO_BACKEND_ADI_PRBS_9A = 32, + IIO_BACKEND_TEST_PATTERN_MAX +}; + +enum iio_backend_sample_trigger { + IIO_BACKEND_SAMPLE_TRIGGER_EDGE_FALLING, + IIO_BACKEND_SAMPLE_TRIGGER_EDGE_RISING, + IIO_BACKEND_SAMPLE_TRIGGER_MAX +}; + /** * struct iio_backend_ops - operations structure for an iio_backend * @enable: Enable backend. @@ -58,6 +72,10 @@ struct iio_backend_data_fmt { * @data_format_set: Configure the data format for a specific channel. * @data_source_set: Configure the data source for a specific channel. * @set_sample_rate: Configure the sampling rate for a specific channel. + * @test_pattern_set: Configure a test pattern. + * @chan_status: Get the channel status. + * @iodelay_set: Set digital I/O delay. + * @data_sample_trigger: Control when to sample data. * @request_buffer: Request an IIO buffer. * @free_buffer: Free an IIO buffer. * @extend_chan_spec: Extend an IIO channel. @@ -75,6 +93,15 @@ struct iio_backend_ops { enum iio_backend_data_source data); int (*set_sample_rate)(struct iio_backend *back, unsigned int chan, u64 sample_rate_hz); + int (*test_pattern_set)(struct iio_backend *back, + unsigned int chan, + enum iio_backend_test_pattern pattern); + int (*chan_status)(struct iio_backend *back, unsigned int chan, + bool *error); + int (*iodelay_set)(struct iio_backend *back, unsigned int chan, + unsigned int taps); + int (*data_sample_trigger)(struct iio_backend *back, + enum iio_backend_sample_trigger trigger); struct iio_buffer *(*request_buffer)(struct iio_backend *back, struct iio_dev *indio_dev); void (*free_buffer)(struct iio_backend *back, @@ -97,6 +124,15 @@ int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan, enum iio_backend_data_source data); int iio_backend_set_sampling_freq(struct iio_backend *back, unsigned int chan, u64 sample_rate_hz); +int iio_backend_test_pattern_set(struct iio_backend *back, + unsigned int chan, + enum iio_backend_test_pattern pattern); +int iio_backend_chan_status(struct iio_backend *back, unsigned int chan, + bool *error); +int iio_backend_iodelay_set(struct iio_backend *back, unsigned int lane, + unsigned int taps); +int iio_backend_data_sample_trigger(struct iio_backend *back, + enum iio_backend_sample_trigger trigger); int devm_iio_backend_request_buffer(struct device *dev, struct iio_backend *back, struct iio_dev *indio_dev); -- cgit From cfd3bfe9507b4aa39f7e86772e60b50b799e490e Mon Sep 17 00:00:00 2001 From: Dmitrii Bundin Date: Sat, 20 Apr 2024 07:24:57 +0300 Subject: bpf: Include linux/types.h for u32 Inclusion of the header linux/btf_ids.h relies on indirect inclusion of the header linux/types.h. Including it directly on the top level helps to avoid potential problems if linux/types.h hasn't been included before. 
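As a minimal illustration (a hypothetical translation unit, not from this patch), the header alone is enough to trip the problem, since struct btf_id_set itself uses u32:

/*
 * Before this change, building a file like this relied on some earlier header
 * having already pulled in linux/types.h; otherwise the compiler stops at the
 * u32 members of struct btf_id_set with "unknown type name 'u32'".
 */
#include <linux/btf_ids.h>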
The main motivation for introducing it is to avoid problems like the one that showed up in bpftool, where the build relied on GNU libc indirectly pulling in linux/types.h and failed with a compile error of the form: error: unknown type name 'u32' u32 cnt; ^~~ The bpftool compile error was fixed in 62248b22d01e ("tools/resolve_btfids: fix build with musl libc"). Signed-off-by: Dmitrii Bundin Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20240420042457.3198883-1-dmitrii.bundin.a@gmail.com --- include/linux/btf_ids.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux') diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index e24aabfe8ecc..c0e3e1426a82 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -3,6 +3,8 @@ #ifndef _LINUX_BTF_IDS_H #define _LINUX_BTF_IDS_H +#include <linux/types.h> /* for u32 */ + struct btf_id_set { u32 cnt; u32 ids[]; -- cgit From 5cd9fac3a369edd072e8e72a2b5f5abae57c97ed Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Sun, 28 Apr 2024 15:51:12 +0100 Subject: net: sfp-bus: constify link_modes to sfp_select_interface() sfp_select_interface() does not modify its link_modes argument, so make this a const pointer. Signed-off-by: Russell King (Oracle) Reviewed-by: Daniel Machon Link: https://lore.kernel.org/r/E1s15s0-00AHyq-8E@rmk-PC.armlinux.org.uk Signed-off-by: Paolo Abeni --- drivers/net/phy/sfp-bus.c | 2 +- include/linux/sfp.h | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index c6e3baf00f23..37c85f1e6534 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c @@ -355,7 +355,7 @@ EXPORT_SYMBOL_GPL(sfp_parse_support); * modes mask. */ phy_interface_t sfp_select_interface(struct sfp_bus *bus, - unsigned long *link_modes) + const unsigned long *link_modes) { if (phylink_test(link_modes, 25000baseCR_Full) || phylink_test(link_modes, 25000baseKR_Full) || diff --git a/include/linux/sfp.h b/include/linux/sfp.h index 55c0ab17c9e2..5ebc57f78c95 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -554,7 +554,7 @@ bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id); void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id, unsigned long *support, unsigned long *interfaces); phy_interface_t sfp_select_interface(struct sfp_bus *bus, - unsigned long *link_modes); + const unsigned long *link_modes); int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo); int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, @@ -593,7 +593,7 @@ static inline void sfp_parse_support(struct sfp_bus *bus, } static inline phy_interface_t sfp_select_interface(struct sfp_bus *bus, - unsigned long *link_modes) + const unsigned long *link_modes) { return PHY_INTERFACE_MODE_NA; } -- cgit From faba7db431294e0684d08a51b1f04cda75473d93 Mon Sep 17 00:00:00 2001 From: Olivia Wen Date: Tue, 30 Apr 2024 09:15:34 +0800 Subject: remoteproc: mediatek: Add IMGSYS IPI command Add an IPI command definition for communication with IMGSYS through the SCP mailbox.
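A rough usage sketch (not part of this patch): a consumer holding a struct mtk_scp handle, obtained elsewhere (e.g. via scp_get()), could register a handler for the new id and send commands on it. The payload layout, handler and function names below are made up for illustration:

#include <linux/remoteproc/mtk_scp.h>

struct example_imgsys_cmd {     /* hypothetical command layout */
        u32 opcode;
        u32 arg;
};

static void example_imgsys_ipi_handler(void *data, unsigned int len, void *priv)
{
        /* response from the SCP firmware for SCP_IPI_IMGSYS_CMD */
}

static int example_imgsys_send(struct mtk_scp *scp)
{
        struct example_imgsys_cmd cmd = { .opcode = 1 };
        int ret;

        ret = scp_ipi_register(scp, SCP_IPI_IMGSYS_CMD, example_imgsys_ipi_handler, NULL);
        if (ret)
                return ret;

        /* last argument: how long to wait for the SCP to handle the message, 0 for no wait */
        return scp_ipi_send(scp, SCP_IPI_IMGSYS_CMD, &cmd, sizeof(cmd), 0);
}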
Signed-off-by: Olivia Wen Reviewed-by: AngeloGioacchino Del Regno Link: https://lore.kernel.org/r/20240430011534.9587-5-olivia.wen@mediatek.com Signed-off-by: Mathieu Poirier --- include/linux/remoteproc/mtk_scp.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/remoteproc/mtk_scp.h b/include/linux/remoteproc/mtk_scp.h index 7c2b7cc9fe6c..344ff41c22c7 100644 --- a/include/linux/remoteproc/mtk_scp.h +++ b/include/linux/remoteproc/mtk_scp.h @@ -43,6 +43,7 @@ enum scp_ipi_id { SCP_IPI_CROS_HOST_CMD, SCP_IPI_VDEC_LAT, SCP_IPI_VDEC_CORE, + SCP_IPI_IMGSYS_CMD, SCP_IPI_NS_SERVICE = 0xFF, SCP_IPI_MAX = 0x100, }; -- cgit From 38ae7142e35165571123997addd71393a9dabde5 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Sun, 17 Mar 2024 11:40:37 +0100 Subject: pwm: Make pwmchip_[sg]et_drvdata() a wrapper around dev_set_drvdata() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that a pwm_chip has a dedicated struct device, pwmchip_set_drvdata() and pwmchip_get_drvdata() can be made thin wrappers around dev_set_drvdata() and dev_get_drvdata() respectively and the previously needed pointer can be dropped from struct pwm_chip. Link: https://lore.kernel.org/r/a5e05bd2d83421a26fdef6a87d69253c0f98becf.1710670958.git.u.kleine-koenig@pengutronix.de Signed-off-by: Uwe Kleine-König --- include/linux/pwm.h | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 495e23761f34..60b92c2c75ef 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -274,7 +274,6 @@ struct pwm_ops { * @of_xlate: request a PWM device given a device tree PWM specifier * @atomic: can the driver's ->apply() be called in atomic context * @uses_pwmchip_alloc: signals if pwmchip_allow was used to allocate this chip - * @driver_data: Private pointer for driver specific info * @pwms: array of PWM devices allocated by the framework */ struct pwm_chip { @@ -290,7 +289,6 @@ struct pwm_chip { /* only used internally by the PWM framework */ bool uses_pwmchip_alloc; - void *driver_data; struct pwm_device pwms[] __counted_by(npwm); }; @@ -301,20 +299,12 @@ static inline struct device *pwmchip_parent(const struct pwm_chip *chip) static inline void *pwmchip_get_drvdata(struct pwm_chip *chip) { - /* - * After pwm_chip got a dedicated struct device, this can be replaced by - * dev_get_drvdata(&chip->dev); - */ - return chip->driver_data; + return dev_get_drvdata(&chip->dev); } static inline void pwmchip_set_drvdata(struct pwm_chip *chip, void *data) { - /* - * After pwm_chip got a dedicated struct device, this can be replaced by - * dev_set_drvdata(&chip->dev, data); - */ - chip->driver_data = data; + dev_set_drvdata(&chip->dev, data); } #if IS_ENABLED(CONFIG_PWM) -- cgit From 26f812ba75890c48644a31e3cfe3dd9762138968 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 29 Apr 2024 12:43:41 -0700 Subject: kunit/fortify: Add memcpy() tests Add fortify tests for memcpy() and memmove(). This can use a similar method to the fortify_panic() replacement, only we can do it for what was the WARN_ONCE(), which can be redefined. Since this is primarily testing the fortify behaviors of the memcpy() and memmove() defenses, the tests for memcpy() and memmove() are identical. 
Link: https://lore.kernel.org/r/20240429194342.2421639-3-keescook@chromium.org Signed-off-by: Kees Cook --- include/linux/fortify-string.h | 6 ++- lib/fortify_kunit.c | 85 ++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 87 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index 6aeebe0a6777..a0bb13825109 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -15,10 +15,14 @@ #define FORTIFY_REASON(func, write) (FIELD_PREP(BIT(0), write) | \ FIELD_PREP(GENMASK(7, 1), func)) +/* Overridden by KUnit tests. */ #ifndef fortify_panic # define fortify_panic(func, write, avail, size, retfail) \ __fortify_panic(FORTIFY_REASON(func, write), avail, size) #endif +#ifndef fortify_warn_once +# define fortify_warn_once(x...) WARN_ONCE(x) +#endif #define FORTIFY_READ 0 #define FORTIFY_WRITE 1 @@ -609,7 +613,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size, const size_t __q_size = (q_size); \ const size_t __p_size_field = (p_size_field); \ const size_t __q_size_field = (q_size_field); \ - WARN_ONCE(fortify_memcpy_chk(__fortify_size, __p_size, \ + fortify_warn_once(fortify_memcpy_chk(__fortify_size, __p_size, \ __q_size, __p_size_field, \ __q_size_field, FORTIFY_FUNC_ ##op), \ #op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \ diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c index 601fa327c5b7..ef3e4c68b759 100644 --- a/lib/fortify_kunit.c +++ b/lib/fortify_kunit.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Runtime test cases for CONFIG_FORTIFY_SOURCE. For testing memcpy(), - * see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c). + * Runtime test cases for CONFIG_FORTIFY_SOURCE. For additional memcpy() + * testing see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c). * * For corner cases with UBSAN, try testing with: * @@ -18,8 +18,10 @@ /* We don't need to fill dmesg with the fortify WARNs during testing. */ #ifdef DEBUG # define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x) +# define FORTIFY_WARN_KUNIT(x...) WARN_ONCE(x) #else # define FORTIFY_REPORT_KUNIT(x...) do { } while (0) +# define FORTIFY_WARN_KUNIT(x...) do { } while (0) #endif /* Redefine fortify_panic() to track failures. */ @@ -30,6 +32,14 @@ void fortify_add_kunit_error(int write); return (retfail); \ } while (0) +/* Redefine fortify_warn_once() to track memcpy() failures. */ +#define fortify_warn_once(chk_func, x...) do { \ + bool __result = chk_func; \ + FORTIFY_WARN_KUNIT(__result, x); \ + if (__result) \ + fortify_add_kunit_error(1); \ +} while (0) + #include #include #include @@ -818,6 +828,74 @@ static void fortify_test_strlcat(struct kunit *test) KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); } +/* Check for 0-sized arrays... 
*/ +struct fortify_zero_sized { + unsigned long bytes_before; + char buf[0]; + unsigned long bytes_after; +}; + +#define __fortify_test(memfunc) \ +static void fortify_test_##memfunc(struct kunit *test) \ +{ \ + struct fortify_zero_sized zero = { }; \ + struct fortify_padding pad = { }; \ + char srcA[sizeof(pad.buf) + 2]; \ + char srcB[sizeof(pad.buf) + 2]; \ + size_t len = sizeof(pad.buf) + unconst; \ + \ + memset(srcA, 'A', sizeof(srcA)); \ + KUNIT_ASSERT_EQ(test, srcA[0], 'A'); \ + memset(srcB, 'B', sizeof(srcB)); \ + KUNIT_ASSERT_EQ(test, srcB[0], 'B'); \ + \ + memfunc(pad.buf, srcA, 0 + unconst); \ + KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf + 1, srcB, 1 + unconst); \ + KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \ + KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \ + KUNIT_EXPECT_EQ(test, pad.buf[2], '\0'); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf, srcA, 1 + unconst); \ + KUNIT_EXPECT_EQ(test, pad.buf[0], 'A'); \ + KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf, srcA, len - 1); \ + KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \ + KUNIT_EXPECT_EQ(test, pad.buf[len - 1], '\0'); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf, srcA, len); \ + KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \ + KUNIT_EXPECT_EQ(test, pad.buf[len - 1], 'A'); \ + KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + memfunc(pad.buf, srcA, len + 1); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \ + memfunc(pad.buf + 1, srcB, len); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); \ + \ + /* Reset error counter. */ \ + fortify_write_overflows = 0; \ + /* Copy nothing into nothing: no errors. */ \ + memfunc(zero.buf, srcB, 0 + unconst); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ + /* We currently explicitly ignore zero-sized dests. */ \ + memfunc(zero.buf, srcB, 1 + unconst); \ + KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \ + KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \ +} +__fortify_test(memcpy) +__fortify_test(memmove) + static void fortify_test_memscan(struct kunit *test) { char haystack[] = "Where oh where is my memory range?"; @@ -977,7 +1055,8 @@ static struct kunit_case fortify_test_cases[] = { KUNIT_CASE(fortify_test_strncat), KUNIT_CASE(fortify_test_strlcat), /* skip memset: performs bounds checking on whole structs */ - /* skip memcpy: still using warn-and-overwrite instead of hard-fail */ + KUNIT_CASE(fortify_test_memcpy), + KUNIT_CASE(fortify_test_memmove), KUNIT_CASE(fortify_test_memscan), KUNIT_CASE(fortify_test_memchr), KUNIT_CASE(fortify_test_memchr_inv), -- cgit From 0e081a0ec04df58ac9caec1916ae6d9d048d50a7 Mon Sep 17 00:00:00 2001 From: Sangyun Kim Date: Wed, 17 Apr 2024 12:50:43 +0900 Subject: cxl/cxl-event: include missing and The linux/cxl-event.h header file uses the u8, u16, and uuid_t types, but it doesn't include the necessary header files, and . 
Currently, cxl-event.h is only used by drivers/cxl/cxlmem.h, and it doesn't cause any errors because cxlmem.h indirectly includes the required types. However, cxl-event.h may be used by other CXL-related code in the future, so it's important to fix this issue by including the missing header files directly in cxl-event.h. Signed-off-by: Sangyun Kim Reviewed-by: Ira Weiny Link: https://lore.kernel.org/r/20240417035043.2791431-1-sangyun.kim@snu.ac.kr Signed-off-by: Dave Jiang --- include/linux/cxl-event.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'include/linux') diff --git a/include/linux/cxl-event.h b/include/linux/cxl-event.h index 03fa6d50d46f..812ed16ffc2f 100644 --- a/include/linux/cxl-event.h +++ b/include/linux/cxl-event.h @@ -3,6 +3,9 @@ #ifndef _LINUX_CXL_EVENT_H #define _LINUX_CXL_EVENT_H +#include +#include + /* * Common Event Record Format * CXL rev 3.0 section 8.2.9.2.1; Table 8-42 -- cgit From 575024a8aa7cf1dff49b94092f774ed1c90586be Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Sun, 28 Apr 2024 17:24:26 +0800 Subject: powercap: intel_rapl: Introduce APIs for PMU support Introduce two new APIs rapl_package_add_pmu()/rapl_package_remove_pmu(). RAPL driver can invoke these APIs to expose its supported energy counters via perf PMU. The new RAPL PMU is fully compatible with current MSR RAPL PMU, including using the same PMU name and events name/id/unit/scale, etc. For example, use below command perf stat -e power/energy-pkg/ -e power/energy-ram/ FOO to get the energy consumption if power/energy-pkg/ and power/energy-ram/ events are available in the "perf list" output. This does not introduce any conflict because TPMI RAPL is the only user of these APIs currently, and it never co-exists with MSR RAPL. Note that RAPL Packages can be probed/removed dynamically, and the events supported by each TPMI RAPL device can be different. Thus the RAPL PMU support is done on demand, which means 1. PMU is registered only if it is needed by a RAPL Package. PMU events for unsupported counters are not exposed. 2. PMU is unregistered and registered when a new RAPL Package is probed and supports new counters that are not supported by current PMU. For example, on a dual-package system using TPMI RAPL, it is possible that Package 1 behaves as TPMI domain root and supports Psys domain. In this case, register PMU without Psys event when probing Package 0, and re-register the PMU with Psys event when probing Package 1. 3. PMU is unregistered when all registered RAPL Packages don't need PMU. Signed-off-by: Zhang Rui Signed-off-by: Rafael J. Wysocki --- drivers/powercap/intel_rapl_common.c | 582 +++++++++++++++++++++++++++++++++++ include/linux/intel_rapl.h | 32 ++ 2 files changed, 614 insertions(+) (limited to 'include/linux') diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index c4302caeb631..aac0744011a3 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -15,6 +15,8 @@ #include #include #include +#include +#include #include #include #include @@ -1507,6 +1509,586 @@ static int rapl_detect_domains(struct rapl_package *rp) return 0; } +#ifdef CONFIG_PERF_EVENTS + +/* + * Support for RAPL PMU + * + * Register a PMU if any of the registered RAPL Packages have the requirement + * of exposing its energy counters via Perf PMU. 
+ * + * PMU Name: + * power + * + * Events: + * Name Event id RAPL Domain + * energy_cores 0x01 RAPL_DOMAIN_PP0 + * energy_pkg 0x02 RAPL_DOMAIN_PACKAGE + * energy_ram 0x03 RAPL_DOMAIN_DRAM + * energy_gpu 0x04 RAPL_DOMAIN_PP1 + * energy_psys 0x05 RAPL_DOMAIN_PLATFORM + * + * Unit: + * Joules + * + * Scale: + * 2.3283064365386962890625e-10 + * The same RAPL domain in different RAPL Packages may have different + * energy units. Use 2.3283064365386962890625e-10 (2^-32) Joules as + * the fixed unit for all energy counters, and covert each hardware + * counter increase to N times of PMU event counter increases. + * + * This is fully compatible with the current MSR RAPL PMU. This means that + * userspace programs like turbostat can use the same code to handle RAPL Perf + * PMU, no matter what RAPL Interface driver (MSR/TPMI, etc) is running + * underlying on the platform. + * + * Note that RAPL Packages can be probed/removed dynamically, and the events + * supported by each TPMI RAPL device can be different. Thus the RAPL PMU + * support is done on demand, which means + * 1. PMU is registered only if it is needed by a RAPL Package. PMU events for + * unsupported counters are not exposed. + * 2. PMU is unregistered and registered when a new RAPL Package is probed and + * supports new counters that are not supported by current PMU. + * 3. PMU is unregistered when all registered RAPL Packages don't need PMU. + */ + +struct rapl_pmu { + struct pmu pmu; /* Perf PMU structure */ + u64 timer_ms; /* Maximum expiration time to avoid counter overflow */ + unsigned long domain_map; /* Events supported by current registered PMU */ + bool registered; /* Whether the PMU has been registered or not */ +}; + +static struct rapl_pmu rapl_pmu; + +/* PMU helpers */ + +static int get_pmu_cpu(struct rapl_package *rp) +{ + int cpu; + + if (!rp->has_pmu) + return nr_cpu_ids; + + /* Only TPMI RAPL is supported for now */ + if (rp->priv->type != RAPL_IF_TPMI) + return nr_cpu_ids; + + /* TPMI RAPL uses any CPU in the package for PMU */ + for_each_online_cpu(cpu) + if (topology_physical_package_id(cpu) == rp->id) + return cpu; + + return nr_cpu_ids; +} + +static bool is_rp_pmu_cpu(struct rapl_package *rp, int cpu) +{ + if (!rp->has_pmu) + return false; + + /* Only TPMI RAPL is supported for now */ + if (rp->priv->type != RAPL_IF_TPMI) + return false; + + /* TPMI RAPL uses any CPU in the package for PMU */ + return topology_physical_package_id(cpu) == rp->id; +} + +static struct rapl_package_pmu_data *event_to_pmu_data(struct perf_event *event) +{ + struct rapl_package *rp = event->pmu_private; + + return &rp->pmu_data; +} + +/* PMU event callbacks */ + +static u64 event_read_counter(struct perf_event *event) +{ + struct rapl_package *rp = event->pmu_private; + u64 val; + int ret; + + /* Return 0 for unsupported events */ + if (event->hw.idx < 0) + return 0; + + ret = rapl_read_data_raw(&rp->domains[event->hw.idx], ENERGY_COUNTER, false, &val); + + /* Return 0 for failed read */ + if (ret) + return 0; + + return val; +} + +static void __rapl_pmu_event_start(struct perf_event *event) +{ + struct rapl_package_pmu_data *data = event_to_pmu_data(event); + + if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) + return; + + event->hw.state = 0; + + list_add_tail(&event->active_entry, &data->active_list); + + local64_set(&event->hw.prev_count, event_read_counter(event)); + if (++data->n_active == 1) + hrtimer_start(&data->hrtimer, data->timer_interval, + HRTIMER_MODE_REL_PINNED); +} + +static void rapl_pmu_event_start(struct 
perf_event *event, int mode) +{ + struct rapl_package_pmu_data *data = event_to_pmu_data(event); + unsigned long flags; + + raw_spin_lock_irqsave(&data->lock, flags); + __rapl_pmu_event_start(event); + raw_spin_unlock_irqrestore(&data->lock, flags); +} + +static u64 rapl_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + struct rapl_package_pmu_data *data = event_to_pmu_data(event); + u64 prev_raw_count, new_raw_count; + s64 delta, sdelta; + + /* + * Follow the generic code to drain hwc->prev_count. + * The loop is not expected to run for multiple times. + */ + prev_raw_count = local64_read(&hwc->prev_count); + do { + new_raw_count = event_read_counter(event); + } while (!local64_try_cmpxchg(&hwc->prev_count, + &prev_raw_count, new_raw_count)); + + + /* + * Now we have the new raw value and have updated the prev + * timestamp already. We can now calculate the elapsed delta + * (event-)time and add that to the generic event. + */ + delta = new_raw_count - prev_raw_count; + + /* + * Scale delta to smallest unit (2^-32) + * users must then scale back: count * 1/(1e9*2^32) to get Joules + * or use ldexp(count, -32). + * Watts = Joules/Time delta + */ + sdelta = delta * data->scale[event->hw.flags]; + + local64_add(sdelta, &event->count); + + return new_raw_count; +} + +static void rapl_pmu_event_stop(struct perf_event *event, int mode) +{ + struct rapl_package_pmu_data *data = event_to_pmu_data(event); + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + + raw_spin_lock_irqsave(&data->lock, flags); + + /* Mark event as deactivated and stopped */ + if (!(hwc->state & PERF_HES_STOPPED)) { + WARN_ON_ONCE(data->n_active <= 0); + if (--data->n_active == 0) + hrtimer_cancel(&data->hrtimer); + + list_del(&event->active_entry); + + WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); + hwc->state |= PERF_HES_STOPPED; + } + + /* Check if update of sw counter is necessary */ + if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + /* + * Drain the remaining delta count out of a event + * that we are disabling: + */ + rapl_event_update(event); + hwc->state |= PERF_HES_UPTODATE; + } + + raw_spin_unlock_irqrestore(&data->lock, flags); +} + +static int rapl_pmu_event_add(struct perf_event *event, int mode) +{ + struct rapl_package_pmu_data *data = event_to_pmu_data(event); + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + + raw_spin_lock_irqsave(&data->lock, flags); + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (mode & PERF_EF_START) + __rapl_pmu_event_start(event); + + raw_spin_unlock_irqrestore(&data->lock, flags); + + return 0; +} + +static void rapl_pmu_event_del(struct perf_event *event, int flags) +{ + rapl_pmu_event_stop(event, PERF_EF_UPDATE); +} + +/* RAPL PMU event ids, same as shown in sysfs */ +enum perf_rapl_events { + PERF_RAPL_PP0 = 1, /* all cores */ + PERF_RAPL_PKG, /* entire package */ + PERF_RAPL_RAM, /* DRAM */ + PERF_RAPL_PP1, /* gpu */ + PERF_RAPL_PSYS, /* psys */ + PERF_RAPL_MAX +}; +#define RAPL_EVENT_MASK GENMASK(7, 0) + +static const int event_to_domain[PERF_RAPL_MAX] = { + [PERF_RAPL_PP0] = RAPL_DOMAIN_PP0, + [PERF_RAPL_PKG] = RAPL_DOMAIN_PACKAGE, + [PERF_RAPL_RAM] = RAPL_DOMAIN_DRAM, + [PERF_RAPL_PP1] = RAPL_DOMAIN_PP1, + [PERF_RAPL_PSYS] = RAPL_DOMAIN_PLATFORM, +}; + +static int rapl_pmu_event_init(struct perf_event *event) +{ + struct rapl_package *pos, *rp = NULL; + u64 cfg = event->attr.config & RAPL_EVENT_MASK; + int domain, idx; + + /* Only look at RAPL events */ + if (event->attr.type 
!= event->pmu->type) + return -ENOENT; + + /* Check for supported events only */ + if (!cfg || cfg >= PERF_RAPL_MAX) + return -EINVAL; + + if (event->cpu < 0) + return -EINVAL; + + /* Find out which Package the event belongs to */ + list_for_each_entry(pos, &rapl_packages, plist) { + if (is_rp_pmu_cpu(pos, event->cpu)) { + rp = pos; + break; + } + } + if (!rp) + return -ENODEV; + + /* Find out which RAPL Domain the event belongs to */ + domain = event_to_domain[cfg]; + + event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; + event->pmu_private = rp; /* Which package */ + event->hw.flags = domain; /* Which domain */ + + event->hw.idx = -1; + /* Find out the index in rp->domains[] to get domain pointer */ + for (idx = 0; idx < rp->nr_domains; idx++) { + if (rp->domains[idx].id == domain) { + event->hw.idx = idx; + break; + } + } + + return 0; +} + +static void rapl_pmu_event_read(struct perf_event *event) +{ + rapl_event_update(event); +} + +static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer) +{ + struct rapl_package_pmu_data *data = + container_of(hrtimer, struct rapl_package_pmu_data, hrtimer); + struct perf_event *event; + unsigned long flags; + + if (!data->n_active) + return HRTIMER_NORESTART; + + raw_spin_lock_irqsave(&data->lock, flags); + + list_for_each_entry(event, &data->active_list, active_entry) + rapl_event_update(event); + + raw_spin_unlock_irqrestore(&data->lock, flags); + + hrtimer_forward_now(hrtimer, data->timer_interval); + + return HRTIMER_RESTART; +} + +/* PMU sysfs attributes */ + +/* + * There are no default events, but we need to create "events" group (with + * empty attrs) before updating it with detected events. + */ +static struct attribute *attrs_empty[] = { + NULL, +}; + +static struct attribute_group pmu_events_group = { + .name = "events", + .attrs = attrs_empty, +}; + +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rapl_package *rp; + cpumask_var_t cpu_mask; + int cpu; + int ret; + + if (!alloc_cpumask_var(&cpu_mask, GFP_KERNEL)) + return -ENOMEM; + + cpus_read_lock(); + + cpumask_clear(cpu_mask); + + /* Choose a cpu for each RAPL Package */ + list_for_each_entry(rp, &rapl_packages, plist) { + cpu = get_pmu_cpu(rp); + if (cpu < nr_cpu_ids) + cpumask_set_cpu(cpu, cpu_mask); + } + cpus_read_unlock(); + + ret = cpumap_print_to_pagebuf(true, buf, cpu_mask); + + free_cpumask_var(cpu_mask); + + return ret; +} + +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static struct attribute_group pmu_cpumask_group = { + .attrs = pmu_cpumask_attrs, +}; + +PMU_FORMAT_ATTR(event, "config:0-7"); +static struct attribute *pmu_format_attr[] = { + &format_attr_event.attr, + NULL +}; + +static struct attribute_group pmu_format_group = { + .name = "format", + .attrs = pmu_format_attr, +}; + +static const struct attribute_group *pmu_attr_groups[] = { + &pmu_events_group, + &pmu_cpumask_group, + &pmu_format_group, + NULL +}; + +#define RAPL_EVENT_ATTR_STR(_name, v, str) \ +static struct perf_pmu_events_attr event_attr_##v = { \ + .attr = __ATTR(_name, 0444, perf_event_sysfs_show, NULL), \ + .event_str = str, \ +} + +RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01"); +RAPL_EVENT_ATTR_STR(energy-pkg, rapl_pkg, "event=0x02"); +RAPL_EVENT_ATTR_STR(energy-ram, rapl_ram, "event=0x03"); +RAPL_EVENT_ATTR_STR(energy-gpu, rapl_gpu, "event=0x04"); +RAPL_EVENT_ATTR_STR(energy-psys, rapl_psys, "event=0x05"); + 
+RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_unit_cores, "Joules"); +RAPL_EVENT_ATTR_STR(energy-pkg.unit, rapl_unit_pkg, "Joules"); +RAPL_EVENT_ATTR_STR(energy-ram.unit, rapl_unit_ram, "Joules"); +RAPL_EVENT_ATTR_STR(energy-gpu.unit, rapl_unit_gpu, "Joules"); +RAPL_EVENT_ATTR_STR(energy-psys.unit, rapl_unit_psys, "Joules"); + +RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_scale_cores, "2.3283064365386962890625e-10"); +RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_scale_pkg, "2.3283064365386962890625e-10"); +RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_scale_ram, "2.3283064365386962890625e-10"); +RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_scale_gpu, "2.3283064365386962890625e-10"); +RAPL_EVENT_ATTR_STR(energy-psys.scale, rapl_scale_psys, "2.3283064365386962890625e-10"); + +#define RAPL_EVENT_GROUP(_name, domain) \ +static struct attribute *pmu_attr_##_name[] = { \ + &event_attr_rapl_##_name.attr.attr, \ + &event_attr_rapl_unit_##_name.attr.attr, \ + &event_attr_rapl_scale_##_name.attr.attr, \ + NULL \ +}; \ +static umode_t is_visible_##_name(struct kobject *kobj, struct attribute *attr, int event) \ +{ \ + return rapl_pmu.domain_map & BIT(domain) ? attr->mode : 0; \ +} \ +static struct attribute_group pmu_group_##_name = { \ + .name = "events", \ + .attrs = pmu_attr_##_name, \ + .is_visible = is_visible_##_name, \ +} + +RAPL_EVENT_GROUP(cores, RAPL_DOMAIN_PP0); +RAPL_EVENT_GROUP(pkg, RAPL_DOMAIN_PACKAGE); +RAPL_EVENT_GROUP(ram, RAPL_DOMAIN_DRAM); +RAPL_EVENT_GROUP(gpu, RAPL_DOMAIN_PP1); +RAPL_EVENT_GROUP(psys, RAPL_DOMAIN_PLATFORM); + +static const struct attribute_group *pmu_attr_update[] = { + &pmu_group_cores, + &pmu_group_pkg, + &pmu_group_ram, + &pmu_group_gpu, + &pmu_group_psys, + NULL +}; + +static int rapl_pmu_update(struct rapl_package *rp) +{ + int ret = 0; + + /* Return if PMU already covers all events supported by current RAPL Package */ + if (rapl_pmu.registered && !(rp->domain_map & (~rapl_pmu.domain_map))) + goto end; + + /* Unregister previous registered PMU */ + if (rapl_pmu.registered) + perf_pmu_unregister(&rapl_pmu.pmu); + + rapl_pmu.registered = false; + rapl_pmu.domain_map |= rp->domain_map; + + memset(&rapl_pmu.pmu, 0, sizeof(struct pmu)); + rapl_pmu.pmu.attr_groups = pmu_attr_groups; + rapl_pmu.pmu.attr_update = pmu_attr_update; + rapl_pmu.pmu.task_ctx_nr = perf_invalid_context; + rapl_pmu.pmu.event_init = rapl_pmu_event_init; + rapl_pmu.pmu.add = rapl_pmu_event_add; + rapl_pmu.pmu.del = rapl_pmu_event_del; + rapl_pmu.pmu.start = rapl_pmu_event_start; + rapl_pmu.pmu.stop = rapl_pmu_event_stop; + rapl_pmu.pmu.read = rapl_pmu_event_read; + rapl_pmu.pmu.module = THIS_MODULE; + rapl_pmu.pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT; + ret = perf_pmu_register(&rapl_pmu.pmu, "power", -1); + if (ret) { + pr_info("Failed to register PMU\n"); + return ret; + } + + rapl_pmu.registered = true; +end: + rp->has_pmu = true; + return ret; +} + +int rapl_package_add_pmu(struct rapl_package *rp) +{ + struct rapl_package_pmu_data *data = &rp->pmu_data; + int idx; + + if (rp->has_pmu) + return -EEXIST; + + guard(cpus_read_lock)(); + + for (idx = 0; idx < rp->nr_domains; idx++) { + struct rapl_domain *rd = &rp->domains[idx]; + int domain = rd->id; + u64 val; + + if (!test_bit(domain, &rp->domain_map)) + continue; + + /* + * The RAPL PMU granularity is 2^-32 Joules + * data->scale[]: times of 2^-32 Joules for each ENERGY COUNTER increase + */ + val = rd->energy_unit * (1ULL << 32); + do_div(val, ENERGY_UNIT_SCALE * 1000000); + data->scale[domain] = val; + + if 
(!rapl_pmu.timer_ms) { + struct rapl_primitive_info *rpi = get_rpi(rp, ENERGY_COUNTER); + + /* + * Calculate the timer rate: + * Use reference of 200W for scaling the timeout to avoid counter + * overflows. + * + * max_count = rpi->mask >> rpi->shift + 1 + * max_energy_pj = max_count * rd->energy_unit + * max_time_sec = (max_energy_pj / 1000000000) / 200w + * + * rapl_pmu.timer_ms = max_time_sec * 1000 / 2 + */ + val = (rpi->mask >> rpi->shift) + 1; + val *= rd->energy_unit; + do_div(val, 1000000 * 200 * 2); + rapl_pmu.timer_ms = val; + + pr_debug("%llu ms overflow timer\n", rapl_pmu.timer_ms); + } + + pr_debug("Domain %s: hw unit %lld * 2^-32 Joules\n", rd->name, data->scale[domain]); + } + + /* Initialize per package PMU data */ + raw_spin_lock_init(&data->lock); + INIT_LIST_HEAD(&data->active_list); + data->timer_interval = ms_to_ktime(rapl_pmu.timer_ms); + hrtimer_init(&data->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + data->hrtimer.function = rapl_hrtimer_handle; + + return rapl_pmu_update(rp); +} +EXPORT_SYMBOL_GPL(rapl_package_add_pmu); + +void rapl_package_remove_pmu(struct rapl_package *rp) +{ + struct rapl_package *pos; + + if (!rp->has_pmu) + return; + + guard(cpus_read_lock)(); + + list_for_each_entry(pos, &rapl_packages, plist) { + /* PMU is still needed */ + if (pos->has_pmu && pos != rp) + return; + } + + perf_pmu_unregister(&rapl_pmu.pmu); + memset(&rapl_pmu, 0, sizeof(struct rapl_pmu)); +} +EXPORT_SYMBOL_GPL(rapl_package_remove_pmu); +#endif + /* called from CPU hotplug notifier, hotplug lock held */ void rapl_remove_package_cpuslocked(struct rapl_package *rp) { diff --git a/include/linux/intel_rapl.h b/include/linux/intel_rapl.h index f3196f82fd8a..c0397423d3a8 100644 --- a/include/linux/intel_rapl.h +++ b/include/linux/intel_rapl.h @@ -158,6 +158,26 @@ struct rapl_if_priv { void *rpi; }; +#ifdef CONFIG_PERF_EVENTS +/** + * struct rapl_package_pmu_data: Per package data for PMU support + * @scale: Scale of 2^-32 Joules for each energy counter increase. + * @lock: Lock to protect n_active and active_list. + * @n_active: Number of active events. + * @active_list: List of active events. + * @timer_interval: Maximum timer expiration time before counter overflow. + * @hrtimer: Periodically update the counter to prevent overflow. 
+ */ +struct rapl_package_pmu_data { + u64 scale[RAPL_DOMAIN_MAX]; + raw_spinlock_t lock; + int n_active; + struct list_head active_list; + ktime_t timer_interval; + struct hrtimer hrtimer; +}; +#endif + /* maximum rapl package domain name: package-%d-die-%d */ #define PACKAGE_DOMAIN_NAME_LENGTH 30 @@ -176,6 +196,10 @@ struct rapl_package { struct cpumask cpumask; char name[PACKAGE_DOMAIN_NAME_LENGTH]; struct rapl_if_priv *priv; +#ifdef CONFIG_PERF_EVENTS + bool has_pmu; + struct rapl_package_pmu_data pmu_data; +#endif }; struct rapl_package *rapl_find_package_domain_cpuslocked(int id, struct rapl_if_priv *priv, @@ -188,4 +212,12 @@ struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu); void rapl_remove_package(struct rapl_package *rp); +#ifdef CONFIG_PERF_EVENTS +int rapl_package_add_pmu(struct rapl_package *rp); +void rapl_package_remove_pmu(struct rapl_package *rp); +#else +static inline int rapl_package_add_pmu(struct rapl_package *rp) { return 0; } +static inline void rapl_package_remove_pmu(struct rapl_package *rp) { } +#endif + #endif /* __INTEL_RAPL_H__ */ -- cgit From 6aec00139d3a83e2394d4bcb0084e872b4036e8f Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Tue, 30 Apr 2024 10:28:06 -0700 Subject: cxl/core: Add region info to cxl_general_media and cxl_dram events User space may need to know which region, if any, maps the DPAs (device physical addresses) reported in a cxl_general_media or cxl_dram event. Since the mapping can change, the kernel provides this information at the time the event occurs. This informs user space that at event <timestamp> this <region> mapped this <DPA> to this <HPA>. Add the same region info that is included in the cxl_poison trace event: the DPA->HPA translation, region name, and region uuid. The new fields are inserted in the trace event and no existing fields are modified. If the DPA is not mapped, the user will see: hpa=ULLONG_MAX, region="", and uuid=0. This work must be protected by dpa_rwsem & region_rwsem since it is looking up region mappings.
Signed-off-by: Alison Schofield Reviewed-by: Dan Williams Reviewed-by: Ira Weiny Reviewed-by: Jonathan Cameron Link: https://lore.kernel.org/r/dd8d708b7a7ebfb64a27020a5eb338091336b34d.1714496730.git.alison.schofield@intel.com Signed-off-by: Dave Jiang --- drivers/cxl/core/mbox.c | 36 ++++++++++++++++++++++++++++++------ drivers/cxl/core/trace.h | 44 +++++++++++++++++++++++++++++++++++--------- include/linux/cxl-event.h | 10 ++++++++++ 3 files changed, 75 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 65185c9fa001..97e938657773 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -842,14 +842,38 @@ void cxl_event_trace_record(const struct cxl_memdev *cxlmd, enum cxl_event_type event_type, const uuid_t *uuid, union cxl_event *evt) { - if (event_type == CXL_CPER_EVENT_GEN_MEDIA) - trace_cxl_general_media(cxlmd, type, &evt->gen_media); - else if (event_type == CXL_CPER_EVENT_DRAM) - trace_cxl_dram(cxlmd, type, &evt->dram); - else if (event_type == CXL_CPER_EVENT_MEM_MODULE) + if (event_type == CXL_CPER_EVENT_MEM_MODULE) { trace_cxl_memory_module(cxlmd, type, &evt->mem_module); - else + return; + } + if (event_type == CXL_CPER_EVENT_GENERIC) { trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic); + return; + } + + if (trace_cxl_general_media_enabled() || trace_cxl_dram_enabled()) { + u64 dpa, hpa = ULLONG_MAX; + struct cxl_region *cxlr; + + /* + * These trace points are annotated with HPA and region + * translations. Take topology mutation locks and lookup + * { HPA, REGION } from { DPA, MEMDEV } in the event record. + */ + guard(rwsem_read)(&cxl_region_rwsem); + guard(rwsem_read)(&cxl_dpa_rwsem); + + dpa = le64_to_cpu(evt->common.phys_addr) & CXL_DPA_MASK; + cxlr = cxl_dpa_to_region(cxlmd, dpa); + if (cxlr) + hpa = cxl_trace_hpa(cxlr, cxlmd, dpa); + + if (event_type == CXL_CPER_EVENT_GEN_MEDIA) + trace_cxl_general_media(cxlmd, type, cxlr, hpa, + &evt->gen_media); + else if (event_type == CXL_CPER_EVENT_DRAM) + trace_cxl_dram(cxlmd, type, cxlr, hpa, &evt->dram); + } } EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL); diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h index e303e618aa05..07a0394b1d99 100644 --- a/drivers/cxl/core/trace.h +++ b/drivers/cxl/core/trace.h @@ -316,9 +316,9 @@ TRACE_EVENT(cxl_generic_event, TRACE_EVENT(cxl_general_media, TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log, - struct cxl_event_gen_media *rec), + struct cxl_region *cxlr, u64 hpa, struct cxl_event_gen_media *rec), - TP_ARGS(cxlmd, log, rec), + TP_ARGS(cxlmd, log, cxlr, hpa, rec), TP_STRUCT__entry( CXL_EVT_TP_entry @@ -330,10 +330,13 @@ TRACE_EVENT(cxl_general_media, __field(u8, channel) __field(u32, device) __array(u8, comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE) - __field(u16, validity_flags) /* Following are out of order to pack trace record */ + __field(u64, hpa) + __field_struct(uuid_t, region_uuid) + __field(u16, validity_flags) __field(u8, rank) __field(u8, dpa_flags) + __string(region_name, cxlr ? 
dev_name(&cxlr->dev) : "") ), TP_fast_assign( @@ -354,18 +357,28 @@ TRACE_EVENT(cxl_general_media, memcpy(__entry->comp_id, &rec->component_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE); __entry->validity_flags = get_unaligned_le16(&rec->validity_flags); + __entry->hpa = hpa; + if (cxlr) { + __assign_str(region_name, dev_name(&cxlr->dev)); + uuid_copy(&__entry->region_uuid, &cxlr->params.uuid); + } else { + __assign_str(region_name, ""); + uuid_copy(&__entry->region_uuid, &uuid_null); + } ), CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' " \ "descriptor='%s' type='%s' transaction_type='%s' channel=%u rank=%u " \ - "device=%x comp_id=%s validity_flags='%s'", + "device=%x comp_id=%s validity_flags='%s' " \ + "hpa=%llx region=%s region_uuid=%pUb", __entry->dpa, show_dpa_flags(__entry->dpa_flags), show_event_desc_flags(__entry->descriptor), show_mem_event_type(__entry->type), show_trans_type(__entry->transaction_type), __entry->channel, __entry->rank, __entry->device, __print_hex(__entry->comp_id, CXL_EVENT_GEN_MED_COMP_ID_SIZE), - show_valid_flags(__entry->validity_flags) + show_valid_flags(__entry->validity_flags), + __entry->hpa, __get_str(region_name), &__entry->region_uuid ) ); @@ -400,9 +413,9 @@ TRACE_EVENT(cxl_general_media, TRACE_EVENT(cxl_dram, TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log, - struct cxl_event_dram *rec), + struct cxl_region *cxlr, u64 hpa, struct cxl_event_dram *rec), - TP_ARGS(cxlmd, log, rec), + TP_ARGS(cxlmd, log, cxlr, hpa, rec), TP_STRUCT__entry( CXL_EVT_TP_entry @@ -417,10 +430,13 @@ TRACE_EVENT(cxl_dram, __field(u32, nibble_mask) __field(u32, row) __array(u8, cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE) + __field(u64, hpa) + __field_struct(uuid_t, region_uuid) __field(u8, rank) /* Out of order to pack trace record */ __field(u8, bank_group) /* Out of order to pack trace record */ __field(u8, bank) /* Out of order to pack trace record */ __field(u8, dpa_flags) /* Out of order to pack trace record */ + __string(region_name, cxlr ? 
dev_name(&cxlr->dev) : "") ), TP_fast_assign( @@ -444,12 +460,21 @@ TRACE_EVENT(cxl_dram, __entry->column = get_unaligned_le16(rec->column); memcpy(__entry->cor_mask, &rec->correction_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE); + __entry->hpa = hpa; + if (cxlr) { + __assign_str(region_name, dev_name(&cxlr->dev)); + uuid_copy(&__entry->region_uuid, &cxlr->params.uuid); + } else { + __assign_str(region_name, ""); + uuid_copy(&__entry->region_uuid, &uuid_null); + } ), CXL_EVT_TP_printk("dpa=%llx dpa_flags='%s' descriptor='%s' type='%s' " \ "transaction_type='%s' channel=%u rank=%u nibble_mask=%x " \ "bank_group=%u bank=%u row=%u column=%u cor_mask=%s " \ - "validity_flags='%s'", + "validity_flags='%s' " \ + "hpa=%llx region=%s region_uuid=%pUb", __entry->dpa, show_dpa_flags(__entry->dpa_flags), show_event_desc_flags(__entry->descriptor), show_mem_event_type(__entry->type), @@ -458,7 +483,8 @@ TRACE_EVENT(cxl_dram, __entry->bank_group, __entry->bank, __entry->row, __entry->column, __print_hex(__entry->cor_mask, CXL_EVENT_DER_CORRECTION_MASK_SIZE), - show_dram_valid_flags(__entry->validity_flags) + show_dram_valid_flags(__entry->validity_flags), + __entry->hpa, __get_str(region_name), &__entry->region_uuid ) ); diff --git a/include/linux/cxl-event.h b/include/linux/cxl-event.h index 03fa6d50d46f..5342755777cc 100644 --- a/include/linux/cxl-event.h +++ b/include/linux/cxl-event.h @@ -91,11 +91,21 @@ struct cxl_event_mem_module { u8 reserved[0x3d]; } __packed; +/* + * General Media or DRAM Event Common Fields + * - provides common access to phys_addr + */ +struct cxl_event_common { + struct cxl_event_record_hdr hdr; + __le64 phys_addr; +} __packed; + union cxl_event { struct cxl_event_generic generic; struct cxl_event_gen_media gen_media; struct cxl_event_dram dram; struct cxl_event_mem_module mem_module; + struct cxl_event_common common; } __packed; /* -- cgit From dfac21b1eeefe8ae472e3fee511cfdcf7d03bb4b Mon Sep 17 00:00:00 2001 From: Kuppuswamy Sathyanarayanan Date: Sun, 28 Apr 2024 21:04:41 -0700 Subject: ACPI: Move acpi_blacklisted() declaration to asm/acpi.h The function acpi_blacklisted() is defined only when CONFIG_X86 is enabled and is only used by X86 arch code. To align with its usage and definition conditions, move its declaration to asm/acpi.h Signed-off-by: Kuppuswamy Sathyanarayanan Reviewed-by: Andy Shevchenko [ rjw: Added empty code line in a header file ] Signed-off-by: Rafael J. 
Wysocki --- arch/x86/include/asm/acpi.h | 2 ++ include/linux/acpi.h | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index f896eed4516c..5af926c050f0 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -56,6 +56,8 @@ static inline void disable_acpi(void) extern int acpi_gsi_to_irq(u32 gsi, unsigned int *irq); +extern int acpi_blacklisted(void); + static inline void acpi_noirq_set(void) { acpi_noirq = 1; } static inline void acpi_disable_pci(void) { diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 34829f2c517a..2009f7ea5963 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -421,7 +421,7 @@ extern char *wmi_get_acpi_device_uid(const char *guid); extern char acpi_video_backlight_string[]; extern long acpi_is_video_device(acpi_handle handle); -extern int acpi_blacklisted(void); + extern void acpi_osi_setup(char *str); extern bool acpi_osi_is_win8(void); -- cgit From 05d6d492097c55f2d153fc3fd33cbe78e1e28e0a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 29 Apr 2024 13:30:09 +0000 Subject: inet: introduce dst_rtable() helper I added dst_rt6_info() in commit e8dfd42c17fa ("ipv6: introduce dst_rt6_info() helper") This patch does a similar change for IPv4. Instead of (struct rtable *)dst casts, we can use : #define dst_rtable(_ptr) \ container_of_const(_ptr, struct rtable, dst) Patch is smaller than IPv6 one, because IPv4 has skb_rtable() helper. Signed-off-by: Eric Dumazet Reviewed-by: David Ahern Reviewed-by: Sabrina Dubroca Link: https://lore.kernel.org/r/20240429133009.1227754-1-edumazet@google.com Signed-off-by: Jakub Kicinski --- drivers/infiniband/core/addr.c | 12 +++--------- drivers/net/vrf.c | 2 +- drivers/s390/net/qeth_core.h | 5 ++--- include/linux/skbuff.h | 9 --------- include/net/ip.h | 4 ++-- include/net/route.h | 11 +++++++++++ net/atm/clip.c | 2 +- net/core/dst_cache.c | 2 +- net/core/filter.c | 3 +-- net/ipv4/af_inet.c | 2 +- net/ipv4/icmp.c | 26 ++++++++++++++------------ net/ipv4/ip_input.c | 2 +- net/ipv4/ip_output.c | 8 ++++---- net/ipv4/route.c | 24 +++++++++++------------- net/ipv4/udp.c | 2 +- net/ipv4/xfrm4_policy.c | 2 +- net/l2tp/l2tp_ip.c | 2 +- net/mpls/mpls_iptunnel.c | 2 +- net/netfilter/ipvs/ip_vs_xmit.c | 2 +- net/netfilter/nf_flow_table_ip.c | 4 ++-- net/netfilter/nft_rt.c | 2 +- net/sctp/protocol.c | 4 ++-- net/tipc/udp_media.c | 2 +- 23 files changed, 64 insertions(+), 70 deletions(-) (limited to 'include/linux') diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index f20dfe70fa0e..be0743dac3ff 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -348,16 +348,10 @@ static int dst_fetch_ha(const struct dst_entry *dst, static bool has_gateway(const struct dst_entry *dst, sa_family_t family) { - const struct rtable *rt; - const struct rt6_info *rt6; + if (family == AF_INET) + return dst_rtable(dst)->rt_uses_gateway; - if (family == AF_INET) { - rt = container_of(dst, struct rtable, dst); - return rt->rt_uses_gateway; - } - - rt6 = dst_rt6_info(dst); - return rt6->rt6i_flags & RTF_GATEWAY; + return dst_rt6_info(dst)->rt6i_flags & RTF_GATEWAY; } static int fetch_ha(const struct dst_entry *dst, struct rdma_dev_addr *dev_addr, diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 784b9b2d275e..3a252ac5dd28 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -860,7 +860,7 @@ static int vrf_rt6_create(struct net_device *dev) static int 
vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); - struct rtable *rt = (struct rtable *)dst; + struct rtable *rt = dst_rtable(dst); struct net_device *dev = dst->dev; unsigned int hh_len = LL_RESERVED_SPACE(dev); struct neighbour *neigh; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 5f17a2a5d0e3..41fe8a043d61 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -970,9 +970,8 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb, static inline __be32 qeth_next_hop_v4_rcu(struct sk_buff *skb, struct dst_entry *dst) { - struct rtable *rt = (struct rtable *) dst; - - return (rt) ? rt_nexthop(rt, ip_hdr(skb)->daddr) : ip_hdr(skb)->daddr; + return (dst) ? rt_nexthop(dst_rtable(dst), ip_hdr(skb)->daddr) : + ip_hdr(skb)->daddr; } static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index f76825e5b92a..adf75d69770c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1180,15 +1180,6 @@ static inline bool skb_dst_is_noref(const struct sk_buff *skb) return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); } -/** - * skb_rtable - Returns the skb &rtable - * @skb: buffer - */ -static inline struct rtable *skb_rtable(const struct sk_buff *skb) -{ - return (struct rtable *)skb_dst(skb); -} - /* For mangling skb->pkt_type from user space side from applications * such as nft, tc, etc, we only allow a conservative subset of * possible pkt_types to be set. diff --git a/include/net/ip.h b/include/net/ip.h index 25cb688bdc62..6d735e00d3f3 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -423,7 +423,7 @@ int ip_decrease_ttl(struct iphdr *iph) static inline int ip_mtu_locked(const struct dst_entry *dst) { - const struct rtable *rt = (const struct rtable *)dst; + const struct rtable *rt = dst_rtable(dst); return rt->rt_mtu_locked || dst_metric_locked(dst, RTAX_MTU); } @@ -461,7 +461,7 @@ static inline bool ip_sk_ignore_df(const struct sock *sk) static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, bool forwarding) { - const struct rtable *rt = container_of(dst, struct rtable, dst); + const struct rtable *rt = dst_rtable(dst); struct net *net = dev_net(dst->dev); unsigned int mtu; diff --git a/include/net/route.h b/include/net/route.h index 630d1ef6868a..93833cfe9c96 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -75,6 +75,17 @@ struct rtable { rt_pmtu:31; }; +#define dst_rtable(_ptr) container_of_const(_ptr, struct rtable, dst) + +/** + * skb_rtable - Returns the skb &rtable + * @skb: buffer + */ +static inline struct rtable *skb_rtable(const struct sk_buff *skb) +{ + return dst_rtable(skb_dst(skb)); +} + static inline bool rt_is_input_route(const struct rtable *rt) { return rt->rt_is_input != 0; diff --git a/net/atm/clip.c b/net/atm/clip.c index 362e8d25a79e..42b910cb4e8e 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -345,7 +345,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb, dev->stats.tx_dropped++; return NETDEV_TX_OK; } - rt = (struct rtable *) dst; + rt = dst_rtable(dst); if (rt->rt_gw_family == AF_INET) daddr = &rt->rt_gw4; else diff --git a/net/core/dst_cache.c b/net/core/dst_cache.c index b17171345d64..0c0bdb058c5b 100644 --- a/net/core/dst_cache.c +++ b/net/core/dst_cache.c @@ -83,7 +83,7 @@ struct rtable *dst_cache_get_ip4(struct dst_cache *dst_cache, __be32 *saddr) return NULL; *saddr = 
idst->in_saddr.s_addr; - return container_of(dst, struct rtable, dst); + return dst_rtable(dst); } EXPORT_SYMBOL_GPL(dst_cache_get_ip4); diff --git a/net/core/filter.c b/net/core/filter.c index 6d319c76188b..29165744c505 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2317,8 +2317,7 @@ static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb, rcu_read_lock(); if (!nh) { - struct dst_entry *dst = skb_dst(skb); - struct rtable *rt = container_of(dst, struct rtable, dst); + struct rtable *rt = skb_rtable(skb); neigh = ip_neigh_for_gw(rt, skb, &is_v6gw); } else if (nh->nh_family == AF_INET6) { diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index a7cfeda28bb2..486a8d4f53b1 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1307,8 +1307,8 @@ static int inet_sk_reselect_saddr(struct sock *sk) int inet_sk_rebuild_header(struct sock *sk) { + struct rtable *rt = dst_rtable(__sk_dst_check(sk, 0)); struct inet_sock *inet = inet_sk(sk); - struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0); __be32 daddr; struct ip_options_rcu *inet_opt; struct flowi4 *fl4; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 437e782b9663..207482d30dc7 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -483,6 +483,7 @@ static struct rtable *icmp_route_lookup(struct net *net, struct icmp_bxm *param) { struct net_device *route_lookup_dev; + struct dst_entry *dst, *dst2; struct rtable *rt, *rt2; struct flowi4 fl4_dec; int err; @@ -508,16 +509,17 @@ static struct rtable *icmp_route_lookup(struct net *net, /* No need to clone since we're just using its address. */ rt2 = rt; - rt = (struct rtable *) xfrm_lookup(net, &rt->dst, - flowi4_to_flowi(fl4), NULL, 0); - if (!IS_ERR(rt)) { + dst = xfrm_lookup(net, &rt->dst, + flowi4_to_flowi(fl4), NULL, 0); + rt = dst_rtable(dst); + if (!IS_ERR(dst)) { if (rt != rt2) return rt; - } else if (PTR_ERR(rt) == -EPERM) { + } else if (PTR_ERR(dst) == -EPERM) { rt = NULL; - } else + } else { return rt; - + } err = xfrm_decode_session_reverse(net, skb_in, flowi4_to_flowi(&fl4_dec), AF_INET); if (err) goto relookup_failed; @@ -551,19 +553,19 @@ static struct rtable *icmp_route_lookup(struct net *net, if (err) goto relookup_failed; - rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst, - flowi4_to_flowi(&fl4_dec), NULL, - XFRM_LOOKUP_ICMP); - if (!IS_ERR(rt2)) { + dst2 = xfrm_lookup(net, &rt2->dst, flowi4_to_flowi(&fl4_dec), NULL, + XFRM_LOOKUP_ICMP); + rt2 = dst_rtable(dst2); + if (!IS_ERR(dst2)) { dst_release(&rt->dst); memcpy(fl4, &fl4_dec, sizeof(*fl4)); rt = rt2; - } else if (PTR_ERR(rt2) == -EPERM) { + } else if (PTR_ERR(dst2) == -EPERM) { if (rt) dst_release(&rt->dst); return rt2; } else { - err = PTR_ERR(rt2); + err = PTR_ERR(dst2); goto relookup_failed; } return rt; diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index 5e9c8156656a..d6fbcbd2358a 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c @@ -616,7 +616,7 @@ static void ip_list_rcv_finish(struct net *net, struct sock *sk, dst = skb_dst(skb); if (curr_dst != dst) { hint = ip_extract_route_hint(net, skb, - ((struct rtable *)dst)->rt_type); + dst_rtable(dst)->rt_type); /* dispatch old sublist */ if (!list_empty(&sublist)) diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 1fe794967211..b455bd05a7d5 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -198,7 +198,7 @@ EXPORT_SYMBOL_GPL(ip_build_and_send_pkt); static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); - struct rtable *rt = 
(struct rtable *)dst; + struct rtable *rt = dst_rtable(dst); struct net_device *dev = dst->dev; unsigned int hh_len = LL_RESERVED_SPACE(dev); struct neighbour *neigh; @@ -475,7 +475,7 @@ int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, goto packet_routed; /* Make sure we can route this packet. */ - rt = (struct rtable *)__sk_dst_check(sk, 0); + rt = dst_rtable(__sk_dst_check(sk, 0)); if (!rt) { __be32 daddr; @@ -971,7 +971,7 @@ static int __ip_append_data(struct sock *sk, bool zc = false; unsigned int maxfraglen, fragheaderlen, maxnonfragsize; int csummode = CHECKSUM_NONE; - struct rtable *rt = (struct rtable *)cork->dst; + struct rtable *rt = dst_rtable(cork->dst); bool paged, hold_tskey, extra_uref = false; unsigned int wmem_alloc_delta = 0; u32 tskey = 0; @@ -1390,7 +1390,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk, struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); struct ip_options *opt = NULL; - struct rtable *rt = (struct rtable *)cork->dst; + struct rtable *rt = dst_rtable(cork->dst); struct iphdr *iph; u8 pmtudisc, ttl; __be16 df = 0; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index f89ff2e5a05b..0fd9a3d7ac4a 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -819,7 +819,7 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf u32 mark = skb->mark; __u8 tos = iph->tos; - rt = (struct rtable *) dst; + rt = dst_rtable(dst); __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0); __ip_do_redirect(rt, skb, &fl4, true); @@ -827,7 +827,7 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) { - struct rtable *rt = (struct rtable *)dst; + struct rtable *rt = dst_rtable(dst); struct dst_entry *ret = dst; if (rt) { @@ -1044,7 +1044,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu, bool confirm_neigh) { - struct rtable *rt = (struct rtable *) dst; + struct rtable *rt = dst_rtable(dst); struct flowi4 fl4; ip_rt_build_flow_key(&fl4, sk, skb); @@ -1115,7 +1115,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0); - rt = (struct rtable *)odst; + rt = dst_rtable(odst); if (odst->obsolete && !odst->ops->check(odst, 0)) { rt = ip_route_output_flow(sock_net(sk), &fl4, sk); if (IS_ERR(rt)) @@ -1124,7 +1124,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu) new = true; } - __ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu); + __ip_rt_update_pmtu(dst_rtable(xfrm_dst_path(&rt->dst)), &fl4, mtu); if (!dst_check(&rt->dst, 0)) { if (new) @@ -1181,7 +1181,7 @@ EXPORT_SYMBOL_GPL(ipv4_sk_redirect); INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie) { - struct rtable *rt = (struct rtable *) dst; + struct rtable *rt = dst_rtable(dst); /* All IPV4 dsts are created with ->obsolete set to the value * DST_OBSOLETE_FORCE_CHK which forces validation calls down @@ -1516,10 +1516,8 @@ void rt_del_uncached_list(struct rtable *rt) static void ipv4_dst_destroy(struct dst_entry *dst) { - struct rtable *rt = (struct rtable *)dst; - ip_dst_metrics_put(dst); - rt_del_uncached_list(rt); + rt_del_uncached_list(dst_rtable(dst)); } void rt_flush_dev(struct net_device *dev) @@ -2820,7 +2818,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = { struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry 
*dst_orig) { - struct rtable *ort = (struct rtable *) dst_orig; + struct rtable *ort = dst_rtable(dst_orig); struct rtable *rt; rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, DST_OBSOLETE_DEAD, 0); @@ -2865,9 +2863,9 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4, if (flp4->flowi4_proto) { flp4->flowi4_oif = rt->dst.dev->ifindex; - rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst, - flowi4_to_flowi(flp4), - sk, 0); + rt = dst_rtable(xfrm_lookup_route(net, &rt->dst, + flowi4_to_flowi(flp4), + sk, 0)); } return rt; diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 6e2446295089..fe55ff5d379b 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1217,7 +1217,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) } if (connected) - rt = (struct rtable *)sk_dst_check(sk, 0); + rt = dst_rtable(sk_dst_check(sk, 0)); if (!rt) { struct net *net = sock_net(sk); diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c index 1dda59e0aeab..fccbbd3e1a4b 100644 --- a/net/ipv4/xfrm4_policy.c +++ b/net/ipv4/xfrm4_policy.c @@ -69,7 +69,7 @@ static int xfrm4_get_saddr(struct net *net, int oif, static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, const struct flowi *fl) { - struct rtable *rt = (struct rtable *)xdst->route; + struct rtable *rt = dst_rtable(xdst->route); const struct flowi4 *fl4 = &fl->u.ip4; xdst->u.rt.rt_iif = fl4->flowi4_iif; diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 970af3983d11..19c8cc5289d5 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -459,7 +459,7 @@ static int l2tp_ip_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) fl4 = &inet->cork.fl.u.ip4; if (connected) - rt = (struct rtable *)__sk_dst_check(sk, 0); + rt = dst_rtable(__sk_dst_check(sk, 0)); rcu_read_lock(); if (!rt) { diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c index 606349c8df0e..4385fd3b13be 100644 --- a/net/mpls/mpls_iptunnel.c +++ b/net/mpls/mpls_iptunnel.c @@ -81,7 +81,7 @@ static int mpls_xmit(struct sk_buff *skb) ttl = net->mpls.default_ttl; else ttl = ip_hdr(skb)->ttl; - rt = (struct rtable *)dst; + rt = dst_rtable(dst); } else if (dst->ops->family == AF_INET6) { if (tun_encap_info->ttl_propagate == MPLS_TTL_PROP_DISABLED) ttl = tun_encap_info->default_ttl; diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 6e8b9d100ad2..3313bceb6cc9 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -318,7 +318,7 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb, if (dest) { dest_dst = __ip_vs_dst_check(dest); if (likely(dest_dst)) - rt = (struct rtable *) dest_dst->dst_cache; + rt = dst_rtable(dest_dst->dst_cache); else { dest_dst = ip_vs_dest_dst_alloc(); spin_lock_bh(&dest->dst_lock); diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c index 100887beed31..c2c005234dcd 100644 --- a/net/netfilter/nf_flow_table_ip.c +++ b/net/netfilter/nf_flow_table_ip.c @@ -434,7 +434,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, return NF_ACCEPT; if (unlikely(tuplehash->tuple.xmit_type == FLOW_OFFLOAD_XMIT_XFRM)) { - rt = (struct rtable *)tuplehash->tuple.dst_cache; + rt = dst_rtable(tuplehash->tuple.dst_cache); memset(skb->cb, 0, sizeof(struct inet_skb_parm)); IPCB(skb)->iif = skb->dev->ifindex; IPCB(skb)->flags = IPSKB_FORWARDED; @@ -446,7 +446,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, switch (tuplehash->tuple.xmit_type) { case FLOW_OFFLOAD_XMIT_NEIGH: - rt = 
(struct rtable *)tuplehash->tuple.dst_cache; + rt = dst_rtable(tuplehash->tuple.dst_cache); outdev = rt->dst.dev; skb->dev = outdev; nexthop = rt_nexthop(rt, flow->tuplehash[!dir].tuple.src_v4.s_addr); diff --git a/net/netfilter/nft_rt.c b/net/netfilter/nft_rt.c index 2434c624aafd..14d88394bcb7 100644 --- a/net/netfilter/nft_rt.c +++ b/net/netfilter/nft_rt.c @@ -73,7 +73,7 @@ void nft_rt_get_eval(const struct nft_expr *expr, if (nft_pf(pkt) != NFPROTO_IPV4) goto err; - *dest = (__force u32)rt_nexthop((const struct rtable *)dst, + *dest = (__force u32)rt_nexthop(dst_rtable(dst), ip_hdr(skb)->daddr); break; case NFT_RT_NEXTHOP6: diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index e849f368ed91..5a7436a13b74 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -552,7 +552,7 @@ static void sctp_v4_get_saddr(struct sctp_sock *sk, struct flowi *fl) { union sctp_addr *saddr = &t->saddr; - struct rtable *rt = (struct rtable *)t->dst; + struct rtable *rt = dst_rtable(t->dst); if (rt) { saddr->v4.sin_family = AF_INET; @@ -1085,7 +1085,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb, struct sctp_transport *t) skb_reset_inner_mac_header(skb); skb_reset_inner_transport_header(skb); skb_set_inner_ipproto(skb, IPPROTO_SCTP); - udp_tunnel_xmit_skb((struct rtable *)dst, sk, skb, fl4->saddr, + udp_tunnel_xmit_skb(dst_rtable(dst), sk, skb, fl4->saddr, fl4->daddr, dscp, ip4_dst_hoplimit(dst), df, sctp_sk(sk)->udp_port, t->encap_port, false, false); return 0; diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c index f892b0903dba..b849a3d133a0 100644 --- a/net/tipc/udp_media.c +++ b/net/tipc/udp_media.c @@ -174,7 +174,7 @@ static int tipc_udp_xmit(struct net *net, struct sk_buff *skb, local_bh_disable(); ndst = dst_cache_get(cache); if (dst->proto == htons(ETH_P_IP)) { - struct rtable *rt = (struct rtable *)ndst; + struct rtable *rt = dst_rtable(ndst); if (!rt) { struct flowi4 fl = { -- cgit From a86a0661b86f310c0b73a30c829648864f0b2619 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 29 Apr 2024 13:40:21 +0000 Subject: net: move sysctl_max_skb_frags to net_hotdata sysctl_max_skb_frags is used in TCP and MPTCP fast paths, move it to net_hodata for better cache locality. Signed-off-by: Eric Dumazet Reviewed-by: David Ahern Link: https://lore.kernel.org/r/20240429134025.1233626-2-edumazet@google.com Signed-off-by: Jakub Kicinski --- include/linux/skbuff.h | 2 -- include/net/hotdata.h | 1 + net/core/hotdata.c | 1 + net/core/skbuff.c | 5 +---- net/core/sysctl_net_core.c | 2 +- net/ipv4/tcp.c | 3 ++- net/mptcp/protocol.c | 3 ++- 7 files changed, 8 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index adf75d69770c..36b133f04d30 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -353,8 +353,6 @@ struct sk_buff; #define MAX_SKB_FRAGS CONFIG_MAX_SKB_FRAGS -extern int sysctl_max_skb_frags; - /* Set skb_shinfo(skb)->gso_size to this in case you want skb_segment to * segment using its current segmentation instead. 
*/ diff --git a/include/net/hotdata.h b/include/net/hotdata.h index 003667a1efd6..a6cff6590426 100644 --- a/include/net/hotdata.h +++ b/include/net/hotdata.h @@ -38,6 +38,7 @@ struct net_hotdata { int max_backlog; int dev_tx_weight; int dev_rx_weight; + int sysctl_max_skb_frags; }; #define inet_ehash_secret net_hotdata.tcp_protocol.secret diff --git a/net/core/hotdata.c b/net/core/hotdata.c index c8a7a451c18a..f17cbb4807b9 100644 --- a/net/core/hotdata.c +++ b/net/core/hotdata.c @@ -18,5 +18,6 @@ struct net_hotdata net_hotdata __cacheline_aligned = { .max_backlog = 1000, .dev_tx_weight = 64, .dev_rx_weight = 64, + .sysctl_max_skb_frags = MAX_SKB_FRAGS, }; EXPORT_SYMBOL(net_hotdata); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 0c8b82750000..65779b8f0b12 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -109,9 +109,6 @@ static struct kmem_cache *skbuff_ext_cache __ro_after_init; #define SKB_SMALL_HEAD_HEADROOM \ SKB_WITH_OVERHEAD(SKB_SMALL_HEAD_CACHE_SIZE) -int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS; -EXPORT_SYMBOL(sysctl_max_skb_frags); - /* kcm_write_msgs() relies on casting paged frags to bio_vec to use * iov_iter_bvec(). These static asserts ensure the cast is valid is long as the * netmem is a page. @@ -7040,7 +7037,7 @@ static void skb_splice_csum_page(struct sk_buff *skb, struct page *page, ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter, ssize_t maxsize, gfp_t gfp) { - size_t frag_limit = READ_ONCE(sysctl_max_skb_frags); + size_t frag_limit = READ_ONCE(net_hotdata.sysctl_max_skb_frags); struct page *pages[8], **ppages = pages; ssize_t spliced = 0, ret = 0; unsigned int i; diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 903ab4a51c17..e75375d54b9e 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -595,7 +595,7 @@ static struct ctl_table net_core_table[] = { }, { .procname = "max_skb_frags", - .data = &sysctl_max_skb_frags, + .data = &net_hotdata.sysctl_max_skb_frags, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 4ec0f4feee00..388f6e115bf1 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -280,6 +280,7 @@ #include #include #include +#include #include /* Track pending CMSGs. */ @@ -1188,7 +1189,7 @@ new_segment: if (!skb_can_coalesce(skb, i, pfrag->page, pfrag->offset)) { - if (i >= READ_ONCE(sysctl_max_skb_frags)) { + if (i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) { tcp_mark_push(tp, skb); goto new_segment; } diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 4b13ca362efa..aff17597e6a7 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -20,6 +20,7 @@ #include #endif #include +#include #include #include #include "protocol.h" @@ -1272,7 +1273,7 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, i = skb_shinfo(skb)->nr_frags; can_coalesce = skb_can_coalesce(skb, i, dfrag->page, offset); - if (!can_coalesce && i >= READ_ONCE(sysctl_max_skb_frags)) { + if (!can_coalesce && i >= READ_ONCE(net_hotdata.sysctl_max_skb_frags)) { tcp_mark_push(tcp_sk(ssk), skb); goto alloc_skb; } -- cgit From 73142cab3af1b99157837297f437b306d7a70bff Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Wed, 7 Feb 2024 16:35:47 +0100 Subject: fprobe: Add entry/exit callbacks types We are going to store callbacks in following change, so this will ease up the code. 
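As a minimal sketch of what the new types allow, a handler can now be declared against fprobe_entry_cb directly (the handler and fprobe names below are illustrative only, not part of this patch):

	static int my_entry_handler(struct fprobe *fp, unsigned long entry_ip,
				    unsigned long ret_ip, struct pt_regs *regs,
				    void *entry_data)
	{
		/* Matches the new fprobe_entry_cb signature. */
		return 0;
	}

	static struct fprobe my_fprobe = {
		.entry_handler	= my_entry_handler,
	};
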
Link: https://lore.kernel.org/all/20240207153550.856536-2-jolsa@kernel.org/ Signed-off-by: Jiri Olsa Acked-by: Masami Hiramatsu (Google) Signed-off-by: Masami Hiramatsu (Google) --- include/linux/fprobe.h | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h index 3e03758151f4..f39869588117 100644 --- a/include/linux/fprobe.h +++ b/include/linux/fprobe.h @@ -7,6 +7,16 @@ #include #include +struct fprobe; + +typedef int (*fprobe_entry_cb)(struct fprobe *fp, unsigned long entry_ip, + unsigned long ret_ip, struct pt_regs *regs, + void *entry_data); + +typedef void (*fprobe_exit_cb)(struct fprobe *fp, unsigned long entry_ip, + unsigned long ret_ip, struct pt_regs *regs, + void *entry_data); + /** * struct fprobe - ftrace based probe. * @ops: The ftrace_ops. @@ -34,12 +44,8 @@ struct fprobe { size_t entry_data_size; int nr_maxactive; - int (*entry_handler)(struct fprobe *fp, unsigned long entry_ip, - unsigned long ret_ip, struct pt_regs *regs, - void *entry_data); - void (*exit_handler)(struct fprobe *fp, unsigned long entry_ip, - unsigned long ret_ip, struct pt_regs *regs, - void *entry_data); + fprobe_entry_cb entry_handler; + fprobe_exit_cb exit_handler; }; /* This fprobe is soft-disabled. */ -- cgit From b0e28a4b5becea84ae6fca5cbd8a6b80a134e223 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Thu, 18 Apr 2024 12:09:08 -0700 Subject: ftrace: make extra rcu_is_watching() validation check optional Introduce CONFIG_FTRACE_VALIDATE_RCU_IS_WATCHING config option to control whether ftrace low-level code performs additional rcu_is_watching()-based validation logic in an attempt to catch noinstr violations. This check is expected to never be true and is mostly useful for low-level validation of ftrace subsystem invariants. For most users it should probably be kept disabled to eliminate unnecessary runtime overhead. This improves BPF multi-kretprobe (relying on ftrace and rethook infrastructure) runtime throughput by 2%, according to BPF benchmarks ([0]). [0] https://lore.kernel.org/bpf/CAEf4BzauQ2WKMjZdc9s0rBWa01BYbgwHN6aNDXQSHYia47pQ-w@mail.gmail.com/ Link: https://lore.kernel.org/all/20240418190909.704286-1-andrii@kernel.org/ Cc: Steven Rostedt Cc: Masami Hiramatsu Cc: Paul E. McKenney Acked-by: Masami Hiramatsu (Google) Signed-off-by: Andrii Nakryiko Signed-off-by: Masami Hiramatsu (Google) --- include/linux/trace_recursion.h | 2 +- kernel/trace/Kconfig | 13 +++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h index d48cd92d2364..24ea8ac049b4 100644 --- a/include/linux/trace_recursion.h +++ b/include/linux/trace_recursion.h @@ -135,7 +135,7 @@ extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip); # define do_ftrace_record_recursion(ip, pip) do { } while (0) #endif -#ifdef CONFIG_ARCH_WANTS_NO_INSTR +#ifdef CONFIG_FTRACE_VALIDATE_RCU_IS_WATCHING # define trace_warn_on_no_rcu(ip) \ ({ \ bool __ret = !rcu_is_watching(); \ diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 47345bf1d4a9..d093e16d01c1 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig @@ -974,6 +974,19 @@ config FTRACE_RECORD_RECURSION_SIZE This file can be reset, but the limit can not change in size at runtime. 
+config FTRACE_VALIDATE_RCU_IS_WATCHING + bool "Validate RCU is on during ftrace execution" + depends on FUNCTION_TRACER + depends on ARCH_WANTS_NO_INSTR + help + All callbacks that attach to the function tracing have some sort of + protection against recursion. This option is only to verify that + ftrace (and other users of ftrace_test_recursion_trylock()) are not + called outside of RCU, as if they are, it can cause a race. But it + also has a noticeable overhead when enabled. + + If unsure, say N + config RING_BUFFER_RECORD_RECURSION bool "Record functions that recurse in the ring buffer" depends on FTRACE_RECORD_RECURSION -- cgit From a3b00f10da808bd4a354f890b551cba471082d0e Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 24 Apr 2024 14:52:13 -0700 Subject: objpool: enable inlining objpool_push() and objpool_pop() operations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit objpool_push() and objpool_pop() are very performance-critical functions and can be called very frequently in kretprobe triggering path. As such, it makes sense to allow compiler to inline them completely to eliminate function calls overhead. Luckily, their logic is quite well isolated and doesn't have any sprawling dependencies. This patch moves both objpool_push() and objpool_pop() into include/linux/objpool.h and marks them as static inline functions, enabling inlining. To avoid anyone using internal helpers (objpool_try_get_slot, objpool_try_add_slot), rename them to use leading underscores. We used kretprobe microbenchmark from BPF selftests (bench trig-kprobe and trig-kprobe-multi benchmarks) running no-op BPF kretprobe/kretprobe.multi programs in a tight loop to evaluate the effect. BPF own overhead in this case is minimal and it mostly stresses the rest of in-kernel kretprobe infrastructure overhead. Results are in millions of calls per second. This is not super scientific, but shows the trend nevertheless. 
BEFORE ====== kretprobe : 9.794 ± 0.086M/s kretprobe-multi: 10.219 ± 0.032M/s AFTER ===== kretprobe : 9.937 ± 0.174M/s (+1.5%) kretprobe-multi: 10.440 ± 0.108M/s (+2.2%) Link: https://lore.kernel.org/all/20240424215214.3956041-2-andrii@kernel.org/ Cc: Matt (Qiang) Wu Signed-off-by: Andrii Nakryiko Signed-off-by: Masami Hiramatsu (Google) --- include/linux/objpool.h | 101 +++++++++++++++++++++++++++++++++++++++++++++++- lib/objpool.c | 100 ----------------------------------------------- 2 files changed, 99 insertions(+), 102 deletions(-) (limited to 'include/linux') diff --git a/include/linux/objpool.h b/include/linux/objpool.h index 15aff4a17f0c..d8b1f7b91128 100644 --- a/include/linux/objpool.h +++ b/include/linux/objpool.h @@ -5,6 +5,10 @@ #include #include +#include +#include +#include +#include /* * objpool: ring-array based lockless MPMC queue @@ -118,13 +122,94 @@ int objpool_init(struct objpool_head *pool, int nr_objs, int object_size, gfp_t gfp, void *context, objpool_init_obj_cb objinit, objpool_fini_cb release); +/* try to retrieve object from slot */ +static inline void *__objpool_try_get_slot(struct objpool_head *pool, int cpu) +{ + struct objpool_slot *slot = pool->cpu_slots[cpu]; + /* load head snapshot, other cpus may change it */ + uint32_t head = smp_load_acquire(&slot->head); + + while (head != READ_ONCE(slot->last)) { + void *obj; + + /* + * data visibility of 'last' and 'head' could be out of + * order since memory updating of 'last' and 'head' are + * performed in push() and pop() independently + * + * before any retrieving attempts, pop() must guarantee + * 'last' is behind 'head', that is to say, there must + * be available objects in slot, which could be ensured + * by condition 'last != head && last - head <= nr_objs' + * that is equivalent to 'last - head - 1 < nr_objs' as + * 'last' and 'head' are both unsigned int32 + */ + if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) { + head = READ_ONCE(slot->head); + continue; + } + + /* obj must be retrieved before moving forward head */ + obj = READ_ONCE(slot->entries[head & slot->mask]); + + /* move head forward to mark it's consumption */ + if (try_cmpxchg_release(&slot->head, &head, head + 1)) + return obj; + } + + return NULL; +} + /** * objpool_pop() - allocate an object from objpool * @pool: object pool * * return value: object ptr or NULL if failed */ -void *objpool_pop(struct objpool_head *pool); +static inline void *objpool_pop(struct objpool_head *pool) +{ + void *obj = NULL; + unsigned long flags; + int i, cpu; + + /* disable local irq to avoid preemption & interruption */ + raw_local_irq_save(flags); + + cpu = raw_smp_processor_id(); + for (i = 0; i < num_possible_cpus(); i++) { + obj = __objpool_try_get_slot(pool, cpu); + if (obj) + break; + cpu = cpumask_next_wrap(cpu, cpu_possible_mask, -1, 1); + } + raw_local_irq_restore(flags); + + return obj; +} + +/* adding object to slot, abort if the slot was already full */ +static inline int +__objpool_try_add_slot(void *obj, struct objpool_head *pool, int cpu) +{ + struct objpool_slot *slot = pool->cpu_slots[cpu]; + uint32_t head, tail; + + /* loading tail and head as a local snapshot, tail first */ + tail = READ_ONCE(slot->tail); + + do { + head = READ_ONCE(slot->head); + /* fault caught: something must be wrong */ + WARN_ON_ONCE(tail - head > pool->nr_objs); + } while (!try_cmpxchg_acquire(&slot->tail, &tail, tail + 1)); + + /* now the tail position is reserved for the given obj */ + WRITE_ONCE(slot->entries[tail & slot->mask], obj); + /* update sequence 
to make this obj available for pop() */ + smp_store_release(&slot->last, tail + 1); + + return 0; +} /** * objpool_push() - reclaim the object and return back to objpool @@ -134,7 +219,19 @@ void *objpool_pop(struct objpool_head *pool); * return: 0 or error code (it fails only when user tries to push * the same object multiple times or wrong "objects" into objpool) */ -int objpool_push(void *obj, struct objpool_head *pool); +static inline int objpool_push(void *obj, struct objpool_head *pool) +{ + unsigned long flags; + int rc; + + /* disable local irq to avoid preemption & interruption */ + raw_local_irq_save(flags); + rc = __objpool_try_add_slot(obj, pool, raw_smp_processor_id()); + raw_local_irq_restore(flags); + + return rc; +} + /** * objpool_drop() - discard the object and deref objpool diff --git a/lib/objpool.c b/lib/objpool.c index cfdc02420884..f696308fc026 100644 --- a/lib/objpool.c +++ b/lib/objpool.c @@ -152,106 +152,6 @@ int objpool_init(struct objpool_head *pool, int nr_objs, int object_size, } EXPORT_SYMBOL_GPL(objpool_init); -/* adding object to slot, abort if the slot was already full */ -static inline int -objpool_try_add_slot(void *obj, struct objpool_head *pool, int cpu) -{ - struct objpool_slot *slot = pool->cpu_slots[cpu]; - uint32_t head, tail; - - /* loading tail and head as a local snapshot, tail first */ - tail = READ_ONCE(slot->tail); - - do { - head = READ_ONCE(slot->head); - /* fault caught: something must be wrong */ - WARN_ON_ONCE(tail - head > pool->nr_objs); - } while (!try_cmpxchg_acquire(&slot->tail, &tail, tail + 1)); - - /* now the tail position is reserved for the given obj */ - WRITE_ONCE(slot->entries[tail & slot->mask], obj); - /* update sequence to make this obj available for pop() */ - smp_store_release(&slot->last, tail + 1); - - return 0; -} - -/* reclaim an object to object pool */ -int objpool_push(void *obj, struct objpool_head *pool) -{ - unsigned long flags; - int rc; - - /* disable local irq to avoid preemption & interruption */ - raw_local_irq_save(flags); - rc = objpool_try_add_slot(obj, pool, raw_smp_processor_id()); - raw_local_irq_restore(flags); - - return rc; -} -EXPORT_SYMBOL_GPL(objpool_push); - -/* try to retrieve object from slot */ -static inline void *objpool_try_get_slot(struct objpool_head *pool, int cpu) -{ - struct objpool_slot *slot = pool->cpu_slots[cpu]; - /* load head snapshot, other cpus may change it */ - uint32_t head = smp_load_acquire(&slot->head); - - while (head != READ_ONCE(slot->last)) { - void *obj; - - /* - * data visibility of 'last' and 'head' could be out of - * order since memory updating of 'last' and 'head' are - * performed in push() and pop() independently - * - * before any retrieving attempts, pop() must guarantee - * 'last' is behind 'head', that is to say, there must - * be available objects in slot, which could be ensured - * by condition 'last != head && last - head <= nr_objs' - * that is equivalent to 'last - head - 1 < nr_objs' as - * 'last' and 'head' are both unsigned int32 - */ - if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) { - head = READ_ONCE(slot->head); - continue; - } - - /* obj must be retrieved before moving forward head */ - obj = READ_ONCE(slot->entries[head & slot->mask]); - - /* move head forward to mark it's consumption */ - if (try_cmpxchg_release(&slot->head, &head, head + 1)) - return obj; - } - - return NULL; -} - -/* allocate an object from object pool */ -void *objpool_pop(struct objpool_head *pool) -{ - void *obj = NULL; - unsigned long flags; - int i, cpu; - 
- /* disable local irq to avoid preemption & interruption */ - raw_local_irq_save(flags); - - cpu = raw_smp_processor_id(); - for (i = 0; i < num_possible_cpus(); i++) { - obj = objpool_try_get_slot(pool, cpu); - if (obj) - break; - cpu = cpumask_next_wrap(cpu, cpu_possible_mask, -1, 1); - } - raw_local_irq_restore(flags); - - return obj; -} -EXPORT_SYMBOL_GPL(objpool_pop); - /* release whole objpool forcely */ void objpool_free(struct objpool_head *pool) { -- cgit From 78d0b16127daa26d016c215a089ae330878291f7 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Wed, 24 Apr 2024 14:52:14 -0700 Subject: objpool: cache nr_possible_cpus() and avoid caching nr_cpu_ids MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Profiling shows that calling nr_possible_cpus() in objpool_pop() takes a noticeable amount of CPU (when profiled on 80-core machine), as we need to recalculate number of set bits in a CPU bit mask. This number can't change, so there is no point in paying the price for recalculating it. As such, cache this value in struct objpool_head and use it in objpool_pop(). On the other hand, cached pool->nr_cpus isn't necessary, as it's not used in hot path and is also a pretty trivial value to retrieve. So drop pool->nr_cpus in favor of using nr_cpu_ids everywhere. This way the size of struct objpool_head remains the same, which is a nice bonus. Same BPF selftests benchmarks were used to evaluate the effect. Using changes in previous patch (inlining of objpool_pop/objpool_push) as baseline, here are the differences: BASELINE ======== kretprobe : 9.937 ± 0.174M/s kretprobe-multi: 10.440 ± 0.108M/s AFTER ===== kretprobe : 10.106 ± 0.120M/s (+1.7%) kretprobe-multi: 10.515 ± 0.180M/s (+0.7%) Link: https://lore.kernel.org/all/20240424215214.3956041-3-andrii@kernel.org/ Cc: Matt (Qiang) Wu Signed-off-by: Andrii Nakryiko Signed-off-by: Masami Hiramatsu (Google) --- include/linux/objpool.h | 6 +++--- lib/objpool.c | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/include/linux/objpool.h b/include/linux/objpool.h index d8b1f7b91128..cb1758eaa2d3 100644 --- a/include/linux/objpool.h +++ b/include/linux/objpool.h @@ -73,7 +73,7 @@ typedef int (*objpool_fini_cb)(struct objpool_head *head, void *context); * struct objpool_head - object pooling metadata * @obj_size: object size, aligned to sizeof(void *) * @nr_objs: total objs (to be pre-allocated with objpool) - * @nr_cpus: local copy of nr_cpu_ids + * @nr_possible_cpus: cached value of num_possible_cpus() * @capacity: max objs can be managed by one objpool_slot * @gfp: gfp flags for kmalloc & vmalloc * @ref: refcount of objpool @@ -85,7 +85,7 @@ typedef int (*objpool_fini_cb)(struct objpool_head *head, void *context); struct objpool_head { int obj_size; int nr_objs; - int nr_cpus; + int nr_possible_cpus; int capacity; gfp_t gfp; refcount_t ref; @@ -176,7 +176,7 @@ static inline void *objpool_pop(struct objpool_head *pool) raw_local_irq_save(flags); cpu = raw_smp_processor_id(); - for (i = 0; i < num_possible_cpus(); i++) { + for (i = 0; i < pool->nr_possible_cpus; i++) { obj = __objpool_try_get_slot(pool, cpu); if (obj) break; diff --git a/lib/objpool.c b/lib/objpool.c index f696308fc026..234f9d0bd081 100644 --- a/lib/objpool.c +++ b/lib/objpool.c @@ -50,7 +50,7 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs, { int i, cpu_count = 0; - for (i = 0; i < pool->nr_cpus; i++) { + for (i = 0; i < nr_cpu_ids; i++) { struct objpool_slot *slot; 
int nodes, size, rc; @@ -60,8 +60,8 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs, continue; /* compute how many objects to be allocated with this slot */ - nodes = nr_objs / num_possible_cpus(); - if (cpu_count < (nr_objs % num_possible_cpus())) + nodes = nr_objs / pool->nr_possible_cpus; + if (cpu_count < (nr_objs % pool->nr_possible_cpus)) nodes++; cpu_count++; @@ -103,7 +103,7 @@ static void objpool_fini_percpu_slots(struct objpool_head *pool) if (!pool->cpu_slots) return; - for (i = 0; i < pool->nr_cpus; i++) + for (i = 0; i < nr_cpu_ids; i++) kvfree(pool->cpu_slots[i]); kfree(pool->cpu_slots); } @@ -130,13 +130,13 @@ int objpool_init(struct objpool_head *pool, int nr_objs, int object_size, /* initialize objpool pool */ memset(pool, 0, sizeof(struct objpool_head)); - pool->nr_cpus = nr_cpu_ids; + pool->nr_possible_cpus = num_possible_cpus(); pool->obj_size = object_size; pool->capacity = capacity; pool->gfp = gfp & ~__GFP_ZERO; pool->context = context; pool->release = release; - slot_size = pool->nr_cpus * sizeof(struct objpool_slot); + slot_size = nr_cpu_ids * sizeof(struct objpool_slot); pool->cpu_slots = kzalloc(slot_size, pool->gfp); if (!pool->cpu_slots) return -ENOMEM; -- cgit From cd7eb8f83fcf258f71e293f7fc52a70be8ed0128 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sun, 28 Apr 2024 17:26:44 +0300 Subject: mm/slab: make __free(kfree) accept error pointers Currently, if an automatically freed allocation is an error pointer that will lead to a crash. An example of this is in wm831x_gpio_dbg_show(). 171 char *label __free(kfree) = gpiochip_dup_line_label(chip, i); 172 if (IS_ERR(label)) { 173 dev_err(wm831x->dev, "Failed to duplicate label\n"); 174 continue; 175 } The auto clean up function should check for error pointers as well, otherwise we're going to keep hitting issues like this. Fixes: 54da6a092431 ("locking: Introduce __cleanup() based infrastructure") Cc: Signed-off-by: Dan Carpenter Acked-by: David Rientjes Signed-off-by: Vlastimil Babka --- include/linux/slab.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/slab.h b/include/linux/slab.h index e53cbfa18325..739b21262507 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -266,7 +266,7 @@ void kfree(const void *objp); void kfree_sensitive(const void *objp); size_t __ksize(const void *objp); -DEFINE_FREE(kfree, void *, if (_T) kfree(_T)) +DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T)) /** * ksize - Report actual allocation size of associated object @@ -792,7 +792,7 @@ static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t fla extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags) __realloc_size(3); extern void kvfree(const void *addr); -DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T)) +DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T)) extern void kvfree_sensitive(const void *addr, size_t len); -- cgit From 5e4a264bf8b55d5a1b4c8cd96ac10b99f98d7487 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Fri, 26 Apr 2024 20:34:00 -0700 Subject: acpi/ghes: Process CXL Component Events BIOS can configure memory devices as firmware first. This will send CXL events to the firmware instead of the OS. The firmware can then inform the OS of these events via UEFI. UEFI v2.10 section N.2.14 defines a Common Platform Error Record (CPER) format for CXL Component Events. The format is mostly the same as the CXL Common Event Record Format. 
The difference lies in the use of a GUID as the CPER Section Type which matches the UUID defined in CXL 3.1 Table 8-43. Currently a configuration such as this will trace a non standard event in the log omitting useful details of the event. In addition the CXL sub-system contains additional region and HPA information useful to the user.[0] The CXL code is required to be called from process context as it needs to take a device lock. The GHES code may be in interrupt context. This complicated the use of a callback. Dan Williams suggested the use of work items as an atomic way of switching between the callback execution and a default handler.[1] The use of a kfifo simplifies queue processing by providing lock free fifo operations. cxl_cper_kfifo_get() allows easier management of the kfifo between the ghes and cxl modules. CXL 3.1 Table 8-127 requires a device to have a queue depth of 1 for each of the four event logs. A combined queue depth of 32 is chosen to provide room for 8 entries of each log type. Add GHES support to detect CXL CPER records. Add the ability for the CXL sub-system to register a work queue to process the events. This patch adds back the functionality which was removed to fix the report by Dan Carpenter[2]. Cc: Ard Biesheuvel Cc: Rafael J. Wysocki Cc: Tony Luck Cc: Borislav Petkov Suggested-by: Dan Carpenter Suggested-by: Dan Williams Link: http://lore.kernel.org/r/cover.1711598777.git.alison.schofield@intel.com [0] Link: http://lore.kernel.org/r/65d111eb87115_6c745294ac@dwillia2-xfh.jf.intel.com.notmuch [1] Link: http://lore.kernel.org/r/b963c490-2c13-4b79-bbe7-34c6568423c7@moroto.mountain [2] Reviewed-by: Dan Williams Signed-off-by: Ira Weiny Reviewed-by: Jonathan Cameron Reviewed-by: Tony Luck Tested-by: Smita Koralahalli Link: https://lore.kernel.org/r/20240426-cxl-cper3-v4-1-58076cce1624@intel.com Signed-off-by: Dave Jiang --- drivers/acpi/apei/ghes.c | 110 ++++++++++++++++++++++++++++++++++++++++++++++ include/linux/cxl-event.h | 27 ++++++++++++ 2 files changed, 137 insertions(+) (limited to 'include/linux') diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 512067cac170..2247a1535b52 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -26,6 +26,8 @@ #include #include #include +#include +#include #include #include #include @@ -33,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -673,6 +676,101 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata, schedule_work(&entry->work); } +/* CXL Event record UUIDs are formated as GUIDs and reported in section type */ + +/* + * General Media Event Record + * CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43 + */ +#define CPER_SEC_CXL_GEN_MEDIA_GUID \ + GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \ + 0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6) + +/* + * DRAM Event Record + * CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44 + */ +#define CPER_SEC_CXL_DRAM_GUID \ + GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \ + 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24) + +/* + * Memory Module Event Record + * CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45 + */ +#define CPER_SEC_CXL_MEM_MODULE_GUID \ + GUID_INIT(0xfe927475, 0xdd59, 0x4339, \ + 0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74) + +/* Room for 8 entries for each of the 4 event log queues */ +#define CXL_CPER_FIFO_DEPTH 32 +DEFINE_KFIFO(cxl_cper_fifo, struct cxl_cper_work_data, CXL_CPER_FIFO_DEPTH); + +/* Synchronize schedule_work() with cxl_cper_work changes */ +static DEFINE_SPINLOCK(cxl_cper_work_lock); 
+struct work_struct *cxl_cper_work; + +static void cxl_cper_post_event(enum cxl_event_type event_type, + struct cxl_cper_event_rec *rec) +{ + struct cxl_cper_work_data wd; + + if (rec->hdr.length <= sizeof(rec->hdr) || + rec->hdr.length > sizeof(*rec)) { + pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n", + rec->hdr.length); + return; + } + + if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) { + pr_err(FW_WARN "CXL CPER invalid event\n"); + return; + } + + guard(spinlock_irqsave)(&cxl_cper_work_lock); + + if (!cxl_cper_work) + return; + + wd.event_type = event_type; + memcpy(&wd.rec, rec, sizeof(wd.rec)); + + if (!kfifo_put(&cxl_cper_fifo, wd)) { + pr_err_ratelimited("CXL CPER kfifo overflow\n"); + return; + } + + schedule_work(cxl_cper_work); +} + +int cxl_cper_register_work(struct work_struct *work) +{ + if (cxl_cper_work) + return -EINVAL; + + guard(spinlock)(&cxl_cper_work_lock); + cxl_cper_work = work; + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_cper_register_work, CXL); + +int cxl_cper_unregister_work(struct work_struct *work) +{ + if (cxl_cper_work != work) + return -EINVAL; + + guard(spinlock)(&cxl_cper_work_lock); + cxl_cper_work = NULL; + return 0; +} +EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_work, CXL); + +int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd) +{ + return kfifo_get(&cxl_cper_fifo, wd); +} +EXPORT_SYMBOL_NS_GPL(cxl_cper_kfifo_get, CXL); + static bool ghes_do_proc(struct ghes *ghes, const struct acpi_hest_generic_status *estatus) { @@ -707,6 +805,18 @@ static bool ghes_do_proc(struct ghes *ghes, } else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) { queued = ghes_handle_arm_hw_error(gdata, sev, sync); + } else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) { + struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata); + + cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec); + } else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) { + struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata); + + cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec); + } else if (guid_equal(sec_type, &CPER_SEC_CXL_MEM_MODULE_GUID)) { + struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata); + + cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec); } else { void *err = acpi_hest_get_payload(gdata); diff --git a/include/linux/cxl-event.h b/include/linux/cxl-event.h index 03fa6d50d46f..a0067c49e2ca 100644 --- a/include/linux/cxl-event.h +++ b/include/linux/cxl-event.h @@ -3,6 +3,8 @@ #ifndef _LINUX_CXL_EVENT_H #define _LINUX_CXL_EVENT_H +#include + /* * Common Event Record Format * CXL rev 3.0 section 8.2.9.2.1; Table 8-42 @@ -140,4 +142,29 @@ struct cxl_cper_event_rec { union cxl_event event; } __packed; +struct cxl_cper_work_data { + enum cxl_event_type event_type; + struct cxl_cper_event_rec rec; +}; + +#ifdef CONFIG_ACPI_APEI_GHES +int cxl_cper_register_work(struct work_struct *work); +int cxl_cper_unregister_work(struct work_struct *work); +int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd); +#else +static inline int cxl_cper_register_work(struct work_struct *work); +{ + return 0; +} + +static inline int cxl_cper_unregister_work(struct work_struct *work); +{ + return 0; +} +static inline int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd) +{ + return 0; +} +#endif + #endif /* _LINUX_CXL_EVENT_H */ -- cgit From 74e797d79cf131d9b1a54001cf64a0b492a83e60 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 27 Mar 2024 08:51:38 +0000 Subject: mm: Provide a means of invalidation without using launder_folio Implement a replacement for 
launder_folio. The key feature of invalidate_inode_pages2() is that it locks each folio individually, unmaps it to prevent mmap'd accesses interfering and calls the ->launder_folio() address_space op to flush it. This has problems: firstly, each folio is written individually as one or more small writes; secondly, adjacent folios cannot be added so easily into the laundry; thirdly, it's yet another op to implement. Instead, use the invalidate lock to cause anyone wanting to add a folio to the inode to wait, then unmap all the folios if we have mmaps, then, conditionally, use ->writepages() to flush any dirty data back and then discard all pages. The invalidate lock prevents ->read_iter(), ->write_iter() and faulting through mmap all from adding pages for the duration. This is then used from netfslib to handle the flusing in unbuffered and direct writes. Signed-off-by: David Howells cc: Matthew Wilcox cc: Miklos Szeredi cc: Trond Myklebust cc: Christoph Hellwig cc: Andrew Morton cc: Alexander Viro cc: Christian Brauner cc: Jeff Layton cc: linux-mm@kvack.org cc: linux-fsdevel@vger.kernel.org cc: netfs@lists.linux.dev cc: v9fs@lists.linux.dev cc: linux-afs@lists.infradead.org cc: ceph-devel@vger.kernel.org cc: linux-cifs@vger.kernel.org cc: linux-nfs@vger.kernel.org cc: devel@lists.orangefs.org --- fs/netfs/direct_write.c | 28 +++++++++++++++++++++---- include/linux/pagemap.h | 2 ++ mm/filemap.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 80 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c index bee047e20f5d..2b81cd4aae6e 100644 --- a/fs/netfs/direct_write.c +++ b/fs/netfs/direct_write.c @@ -132,12 +132,14 @@ out: ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; - struct inode *inode = file->f_mapping->host; + struct address_space *mapping = file->f_mapping; + struct inode *inode = mapping->host; struct netfs_inode *ictx = netfs_inode(inode); - unsigned long long end; ssize_t ret; + loff_t pos = iocb->ki_pos; + unsigned long long end = pos + iov_iter_count(from) - 1; - _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode)); + _enter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode)); if (!iov_iter_count(from)) return 0; @@ -157,7 +159,25 @@ ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from) ret = file_update_time(file); if (ret < 0) goto out; - ret = kiocb_invalidate_pages(iocb, iov_iter_count(from)); + if (iocb->ki_flags & IOCB_NOWAIT) { + /* We could block if there are any pages in the range. */ + ret = -EAGAIN; + if (filemap_range_has_page(mapping, pos, end)) + if (filemap_invalidate_inode(inode, true, pos, end)) + goto out; + } else { + ret = filemap_write_and_wait_range(mapping, pos, end); + if (ret < 0) + goto out; + } + + /* + * After a write we want buffered reads to be sure to go to disk to get + * the new data. We invalidate clean cached page from the region we're + * about to write. We do this *before* the write so that we can return + * without clobbering -EIOCBQUEUED from ->direct_IO(). 
+ */ + ret = filemap_invalidate_inode(inode, true, pos, end); if (ret < 0) goto out; end = iocb->ki_pos + iov_iter_count(from); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 2df35e65557d..c5e33e2ca48a 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -40,6 +40,8 @@ int filemap_fdatawait_keep_errors(struct address_space *mapping); int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend); int filemap_fdatawait_range_keep_errors(struct address_space *mapping, loff_t start_byte, loff_t end_byte); +int filemap_invalidate_inode(struct inode *inode, bool flush, + loff_t start, loff_t end); static inline int filemap_fdatawait(struct address_space *mapping) { diff --git a/mm/filemap.c b/mm/filemap.c index 9a2e28bf298a..1d6b3a369077 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -4134,6 +4134,60 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp) } EXPORT_SYMBOL(filemap_release_folio); +/** + * filemap_invalidate_inode - Invalidate/forcibly write back a range of an inode's pagecache + * @inode: The inode to flush + * @flush: Set to write back rather than simply invalidate. + * @start: First byte to in range. + * @end: Last byte in range (inclusive), or LLONG_MAX for everything from start + * onwards. + * + * Invalidate all the folios on an inode that contribute to the specified + * range, possibly writing them back first. Whilst the operation is + * undertaken, the invalidate lock is held to prevent new folios from being + * installed. + */ +int filemap_invalidate_inode(struct inode *inode, bool flush, + loff_t start, loff_t end) +{ + struct address_space *mapping = inode->i_mapping; + pgoff_t first = start >> PAGE_SHIFT; + pgoff_t last = end >> PAGE_SHIFT; + pgoff_t nr = end == LLONG_MAX ? ULONG_MAX : last - first + 1; + + if (!mapping || !mapping->nrpages || end < start) + goto out; + + /* Prevent new folios from being added to the inode. */ + filemap_invalidate_lock(mapping); + + if (!mapping->nrpages) + goto unlock; + + unmap_mapping_pages(mapping, first, nr, false); + + /* Write back the data if we're asked to. */ + if (flush) { + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL, + .nr_to_write = LONG_MAX, + .range_start = start, + .range_end = end, + }; + + filemap_fdatawrite_wbc(mapping, &wbc); + } + + /* Wait for writeback to complete on all folios and discard. */ + truncate_inode_pages_range(mapping, start, end); + +unlock: + filemap_invalidate_unlock(mapping); +out: + return filemap_check_errors(mapping); +} +EXPORT_SYMBOL_GPL(filemap_invalidate_inode); + #ifdef CONFIG_CACHESTAT_SYSCALL /** * filemap_cachestat() - compute the page cache statistics of a mapping -- cgit From b4ff7b178bda0ce4ec9f799c6a85579ba17f0df3 Mon Sep 17 00:00:00 2001 From: David Howells Date: Wed, 27 Mar 2024 17:58:39 +0000 Subject: netfs: Remove ->launder_folio() support Remove support for ->launder_folio() from netfslib and expect filesystems to use filemap_invalidate_inode() instead. netfs_launder_folio() can then be got rid of. 
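As a rough sketch of the replacement path, a filesystem that used to implement ->launder_folio() can instead flush and drop a byte range with the helper added by the previous patch (the inode, start and end values here are illustrative only):

	/* Write back any dirty folios in [start, end], then discard them;
	 * the invalidate lock blocks new folios for the duration. */
	error = filemap_invalidate_inode(inode, true, start, end);
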
Signed-off-by: David Howells Reviewed-by: Jeff Layton cc: Eric Van Hensbergen cc: Latchesar Ionkov cc: Dominique Martinet cc: Christian Schoenebeck cc: David Howells cc: Marc Dionne cc: Steve French cc: Matthew Wilcox cc: linux-mm@kvack.org cc: linux-fsdevel@vger.kernel.org cc: netfs@lists.linux.dev cc: v9fs@lists.linux.dev cc: linux-afs@lists.infradead.org cc: ceph-devel@vger.kernel.org cc: linux-cifs@vger.kernel.org cc: devel@lists.orangefs.org --- fs/netfs/buffered_write.c | 74 -------------------------------------------- fs/netfs/main.c | 1 - include/linux/netfs.h | 2 -- include/trace/events/netfs.h | 3 -- 4 files changed, 80 deletions(-) (limited to 'include/linux') diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c index 57c6eab01261..d8f66ce94575 100644 --- a/fs/netfs/buffered_write.c +++ b/fs/netfs/buffered_write.c @@ -1200,77 +1200,3 @@ out: return ret; } EXPORT_SYMBOL(netfs_writepages); - -/* - * Deal with the disposition of a laundered folio. - */ -static void netfs_cleanup_launder_folio(struct netfs_io_request *wreq) -{ - if (wreq->error) { - pr_notice("R=%08x Laundering error %d\n", wreq->debug_id, wreq->error); - mapping_set_error(wreq->mapping, wreq->error); - } -} - -/** - * netfs_launder_folio - Clean up a dirty folio that's being invalidated - * @folio: The folio to clean - * - * This is called to write back a folio that's being invalidated when an inode - * is getting torn down. Ideally, writepages would be used instead. - */ -int netfs_launder_folio(struct folio *folio) -{ - struct netfs_io_request *wreq; - struct address_space *mapping = folio->mapping; - struct netfs_folio *finfo = netfs_folio_info(folio); - struct netfs_group *group = netfs_folio_group(folio); - struct bio_vec bvec; - unsigned long long i_size = i_size_read(mapping->host); - unsigned long long start = folio_pos(folio); - size_t offset = 0, len; - int ret = 0; - - if (finfo) { - offset = finfo->dirty_offset; - start += offset; - len = finfo->dirty_len; - } else { - len = folio_size(folio); - } - len = min_t(unsigned long long, len, i_size - start); - - wreq = netfs_alloc_request(mapping, NULL, start, len, NETFS_LAUNDER_WRITE); - if (IS_ERR(wreq)) { - ret = PTR_ERR(wreq); - goto out; - } - - if (!folio_clear_dirty_for_io(folio)) - goto out_put; - - trace_netfs_folio(folio, netfs_folio_trace_launder); - - _debug("launder %llx-%llx", start, start + len - 1); - - /* Speculatively write to the cache. We have to fix this up later if - * the store fails. 
- */ - wreq->cleanup = netfs_cleanup_launder_folio; - - bvec_set_folio(&bvec, folio, len, offset); - iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len); - if (group != NETFS_FOLIO_COPY_TO_CACHE) - __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags); - ret = netfs_begin_write(wreq, true, netfs_write_trace_launder); - -out_put: - folio_detach_private(folio); - netfs_put_group(group); - kfree(finfo); - netfs_put_request(wreq, false, netfs_rreq_trace_put_return); -out: - _leave(" = %d", ret); - return ret; -} -EXPORT_SYMBOL(netfs_launder_folio); diff --git a/fs/netfs/main.c b/fs/netfs/main.c index c5a73c9ed126..844efbb2e7a2 100644 --- a/fs/netfs/main.c +++ b/fs/netfs/main.c @@ -34,7 +34,6 @@ static const char *netfs_origins[nr__netfs_io_origin] = { [NETFS_COPY_TO_CACHE] = "CC", [NETFS_WRITEBACK] = "WB", [NETFS_WRITETHROUGH] = "WT", - [NETFS_LAUNDER_WRITE] = "LW", [NETFS_UNBUFFERED_WRITE] = "UW", [NETFS_DIO_READ] = "DR", [NETFS_DIO_WRITE] = "DW", diff --git a/include/linux/netfs.h b/include/linux/netfs.h index ddafc6ebff42..3af589dabd7f 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -172,7 +172,6 @@ enum netfs_io_origin { NETFS_COPY_TO_CACHE, /* This write is to copy a read to the cache */ NETFS_WRITEBACK, /* This write was triggered by writepages */ NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */ - NETFS_LAUNDER_WRITE, /* This is triggered by ->launder_folio() */ NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */ NETFS_DIO_READ, /* This is a direct I/O read */ NETFS_DIO_WRITE, /* This is a direct I/O write */ @@ -352,7 +351,6 @@ int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc); void netfs_clear_inode_writeback(struct inode *inode, const void *aux); void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length); bool netfs_release_folio(struct folio *folio, gfp_t gfp); -int netfs_launder_folio(struct folio *folio); /* VMA operations API. */ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group); diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index e03fafb0c1e3..30769103638f 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -26,7 +26,6 @@ #define netfs_write_traces \ EM(netfs_write_trace_copy_to_cache, "COPY2CACH") \ EM(netfs_write_trace_dio_write, "DIO-WRITE") \ - EM(netfs_write_trace_launder, "LAUNDER ") \ EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \ EM(netfs_write_trace_writeback, "WRITEBACK") \ E_(netfs_write_trace_writethrough, "WRITETHRU") @@ -38,7 +37,6 @@ EM(NETFS_COPY_TO_CACHE, "CC") \ EM(NETFS_WRITEBACK, "WB") \ EM(NETFS_WRITETHROUGH, "WT") \ - EM(NETFS_LAUNDER_WRITE, "LW") \ EM(NETFS_UNBUFFERED_WRITE, "UW") \ EM(NETFS_DIO_READ, "DR") \ E_(NETFS_DIO_WRITE, "DW") @@ -135,7 +133,6 @@ EM(netfs_folio_trace_end_copy, "end-copy") \ EM(netfs_folio_trace_filled_gaps, "filled-gaps") \ EM(netfs_folio_trace_kill, "kill") \ - EM(netfs_folio_trace_launder, "launder") \ EM(netfs_folio_trace_mkwrite, "mkwrite") \ EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \ EM(netfs_folio_trace_read_gaps, "read-gaps") \ -- cgit From d9f85a04fb0eee0171f451fb4c4875b8a00eeaec Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 15 Mar 2024 14:37:18 +0000 Subject: netfs: Use mempools for allocating requests and subrequests Use mempools for allocating requests and subrequests in an effort to make sure that allocation always succeeds so that when performing writeback we can always make progress. 
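In sketch form, the allocation path now retries against the mempool instead of failing the I/O (the loop below condenses netfs_alloc_request(); 'mempool' is either the filesystem-supplied pool or the default netfs_request_pool):

	for (;;) {
		rreq = mempool_alloc(mempool, GFP_KERNEL);
		if (rreq)
			break;
		msleep(10);	/* brief back-off until an object is returned */
	}
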
Signed-off-by: David Howells Reviewed-by: Jeff Layton cc: netfs@lists.linux.dev cc: linux-fsdevel@vger.kernel.org cc: linux-mm@kvack.org --- fs/netfs/internal.h | 2 ++ fs/netfs/main.c | 51 ++++++++++++++++++++++++++++++++++++++------ fs/netfs/objects.c | 59 ++++++++++++++++++++++++++++++++++----------------- include/linux/netfs.h | 5 +++-- 4 files changed, 89 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h index 156ab138e224..c67da478cd2b 100644 --- a/fs/netfs/internal.h +++ b/fs/netfs/internal.h @@ -37,6 +37,8 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync); extern unsigned int netfs_debug; extern struct list_head netfs_io_requests; extern spinlock_t netfs_proc_lock; +extern mempool_t netfs_request_pool; +extern mempool_t netfs_subrequest_pool; #ifdef CONFIG_PROC_FS static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq) diff --git a/fs/netfs/main.c b/fs/netfs/main.c index 844efbb2e7a2..4805b9377364 100644 --- a/fs/netfs/main.c +++ b/fs/netfs/main.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include "internal.h" @@ -23,6 +24,11 @@ unsigned netfs_debug; module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask"); +static struct kmem_cache *netfs_request_slab; +static struct kmem_cache *netfs_subrequest_slab; +mempool_t netfs_request_pool; +mempool_t netfs_subrequest_pool; + #ifdef CONFIG_PROC_FS LIST_HEAD(netfs_io_requests); DEFINE_SPINLOCK(netfs_proc_lock); @@ -98,25 +104,54 @@ static int __init netfs_init(void) { int ret = -ENOMEM; + netfs_request_slab = kmem_cache_create("netfs_request", + sizeof(struct netfs_io_request), 0, + SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, + NULL); + if (!netfs_request_slab) + goto error_req; + + if (mempool_init_slab_pool(&netfs_request_pool, 100, netfs_request_slab) < 0) + goto error_reqpool; + + netfs_subrequest_slab = kmem_cache_create("netfs_subrequest", + sizeof(struct netfs_io_subrequest), 0, + SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, + NULL); + if (!netfs_subrequest_slab) + goto error_subreq; + + if (mempool_init_slab_pool(&netfs_subrequest_pool, 100, netfs_subrequest_slab) < 0) + goto error_subreqpool; + if (!proc_mkdir("fs/netfs", NULL)) - goto error; + goto error_proc; if (!proc_create_seq("fs/netfs/requests", S_IFREG | 0444, NULL, &netfs_requests_seq_ops)) - goto error_proc; + goto error_procfile; #ifdef CONFIG_FSCACHE_STATS if (!proc_create_single("fs/netfs/stats", S_IFREG | 0444, NULL, netfs_stats_show)) - goto error_proc; + goto error_procfile; #endif ret = fscache_init(); if (ret < 0) - goto error_proc; + goto error_fscache; return 0; -error_proc: +error_fscache: +error_procfile: remove_proc_entry("fs/netfs", NULL); -error: +error_proc: + mempool_exit(&netfs_subrequest_pool); +error_subreqpool: + kmem_cache_destroy(netfs_subrequest_slab); +error_subreq: + mempool_exit(&netfs_request_pool); +error_reqpool: + kmem_cache_destroy(netfs_request_slab); +error_req: return ret; } fs_initcall(netfs_init); @@ -125,5 +160,9 @@ static void __exit netfs_exit(void) { fscache_exit(); remove_proc_entry("fs/netfs", NULL); + mempool_exit(&netfs_subrequest_pool); + kmem_cache_destroy(netfs_subrequest_slab); + mempool_exit(&netfs_request_pool); + kmem_cache_destroy(netfs_request_slab); } module_exit(netfs_exit); diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c index 8acc03a64059..1a4e2ce735ce 100644 --- a/fs/netfs/objects.c +++ b/fs/netfs/objects.c @@ -6,6 +6,8 @@ */ #include +#include 
+#include #include "internal.h" /* @@ -20,17 +22,22 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping, struct inode *inode = file ? file_inode(file) : mapping->host; struct netfs_inode *ctx = netfs_inode(inode); struct netfs_io_request *rreq; + mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool; + struct kmem_cache *cache = mempool->pool_data; bool is_unbuffered = (origin == NETFS_UNBUFFERED_WRITE || origin == NETFS_DIO_READ || origin == NETFS_DIO_WRITE); bool cached = !is_unbuffered && netfs_is_cache_enabled(ctx); int ret; - rreq = kzalloc(ctx->ops->io_request_size ?: sizeof(struct netfs_io_request), - GFP_KERNEL); - if (!rreq) - return ERR_PTR(-ENOMEM); + for (;;) { + rreq = mempool_alloc(mempool, GFP_KERNEL); + if (rreq) + break; + msleep(10); + } + memset(rreq, 0, kmem_cache_size(cache)); rreq->start = start; rreq->len = len; rreq->upper_len = len; @@ -56,7 +63,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping, if (rreq->netfs_ops->init_request) { ret = rreq->netfs_ops->init_request(rreq, file); if (ret < 0) { - kfree(rreq); + mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool); return ERR_PTR(ret); } } @@ -88,6 +95,14 @@ void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async) } } +static void netfs_free_request_rcu(struct rcu_head *rcu) +{ + struct netfs_io_request *rreq = container_of(rcu, struct netfs_io_request, rcu); + + mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool); + netfs_stat_d(&netfs_n_rh_rreq); +} + static void netfs_free_request(struct work_struct *work) { struct netfs_io_request *rreq = @@ -110,8 +125,7 @@ static void netfs_free_request(struct work_struct *work) } kvfree(rreq->direct_bv); } - kfree_rcu(rreq, rcu); - netfs_stat_d(&netfs_n_rh_rreq); + call_rcu(&rreq->rcu, netfs_free_request_rcu); } void netfs_put_request(struct netfs_io_request *rreq, bool was_async, @@ -143,20 +157,25 @@ void netfs_put_request(struct netfs_io_request *rreq, bool was_async, struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq) { struct netfs_io_subrequest *subreq; - - subreq = kzalloc(rreq->netfs_ops->io_subrequest_size ?: - sizeof(struct netfs_io_subrequest), - GFP_KERNEL); - if (subreq) { - INIT_WORK(&subreq->work, NULL); - INIT_LIST_HEAD(&subreq->rreq_link); - refcount_set(&subreq->ref, 2); - subreq->rreq = rreq; - subreq->debug_index = atomic_inc_return(&rreq->subreq_counter); - netfs_get_request(rreq, netfs_rreq_trace_get_subreq); - netfs_stat(&netfs_n_rh_sreq); + mempool_t *mempool = rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool; + struct kmem_cache *cache = mempool->pool_data; + + for (;;) { + subreq = mempool_alloc(rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool, + GFP_KERNEL); + if (subreq) + break; + msleep(10); } + memset(subreq, 0, kmem_cache_size(cache)); + INIT_WORK(&subreq->work, NULL); + INIT_LIST_HEAD(&subreq->rreq_link); + refcount_set(&subreq->ref, 2); + subreq->rreq = rreq; + subreq->debug_index = atomic_inc_return(&rreq->subreq_counter); + netfs_get_request(rreq, netfs_rreq_trace_get_subreq); + netfs_stat(&netfs_n_rh_sreq); return subreq; } @@ -178,7 +197,7 @@ static void netfs_free_subrequest(struct netfs_io_subrequest *subreq, trace_netfs_sreq(subreq, netfs_sreq_trace_free); if (rreq->netfs_ops->free_subrequest) rreq->netfs_ops->free_subrequest(subreq); - kfree(subreq); + mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool); 
netfs_stat_d(&netfs_n_rh_sreq); netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq); } diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 3af589dabd7f..0b6c2c2d3c23 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -20,6 +20,7 @@ #include enum netfs_sreq_ref_trace; +typedef struct mempool_s mempool_t; /** * folio_start_private_2 - Start an fscache write on a folio. [DEPRECATED] @@ -236,8 +237,8 @@ struct netfs_io_request { * Operations the network filesystem can/must provide to the helpers. */ struct netfs_request_ops { - unsigned int io_request_size; /* Alloc size for netfs_io_request struct */ - unsigned int io_subrequest_size; /* Alloc size for netfs_io_subrequest struct */ + mempool_t *request_pool; + mempool_t *subrequest_pool; int (*init_request)(struct netfs_io_request *rreq, struct file *file); void (*free_request)(struct netfs_io_request *rreq); void (*free_subrequest)(struct netfs_io_subrequest *rreq); -- cgit From 7ba167c4c73ed96eb002c98a9d7d49317dfb0191 Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 18 Mar 2024 16:57:31 +0000 Subject: netfs: Switch to using unsigned long long rather than loff_t Switch to using unsigned long long rather than loff_t in netfslib to avoid problems with the sign flipping in the maths when we're dealing with the byte at position 0x7fffffffffffffff. Signed-off-by: David Howells Reviewed-by: Jeff Layton cc: Ilya Dryomov cc: Xiubo Li cc: netfs@lists.linux.dev cc: ceph-devel@vger.kernel.org cc: linux-fsdevel@vger.kernel.org --- fs/cachefiles/io.c | 2 +- fs/ceph/addr.c | 2 +- fs/netfs/buffered_read.c | 4 +++- fs/netfs/buffered_write.c | 2 +- fs/netfs/io.c | 6 +++--- fs/netfs/main.c | 2 +- fs/netfs/output.c | 4 ++-- include/linux/netfs.h | 16 +++++++++------- include/trace/events/netfs.h | 6 +++--- 9 files changed, 24 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c index 1d685357e67f..5ba5c7814fe4 100644 --- a/fs/cachefiles/io.c +++ b/fs/cachefiles/io.c @@ -493,7 +493,7 @@ out_no_object: * boundary as appropriate. */ static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq, - loff_t i_size) + unsigned long long i_size) { return cachefiles_do_prepare_read(&subreq->rreq->cache_resources, subreq->start, &subreq->len, i_size, diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 74bfd10b1b1a..8c16bc5250ef 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -193,7 +193,7 @@ static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq) * block, but do not exceed the file size, unless the original * request already exceeds it. 
*/ - new_end = min(round_up(end, lo->stripe_unit), rreq->i_size); + new_end = umin(round_up(end, lo->stripe_unit), rreq->i_size); if (new_end > end && new_end <= rreq->start + max_len) rreq->len = new_end - rreq->start; diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c index 1622cce535a3..47603f08680e 100644 --- a/fs/netfs/buffered_read.c +++ b/fs/netfs/buffered_read.c @@ -130,7 +130,9 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq) } static void netfs_cache_expand_readahead(struct netfs_io_request *rreq, - loff_t *_start, size_t *_len, loff_t i_size) + unsigned long long *_start, + unsigned long long *_len, + unsigned long long i_size) { struct netfs_cache_resources *cres = &rreq->cache_resources; diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c index d8f66ce94575..eba49bfafe64 100644 --- a/fs/netfs/buffered_write.c +++ b/fs/netfs/buffered_write.c @@ -664,7 +664,7 @@ static void netfs_pages_written_back(struct netfs_io_request *wreq) last = (wreq->start + wreq->len - 1) / PAGE_SIZE; xas_for_each(&xas, folio, last) { WARN(!folio_test_writeback(folio), - "bad %zx @%llx page %lx %lx\n", + "bad %llx @%llx page %lx %lx\n", wreq->len, wreq->start, folio->index, last); if ((finfo = netfs_folio_info(folio))) { diff --git a/fs/netfs/io.c b/fs/netfs/io.c index 8de581ac0cfb..6cfecfcd02e1 100644 --- a/fs/netfs/io.c +++ b/fs/netfs/io.c @@ -476,7 +476,7 @@ netfs_rreq_prepare_read(struct netfs_io_request *rreq, set: if (subreq->len > rreq->len) - pr_warn("R=%08x[%u] SREQ>RREQ %zx > %zx\n", + pr_warn("R=%08x[%u] SREQ>RREQ %zx > %llx\n", rreq->debug_id, subreq->debug_index, subreq->len, rreq->len); @@ -513,7 +513,7 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq, subreq->start = rreq->start + rreq->submitted; subreq->len = io_iter->count; - _debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted); + _debug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted); list_add_tail(&subreq->rreq_link, &rreq->subrequests); /* Call out to the cache to find out what it can do with the remaining @@ -588,7 +588,7 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync) atomic_set(&rreq->nr_outstanding, 1); io_iter = rreq->io_iter; do { - _debug("submit %llx + %zx >= %llx", + _debug("submit %llx + %llx >= %llx", rreq->start, rreq->submitted, rreq->i_size); if (rreq->origin == NETFS_DIO_READ && rreq->start + rreq->submitted >= rreq->i_size) diff --git a/fs/netfs/main.c b/fs/netfs/main.c index 4805b9377364..5f0f438e5d21 100644 --- a/fs/netfs/main.c +++ b/fs/netfs/main.c @@ -62,7 +62,7 @@ static int netfs_requests_seq_show(struct seq_file *m, void *v) rreq = list_entry(v, struct netfs_io_request, proc_link); seq_printf(m, - "%08x %s %3d %2lx %4d %3d @%04llx %zx/%zx", + "%08x %s %3d %2lx %4d %3d @%04llx %llx/%llx", rreq->debug_id, netfs_origins[rreq->origin], refcount_read(&rreq->ref), diff --git a/fs/netfs/output.c b/fs/netfs/output.c index e586396d6b72..85374322f10f 100644 --- a/fs/netfs/output.c +++ b/fs/netfs/output.c @@ -439,7 +439,7 @@ static void netfs_submit_writethrough(struct netfs_io_request *wreq, bool final) */ int netfs_advance_writethrough(struct netfs_io_request *wreq, size_t copied, bool to_page_end) { - _enter("ic=%zu sb=%zu ws=%u cp=%zu tp=%u", + _enter("ic=%zu sb=%llu ws=%u cp=%zu tp=%u", wreq->iter.count, wreq->submitted, wreq->wsize, copied, to_page_end); wreq->iter.count += copied; @@ -457,7 +457,7 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct kiocb *iocb) { int ret = 
-EIOCBQUEUED; - _enter("ic=%zu sb=%zu ws=%u", + _enter("ic=%zu sb=%llu ws=%u", wreq->iter.count, wreq->submitted, wreq->wsize); if (wreq->submitted < wreq->io_iter.count) diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 0b6c2c2d3c23..88269681d4fc 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -149,7 +149,7 @@ struct netfs_io_subrequest { struct work_struct work; struct list_head rreq_link; /* Link in rreq->subrequests */ struct iov_iter io_iter; /* Iterator for this subrequest */ - loff_t start; /* Where to start the I/O */ + unsigned long long start; /* Where to start the I/O */ size_t len; /* Size of the I/O */ size_t transferred; /* Amount of data transferred */ refcount_t ref; @@ -205,15 +205,15 @@ struct netfs_io_request { atomic_t subreq_counter; /* Next subreq->debug_index */ atomic_t nr_outstanding; /* Number of ops in progress */ atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */ - size_t submitted; /* Amount submitted for I/O so far */ - size_t len; /* Length of the request */ size_t upper_len; /* Length can be extended to here */ + unsigned long long submitted; /* Amount submitted for I/O so far */ + unsigned long long len; /* Length of the request */ size_t transferred; /* Amount to be indicated as transferred */ short error; /* 0 or error that occurred */ enum netfs_io_origin origin; /* Origin of the request */ bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */ - loff_t i_size; /* Size of the file */ - loff_t start; /* Start position */ + unsigned long long i_size; /* Size of the file */ + unsigned long long start; /* Start position */ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */ refcount_t ref; unsigned long flags; @@ -294,13 +294,15 @@ struct netfs_cache_ops { /* Expand readahead request */ void (*expand_readahead)(struct netfs_cache_resources *cres, - loff_t *_start, size_t *_len, loff_t i_size); + unsigned long long *_start, + unsigned long long *_len, + unsigned long long i_size); /* Prepare a read operation, shortening it to a cached/uncached * boundary as appropriate. */ enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq, - loff_t i_size); + unsigned long long i_size); /* Prepare a write operation, working out what part of the write we can * actually do. diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index 30769103638f..7126d2ea459c 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -280,7 +280,7 @@ TRACE_EVENT(netfs_sreq, __entry->start = sreq->start; ), - TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx/%zx e=%d", + TP_printk("R=%08x[%x] %s %s f=%02x s=%llx %zx/%zx e=%d", __entry->rreq, __entry->index, __print_symbolic(__entry->source, netfs_sreq_sources), __print_symbolic(__entry->what, netfs_sreq_traces), @@ -320,7 +320,7 @@ TRACE_EVENT(netfs_failure, __entry->start = sreq ? 
sreq->start : 0;
 ),

- TP_printk("R=%08x[%d] %s f=%02x s=%llx %zx/%zx %s e=%d",
+ TP_printk("R=%08x[%x] %s f=%02x s=%llx %zx/%zx %s e=%d",
 __entry->rreq, __entry->index,
 __print_symbolic(__entry->source, netfs_sreq_sources),
 __entry->flags,
@@ -436,7 +436,7 @@ TRACE_EVENT(netfs_write,
 __field(unsigned int, cookie )
 __field(enum netfs_write_trace, what )
 __field(unsigned long long, start )
- __field(size_t, len )
+ __field(unsigned long long, len )
 ),

 TP_fast_assign(
-- cgit 

From 288ace2f57c9d06dd2e42bd80d03747d879a4068 Mon Sep 17 00:00:00 2001
From: David Howells 
Date: Mon, 18 Mar 2024 16:52:05 +0000
Subject: netfs: New writeback implementation

The current netfslib writeback implementation creates writeback requests of
contiguous folio data and then separately tiles subrequests over the space
twice, once for the server and once for the cache. This creates a few issues:

 (1) Every time there's a discontiguity or a change between writing to only
     one destination or writing to both, it must create a new request. This
     makes it harder to do vectored writes.

 (2) The folios don't have the writeback mark removed until the end of the
     request - and a request could be hundreds of megabytes.

 (3) In future, I want to support a larger cache granularity, which will
     require aggregation of some folios that contain unmodified data (which
     only need to go to the cache) and some which contain modifications
     (which need to be uploaded and stored to the cache) - but, currently,
     these are treated as discontiguous.

There's also a move to get everyone to use writeback_iter() to extract
writable folios from the pagecache. That said, currently writeback_iter()
has some issues that make it less than ideal:

 (1) there's no way to cancel the iteration, even if you find a "temporary"
     error that means the current folio and all subsequent folios are going
     to fail;

 (2) there's no way to filter the folios being written back - something that
     will impact Ceph with its ordered snap system;

 (3) and if you get a folio you can't immediately deal with (say you need to
     flush the preceding writes), you are left with a folio hanging in the
     locked state for the duration, when really we should unlock it and
     relock it later.

In this new implementation, I use writeback_iter() to pump folios,
progressively creating two parallel, but separate streams and cleaning up the
finished folios as the subrequests complete. Either or both streams can
contain gaps, and the subrequests in each stream can be of variable size,
don't need to align with each other and don't need to align with the folios.

Indeed, subrequests can cross folio boundaries, may cover several folios or a
folio may be spanned by multiple subrequests, e.g.:

         +---+---+-----+-----+---+----------+
 Folios: |   |   |     |     |   |          |
         +---+---+-----+-----+---+----------+

           +------+------+     +----+----+
 Upload:   |      |      |.....|    |    |
           +------+------+     +----+----+

         +------+------+------+------+------+
 Cache:  |      |      |      |      |      |
         +------+------+------+------+------+

The progressive subrequest construction permits the algorithm to be preparing
both the next upload to the server and the next write to the cache whilst the
previous ones are already in progress. Throttling can be applied to control
the rate of production of subrequests - and, in any case, we probably want to
write them to the server in ascending order, particularly if the file will be
extended.

Content crypto can also be prepared at the same time as the subrequests and
run asynchronously, with the prepped requests being stalled until the crypto
catches up with them.
This might also be useful for transport crypto, but that happens at a lower layer, so probably would be harder to pull off. The algorithm is split into three parts: (1) The issuer. This walks through the data, packaging it up, encrypting it and creating subrequests. The part of this that generates subrequests only deals with file positions and spans and so is usable for DIO/unbuffered writes as well as buffered writes. (2) The collector. This asynchronously collects completed subrequests, unlocks folios, frees crypto buffers and performs any retries. This runs in a work queue so that the issuer can return to the caller for writeback (so that the VM can have its kswapd thread back) or async writes. (3) The retryer. This pauses the issuer, waits for all outstanding subrequests to complete and then goes through the failed subrequests to reissue them. This may involve reprepping them (with cifs, the credits must be renegotiated, and a subrequest may need splitting), and doing RMW for content crypto if there's a conflicting change on the server. [!] Note that some of the functions are prefixed with "new_" to avoid clashes with existing functions. These will be renamed in a later patch that cuts over to the new algorithm. Signed-off-by: David Howells Reviewed-by: Jeff Layton cc: Eric Van Hensbergen cc: Latchesar Ionkov cc: Dominique Martinet cc: Christian Schoenebeck cc: Marc Dionne cc: v9fs@lists.linux.dev cc: linux-afs@lists.infradead.org cc: netfs@lists.linux.dev cc: linux-fsdevel@vger.kernel.org --- fs/netfs/Makefile | 4 +- fs/netfs/buffered_write.c | 4 - fs/netfs/internal.h | 27 ++ fs/netfs/objects.c | 17 + fs/netfs/write_collect.c | 803 +++++++++++++++++++++++++++++++++++++++++++ fs/netfs/write_issue.c | 683 ++++++++++++++++++++++++++++++++++++ include/linux/netfs.h | 68 +++- include/trace/events/netfs.h | 232 ++++++++++++- 8 files changed, 1829 insertions(+), 9 deletions(-) create mode 100644 fs/netfs/write_collect.c create mode 100644 fs/netfs/write_issue.c (limited to 'include/linux') diff --git a/fs/netfs/Makefile b/fs/netfs/Makefile index d4d1d799819e..1eb86e34b5a9 100644 --- a/fs/netfs/Makefile +++ b/fs/netfs/Makefile @@ -11,7 +11,9 @@ netfs-y := \ main.o \ misc.o \ objects.o \ - output.o + output.o \ + write_collect.o \ + write_issue.o netfs-$(CONFIG_NETFS_STATS) += stats.o diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c index eba49bfafe64..84ac95ee4b4d 100644 --- a/fs/netfs/buffered_write.c +++ b/fs/netfs/buffered_write.c @@ -74,16 +74,12 @@ static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx, if (file->f_mode & FMODE_READ) goto no_write_streaming; - if (test_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags)) - goto no_write_streaming; if (netfs_is_cache_enabled(ctx)) { /* We don't want to get a streaming write on a file that loses * caching service temporarily because the backing store got * culled. 
*/ - if (!test_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags)) - set_bit(NETFS_ICTX_NO_WRITE_STREAMING, &ctx->flags); goto no_write_streaming; } diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h index c67da478cd2b..dc11d1f67363 100644 --- a/fs/netfs/internal.h +++ b/fs/netfs/internal.h @@ -150,6 +150,33 @@ static inline void netfs_stat_d(atomic_t *stat) #define netfs_stat_d(x) do {} while(0) #endif +/* + * write_collect.c + */ +int netfs_folio_written_back(struct folio *folio); +void netfs_write_collection_worker(struct work_struct *work); +void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async); + +/* + * write_issue.c + */ +struct netfs_io_request *netfs_create_write_req(struct address_space *mapping, + struct file *file, + loff_t start, + enum netfs_io_origin origin); +void netfs_reissue_write(struct netfs_io_stream *stream, + struct netfs_io_subrequest *subreq); +int netfs_advance_write(struct netfs_io_request *wreq, + struct netfs_io_stream *stream, + loff_t start, size_t len, bool to_eof); +struct netfs_io_request *new_netfs_begin_writethrough(struct kiocb *iocb, size_t len); +int new_netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, + struct folio *folio, size_t copied, bool to_page_end, + struct folio **writethrough_cache); +int new_netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, + struct folio *writethrough_cache); +int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len); + /* * Miscellaneous functions. */ diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c index 1a4e2ce735ce..c90d482b1650 100644 --- a/fs/netfs/objects.c +++ b/fs/netfs/objects.c @@ -47,6 +47,10 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping, rreq->inode = inode; rreq->i_size = i_size_read(inode); rreq->debug_id = atomic_inc_return(&debug_ids); + rreq->wsize = INT_MAX; + spin_lock_init(&rreq->lock); + INIT_LIST_HEAD(&rreq->io_streams[0].subrequests); + INIT_LIST_HEAD(&rreq->io_streams[1].subrequests); INIT_LIST_HEAD(&rreq->subrequests); INIT_WORK(&rreq->work, NULL); refcount_set(&rreq->ref, 1); @@ -85,6 +89,8 @@ void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async) { struct netfs_io_subrequest *subreq; + struct netfs_io_stream *stream; + int s; while (!list_empty(&rreq->subrequests)) { subreq = list_first_entry(&rreq->subrequests, @@ -93,6 +99,17 @@ void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async) netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_clear); } + + for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) { + stream = &rreq->io_streams[s]; + while (!list_empty(&stream->subrequests)) { + subreq = list_first_entry(&stream->subrequests, + struct netfs_io_subrequest, rreq_link); + list_del(&subreq->rreq_link); + netfs_put_subrequest(subreq, was_async, + netfs_sreq_trace_put_clear); + } + } } static void netfs_free_request_rcu(struct rcu_head *rcu) diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c new file mode 100644 index 000000000000..b8c1d3ca724a --- /dev/null +++ b/fs/netfs/write_collect.c @@ -0,0 +1,803 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Network filesystem write subrequest result collection, assessment + * and retrying. + * + * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + */ + +#include +#include +#include +#include +#include +#include "internal.h" + +/* Notes made in the collector */ +#define HIT_PENDING 0x01 /* A front op was still pending */ +#define SOME_EMPTY 0x02 /* One of more streams are empty */ +#define ALL_EMPTY 0x04 /* All streams are empty */ +#define MAYBE_DISCONTIG 0x08 /* A front op may be discontiguous (rounded to PAGE_SIZE) */ +#define NEED_REASSESS 0x10 /* Need to loop round and reassess */ +#define REASSESS_DISCONTIG 0x20 /* Reassess discontiguity if contiguity advances */ +#define MADE_PROGRESS 0x40 /* Made progress cleaning up a stream or the folio set */ +#define BUFFERED 0x80 /* The pagecache needs cleaning up */ +#define NEED_RETRY 0x100 /* A front op requests retrying */ +#define SAW_FAILURE 0x200 /* One stream or hit a permanent failure */ + +/* + * Successful completion of write of a folio to the server and/or cache. Note + * that we are not allowed to lock the folio here on pain of deadlocking with + * truncate. + */ +int netfs_folio_written_back(struct folio *folio) +{ + enum netfs_folio_trace why = netfs_folio_trace_clear; + struct netfs_folio *finfo; + struct netfs_group *group = NULL; + int gcount = 0; + + if ((finfo = netfs_folio_info(folio))) { + /* Streaming writes cannot be redirtied whilst under writeback, + * so discard the streaming record. + */ + folio_detach_private(folio); + group = finfo->netfs_group; + gcount++; + kfree(finfo); + why = netfs_folio_trace_clear_s; + goto end_wb; + } + + if ((group = netfs_folio_group(folio))) { + if (group == NETFS_FOLIO_COPY_TO_CACHE) { + why = netfs_folio_trace_clear_cc; + folio_detach_private(folio); + goto end_wb; + } + + /* Need to detach the group pointer if the page didn't get + * redirtied. If it has been redirtied, then it must be within + * the same group. + */ + why = netfs_folio_trace_redirtied; + if (!folio_test_dirty(folio)) { + folio_detach_private(folio); + gcount++; + why = netfs_folio_trace_clear_g; + } + } + +end_wb: + trace_netfs_folio(folio, why); + folio_end_writeback(folio); + return gcount; +} + +/* + * Get hold of a folio we have under writeback. We don't want to get the + * refcount on it. + */ +static struct folio *netfs_writeback_lookup_folio(struct netfs_io_request *wreq, loff_t pos) +{ + XA_STATE(xas, &wreq->mapping->i_pages, pos / PAGE_SIZE); + struct folio *folio; + + rcu_read_lock(); + + for (;;) { + xas_reset(&xas); + folio = xas_load(&xas); + if (xas_retry(&xas, folio)) + continue; + + if (!folio || xa_is_value(folio)) + kdebug("R=%08x: folio %lx (%llx) not present", + wreq->debug_id, xas.xa_index, pos / PAGE_SIZE); + BUG_ON(!folio || xa_is_value(folio)); + + if (folio == xas_reload(&xas)) + break; + } + + rcu_read_unlock(); + + if (WARN_ONCE(!folio_test_writeback(folio), + "R=%08x: folio %lx is not under writeback\n", + wreq->debug_id, folio->index)) { + trace_netfs_folio(folio, netfs_folio_trace_not_under_wback); + } + return folio; +} + +/* + * Unlock any folios we've finished with. + */ +static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq, + unsigned long long collected_to, + unsigned int *notes) +{ + for (;;) { + struct folio *folio; + struct netfs_folio *finfo; + unsigned long long fpos, fend; + size_t fsize, flen; + + folio = netfs_writeback_lookup_folio(wreq, wreq->cleaned_to); + + fpos = folio_pos(folio); + fsize = folio_size(folio); + finfo = netfs_folio_info(folio); + flen = finfo ? 
finfo->dirty_offset + finfo->dirty_len : fsize; + + fend = min_t(unsigned long long, fpos + flen, wreq->i_size); + + trace_netfs_collect_folio(wreq, folio, fend, collected_to); + + if (fpos + fsize > wreq->contiguity) { + trace_netfs_collect_contig(wreq, fpos + fsize, + netfs_contig_trace_unlock); + wreq->contiguity = fpos + fsize; + } + + /* Unlock any folio we've transferred all of. */ + if (collected_to < fend) + break; + + wreq->nr_group_rel += netfs_folio_written_back(folio); + wreq->cleaned_to = fpos + fsize; + *notes |= MADE_PROGRESS; + + if (fpos + fsize >= collected_to) + break; + } +} + +/* + * Perform retries on the streams that need it. + */ +static void netfs_retry_write_stream(struct netfs_io_request *wreq, + struct netfs_io_stream *stream) +{ + struct list_head *next; + + _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr); + + if (unlikely(stream->failed)) + return; + + /* If there's no renegotiation to do, just resend each failed subreq. */ + if (!stream->prepare_write) { + struct netfs_io_subrequest *subreq; + + list_for_each_entry(subreq, &stream->subrequests, rreq_link) { + if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) + break; + if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) { + __set_bit(NETFS_SREQ_RETRYING, &subreq->flags); + netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); + netfs_reissue_write(stream, subreq); + } + } + return; + } + + if (list_empty(&stream->subrequests)) + return; + next = stream->subrequests.next; + + do { + struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp; + unsigned long long start, len; + size_t part; + bool boundary = false; + + /* Go through the stream and find the next span of contiguous + * data that we then rejig (cifs, for example, needs the wsize + * renegotiating) and reissue. + */ + from = list_entry(next, struct netfs_io_subrequest, rreq_link); + to = from; + start = from->start + from->transferred; + len = from->len - from->transferred; + + if (test_bit(NETFS_SREQ_FAILED, &from->flags) || + !test_bit(NETFS_SREQ_NEED_RETRY, &from->flags)) + return; + + list_for_each_continue(next, &stream->subrequests) { + subreq = list_entry(next, struct netfs_io_subrequest, rreq_link); + if (subreq->start + subreq->transferred != start + len || + test_bit(NETFS_SREQ_BOUNDARY, &subreq->flags) || + !test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) + break; + to = subreq; + len += to->len; + } + + /* Work through the sublist. */ + subreq = from; + list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) { + if (!len) + break; + /* Renegotiate max_len (wsize) */ + trace_netfs_sreq(subreq, netfs_sreq_trace_retry); + __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); + __set_bit(NETFS_SREQ_RETRYING, &subreq->flags); + stream->prepare_write(subreq); + + part = min(len, subreq->max_len); + subreq->len = part; + subreq->start = start; + subreq->transferred = 0; + len -= part; + start += part; + if (len && subreq == to && + __test_and_clear_bit(NETFS_SREQ_BOUNDARY, &to->flags)) + boundary = true; + + netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); + netfs_reissue_write(stream, subreq); + if (subreq == to) + break; + } + + /* If we managed to use fewer subreqs, we can discard the + * excess; if we used the same number, then we're done. 
+ */ + if (!len) { + if (subreq == to) + continue; + list_for_each_entry_safe_from(subreq, tmp, + &stream->subrequests, rreq_link) { + trace_netfs_sreq(subreq, netfs_sreq_trace_discard); + list_del(&subreq->rreq_link); + netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_done); + if (subreq == to) + break; + } + continue; + } + + /* We ran out of subrequests, so we need to allocate some more + * and insert them after. + */ + do { + subreq = netfs_alloc_subrequest(wreq); + subreq->source = to->source; + subreq->start = start; + subreq->max_len = len; + subreq->max_nr_segs = INT_MAX; + subreq->debug_index = atomic_inc_return(&wreq->subreq_counter); + subreq->stream_nr = to->stream_nr; + __set_bit(NETFS_SREQ_RETRYING, &subreq->flags); + + trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index, + refcount_read(&subreq->ref), + netfs_sreq_trace_new); + netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit); + + list_add(&subreq->rreq_link, &to->rreq_link); + to = list_next_entry(to, rreq_link); + trace_netfs_sreq(subreq, netfs_sreq_trace_retry); + + switch (stream->source) { + case NETFS_UPLOAD_TO_SERVER: + netfs_stat(&netfs_n_wh_upload); + subreq->max_len = min(len, wreq->wsize); + break; + case NETFS_WRITE_TO_CACHE: + netfs_stat(&netfs_n_wh_write); + break; + default: + WARN_ON_ONCE(1); + } + + stream->prepare_write(subreq); + + part = min(len, subreq->max_len); + subreq->len = subreq->transferred + part; + len -= part; + start += part; + if (!len && boundary) { + __set_bit(NETFS_SREQ_BOUNDARY, &to->flags); + boundary = false; + } + + netfs_reissue_write(stream, subreq); + if (!len) + break; + + } while (len); + + } while (!list_is_head(next, &stream->subrequests)); +} + +/* + * Perform retries on the streams that need it. If we're doing content + * encryption and the server copy changed due to a third-party write, we may + * need to do an RMW cycle and also rewrite the data to the cache. + */ +static void netfs_retry_writes(struct netfs_io_request *wreq) +{ + struct netfs_io_subrequest *subreq; + struct netfs_io_stream *stream; + int s; + + /* Wait for all outstanding I/O to quiesce before performing retries as + * we may need to renegotiate the I/O sizes. + */ + for (s = 0; s < NR_IO_STREAMS; s++) { + stream = &wreq->io_streams[s]; + if (!stream->active) + continue; + + list_for_each_entry(subreq, &stream->subrequests, rreq_link) { + wait_on_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS, + TASK_UNINTERRUPTIBLE); + } + } + + // TODO: Enc: Fetch changed partial pages + // TODO: Enc: Reencrypt content if needed. + // TODO: Enc: Wind back transferred point. + // TODO: Enc: Mark cache pages for retry. + + for (s = 0; s < NR_IO_STREAMS; s++) { + stream = &wreq->io_streams[s]; + if (stream->need_retry) { + stream->need_retry = false; + netfs_retry_write_stream(wreq, stream); + } + } +} + +/* + * Collect and assess the results of various write subrequests. We may need to + * retry some of the results - or even do an RMW cycle for content crypto. + * + * Note that we have a number of parallel, overlapping lists of subrequests, + * one to the server and one to the local cache for example, which may not be + * the same size or starting position and may not even correspond in boundary + * alignment. 
+ */ +static void netfs_collect_write_results(struct netfs_io_request *wreq) +{ + struct netfs_io_subrequest *front, *remove; + struct netfs_io_stream *stream; + unsigned long long collected_to; + unsigned int notes; + int s; + + _enter("%llx-%llx", wreq->start, wreq->start + wreq->len); + trace_netfs_collect(wreq); + trace_netfs_rreq(wreq, netfs_rreq_trace_collect); + +reassess_streams: + smp_rmb(); + collected_to = ULLONG_MAX; + if (wreq->origin == NETFS_WRITEBACK) + notes = ALL_EMPTY | BUFFERED | MAYBE_DISCONTIG; + else if (wreq->origin == NETFS_WRITETHROUGH) + notes = ALL_EMPTY | BUFFERED; + else + notes = ALL_EMPTY; + + /* Remove completed subrequests from the front of the streams and + * advance the completion point on each stream. We stop when we hit + * something that's in progress. The issuer thread may be adding stuff + * to the tail whilst we're doing this. + * + * We must not, however, merge in discontiguities that span whole + * folios that aren't under writeback. This is made more complicated + * by the folios in the gap being of unpredictable sizes - if they even + * exist - but we don't want to look them up. + */ + for (s = 0; s < NR_IO_STREAMS; s++) { + loff_t rstart, rend; + + stream = &wreq->io_streams[s]; + /* Read active flag before list pointers */ + if (!smp_load_acquire(&stream->active)) + continue; + + front = stream->front; + while (front) { + trace_netfs_collect_sreq(wreq, front); + //_debug("sreq [%x] %llx %zx/%zx", + // front->debug_index, front->start, front->transferred, front->len); + + /* Stall if there may be a discontinuity. */ + rstart = round_down(front->start, PAGE_SIZE); + if (rstart > wreq->contiguity) { + if (wreq->contiguity > stream->collected_to) { + trace_netfs_collect_gap(wreq, stream, + wreq->contiguity, 'D'); + stream->collected_to = wreq->contiguity; + } + notes |= REASSESS_DISCONTIG; + break; + } + rend = round_up(front->start + front->len, PAGE_SIZE); + if (rend > wreq->contiguity) { + trace_netfs_collect_contig(wreq, rend, + netfs_contig_trace_collect); + wreq->contiguity = rend; + if (notes & REASSESS_DISCONTIG) + notes |= NEED_REASSESS; + } + notes &= ~MAYBE_DISCONTIG; + + /* Stall if the front is still undergoing I/O. */ + if (test_bit(NETFS_SREQ_IN_PROGRESS, &front->flags)) { + notes |= HIT_PENDING; + break; + } + smp_rmb(); /* Read counters after I-P flag. */ + + if (stream->failed) { + stream->collected_to = front->start + front->len; + notes |= MADE_PROGRESS | SAW_FAILURE; + goto cancel; + } + if (front->start + front->transferred > stream->collected_to) { + stream->collected_to = front->start + front->transferred; + stream->transferred = stream->collected_to - wreq->start; + notes |= MADE_PROGRESS; + } + if (test_bit(NETFS_SREQ_FAILED, &front->flags)) { + stream->failed = true; + stream->error = front->error; + if (stream->source == NETFS_UPLOAD_TO_SERVER) + mapping_set_error(wreq->mapping, front->error); + notes |= NEED_REASSESS | SAW_FAILURE; + break; + } + if (front->transferred < front->len) { + stream->need_retry = true; + notes |= NEED_RETRY | MADE_PROGRESS; + break; + } + + cancel: + /* Remove if completely consumed. 
*/ + spin_lock(&wreq->lock); + + remove = front; + list_del_init(&front->rreq_link); + front = list_first_entry_or_null(&stream->subrequests, + struct netfs_io_subrequest, rreq_link); + stream->front = front; + if (!front) { + unsigned long long jump_to = atomic64_read(&wreq->issued_to); + + if (stream->collected_to < jump_to) { + trace_netfs_collect_gap(wreq, stream, jump_to, 'A'); + stream->collected_to = jump_to; + } + } + + spin_unlock(&wreq->lock); + netfs_put_subrequest(remove, false, + notes & SAW_FAILURE ? + netfs_sreq_trace_put_cancel : + netfs_sreq_trace_put_done); + } + + if (front) + notes &= ~ALL_EMPTY; + else + notes |= SOME_EMPTY; + + if (stream->collected_to < collected_to) + collected_to = stream->collected_to; + } + + if (collected_to != ULLONG_MAX && collected_to > wreq->collected_to) + wreq->collected_to = collected_to; + + /* If we have an empty stream, we need to jump it forward over any gap + * otherwise the collection point will never advance. + * + * Note that the issuer always adds to the stream with the lowest + * so-far submitted start, so if we see two consecutive subreqs in one + * stream with nothing between then in another stream, then the second + * stream has a gap that can be jumped. + */ + if (notes & SOME_EMPTY) { + unsigned long long jump_to = wreq->start + wreq->len; + + for (s = 0; s < NR_IO_STREAMS; s++) { + stream = &wreq->io_streams[s]; + if (stream->active && + stream->front && + stream->front->start < jump_to) + jump_to = stream->front->start; + } + + for (s = 0; s < NR_IO_STREAMS; s++) { + stream = &wreq->io_streams[s]; + if (stream->active && + !stream->front && + stream->collected_to < jump_to) { + trace_netfs_collect_gap(wreq, stream, jump_to, 'B'); + stream->collected_to = jump_to; + } + } + } + + for (s = 0; s < NR_IO_STREAMS; s++) { + stream = &wreq->io_streams[s]; + if (stream->active) + trace_netfs_collect_stream(wreq, stream); + } + + trace_netfs_collect_state(wreq, wreq->collected_to, notes); + + /* Unlock any folios that we have now finished with. */ + if (notes & BUFFERED) { + unsigned long long clean_to = min(wreq->collected_to, wreq->contiguity); + + if (wreq->cleaned_to < clean_to) + netfs_writeback_unlock_folios(wreq, clean_to, ¬es); + } else { + wreq->cleaned_to = wreq->collected_to; + } + + // TODO: Discard encryption buffers + + /* If all streams are discontiguous with the last folio we cleared, we + * may need to skip a set of folios. 
+ */ + if ((notes & (MAYBE_DISCONTIG | ALL_EMPTY)) == MAYBE_DISCONTIG) { + unsigned long long jump_to = ULLONG_MAX; + + for (s = 0; s < NR_IO_STREAMS; s++) { + stream = &wreq->io_streams[s]; + if (stream->active && stream->front && + stream->front->start < jump_to) + jump_to = stream->front->start; + } + + trace_netfs_collect_contig(wreq, jump_to, netfs_contig_trace_jump); + wreq->contiguity = jump_to; + wreq->cleaned_to = jump_to; + wreq->collected_to = jump_to; + for (s = 0; s < NR_IO_STREAMS; s++) { + stream = &wreq->io_streams[s]; + if (stream->collected_to < jump_to) + stream->collected_to = jump_to; + } + //cond_resched(); + notes |= MADE_PROGRESS; + goto reassess_streams; + } + + if (notes & NEED_RETRY) + goto need_retry; + if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) { + trace_netfs_rreq(wreq, netfs_rreq_trace_unpause); + clear_bit_unlock(NETFS_RREQ_PAUSE, &wreq->flags); + wake_up_bit(&wreq->flags, NETFS_RREQ_PAUSE); + } + + if (notes & NEED_REASSESS) { + //cond_resched(); + goto reassess_streams; + } + if (notes & MADE_PROGRESS) { + //cond_resched(); + goto reassess_streams; + } + +out: + netfs_put_group_many(wreq->group, wreq->nr_group_rel); + wreq->nr_group_rel = 0; + _leave(" = %x", notes); + return; + +need_retry: + /* Okay... We're going to have to retry one or both streams. Note + * that any partially completed op will have had any wholly transferred + * folios removed from it. + */ + _debug("retry"); + netfs_retry_writes(wreq); + goto out; +} + +/* + * Perform the collection of subrequests, folios and encryption buffers. + */ +void netfs_write_collection_worker(struct work_struct *work) +{ + struct netfs_io_request *wreq = container_of(work, struct netfs_io_request, work); + struct netfs_inode *ictx = netfs_inode(wreq->inode); + size_t transferred; + int s; + + _enter("R=%x", wreq->debug_id); + + netfs_see_request(wreq, netfs_rreq_trace_see_work); + if (!test_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags)) { + netfs_put_request(wreq, false, netfs_rreq_trace_put_work); + return; + } + + netfs_collect_write_results(wreq); + + /* We're done when the app thread has finished posting subreqs and all + * the queues in all the streams are empty. + */ + if (!test_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags)) { + netfs_put_request(wreq, false, netfs_rreq_trace_put_work); + return; + } + smp_rmb(); /* Read ALL_QUEUED before lists. */ + + transferred = LONG_MAX; + for (s = 0; s < NR_IO_STREAMS; s++) { + struct netfs_io_stream *stream = &wreq->io_streams[s]; + if (!stream->active) + continue; + if (!list_empty(&stream->subrequests)) { + netfs_put_request(wreq, false, netfs_rreq_trace_put_work); + return; + } + if (stream->transferred < transferred) + transferred = stream->transferred; + } + + /* Okay, declare that all I/O is complete. */ + wreq->transferred = transferred; + trace_netfs_rreq(wreq, netfs_rreq_trace_write_done); + + if (wreq->io_streams[1].active && + wreq->io_streams[1].failed) { + /* Cache write failure doesn't prevent writeback completion + * unless we're in disconnected mode. + */ + ictx->ops->invalidate_cache(wreq); + } + + if (wreq->cleanup) + wreq->cleanup(wreq); + + if (wreq->origin == NETFS_DIO_WRITE && + wreq->mapping->nrpages) { + /* mmap may have got underfoot and we may now have folios + * locally covering the region we just wrote. Attempt to + * discard the folios, but leave in place any modified locally. + * ->write_iter() is prevented from interfering by the DIO + * counter. 
+ */ + pgoff_t first = wreq->start >> PAGE_SHIFT; + pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT; + invalidate_inode_pages2_range(wreq->mapping, first, last); + } + + if (wreq->origin == NETFS_DIO_WRITE) + inode_dio_end(wreq->inode); + + _debug("finished"); + trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip); + clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags); + wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS); + + if (wreq->iocb) { + wreq->iocb->ki_pos += wreq->transferred; + if (wreq->iocb->ki_complete) + wreq->iocb->ki_complete( + wreq->iocb, wreq->error ? wreq->error : wreq->transferred); + wreq->iocb = VFS_PTR_POISON; + } + + netfs_clear_subrequests(wreq, false); + netfs_put_request(wreq, false, netfs_rreq_trace_put_work_complete); +} + +/* + * Wake the collection work item. + */ +void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async) +{ + if (!work_pending(&wreq->work)) { + netfs_get_request(wreq, netfs_rreq_trace_get_work); + if (!queue_work(system_unbound_wq, &wreq->work)) + netfs_put_request(wreq, was_async, netfs_rreq_trace_put_work_nq); + } +} + +/** + * new_netfs_write_subrequest_terminated - Note the termination of a write operation. + * @_op: The I/O request that has terminated. + * @transferred_or_error: The amount of data transferred or an error code. + * @was_async: The termination was asynchronous + * + * This tells the library that a contributory write I/O operation has + * terminated, one way or another, and that it should collect the results. + * + * The caller indicates in @transferred_or_error the outcome of the operation, + * supplying a positive value to indicate the number of bytes transferred or a + * negative error code. The library will look after reissuing I/O operations + * as appropriate and writing downloaded data to the cache. + * + * If @was_async is true, the caller might be running in softirq or interrupt + * context and we can't sleep. + * + * When this is called, ownership of the subrequest is transferred back to the + * library, along with a ref. + * + * Note that %_op is a void* so that the function can be passed to + * kiocb::term_func without the need for a casting wrapper. 
+ */ +void new_netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error, + bool was_async) +{ + struct netfs_io_subrequest *subreq = _op; + struct netfs_io_request *wreq = subreq->rreq; + struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr]; + + _enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error); + + switch (subreq->source) { + case NETFS_UPLOAD_TO_SERVER: + netfs_stat(&netfs_n_wh_upload_done); + break; + case NETFS_WRITE_TO_CACHE: + netfs_stat(&netfs_n_wh_write_done); + break; + case NETFS_INVALID_WRITE: + break; + default: + BUG(); + } + + if (IS_ERR_VALUE(transferred_or_error)) { + subreq->error = transferred_or_error; + if (subreq->error == -EAGAIN) + set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); + else + set_bit(NETFS_SREQ_FAILED, &subreq->flags); + trace_netfs_failure(wreq, subreq, transferred_or_error, netfs_fail_write); + + switch (subreq->source) { + case NETFS_WRITE_TO_CACHE: + netfs_stat(&netfs_n_wh_write_failed); + break; + case NETFS_UPLOAD_TO_SERVER: + netfs_stat(&netfs_n_wh_upload_failed); + break; + default: + break; + } + trace_netfs_rreq(wreq, netfs_rreq_trace_set_pause); + set_bit(NETFS_RREQ_PAUSE, &wreq->flags); + } else { + if (WARN(transferred_or_error > subreq->len - subreq->transferred, + "Subreq excess write: R=%x[%x] %zd > %zu - %zu", + wreq->debug_id, subreq->debug_index, + transferred_or_error, subreq->len, subreq->transferred)) + transferred_or_error = subreq->len - subreq->transferred; + + subreq->error = 0; + subreq->transferred += transferred_or_error; + + if (subreq->transferred < subreq->len) + set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); + } + + trace_netfs_sreq(subreq, netfs_sreq_trace_terminated); + + clear_bit_unlock(NETFS_SREQ_IN_PROGRESS, &subreq->flags); + wake_up_bit(&subreq->flags, NETFS_SREQ_IN_PROGRESS); + + /* If we are at the head of the queue, wake up the collector, + * transferring a ref to it if we were the ones to do so. + */ + if (list_is_first(&subreq->rreq_link, &stream->subrequests)) + netfs_wake_write_collector(wreq, was_async); + + netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated); +} +EXPORT_SYMBOL(new_netfs_write_subrequest_terminated); diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c new file mode 100644 index 000000000000..1b2e69934fbd --- /dev/null +++ b/fs/netfs/write_issue.c @@ -0,0 +1,683 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Network filesystem high-level (buffered) writeback. + * + * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * + * To support network filesystems with local caching, we manage a situation + * that can be envisioned like the following: + * + * +---+---+-----+-----+---+----------+ + * Folios: | | | | | | | + * +---+---+-----+-----+---+----------+ + * + * +------+------+ +----+----+ + * Upload: | | |.....| | | + * (Stream 0) +------+------+ +----+----+ + * + * +------+------+------+------+------+ + * Cache: | | | | | | + * (Stream 1) +------+------+------+------+------+ + * + * Where we have a sequence of folios of varying sizes that we need to overlay + * with multiple parallel streams of I/O requests, where the I/O requests in a + * stream may also be of various sizes (in cifs, for example, the sizes are + * negotiated with the server; in something like ceph, they may represent the + * sizes of storage objects). 
+ * + * The sequence in each stream may contain gaps and noncontiguous subrequests + * may be glued together into single vectored write RPCs. + */ + +#include +#include +#include +#include +#include "internal.h" + +/* + * Kill all dirty folios in the event of an unrecoverable error, starting with + * a locked folio we've already obtained from writeback_iter(). + */ +static void netfs_kill_dirty_pages(struct address_space *mapping, + struct writeback_control *wbc, + struct folio *folio) +{ + int error = 0; + + do { + enum netfs_folio_trace why = netfs_folio_trace_kill; + struct netfs_group *group = NULL; + struct netfs_folio *finfo = NULL; + void *priv; + + priv = folio_detach_private(folio); + if (priv) { + finfo = __netfs_folio_info(priv); + if (finfo) { + /* Kill folio from streaming write. */ + group = finfo->netfs_group; + why = netfs_folio_trace_kill_s; + } else { + group = priv; + if (group == NETFS_FOLIO_COPY_TO_CACHE) { + /* Kill copy-to-cache folio */ + why = netfs_folio_trace_kill_cc; + group = NULL; + } else { + /* Kill folio with group */ + why = netfs_folio_trace_kill_g; + } + } + } + + trace_netfs_folio(folio, why); + + folio_start_writeback(folio); + folio_unlock(folio); + folio_end_writeback(folio); + + netfs_put_group(group); + kfree(finfo); + + } while ((folio = writeback_iter(mapping, wbc, folio, &error))); +} + +/* + * Create a write request and set it up appropriately for the origin type. + */ +struct netfs_io_request *netfs_create_write_req(struct address_space *mapping, + struct file *file, + loff_t start, + enum netfs_io_origin origin) +{ + struct netfs_io_request *wreq; + struct netfs_inode *ictx; + + wreq = netfs_alloc_request(mapping, file, start, 0, origin); + if (IS_ERR(wreq)) + return wreq; + + _enter("R=%x", wreq->debug_id); + + ictx = netfs_inode(wreq->inode); + if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags)) + fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx)); + + wreq->contiguity = wreq->start; + wreq->cleaned_to = wreq->start; + INIT_WORK(&wreq->work, netfs_write_collection_worker); + + wreq->io_streams[0].stream_nr = 0; + wreq->io_streams[0].source = NETFS_UPLOAD_TO_SERVER; + wreq->io_streams[0].prepare_write = ictx->ops->prepare_write; + wreq->io_streams[0].issue_write = ictx->ops->issue_write; + wreq->io_streams[0].collected_to = start; + wreq->io_streams[0].transferred = LONG_MAX; + + wreq->io_streams[1].stream_nr = 1; + wreq->io_streams[1].source = NETFS_WRITE_TO_CACHE; + wreq->io_streams[1].collected_to = start; + wreq->io_streams[1].transferred = LONG_MAX; + if (fscache_resources_valid(&wreq->cache_resources)) { + wreq->io_streams[1].avail = true; + wreq->io_streams[1].prepare_write = wreq->cache_resources.ops->prepare_write_subreq; + wreq->io_streams[1].issue_write = wreq->cache_resources.ops->issue_write; + } + + return wreq; +} + +/** + * netfs_prepare_write_failed - Note write preparation failed + * @subreq: The subrequest to mark + * + * Mark a subrequest to note that preparation for write failed. + */ +void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq) +{ + __set_bit(NETFS_SREQ_FAILED, &subreq->flags); + trace_netfs_sreq(subreq, netfs_sreq_trace_prep_failed); +} +EXPORT_SYMBOL(netfs_prepare_write_failed); + +/* + * Prepare a write subrequest. We need to allocate a new subrequest + * if we don't have one. 
+ */ +static void netfs_prepare_write(struct netfs_io_request *wreq, + struct netfs_io_stream *stream, + loff_t start) +{ + struct netfs_io_subrequest *subreq; + + subreq = netfs_alloc_subrequest(wreq); + subreq->source = stream->source; + subreq->start = start; + subreq->max_len = ULONG_MAX; + subreq->max_nr_segs = INT_MAX; + subreq->stream_nr = stream->stream_nr; + + _enter("R=%x[%x]", wreq->debug_id, subreq->debug_index); + + trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index, + refcount_read(&subreq->ref), + netfs_sreq_trace_new); + + trace_netfs_sreq(subreq, netfs_sreq_trace_prepare); + + switch (stream->source) { + case NETFS_UPLOAD_TO_SERVER: + netfs_stat(&netfs_n_wh_upload); + subreq->max_len = wreq->wsize; + break; + case NETFS_WRITE_TO_CACHE: + netfs_stat(&netfs_n_wh_write); + break; + default: + WARN_ON_ONCE(1); + break; + } + + if (stream->prepare_write) + stream->prepare_write(subreq); + + __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags); + + /* We add to the end of the list whilst the collector may be walking + * the list. The collector only goes nextwards and uses the lock to + * remove entries off of the front. + */ + spin_lock(&wreq->lock); + list_add_tail(&subreq->rreq_link, &stream->subrequests); + if (list_is_first(&subreq->rreq_link, &stream->subrequests)) { + stream->front = subreq; + if (!stream->active) { + stream->collected_to = stream->front->start; + /* Write list pointers before active flag */ + smp_store_release(&stream->active, true); + } + } + + spin_unlock(&wreq->lock); + + stream->construct = subreq; +} + +/* + * Set the I/O iterator for the filesystem/cache to use and dispatch the I/O + * operation. The operation may be asynchronous and should call + * netfs_write_subrequest_terminated() when complete. + */ +static void netfs_do_issue_write(struct netfs_io_stream *stream, + struct netfs_io_subrequest *subreq) +{ + struct netfs_io_request *wreq = subreq->rreq; + + _enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len); + + if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) + return netfs_write_subrequest_terminated(subreq, subreq->error, false); + + // TODO: Use encrypted buffer + if (test_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags)) { + subreq->io_iter = wreq->io_iter; + iov_iter_advance(&subreq->io_iter, + subreq->start + subreq->transferred - wreq->start); + iov_iter_truncate(&subreq->io_iter, + subreq->len - subreq->transferred); + } else { + iov_iter_xarray(&subreq->io_iter, ITER_SOURCE, &wreq->mapping->i_pages, + subreq->start + subreq->transferred, + subreq->len - subreq->transferred); + } + + trace_netfs_sreq(subreq, netfs_sreq_trace_submit); + stream->issue_write(subreq); +} + +void netfs_reissue_write(struct netfs_io_stream *stream, + struct netfs_io_subrequest *subreq) +{ + __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags); + netfs_do_issue_write(stream, subreq); +} + +static void netfs_issue_write(struct netfs_io_request *wreq, + struct netfs_io_stream *stream) +{ + struct netfs_io_subrequest *subreq = stream->construct; + + if (!subreq) + return; + stream->construct = NULL; + + if (subreq->start + subreq->len > wreq->start + wreq->submitted) + wreq->len = wreq->submitted = subreq->start + subreq->len - wreq->start; + netfs_do_issue_write(stream, subreq); +} + +/* + * Add data to the write subrequest, dispatching each as we fill it up or if it + * is discontiguous with the previous. 
We only fill one part at a time so that + * we can avoid overrunning the credits obtained (cifs) and try to parallelise + * content-crypto preparation with network writes. + */ +int netfs_advance_write(struct netfs_io_request *wreq, + struct netfs_io_stream *stream, + loff_t start, size_t len, bool to_eof) +{ + struct netfs_io_subrequest *subreq = stream->construct; + size_t part; + + if (!stream->avail) { + _leave("no write"); + return len; + } + + _enter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0); + + if (subreq && start != subreq->start + subreq->len) { + netfs_issue_write(wreq, stream); + subreq = NULL; + } + + if (!stream->construct) + netfs_prepare_write(wreq, stream, start); + subreq = stream->construct; + + part = min(subreq->max_len - subreq->len, len); + _debug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len); + subreq->len += part; + subreq->nr_segs++; + + if (subreq->len >= subreq->max_len || + subreq->nr_segs >= subreq->max_nr_segs || + to_eof) { + netfs_issue_write(wreq, stream); + subreq = NULL; + } + + return part; +} + +/* + * Write some of a pending folio data back to the server. + */ +static int netfs_write_folio(struct netfs_io_request *wreq, + struct writeback_control *wbc, + struct folio *folio) +{ + struct netfs_io_stream *upload = &wreq->io_streams[0]; + struct netfs_io_stream *cache = &wreq->io_streams[1]; + struct netfs_io_stream *stream; + struct netfs_group *fgroup; /* TODO: Use this with ceph */ + struct netfs_folio *finfo; + size_t fsize = folio_size(folio), flen = fsize, foff = 0; + loff_t fpos = folio_pos(folio), i_size; + bool to_eof = false, streamw = false; + bool debug = false; + + _enter(""); + + /* netfs_perform_write() may shift i_size around the page or from out + * of the page to beyond it, but cannot move i_size into or through the + * page since we have it locked. + */ + i_size = i_size_read(wreq->inode); + + if (fpos >= i_size) { + /* mmap beyond eof. */ + _debug("beyond eof"); + folio_start_writeback(folio); + folio_unlock(folio); + wreq->nr_group_rel += netfs_folio_written_back(folio); + netfs_put_group_many(wreq->group, wreq->nr_group_rel); + wreq->nr_group_rel = 0; + return 0; + } + + if (fpos + fsize > wreq->i_size) + wreq->i_size = i_size; + + fgroup = netfs_folio_group(folio); + finfo = netfs_folio_info(folio); + if (finfo) { + foff = finfo->dirty_offset; + flen = foff + finfo->dirty_len; + streamw = true; + } + + if (wreq->origin == NETFS_WRITETHROUGH) { + to_eof = false; + if (flen > i_size - fpos) + flen = i_size - fpos; + } else if (flen > i_size - fpos) { + flen = i_size - fpos; + if (!streamw) + folio_zero_segment(folio, flen, fsize); + to_eof = true; + } else if (flen == i_size - fpos) { + to_eof = true; + } + flen -= foff; + + _debug("folio %zx %zx %zx", foff, flen, fsize); + + /* Deal with discontinuities in the stream of dirty pages. These can + * arise from a number of sources: + * + * (1) Intervening non-dirty pages from random-access writes, multiple + * flushers writing back different parts simultaneously and manual + * syncing. + * + * (2) Partially-written pages from write-streaming. + * + * (3) Pages that belong to a different write-back group (eg. Ceph + * snapshots). + * + * (4) Actually-clean pages that were marked for write to the cache + * when they were read. Note that these appear as a special + * write-back group. 
+ */ + if (fgroup == NETFS_FOLIO_COPY_TO_CACHE) { + netfs_issue_write(wreq, upload); + } else if (fgroup != wreq->group) { + /* We can't write this page to the server yet. */ + kdebug("wrong group"); + folio_redirty_for_writepage(wbc, folio); + folio_unlock(folio); + netfs_issue_write(wreq, upload); + netfs_issue_write(wreq, cache); + return 0; + } + + if (foff > 0) + netfs_issue_write(wreq, upload); + if (streamw) + netfs_issue_write(wreq, cache); + + /* Flip the page to the writeback state and unlock. If we're called + * from write-through, then the page has already been put into the wb + * state. + */ + if (wreq->origin == NETFS_WRITEBACK) + folio_start_writeback(folio); + folio_unlock(folio); + + if (fgroup == NETFS_FOLIO_COPY_TO_CACHE) { + if (!fscache_resources_valid(&wreq->cache_resources)) { + trace_netfs_folio(folio, netfs_folio_trace_cancel_copy); + netfs_issue_write(wreq, upload); + netfs_folio_written_back(folio); + return 0; + } + trace_netfs_folio(folio, netfs_folio_trace_store_copy); + } else if (!upload->construct) { + trace_netfs_folio(folio, netfs_folio_trace_store); + } else { + trace_netfs_folio(folio, netfs_folio_trace_store_plus); + } + + /* Move the submission point forward to allow for write-streaming data + * not starting at the front of the page. We don't do write-streaming + * with the cache as the cache requires DIO alignment. + * + * Also skip uploading for data that's been read and just needs copying + * to the cache. + */ + for (int s = 0; s < NR_IO_STREAMS; s++) { + stream = &wreq->io_streams[s]; + stream->submit_max_len = fsize; + stream->submit_off = foff; + stream->submit_len = flen; + if ((stream->source == NETFS_WRITE_TO_CACHE && streamw) || + (stream->source == NETFS_UPLOAD_TO_SERVER && + fgroup == NETFS_FOLIO_COPY_TO_CACHE)) { + stream->submit_off = UINT_MAX; + stream->submit_len = 0; + stream->submit_max_len = 0; + } + } + + /* Attach the folio to one or more subrequests. For a big folio, we + * could end up with thousands of subrequests if the wsize is small - + * but we might need to wait during the creation of subrequests for + * network resources (eg. SMB credits). + */ + for (;;) { + ssize_t part; + size_t lowest_off = ULONG_MAX; + int choose_s = -1; + + /* Always add to the lowest-submitted stream first. 
*/ + for (int s = 0; s < NR_IO_STREAMS; s++) { + stream = &wreq->io_streams[s]; + if (stream->submit_len > 0 && + stream->submit_off < lowest_off) { + lowest_off = stream->submit_off; + choose_s = s; + } + } + + if (choose_s < 0) + break; + stream = &wreq->io_streams[choose_s]; + + part = netfs_advance_write(wreq, stream, fpos + stream->submit_off, + stream->submit_len, to_eof); + atomic64_set(&wreq->issued_to, fpos + stream->submit_off); + stream->submit_off += part; + stream->submit_max_len -= part; + if (part > stream->submit_len) + stream->submit_len = 0; + else + stream->submit_len -= part; + if (part > 0) + debug = true; + } + + atomic64_set(&wreq->issued_to, fpos + fsize); + + if (!debug) + kdebug("R=%x: No submit", wreq->debug_id); + + if (flen < fsize) + for (int s = 0; s < NR_IO_STREAMS; s++) + netfs_issue_write(wreq, &wreq->io_streams[s]); + + _leave(" = 0"); + return 0; +} + +/* + * Write some of the pending data back to the server + */ +int new_netfs_writepages(struct address_space *mapping, + struct writeback_control *wbc) +{ + struct netfs_inode *ictx = netfs_inode(mapping->host); + struct netfs_io_request *wreq = NULL; + struct folio *folio; + int error = 0; + + if (wbc->sync_mode == WB_SYNC_ALL) + mutex_lock(&ictx->wb_lock); + else if (!mutex_trylock(&ictx->wb_lock)) + return 0; + + /* Need the first folio to be able to set up the op. */ + folio = writeback_iter(mapping, wbc, NULL, &error); + if (!folio) + goto out; + + wreq = netfs_create_write_req(mapping, NULL, folio_pos(folio), NETFS_WRITEBACK); + if (IS_ERR(wreq)) { + error = PTR_ERR(wreq); + goto couldnt_start; + } + + trace_netfs_write(wreq, netfs_write_trace_writeback); + + do { + _debug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted); + + /* It appears we don't have to handle cyclic writeback wrapping. */ + WARN_ON_ONCE(wreq && folio_pos(folio) < wreq->start + wreq->submitted); + + if (netfs_folio_group(folio) != NETFS_FOLIO_COPY_TO_CACHE && + unlikely(!test_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags))) { + set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags); + wreq->netfs_ops->begin_writeback(wreq); + } + + error = netfs_write_folio(wreq, wbc, folio); + if (error < 0) + break; + } while ((folio = writeback_iter(mapping, wbc, folio, &error))); + + for (int s = 0; s < NR_IO_STREAMS; s++) + netfs_issue_write(wreq, &wreq->io_streams[s]); + smp_wmb(); /* Write lists before ALL_QUEUED. */ + set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags); + + mutex_unlock(&ictx->wb_lock); + + netfs_put_request(wreq, false, netfs_rreq_trace_put_return); + _leave(" = %d", error); + return error; + +couldnt_start: + netfs_kill_dirty_pages(mapping, wbc, folio); +out: + mutex_unlock(&ictx->wb_lock); + _leave(" = %d", error); + return error; +} +EXPORT_SYMBOL(new_netfs_writepages); + +/* + * Begin a write operation for writing through the pagecache. + */ +struct netfs_io_request *new_netfs_begin_writethrough(struct kiocb *iocb, size_t len) +{ + struct netfs_io_request *wreq = NULL; + struct netfs_inode *ictx = netfs_inode(file_inode(iocb->ki_filp)); + + mutex_lock(&ictx->wb_lock); + + wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, + iocb->ki_pos, NETFS_WRITETHROUGH); + if (IS_ERR(wreq)) { + mutex_unlock(&ictx->wb_lock); + return wreq; + } + + wreq->io_streams[0].avail = true; + trace_netfs_write(wreq, netfs_write_trace_writethrough); + return wreq; +} + +/* + * Advance the state of the write operation used when writing through the + * pagecache. 
Data has been copied into the pagecache that we need to append + * to the request. If we've added more than wsize then we need to create a new + * subrequest. + */ +int new_netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, + struct folio *folio, size_t copied, bool to_page_end, + struct folio **writethrough_cache) +{ + _enter("R=%x ic=%zu ws=%u cp=%zu tp=%u", + wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end); + + if (!*writethrough_cache) { + if (folio_test_dirty(folio)) + /* Sigh. mmap. */ + folio_clear_dirty_for_io(folio); + + /* We can make multiple writes to the folio... */ + folio_start_writeback(folio); + if (wreq->len == 0) + trace_netfs_folio(folio, netfs_folio_trace_wthru); + else + trace_netfs_folio(folio, netfs_folio_trace_wthru_plus); + *writethrough_cache = folio; + } + + wreq->len += copied; + if (!to_page_end) + return 0; + + *writethrough_cache = NULL; + return netfs_write_folio(wreq, wbc, folio); +} + +/* + * End a write operation used when writing through the pagecache. + */ +int new_netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, + struct folio *writethrough_cache) +{ + struct netfs_inode *ictx = netfs_inode(wreq->inode); + int ret; + + _enter("R=%x", wreq->debug_id); + + if (writethrough_cache) + netfs_write_folio(wreq, wbc, writethrough_cache); + + netfs_issue_write(wreq, &wreq->io_streams[0]); + netfs_issue_write(wreq, &wreq->io_streams[1]); + smp_wmb(); /* Write lists before ALL_QUEUED. */ + set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags); + + mutex_unlock(&ictx->wb_lock); + + ret = wreq->error; + netfs_put_request(wreq, false, netfs_rreq_trace_put_return); + return ret; +} + +/* + * Write data to the server without going through the pagecache and without + * writing it to the local cache. + */ +int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len) +{ + struct netfs_io_stream *upload = &wreq->io_streams[0]; + ssize_t part; + loff_t start = wreq->start; + int error = 0; + + _enter("%zx", len); + + if (wreq->origin == NETFS_DIO_WRITE) + inode_dio_begin(wreq->inode); + + while (len) { + // TODO: Prepare content encryption + + _debug("unbuffered %zx", len); + part = netfs_advance_write(wreq, upload, start, len, false); + start += part; + len -= part; + if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) { + trace_netfs_rreq(wreq, netfs_rreq_trace_wait_pause); + wait_on_bit(&wreq->flags, NETFS_RREQ_PAUSE, TASK_UNINTERRUPTIBLE); + } + if (test_bit(NETFS_RREQ_FAILED, &wreq->flags)) + break; + } + + netfs_issue_write(wreq, upload); + + smp_wmb(); /* Write lists before ALL_QUEUED. 
*/ + set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags); + if (list_empty(&upload->subrequests)) + netfs_wake_write_collector(wreq, false); + + _leave(" = %d", error); + return error; +} diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 88269681d4fc..42dba05a428b 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -64,6 +64,7 @@ struct netfs_inode { #if IS_ENABLED(CONFIG_FSCACHE) struct fscache_cookie *cache; #endif + struct mutex wb_lock; /* Writeback serialisation */ loff_t remote_i_size; /* Size of the remote file */ loff_t zero_point; /* Size after which we assume there's no data * on the server */ @@ -71,7 +72,6 @@ struct netfs_inode { #define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */ #define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */ #define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */ -#define NETFS_ICTX_NO_WRITE_STREAMING 3 /* Don't engage in write-streaming */ #define NETFS_ICTX_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark * write to cache on read */ }; @@ -126,6 +126,33 @@ static inline struct netfs_group *netfs_folio_group(struct folio *folio) return priv; } +/* + * Stream of I/O subrequests going to a particular destination, such as the + * server or the local cache. This is mainly intended for writing where we may + * have to write to multiple destinations concurrently. + */ +struct netfs_io_stream { + /* Submission tracking */ + struct netfs_io_subrequest *construct; /* Op being constructed */ + unsigned int submit_off; /* Folio offset we're submitting from */ + unsigned int submit_len; /* Amount of data left to submit */ + unsigned int submit_max_len; /* Amount I/O can be rounded up to */ + void (*prepare_write)(struct netfs_io_subrequest *subreq); + void (*issue_write)(struct netfs_io_subrequest *subreq); + /* Collection tracking */ + struct list_head subrequests; /* Contributory I/O operations */ + struct netfs_io_subrequest *front; /* Op being collected */ + unsigned long long collected_to; /* Position we've collected results to */ + size_t transferred; /* The amount transferred from this stream */ + enum netfs_io_source source; /* Where to read from/write to */ + unsigned short error; /* Aggregate error for the stream */ + unsigned char stream_nr; /* Index of stream in parent table */ + bool avail; /* T if stream is available */ + bool active; /* T if stream is active */ + bool need_retry; /* T if this stream needs retrying */ + bool failed; /* T if this stream failed */ +}; + /* * Resources required to do operations on a cache. 
*/ @@ -150,13 +177,16 @@ struct netfs_io_subrequest { struct list_head rreq_link; /* Link in rreq->subrequests */ struct iov_iter io_iter; /* Iterator for this subrequest */ unsigned long long start; /* Where to start the I/O */ + size_t max_len; /* Maximum size of the I/O */ size_t len; /* Size of the I/O */ size_t transferred; /* Amount of data transferred */ refcount_t ref; short error; /* 0 or error that occurred */ unsigned short debug_index; /* Index in list (for debugging output) */ + unsigned int nr_segs; /* Number of segs in io_iter */ unsigned int max_nr_segs; /* 0 or max number of segments in an iterator */ enum netfs_io_source source; /* Where to read from/write to */ + unsigned char stream_nr; /* I/O stream this belongs to */ unsigned long flags; #define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */ #define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */ @@ -164,6 +194,11 @@ struct netfs_io_subrequest { #define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */ #define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */ #define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */ +#define NETFS_SREQ_BOUNDARY 6 /* Set if ends on hard boundary (eg. ceph object) */ +#define NETFS_SREQ_IN_PROGRESS 8 /* Unlocked when the subrequest completes */ +#define NETFS_SREQ_NEED_RETRY 9 /* Set if the filesystem requests a retry */ +#define NETFS_SREQ_RETRYING 10 /* Set if we're retrying */ +#define NETFS_SREQ_FAILED 11 /* Set if the subreq failed unretryably */ }; enum netfs_io_origin { @@ -194,6 +229,9 @@ struct netfs_io_request { struct netfs_cache_resources cache_resources; struct list_head proc_link; /* Link in netfs_iorequests */ struct list_head subrequests; /* Contributory I/O operations */ + struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */ +#define NR_IO_STREAMS 2 //wreq->nr_io_streams + struct netfs_group *group; /* Writeback group being written back */ struct iov_iter iter; /* Unencrypted-side iterator */ struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */ void *netfs_priv; /* Private data for the netfs */ @@ -203,6 +241,8 @@ struct netfs_io_request { unsigned int rsize; /* Maximum read size (0 for none) */ unsigned int wsize; /* Maximum write size (0 for none) */ atomic_t subreq_counter; /* Next subreq->debug_index */ + unsigned int nr_group_rel; /* Number of refs to release on ->group */ + spinlock_t lock; /* Lock for queuing subreqs */ atomic_t nr_outstanding; /* Number of ops in progress */ atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */ size_t upper_len; /* Length can be extended to here */ @@ -214,6 +254,10 @@ struct netfs_io_request { bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */ unsigned long long i_size; /* Size of the file */ unsigned long long start; /* Start position */ + atomic64_t issued_to; /* Write issuer folio cursor */ + unsigned long long contiguity; /* Tracking for gaps in the writeback sequence */ + unsigned long long collected_to; /* Point we've collected to */ + unsigned long long cleaned_to; /* Position we've cleaned folios to */ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */ refcount_t ref; unsigned long flags; @@ -227,6 +271,9 @@ struct netfs_io_request { #define NETFS_RREQ_UPLOAD_TO_SERVER 8 /* Need to write to the server */ #define NETFS_RREQ_NONBLOCK 9 /* Don't block if possible (O_NONBLOCK) */ #define NETFS_RREQ_BLOCKED 10 /* We blocked */ +#define 
NETFS_RREQ_PAUSE 11 /* Pause subrequest generation */ +#define NETFS_RREQ_USE_IO_ITER 12 /* Use ->io_iter rather than ->i_pages */ +#define NETFS_RREQ_ALL_QUEUED 13 /* All subreqs are now queued */ #define NETFS_RREQ_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark * write to cache on read */ const struct netfs_request_ops *netfs_ops; @@ -258,6 +305,9 @@ struct netfs_request_ops { /* Write request handling */ void (*create_write_requests)(struct netfs_io_request *wreq, loff_t start, size_t len); + void (*begin_writeback)(struct netfs_io_request *wreq); + void (*prepare_write)(struct netfs_io_subrequest *subreq); + void (*issue_write)(struct netfs_io_subrequest *subreq); void (*invalidate_cache)(struct netfs_io_request *wreq); }; @@ -292,6 +342,9 @@ struct netfs_cache_ops { netfs_io_terminated_t term_func, void *term_func_priv); + /* Write data to the cache from a netfs subrequest. */ + void (*issue_write)(struct netfs_io_subrequest *subreq); + /* Expand readahead request */ void (*expand_readahead)(struct netfs_cache_resources *cres, unsigned long long *_start, @@ -304,6 +357,13 @@ struct netfs_cache_ops { enum netfs_io_source (*prepare_read)(struct netfs_io_subrequest *subreq, unsigned long long i_size); + /* Prepare a write subrequest, working out if we're allowed to do it + * and finding out the maximum amount of data to gather before + * attempting to submit. If we're not permitted to do it, the + * subrequest should be marked failed. + */ + void (*prepare_write_subreq)(struct netfs_io_subrequest *subreq); + /* Prepare a write operation, working out what part of the write we can * actually do. */ @@ -349,6 +409,8 @@ int netfs_write_begin(struct netfs_inode *, struct file *, struct folio **, void **fsdata); int netfs_writepages(struct address_space *mapping, struct writeback_control *wbc); +int new_netfs_writepages(struct address_space *mapping, + struct writeback_control *wbc); bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio); int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc); void netfs_clear_inode_writeback(struct inode *inode, const void *aux); @@ -372,8 +434,11 @@ size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset, struct netfs_io_subrequest *netfs_create_write_request( struct netfs_io_request *wreq, enum netfs_io_source dest, loff_t start, size_t len, work_func_t worker); +void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq); void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error, bool was_async); +void new_netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error, + bool was_async); void netfs_queue_write_request(struct netfs_io_subrequest *subreq); int netfs_start_io_read(struct inode *inode); @@ -415,6 +480,7 @@ static inline void netfs_inode_init(struct netfs_inode *ctx, #if IS_ENABLED(CONFIG_FSCACHE) ctx->cache = NULL; #endif + mutex_init(&ctx->wb_lock); /* ->releasepage() drives zero_point */ if (use_zero_point) { ctx->zero_point = ctx->remote_i_size; diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index 7126d2ea459c..e7700172ae7e 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -44,14 +44,18 @@ #define netfs_rreq_traces \ EM(netfs_rreq_trace_assess, "ASSESS ") \ EM(netfs_rreq_trace_copy, "COPY ") \ + EM(netfs_rreq_trace_collect, "COLLECT") \ EM(netfs_rreq_trace_done, "DONE ") \ EM(netfs_rreq_trace_free, "FREE ") \ EM(netfs_rreq_trace_redirty, "REDIRTY") \ 
EM(netfs_rreq_trace_resubmit, "RESUBMT") \ + EM(netfs_rreq_trace_set_pause, "PAUSE ") \ EM(netfs_rreq_trace_unlock, "UNLOCK ") \ EM(netfs_rreq_trace_unmark, "UNMARK ") \ EM(netfs_rreq_trace_wait_ip, "WAIT-IP") \ + EM(netfs_rreq_trace_wait_pause, "WT-PAUS") \ EM(netfs_rreq_trace_wake_ip, "WAKE-IP") \ + EM(netfs_rreq_trace_unpause, "UNPAUSE") \ E_(netfs_rreq_trace_write_done, "WR-DONE") #define netfs_sreq_sources \ @@ -64,11 +68,15 @@ E_(NETFS_INVALID_WRITE, "INVL") #define netfs_sreq_traces \ + EM(netfs_sreq_trace_discard, "DSCRD") \ EM(netfs_sreq_trace_download_instead, "RDOWN") \ + EM(netfs_sreq_trace_fail, "FAIL ") \ EM(netfs_sreq_trace_free, "FREE ") \ EM(netfs_sreq_trace_limited, "LIMIT") \ EM(netfs_sreq_trace_prepare, "PREP ") \ + EM(netfs_sreq_trace_prep_failed, "PRPFL") \ EM(netfs_sreq_trace_resubmit_short, "SHORT") \ + EM(netfs_sreq_trace_retry, "RETRY") \ EM(netfs_sreq_trace_submit, "SUBMT") \ EM(netfs_sreq_trace_terminated, "TERM ") \ EM(netfs_sreq_trace_write, "WRITE") \ @@ -88,6 +96,7 @@ #define netfs_rreq_ref_traces \ EM(netfs_rreq_trace_get_for_outstanding,"GET OUTSTND") \ EM(netfs_rreq_trace_get_subreq, "GET SUBREQ ") \ + EM(netfs_rreq_trace_get_work, "GET WORK ") \ EM(netfs_rreq_trace_put_complete, "PUT COMPLT ") \ EM(netfs_rreq_trace_put_discard, "PUT DISCARD") \ EM(netfs_rreq_trace_put_failed, "PUT FAILED ") \ @@ -95,6 +104,8 @@ EM(netfs_rreq_trace_put_return, "PUT RETURN ") \ EM(netfs_rreq_trace_put_subreq, "PUT SUBREQ ") \ EM(netfs_rreq_trace_put_work, "PUT WORK ") \ + EM(netfs_rreq_trace_put_work_complete, "PUT WORK CP") \ + EM(netfs_rreq_trace_put_work_nq, "PUT WORK NQ") \ EM(netfs_rreq_trace_see_work, "SEE WORK ") \ E_(netfs_rreq_trace_new, "NEW ") @@ -103,11 +114,14 @@ EM(netfs_sreq_trace_get_resubmit, "GET RESUBMIT") \ EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \ EM(netfs_sreq_trace_new, "NEW ") \ + EM(netfs_sreq_trace_put_cancel, "PUT CANCEL ") \ EM(netfs_sreq_trace_put_clear, "PUT CLEAR ") \ EM(netfs_sreq_trace_put_discard, "PUT DISCARD") \ + EM(netfs_sreq_trace_put_done, "PUT DONE ") \ EM(netfs_sreq_trace_put_failed, "PUT FAILED ") \ EM(netfs_sreq_trace_put_merged, "PUT MERGED ") \ EM(netfs_sreq_trace_put_no_copy, "PUT NO COPY") \ + EM(netfs_sreq_trace_put_oom, "PUT OOM ") \ EM(netfs_sreq_trace_put_wip, "PUT WIP ") \ EM(netfs_sreq_trace_put_work, "PUT WORK ") \ E_(netfs_sreq_trace_put_terminated, "PUT TERM ") @@ -124,7 +138,9 @@ EM(netfs_streaming_filled_page, "mod-streamw-f") \ EM(netfs_streaming_cont_filled_page, "mod-streamw-f+") \ /* The rest are for writeback */ \ + EM(netfs_folio_trace_cancel_copy, "cancel-copy") \ EM(netfs_folio_trace_clear, "clear") \ + EM(netfs_folio_trace_clear_cc, "clear-cc") \ EM(netfs_folio_trace_clear_s, "clear-s") \ EM(netfs_folio_trace_clear_g, "clear-g") \ EM(netfs_folio_trace_copy, "copy") \ @@ -133,16 +149,26 @@ EM(netfs_folio_trace_end_copy, "end-copy") \ EM(netfs_folio_trace_filled_gaps, "filled-gaps") \ EM(netfs_folio_trace_kill, "kill") \ + EM(netfs_folio_trace_kill_cc, "kill-cc") \ + EM(netfs_folio_trace_kill_g, "kill-g") \ + EM(netfs_folio_trace_kill_s, "kill-s") \ EM(netfs_folio_trace_mkwrite, "mkwrite") \ EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \ + EM(netfs_folio_trace_not_under_wback, "!wback") \ EM(netfs_folio_trace_read_gaps, "read-gaps") \ EM(netfs_folio_trace_redirty, "redirty") \ EM(netfs_folio_trace_redirtied, "redirtied") \ EM(netfs_folio_trace_store, "store") \ + EM(netfs_folio_trace_store_copy, "store-copy") \ EM(netfs_folio_trace_store_plus, "store+") \ EM(netfs_folio_trace_wthru, "wthru") \ 
E_(netfs_folio_trace_wthru_plus, "wthru+") +#define netfs_collect_contig_traces \ + EM(netfs_contig_trace_collect, "Collect") \ + EM(netfs_contig_trace_jump, "-->JUMP-->") \ + E_(netfs_contig_trace_unlock, "Unlock") + #ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY #define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY @@ -159,6 +185,7 @@ enum netfs_failure { netfs_failures } __mode(byte); enum netfs_rreq_ref_trace { netfs_rreq_ref_traces } __mode(byte); enum netfs_sreq_ref_trace { netfs_sreq_ref_traces } __mode(byte); enum netfs_folio_trace { netfs_folio_traces } __mode(byte); +enum netfs_collect_contig_trace { netfs_collect_contig_traces } __mode(byte); #endif @@ -180,6 +207,7 @@ netfs_failures; netfs_rreq_ref_traces; netfs_sreq_ref_traces; netfs_folio_traces; +netfs_collect_contig_traces; /* * Now redefine the EM() and E_() macros to map the enums to the strings that @@ -413,16 +441,18 @@ TRACE_EVENT(netfs_write_iter, __field(unsigned long long, start ) __field(size_t, len ) __field(unsigned int, flags ) + __field(unsigned int, ino ) ), TP_fast_assign( __entry->start = iocb->ki_pos; __entry->len = iov_iter_count(from); + __entry->ino = iocb->ki_filp->f_inode->i_ino; __entry->flags = iocb->ki_flags; ), - TP_printk("WRITE-ITER s=%llx l=%zx f=%x", - __entry->start, __entry->len, __entry->flags) + TP_printk("WRITE-ITER i=%x s=%llx l=%zx f=%x", + __entry->ino, __entry->start, __entry->len, __entry->flags) ); TRACE_EVENT(netfs_write, @@ -434,6 +464,7 @@ TRACE_EVENT(netfs_write, TP_STRUCT__entry( __field(unsigned int, wreq ) __field(unsigned int, cookie ) + __field(unsigned int, ino ) __field(enum netfs_write_trace, what ) __field(unsigned long long, start ) __field(unsigned long long, len ) @@ -444,18 +475,213 @@ TRACE_EVENT(netfs_write, struct fscache_cookie *__cookie = netfs_i_cookie(__ctx); __entry->wreq = wreq->debug_id; __entry->cookie = __cookie ? 
__cookie->debug_id : 0; + __entry->ino = wreq->inode->i_ino; __entry->what = what; __entry->start = wreq->start; __entry->len = wreq->len; ), - TP_printk("R=%08x %s c=%08x by=%llx-%llx", + TP_printk("R=%08x %s c=%08x i=%x by=%llx-%llx", __entry->wreq, __print_symbolic(__entry->what, netfs_write_traces), __entry->cookie, + __entry->ino, __entry->start, __entry->start + __entry->len - 1) ); +TRACE_EVENT(netfs_collect, + TP_PROTO(const struct netfs_io_request *wreq), + + TP_ARGS(wreq), + + TP_STRUCT__entry( + __field(unsigned int, wreq ) + __field(unsigned int, len ) + __field(unsigned long long, transferred ) + __field(unsigned long long, start ) + ), + + TP_fast_assign( + __entry->wreq = wreq->debug_id; + __entry->start = wreq->start; + __entry->len = wreq->len; + __entry->transferred = wreq->transferred; + ), + + TP_printk("R=%08x s=%llx-%llx", + __entry->wreq, + __entry->start + __entry->transferred, + __entry->start + __entry->len) + ); + +TRACE_EVENT(netfs_collect_contig, + TP_PROTO(const struct netfs_io_request *wreq, unsigned long long to, + enum netfs_collect_contig_trace type), + + TP_ARGS(wreq, to, type), + + TP_STRUCT__entry( + __field(unsigned int, wreq) + __field(enum netfs_collect_contig_trace, type) + __field(unsigned long long, contiguity) + __field(unsigned long long, to) + ), + + TP_fast_assign( + __entry->wreq = wreq->debug_id; + __entry->type = type; + __entry->contiguity = wreq->contiguity; + __entry->to = to; + ), + + TP_printk("R=%08x %llx -> %llx %s", + __entry->wreq, + __entry->contiguity, + __entry->to, + __print_symbolic(__entry->type, netfs_collect_contig_traces)) + ); + +TRACE_EVENT(netfs_collect_sreq, + TP_PROTO(const struct netfs_io_request *wreq, + const struct netfs_io_subrequest *subreq), + + TP_ARGS(wreq, subreq), + + TP_STRUCT__entry( + __field(unsigned int, wreq ) + __field(unsigned int, subreq ) + __field(unsigned int, stream ) + __field(unsigned int, len ) + __field(unsigned int, transferred ) + __field(unsigned long long, start ) + ), + + TP_fast_assign( + __entry->wreq = wreq->debug_id; + __entry->subreq = subreq->debug_index; + __entry->stream = subreq->stream_nr; + __entry->start = subreq->start; + __entry->len = subreq->len; + __entry->transferred = subreq->transferred; + ), + + TP_printk("R=%08x[%u:%02x] s=%llx t=%x/%x", + __entry->wreq, __entry->stream, __entry->subreq, + __entry->start, __entry->transferred, __entry->len) + ); + +TRACE_EVENT(netfs_collect_folio, + TP_PROTO(const struct netfs_io_request *wreq, + const struct folio *folio, + unsigned long long fend, + unsigned long long collected_to), + + TP_ARGS(wreq, folio, fend, collected_to), + + TP_STRUCT__entry( + __field(unsigned int, wreq ) + __field(unsigned long, index ) + __field(unsigned long long, fend ) + __field(unsigned long long, cleaned_to ) + __field(unsigned long long, collected_to ) + ), + + TP_fast_assign( + __entry->wreq = wreq->debug_id; + __entry->index = folio->index; + __entry->fend = fend; + __entry->cleaned_to = wreq->cleaned_to; + __entry->collected_to = collected_to; + ), + + TP_printk("R=%08x ix=%05lx r=%llx-%llx t=%llx/%llx", + __entry->wreq, __entry->index, + (unsigned long long)__entry->index * PAGE_SIZE, __entry->fend, + __entry->cleaned_to, __entry->collected_to) + ); + +TRACE_EVENT(netfs_collect_state, + TP_PROTO(const struct netfs_io_request *wreq, + unsigned long long collected_to, + unsigned int notes), + + TP_ARGS(wreq, collected_to, notes), + + TP_STRUCT__entry( + __field(unsigned int, wreq ) + __field(unsigned int, notes ) + __field(unsigned long long, 
collected_to ) + __field(unsigned long long, cleaned_to ) + __field(unsigned long long, contiguity ) + ), + + TP_fast_assign( + __entry->wreq = wreq->debug_id; + __entry->notes = notes; + __entry->collected_to = collected_to; + __entry->cleaned_to = wreq->cleaned_to; + __entry->contiguity = wreq->contiguity; + ), + + TP_printk("R=%08x cto=%llx fto=%llx ctg=%llx n=%x", + __entry->wreq, __entry->collected_to, + __entry->cleaned_to, __entry->contiguity, + __entry->notes) + ); + +TRACE_EVENT(netfs_collect_gap, + TP_PROTO(const struct netfs_io_request *wreq, + const struct netfs_io_stream *stream, + unsigned long long jump_to, char type), + + TP_ARGS(wreq, stream, jump_to, type), + + TP_STRUCT__entry( + __field(unsigned int, wreq) + __field(unsigned char, stream) + __field(unsigned char, type) + __field(unsigned long long, from) + __field(unsigned long long, to) + ), + + TP_fast_assign( + __entry->wreq = wreq->debug_id; + __entry->stream = stream->stream_nr; + __entry->from = stream->collected_to; + __entry->to = jump_to; + __entry->type = type; + ), + + TP_printk("R=%08x[%x:] %llx->%llx %c", + __entry->wreq, __entry->stream, + __entry->from, __entry->to, __entry->type) + ); + +TRACE_EVENT(netfs_collect_stream, + TP_PROTO(const struct netfs_io_request *wreq, + const struct netfs_io_stream *stream), + + TP_ARGS(wreq, stream), + + TP_STRUCT__entry( + __field(unsigned int, wreq) + __field(unsigned char, stream) + __field(unsigned long long, collected_to) + __field(unsigned long long, front) + ), + + TP_fast_assign( + __entry->wreq = wreq->debug_id; + __entry->stream = stream->stream_nr; + __entry->collected_to = stream->collected_to; + __entry->front = stream->front ? stream->front->start : UINT_MAX; + ), + + TP_printk("R=%08x[%x:] cto=%llx frn=%llx", + __entry->wreq, __entry->stream, + __entry->collected_to, __entry->front) + ); + #undef EM #undef E_ #endif /* _TRACE_NETFS_H */ -- cgit From 2df86547b23dabcd02ab000a24ed7813606c269f Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 8 Mar 2024 12:36:05 +0000 Subject: netfs: Cut over to using new writeback code Cut over to using the new writeback code. The old code is #ifdef'd out or otherwise removed from compilation to avoid conflicts and will be removed in a future patch. 
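As a rough sketch of what the cut-over asks of a filesystem (the myfs_* names below are placeholders for illustration and are not code from this series; myfs_send_to_server() stands in for whatever write RPC the filesystem already has):

	#include <linux/fs.h>
	#include <linux/netfs.h>

	/* Hypothetical transport helper, standing in for the filesystem's write RPC. */
	static ssize_t myfs_send_to_server(struct netfs_io_subrequest *subreq);

	static void myfs_begin_writeback(struct netfs_io_request *wreq)
	{
		/* Get hold of whatever creds/handles the uploads will need and
		 * enable the upload stream.
		 */
		wreq->io_streams[0].avail = true;
	}

	static void myfs_prepare_write(struct netfs_io_subrequest *subreq)
	{
		/* Clamp how much data netfslib may gather into this subrequest;
		 * netfs_prepare_write_failed(subreq) can be called here instead
		 * if the write cannot go ahead.
		 */
		subreq->max_len = 256 * 1024;
	}

	static void myfs_issue_write(struct netfs_io_subrequest *subreq)
	{
		ssize_t ret;

		/* Transmit subreq->io_iter to the server.  This may be done
		 * asynchronously, in which case the completion handler makes
		 * the termination call below instead.
		 */
		ret = myfs_send_to_server(subreq);
		netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len, false);
	}

	static const struct netfs_request_ops myfs_req_ops = {
		/* ... read-side and init hooks ... */
		.begin_writeback	= myfs_begin_writeback,
		.prepare_write		= myfs_prepare_write,
		.issue_write		= myfs_issue_write,
	};

	static const struct address_space_operations myfs_aops = {
		/* ... */
		.writepages	= netfs_writepages,
	};

Note that there is no create_write_requests() hook in this model: netfslib constructs, batches and collects the subrequests itself, and the filesystem only clamps and transmits them.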
Signed-off-by: David Howells Reviewed-by: Jeff Layton cc: Eric Van Hensbergen cc: Latchesar Ionkov cc: Dominique Martinet cc: Christian Schoenebeck cc: Marc Dionne cc: v9fs@lists.linux.dev cc: linux-afs@lists.infradead.org cc: netfs@lists.linux.dev cc: linux-fsdevel@vger.kernel.org --- fs/9p/vfs_addr.c | 6 ++---- fs/afs/file.c | 3 +-- fs/afs/internal.h | 1 - fs/afs/write.c | 2 ++ fs/netfs/Makefile | 1 - fs/netfs/buffered_write.c | 45 ++++++++++++++++++++++++--------------------- fs/netfs/direct_write.c | 26 ++++++++++++++------------ fs/netfs/internal.h | 21 ++++++--------------- fs/netfs/write_collect.c | 8 ++++---- fs/netfs/write_issue.c | 18 +++++++++--------- include/linux/netfs.h | 9 --------- 11 files changed, 62 insertions(+), 78 deletions(-) (limited to 'include/linux') diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index 07d03efdd594..4845e655bc39 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -60,6 +60,7 @@ static void v9fs_issue_write(struct netfs_io_subrequest *subreq) netfs_write_subrequest_terminated(subreq, len ?: err, false); } +#if 0 // TODO: Remove static void v9fs_upload_to_server(struct netfs_io_subrequest *subreq) { struct p9_fid *fid = subreq->rreq->netfs_priv; @@ -91,6 +92,7 @@ static void v9fs_create_write_requests(struct netfs_io_request *wreq, loff_t sta if (subreq) netfs_queue_write_request(subreq); } +#endif /** * v9fs_issue_read - Issue a read from 9P @@ -121,18 +123,15 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file) { struct p9_fid *fid; bool writing = (rreq->origin == NETFS_READ_FOR_WRITE || - rreq->origin == NETFS_WRITEBACK || rreq->origin == NETFS_WRITETHROUGH || rreq->origin == NETFS_UNBUFFERED_WRITE || rreq->origin == NETFS_DIO_WRITE); -#if 0 // TODO: Cut over if (rreq->origin == NETFS_WRITEBACK) return 0; /* We don't get the write handle until we find we * have actually dirty data and not just * copy-to-cache data. 
*/ -#endif if (file) { fid = file->private_data; @@ -179,7 +178,6 @@ const struct netfs_request_ops v9fs_req_ops = { .issue_read = v9fs_issue_read, .begin_writeback = v9fs_begin_writeback, .issue_write = v9fs_issue_write, - .create_write_requests = v9fs_create_write_requests, }; const struct address_space_operations v9fs_addr_operations = { diff --git a/fs/afs/file.c b/fs/afs/file.c index db9ebae84fa2..8f983e3ecae7 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -353,7 +353,7 @@ static int afs_init_request(struct netfs_io_request *rreq, struct file *file) if (file) rreq->netfs_priv = key_get(afs_file_key(file)); rreq->rsize = 256 * 1024; - rreq->wsize = 256 * 1024; + rreq->wsize = 256 * 1024 * 1024; return 0; } @@ -399,7 +399,6 @@ const struct netfs_request_ops afs_req_ops = { .issue_read = afs_issue_read, .update_i_size = afs_update_i_size, .invalidate_cache = afs_netfs_invalidate_cache, - .create_write_requests = afs_create_write_requests, .begin_writeback = afs_begin_writeback, .prepare_write = afs_prepare_write, .issue_write = afs_issue_write, diff --git a/fs/afs/internal.h b/fs/afs/internal.h index dcf0ae0323d3..887245f9336d 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -1605,7 +1605,6 @@ extern int afs_writepages(struct address_space *, struct writeback_control *); extern int afs_fsync(struct file *, loff_t, loff_t, int); extern vm_fault_t afs_page_mkwrite(struct vm_fault *vmf); extern void afs_prune_wb_keys(struct afs_vnode *); -void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len); /* * xattr.c diff --git a/fs/afs/write.c b/fs/afs/write.c index 34595f482718..35db74627563 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -156,6 +156,7 @@ try_next_key: return afs_put_operation(op); } +#if 0 // TODO: Remove static void afs_upload_to_server(struct netfs_io_subrequest *subreq) { struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode); @@ -193,6 +194,7 @@ void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size if (subreq) netfs_queue_write_request(subreq); } +#endif /* * Writeback calls this when it finds a folio that needs uploading. This isn't diff --git a/fs/netfs/Makefile b/fs/netfs/Makefile index 1eb86e34b5a9..8e6781e0b10b 100644 --- a/fs/netfs/Makefile +++ b/fs/netfs/Makefile @@ -11,7 +11,6 @@ netfs-y := \ main.o \ misc.o \ objects.o \ - output.o \ write_collect.o \ write_issue.o diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c index 33ea4c20e7e7..ee8d9e3216bd 100644 --- a/fs/netfs/buffered_write.c +++ b/fs/netfs/buffered_write.c @@ -26,8 +26,6 @@ enum netfs_how_to_modify { NETFS_FLUSH_CONTENT, /* Flush incompatible content. */ }; -static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq); - static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group) { void *priv = folio_get_private(folio); @@ -180,7 +178,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter, }; struct netfs_io_request *wreq = NULL; struct netfs_folio *finfo; - struct folio *folio; + struct folio *folio, *writethrough = NULL; enum netfs_how_to_modify howto; enum netfs_folio_trace trace; unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 
0: BDP_ASYNC; @@ -209,7 +207,6 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter, } if (!is_sync_kiocb(iocb)) wreq->iocb = iocb; - wreq->cleanup = netfs_cleanup_buffered_write; netfs_stat(&netfs_n_wh_writethrough); } else { netfs_stat(&netfs_n_wh_buffered_write); @@ -253,6 +250,16 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter, offset = pos & (flen - 1); part = min_t(size_t, flen - offset, part); + /* Wait for writeback to complete. The writeback engine owns + * the info in folio->private and may change it until it + * removes the WB mark. + */ + if (folio_get_private(folio) && + folio_wait_writeback_killable(folio)) { + ret = written ? -EINTR : -ERESTARTSYS; + goto error_folio_unlock; + } + if (signal_pending(current)) { ret = written ? -EINTR : -ERESTARTSYS; goto error_folio_unlock; @@ -327,6 +334,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter, maybe_trouble = true; iov_iter_revert(iter, copied); copied = 0; + folio_unlock(folio); goto retry; } netfs_set_group(folio, netfs_group); @@ -382,23 +390,14 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter, if (likely(!wreq)) { folio_mark_dirty(folio); + folio_unlock(folio); } else { - if (folio_test_dirty(folio)) - /* Sigh. mmap. */ - folio_clear_dirty_for_io(folio); - /* We make multiple writes to the folio... */ - if (!folio_test_writeback(folio)) { - folio_start_writeback(folio); - if (wreq->iter.count == 0) - trace_netfs_folio(folio, netfs_folio_trace_wthru); - else - trace_netfs_folio(folio, netfs_folio_trace_wthru_plus); - } - netfs_advance_writethrough(wreq, copied, - offset + copied == flen); + netfs_advance_writethrough(wreq, &wbc, folio, copied, + offset + copied == flen, + &writethrough); + /* Folio unlocked */ } retry: - folio_unlock(folio); folio_put(folio); folio = NULL; @@ -407,7 +406,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter, out: if (unlikely(wreq)) { - ret2 = netfs_end_writethrough(wreq, iocb); + ret2 = netfs_end_writethrough(wreq, &wbc, writethrough); wbc_detach_inode(&wbc); if (ret2 == -EIOCBQUEUED) return ret2; @@ -529,11 +528,13 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr sb_start_pagefault(inode->i_sb); - if (folio_wait_writeback_killable(folio)) + if (folio_lock_killable(folio) < 0) goto out; - if (folio_lock_killable(folio) < 0) + if (folio_wait_writeback_killable(folio)) { + ret = VM_FAULT_LOCKED; goto out; + } /* Can we see a streaming write here? */ if (WARN_ON(!folio_test_uptodate(folio))) { @@ -573,6 +574,7 @@ out: } EXPORT_SYMBOL(netfs_page_mkwrite); +#if 0 // TODO: Remove /* * Kill all the pages in the given range */ @@ -1199,3 +1201,4 @@ out: return ret; } EXPORT_SYMBOL(netfs_writepages); +#endif diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c index 36b6db504500..608ba6416919 100644 --- a/fs/netfs/direct_write.c +++ b/fs/netfs/direct_write.c @@ -34,6 +34,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov unsigned long long start = iocb->ki_pos; unsigned long long end = start + iov_iter_count(iter); ssize_t ret, n; + size_t len = iov_iter_count(iter); bool async = !is_sync_kiocb(iocb); _enter(""); @@ -46,13 +47,17 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov _debug("uw %llx-%llx", start, end); - wreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp, - start, end - start, - iocb->ki_flags & IOCB_DIRECT ? 
- NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE); + wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start, + iocb->ki_flags & IOCB_DIRECT ? + NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE); if (IS_ERR(wreq)) return PTR_ERR(wreq); + wreq->io_streams[0].avail = true; + trace_netfs_write(wreq, (iocb->ki_flags & IOCB_DIRECT ? + netfs_write_trace_dio_write : + netfs_write_trace_unbuffered_write)); + { /* If this is an async op and we're not using a bounce buffer, * we have to save the source buffer as the iterator is only @@ -63,7 +68,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov * request. */ if (async || user_backed_iter(iter)) { - n = netfs_extract_user_iter(iter, wreq->len, &wreq->iter, 0); + n = netfs_extract_user_iter(iter, len, &wreq->iter, 0); if (n < 0) { ret = n; goto out; @@ -71,7 +76,6 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov wreq->direct_bv = (struct bio_vec *)wreq->iter.bvec; wreq->direct_bv_count = n; wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter); - wreq->len = iov_iter_count(&wreq->iter); } else { wreq->iter = *iter; } @@ -79,6 +83,8 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov wreq->io_iter = wreq->iter; } + __set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags); + /* Copy the data into the bounce buffer and encrypt it. */ // TODO @@ -87,10 +93,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov if (async) wreq->iocb = iocb; wreq->cleanup = netfs_cleanup_dio_write; - ret = netfs_begin_write(wreq, is_sync_kiocb(iocb), - iocb->ki_flags & IOCB_DIRECT ? - netfs_write_trace_dio_write : - netfs_write_trace_unbuffered_write); + ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), iov_iter_count(&wreq->io_iter)); if (ret < 0) { _debug("begin = %zd", ret); goto out; @@ -100,9 +103,8 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip); wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS, TASK_UNINTERRUPTIBLE); - + smp_rmb(); /* Read error/transferred after RIP flag */ ret = wreq->error; - _debug("waited = %zd", ret); if (ret == 0) { ret = wreq->transferred; iocb->ki_pos += ret; diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h index 5d3f74a70fa7..95e281a8af78 100644 --- a/fs/netfs/internal.h +++ b/fs/netfs/internal.h @@ -92,15 +92,6 @@ static inline void netfs_see_request(struct netfs_io_request *rreq, trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what); } -/* - * output.c - */ -int netfs_begin_write(struct netfs_io_request *wreq, bool may_wait, - enum netfs_write_trace what); -struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len); -int netfs_advance_writethrough(struct netfs_io_request *wreq, size_t copied, bool to_page_end); -int netfs_end_writethrough(struct netfs_io_request *wreq, struct kiocb *iocb); - /* * stats.c */ @@ -172,12 +163,12 @@ void netfs_reissue_write(struct netfs_io_stream *stream, int netfs_advance_write(struct netfs_io_request *wreq, struct netfs_io_stream *stream, loff_t start, size_t len, bool to_eof); -struct netfs_io_request *new_netfs_begin_writethrough(struct kiocb *iocb, size_t len); -int new_netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, - struct folio *folio, size_t copied, bool to_page_end, - struct folio **writethrough_cache); -int new_netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control 
*wbc, - struct folio *writethrough_cache); +struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len); +int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, + struct folio *folio, size_t copied, bool to_page_end, + struct folio **writethrough_cache); +int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, + struct folio *writethrough_cache); int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len); /* diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c index b8c1d3ca724a..f14c08bf605d 100644 --- a/fs/netfs/write_collect.c +++ b/fs/netfs/write_collect.c @@ -709,7 +709,7 @@ void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async) } /** - * new_netfs_write_subrequest_terminated - Note the termination of a write operation. + * netfs_write_subrequest_terminated - Note the termination of a write operation. * @_op: The I/O request that has terminated. * @transferred_or_error: The amount of data transferred or an error code. * @was_async: The termination was asynchronous @@ -731,8 +731,8 @@ void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async) * Note that %_op is a void* so that the function can be passed to * kiocb::term_func without the need for a casting wrapper. */ -void new_netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error, - bool was_async) +void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error, + bool was_async) { struct netfs_io_subrequest *subreq = _op; struct netfs_io_request *wreq = subreq->rreq; @@ -800,4 +800,4 @@ void new_netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_err netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated); } -EXPORT_SYMBOL(new_netfs_write_subrequest_terminated); +EXPORT_SYMBOL(netfs_write_subrequest_terminated); diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c index 7ea86e33382c..e190043bc0da 100644 --- a/fs/netfs/write_issue.c +++ b/fs/netfs/write_issue.c @@ -494,8 +494,8 @@ static int netfs_write_folio(struct netfs_io_request *wreq, /* * Write some of the pending data back to the server */ -int new_netfs_writepages(struct address_space *mapping, - struct writeback_control *wbc) +int netfs_writepages(struct address_space *mapping, + struct writeback_control *wbc) { struct netfs_inode *ictx = netfs_inode(mapping->host); struct netfs_io_request *wreq = NULL; @@ -556,12 +556,12 @@ out: _leave(" = %d", error); return error; } -EXPORT_SYMBOL(new_netfs_writepages); +EXPORT_SYMBOL(netfs_writepages); /* * Begin a write operation for writing through the pagecache. */ -struct netfs_io_request *new_netfs_begin_writethrough(struct kiocb *iocb, size_t len) +struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len) { struct netfs_io_request *wreq = NULL; struct netfs_inode *ictx = netfs_inode(file_inode(iocb->ki_filp)); @@ -586,9 +586,9 @@ struct netfs_io_request *new_netfs_begin_writethrough(struct kiocb *iocb, size_t * to the request. If we've added more than wsize then we need to create a new * subrequest. 
*/ -int new_netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, - struct folio *folio, size_t copied, bool to_page_end, - struct folio **writethrough_cache) +int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, + struct folio *folio, size_t copied, bool to_page_end, + struct folio **writethrough_cache) { _enter("R=%x ic=%zu ws=%u cp=%zu tp=%u", wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end); @@ -618,8 +618,8 @@ int new_netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeba /* * End a write operation used when writing through the pagecache. */ -int new_netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, - struct folio *writethrough_cache) +int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc, + struct folio *writethrough_cache) { struct netfs_inode *ictx = netfs_inode(wreq->inode); int ret; diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 42dba05a428b..c2ba364041b0 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -303,8 +303,6 @@ struct netfs_request_ops { void (*update_i_size)(struct inode *inode, loff_t i_size); /* Write request handling */ - void (*create_write_requests)(struct netfs_io_request *wreq, - loff_t start, size_t len); void (*begin_writeback)(struct netfs_io_request *wreq); void (*prepare_write)(struct netfs_io_subrequest *subreq); void (*issue_write)(struct netfs_io_subrequest *subreq); @@ -409,8 +407,6 @@ int netfs_write_begin(struct netfs_inode *, struct file *, struct folio **, void **fsdata); int netfs_writepages(struct address_space *mapping, struct writeback_control *wbc); -int new_netfs_writepages(struct address_space *mapping, - struct writeback_control *wbc); bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio); int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc); void netfs_clear_inode_writeback(struct inode *inode, const void *aux); @@ -431,14 +427,9 @@ ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len, iov_iter_extraction_t extraction_flags); size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset, size_t max_size, size_t max_segs); -struct netfs_io_subrequest *netfs_create_write_request( - struct netfs_io_request *wreq, enum netfs_io_source dest, - loff_t start, size_t len, work_func_t worker); void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq); void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error, bool was_async); -void new_netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error, - bool was_async); void netfs_queue_write_request(struct netfs_io_subrequest *subreq); int netfs_start_io_read(struct inode *inode); -- cgit From 1ecb146f7cd82e44277de448d4f736b98741f3cb Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 15 Mar 2024 15:15:44 +0000 Subject: netfs, afs: Use writeback retry to deal with alternate keys Use a hook in the new writeback code's retry algorithm to rotate the keys once all the outstanding subreqs have failed rather than doing it separately on each subreq. 
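The ->retry_request() hook runs once per retry pass with the failed subrequests still on the stream's list, so a filesystem can rotate credentials for the whole stream in one place. A sketch of the expected shape, modelled on the afs change below (illustrative only; myfs_rotate_key() is a hypothetical helper, not part of this series):

	/* Hypothetical helper: stash the next usable key in wreq->netfs_priv and
	 * return false once there are no more keys to try.
	 */
	static bool myfs_rotate_key(struct netfs_io_request *wreq);

	static void myfs_retry_request(struct netfs_io_request *wreq,
				       struct netfs_io_stream *stream)
	{
		struct netfs_io_subrequest *subreq =
			list_first_entry(&stream->subrequests,
					 struct netfs_io_subrequest, rreq_link);

		/* Only rotate keys for permission-type failures. */
		switch (subreq->error) {
		case -EACCES:
		case -EPERM:
		case -ENOKEY:
		case -EKEYEXPIRED:
		case -EKEYREJECTED:
		case -EKEYREVOKED:
			if (!myfs_rotate_key(wreq))
				stream->failed = true; /* No keys left: fail the stream. */
			break;
		}
	}

If the hook marks the stream failed, netfs_retry_write_stream() gives up on that stream rather than requeuing its subrequests; this is what allows the old per-subrequest try_next_key loop in afs_store_data() to go away.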
Signed-off-by: David Howells Reviewed-by: Jeff Layton cc: Marc Dionne cc: linux-afs@lists.infradead.org cc: netfs@lists.linux.dev cc: linux-fsdevel@vger.kernel.org --- fs/afs/file.c | 1 + fs/afs/internal.h | 1 + fs/afs/write.c | 191 +++++++++++++++++++++++------------------------ fs/netfs/write_collect.c | 9 ++- include/linux/netfs.h | 2 + 5 files changed, 104 insertions(+), 100 deletions(-) (limited to 'include/linux') diff --git a/fs/afs/file.c b/fs/afs/file.c index 8f983e3ecae7..c3f0c45ae9a9 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -368,6 +368,7 @@ static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len, static void afs_free_request(struct netfs_io_request *rreq) { key_put(rreq->netfs_priv); + afs_put_wb_key(rreq->netfs_priv2); } static void afs_update_i_size(struct inode *inode, loff_t new_i_size) diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 887245f9336d..6e1d3c4daf72 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -1601,6 +1601,7 @@ extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *); void afs_prepare_write(struct netfs_io_subrequest *subreq); void afs_issue_write(struct netfs_io_subrequest *subreq); void afs_begin_writeback(struct netfs_io_request *wreq); +void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *stream); extern int afs_writepages(struct address_space *, struct writeback_control *); extern int afs_fsync(struct file *, loff_t, loff_t, int); extern vm_fault_t afs_page_mkwrite(struct vm_fault *vmf); diff --git a/fs/afs/write.c b/fs/afs/write.c index b8505a8b622a..e959640694c2 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -29,43 +29,39 @@ static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsign /* * Find a key to use for the writeback. We cached the keys used to author the - * writes on the vnode. *_wbk will contain the last writeback key used or NULL - * and we need to start from there if it's set. + * writes on the vnode. wreq->netfs_priv2 will contain the last writeback key + * record used or NULL and we need to start from there if it's set. + * wreq->netfs_priv will be set to the key itself or NULL. 
*/ -static int afs_get_writeback_key(struct afs_vnode *vnode, - struct afs_wb_key **_wbk) +static void afs_get_writeback_key(struct netfs_io_request *wreq) { - struct afs_wb_key *wbk = NULL; - struct list_head *p; - int ret = -ENOKEY, ret2; + struct afs_wb_key *wbk, *old = wreq->netfs_priv2; + struct afs_vnode *vnode = AFS_FS_I(wreq->inode); + + key_put(wreq->netfs_priv); + wreq->netfs_priv = NULL; + wreq->netfs_priv2 = NULL; spin_lock(&vnode->wb_lock); - if (*_wbk) - p = (*_wbk)->vnode_link.next; + if (old) + wbk = list_next_entry(old, vnode_link); else - p = vnode->wb_keys.next; + wbk = list_first_entry(&vnode->wb_keys, struct afs_wb_key, vnode_link); - while (p != &vnode->wb_keys) { - wbk = list_entry(p, struct afs_wb_key, vnode_link); + list_for_each_entry_from(wbk, &vnode->wb_keys, vnode_link) { _debug("wbk %u", key_serial(wbk->key)); - ret2 = key_validate(wbk->key); - if (ret2 == 0) { + if (key_validate(wbk->key) == 0) { refcount_inc(&wbk->usage); + wreq->netfs_priv = key_get(wbk->key); + wreq->netfs_priv2 = wbk; _debug("USE WB KEY %u", key_serial(wbk->key)); break; } - - wbk = NULL; - if (ret == -ENOKEY) - ret = ret2; - p = p->next; } spin_unlock(&vnode->wb_lock); - if (*_wbk) - afs_put_wb_key(*_wbk); - *_wbk = wbk; - return 0; + + afs_put_wb_key(old); } static void afs_store_data_success(struct afs_operation *op) @@ -88,72 +84,91 @@ static const struct afs_operation_ops afs_store_data_operation = { }; /* - * write to a file + * Prepare a subrequest to write to the server. This sets the max_len + * parameter. */ -static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos) +void afs_prepare_write(struct netfs_io_subrequest *subreq) { + //if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) + // subreq->max_len = 512 * 1024; + //else + subreq->max_len = 256 * 1024 * 1024; +} + +/* + * Issue a subrequest to write to the server. 
+ */ +static void afs_issue_write_worker(struct work_struct *work) +{ + struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work); + struct netfs_io_request *wreq = subreq->rreq; struct afs_operation *op; - struct afs_wb_key *wbk = NULL; - loff_t size = iov_iter_count(iter); + struct afs_vnode *vnode = AFS_FS_I(wreq->inode); + unsigned long long pos = subreq->start + subreq->transferred; + size_t len = subreq->len - subreq->transferred; int ret = -ENOKEY; - _enter("%s{%llx:%llu.%u},%llx,%llx", + _enter("R=%x[%x],%s{%llx:%llu.%u},%llx,%zx", + wreq->debug_id, subreq->debug_index, vnode->volume->name, vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique, - size, pos); + pos, len); - ret = afs_get_writeback_key(vnode, &wbk); - if (ret) { - _leave(" = %d [no keys]", ret); - return ret; - } +#if 0 // Error injection + if (subreq->debug_index == 3) + return netfs_write_subrequest_terminated(subreq, -ENOANO, false); - op = afs_alloc_operation(wbk->key, vnode->volume); - if (IS_ERR(op)) { - afs_put_wb_key(wbk); - return -ENOMEM; + if (!test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) { + set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); + return netfs_write_subrequest_terminated(subreq, -EAGAIN, false); } +#endif + + op = afs_alloc_operation(wreq->netfs_priv, vnode->volume); + if (IS_ERR(op)) + return netfs_write_subrequest_terminated(subreq, -EAGAIN, false); afs_op_set_vnode(op, 0, vnode); - op->file[0].dv_delta = 1; + op->file[0].dv_delta = 1; op->file[0].modification = true; - op->store.pos = pos; - op->store.size = size; - op->flags |= AFS_OPERATION_UNINTR; - op->ops = &afs_store_data_operation; + op->store.pos = pos; + op->store.size = len; + op->flags |= AFS_OPERATION_UNINTR; + op->ops = &afs_store_data_operation; -try_next_key: afs_begin_vnode_operation(op); - op->store.write_iter = iter; - op->store.i_size = max(pos + size, vnode->netfs.remote_i_size); - op->mtime = inode_get_mtime(&vnode->netfs.inode); + op->store.write_iter = &subreq->io_iter; + op->store.i_size = umax(pos + len, vnode->netfs.remote_i_size); + op->mtime = inode_get_mtime(&vnode->netfs.inode); afs_wait_for_operation(op); - - switch (afs_op_error(op)) { + ret = afs_put_operation(op); + switch (ret) { case -EACCES: case -EPERM: case -ENOKEY: case -EKEYEXPIRED: case -EKEYREJECTED: case -EKEYREVOKED: - _debug("next"); - - ret = afs_get_writeback_key(vnode, &wbk); - if (ret == 0) { - key_put(op->key); - op->key = key_get(wbk->key); - goto try_next_key; - } + /* If there are more keys we can try, use the retry algorithm + * to rotate the keys. + */ + if (wreq->netfs_priv2) + set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); break; } - afs_put_wb_key(wbk); - _leave(" = %d", afs_op_error(op)); - return afs_put_operation(op); + netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len, false); +} + +void afs_issue_write(struct netfs_io_subrequest *subreq) +{ + subreq->work.func = afs_issue_write_worker; + if (!queue_work(system_unbound_wq, &subreq->work)) + WARN_ON_ONCE(1); } /* @@ -162,52 +177,32 @@ try_next_key: */ void afs_begin_writeback(struct netfs_io_request *wreq) { + afs_get_writeback_key(wreq); wreq->io_streams[0].avail = true; } /* - * Prepare a subrequest to write to the server. This sets the max_len - * parameter. - */ -void afs_prepare_write(struct netfs_io_subrequest *subreq) -{ - //if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) - // subreq->max_len = 512 * 1024; - //else - subreq->max_len = 256 * 1024 * 1024; -} - -/* - * Issue a subrequest to write to the server. 
+ * Prepare to retry the writes in request. Use this to try rotating the + * available writeback keys. */ -static void afs_issue_write_worker(struct work_struct *work) +void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *stream) { - struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work); - struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode); - ssize_t ret; - - _enter("%x[%x],%zx", - subreq->rreq->debug_id, subreq->debug_index, subreq->io_iter.count); - -#if 0 // Error injection - if (subreq->debug_index == 3) - return netfs_write_subrequest_terminated(subreq, -ENOANO, false); + struct netfs_io_subrequest *subreq = + list_first_entry(&stream->subrequests, + struct netfs_io_subrequest, rreq_link); - if (!test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) { - set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags); - return netfs_write_subrequest_terminated(subreq, -EAGAIN, false); + switch (subreq->error) { + case -EACCES: + case -EPERM: + case -ENOKEY: + case -EKEYEXPIRED: + case -EKEYREJECTED: + case -EKEYREVOKED: + afs_get_writeback_key(wreq); + if (!wreq->netfs_priv) + stream->failed = true; + break; } -#endif - - ret = afs_store_data(vnode, &subreq->io_iter, subreq->start); - netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len, false); -} - -void afs_issue_write(struct netfs_io_subrequest *subreq) -{ - subreq->work.func = afs_issue_write_worker; - if (!queue_work(system_unbound_wq, &subreq->work)) - WARN_ON_ONCE(1); } /* diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c index f14c08bf605d..60112e4b2c5e 100644 --- a/fs/netfs/write_collect.c +++ b/fs/netfs/write_collect.c @@ -163,6 +163,13 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq, _enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr); + if (list_empty(&stream->subrequests)) + return; + + if (stream->source == NETFS_UPLOAD_TO_SERVER && + wreq->netfs_ops->retry_request) + wreq->netfs_ops->retry_request(wreq, stream); + if (unlikely(stream->failed)) return; @@ -182,8 +189,6 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq, return; } - if (list_empty(&stream->subrequests)) - return; next = stream->subrequests.next; do { diff --git a/include/linux/netfs.h b/include/linux/netfs.h index c2ba364041b0..298552f5122c 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -235,6 +235,7 @@ struct netfs_io_request { struct iov_iter iter; /* Unencrypted-side iterator */ struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */ void *netfs_priv; /* Private data for the netfs */ + void *netfs_priv2; /* Private data for the netfs */ struct bio_vec *direct_bv; /* DIO buffer list (when handling iovec-iter) */ unsigned int direct_bv_count; /* Number of elements in direct_bv[] */ unsigned int debug_id; @@ -306,6 +307,7 @@ struct netfs_request_ops { void (*begin_writeback)(struct netfs_io_request *wreq); void (*prepare_write)(struct netfs_io_subrequest *subreq); void (*issue_write)(struct netfs_io_subrequest *subreq); + void (*retry_request)(struct netfs_io_request *wreq, struct netfs_io_stream *stream); void (*invalidate_cache)(struct netfs_io_request *wreq); }; -- cgit From 69c3c023af25edb5433a2db824d3e7cc328f0183 Mon Sep 17 00:00:00 2001 From: David Howells Date: Fri, 6 Oct 2023 18:16:15 +0100 Subject: cifs: Implement netfslib hooks Provide implementation of the netfslib hooks that will be used by netfslib to ask cifs to set up and perform operations. 
Of particular note are (*) cifs_clamp_length() - This is used to negotiate the size of the next subrequest in a read request, taking into account the credit available and the rsize. The credits are attached to the subrequest. (*) cifs_req_issue_read() - This is used to issue a subrequest that has been set up and clamped. (*) cifs_prepare_write() - This prepares to fill a subrequest by picking a channel, reopening the file and requesting credits so that we can set the maximum size of the subrequest and also sets the maximum number of segments if we're doing RDMA. (*) cifs_issue_write() - This releases any unneeded credits and issues an asynchronous data write for the contiguous slice of file covered by the subrequest. This should possibly be folded in to all ->async_writev() ops and that called directly. (*) cifs_begin_writeback() - This gets the cached writable handle through which we do writeback (this does not affect writethrough, unbuffered or direct writes). At this point, cifs is not wired up to actually *use* netfslib; that will be done in a subsequent patch. Signed-off-by: David Howells cc: Steve French cc: Shyam Prasad N cc: Rohith Surabattula cc: Jeff Layton cc: linux-cifs@vger.kernel.org cc: netfs@lists.linux.dev cc: linux-fsdevel@vger.kernel.org cc: linux-mm@kvack.org --- fs/netfs/buffered_write.c | 6 + fs/smb/client/Kconfig | 1 + fs/smb/client/cifsfs.c | 2 +- fs/smb/client/cifsfs.h | 1 + fs/smb/client/cifsglob.h | 28 ++-- fs/smb/client/file.c | 315 +++++++++++++++++++++++++++++++++++++++++++ include/linux/netfs.h | 1 + include/trace/events/netfs.h | 1 + 8 files changed, 345 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c index 825e6632ee4f..1121601536d1 100644 --- a/fs/netfs/buffered_write.c +++ b/fs/netfs/buffered_write.c @@ -405,6 +405,9 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter, } while (iov_iter_count(iter)); out: + if (likely(written) && ctx->ops->post_modify) + ctx->ops->post_modify(inode); + if (unlikely(wreq)) { ret2 = netfs_end_writethrough(wreq, &wbc, writethrough); wbc_detach_inode(&wbc); @@ -521,6 +524,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr struct folio *folio = page_folio(vmf->page); struct file *file = vmf->vma->vm_file; struct inode *inode = file_inode(file); + struct netfs_inode *ictx = netfs_inode(inode); vm_fault_t ret = VM_FAULT_RETRY; int err; @@ -567,6 +571,8 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr trace_netfs_folio(folio, netfs_folio_trace_mkwrite); netfs_set_group(folio, netfs_group); file_update_time(file); + if (ictx->ops->post_modify) + ictx->ops->post_modify(inode); ret = VM_FAULT_LOCKED; out: sb_end_pagefault(inode->i_sb); diff --git a/fs/smb/client/Kconfig b/fs/smb/client/Kconfig index 2927bd174a88..2517dc242386 100644 --- a/fs/smb/client/Kconfig +++ b/fs/smb/client/Kconfig @@ -2,6 +2,7 @@ config CIFS tristate "SMB3 and CIFS support (advanced network filesystem)" depends on INET + select NETFS_SUPPORT select NLS select NLS_UCS2_UTILS select CRYPTO diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c index 763d17870e0b..8f7165567be7 100644 --- a/fs/smb/client/cifsfs.c +++ b/fs/smb/client/cifsfs.c @@ -1758,7 +1758,7 @@ static int cifs_init_netfs(void) { cifs_io_request_cachep = kmem_cache_create("cifs_io_request", - sizeof(struct netfs_io_request), 0, + sizeof(struct cifs_io_request), 0, SLAB_HWCACHE_ALIGN, NULL); if (!cifs_io_request_cachep) 
goto nomem_req; diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h index 1acf6bfc06de..922c10d7cfdd 100644 --- a/fs/smb/client/cifsfs.h +++ b/fs/smb/client/cifsfs.h @@ -84,6 +84,7 @@ extern const struct inode_operations cifs_namespace_inode_operations; /* Functions related to files and directories */ +extern const struct netfs_request_ops cifs_req_ops; extern const struct file_operations cifs_file_ops; extern const struct file_operations cifs_file_direct_ops; /* if directio mnt */ extern const struct file_operations cifs_file_strict_ops; /* if strictio mnt */ diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h index 611f59c6d2c0..4e9033b2f191 100644 --- a/fs/smb/client/cifsglob.h +++ b/fs/smb/client/cifsglob.h @@ -1491,15 +1491,24 @@ struct cifs_aio_ctx { bool direct_io; }; +struct cifs_io_request { + struct netfs_io_request rreq; + struct cifsFileInfo *cfile; +}; + /* asynchronous read support */ struct cifs_io_subrequest { - struct netfs_io_subrequest subreq; - struct cifsFileInfo *cfile; - struct address_space *mapping; - struct cifs_aio_ctx *ctx; + union { + struct netfs_io_subrequest subreq; + struct netfs_io_request *rreq; + struct cifs_io_request *req; + }; ssize_t got_bytes; pid_t pid; + unsigned int xid; int result; + bool have_xid; + bool replay; struct kvec iov[2]; struct TCP_Server_Info *server; #ifdef CONFIG_CIFS_SMB_DIRECT @@ -1507,15 +1516,16 @@ struct cifs_io_subrequest { #endif struct cifs_credits credits; - enum writeback_sync_modes sync_mode; - bool uncached; - bool replay; - struct bio_vec *bv; - // TODO: Remove following elements struct list_head list; struct completion done; struct work_struct work; + struct cifsFileInfo *cfile; + struct address_space *mapping; + struct cifs_aio_ctx *ctx; + enum writeback_sync_modes sync_mode; + bool uncached; + struct bio_vec *bv; }; /* diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c index 331446fc3d22..c9c33b3a54f3 100644 --- a/fs/smb/client/file.c +++ b/fs/smb/client/file.c @@ -36,6 +36,321 @@ #include "fs_context.h" #include "cifs_ioctl.h" #include "cached_dir.h" +#include + +static int cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush); + +/* + * Prepare a subrequest to upload to the server. We need to allocate credits + * so that we know the maximum amount of data that we can include in it. + */ +static void cifs_prepare_write(struct netfs_io_subrequest *subreq) +{ + struct cifs_io_subrequest *wdata = + container_of(subreq, struct cifs_io_subrequest, subreq); + struct cifs_io_request *req = wdata->req; + struct TCP_Server_Info *server; + struct cifsFileInfo *open_file = req->cfile; + size_t wsize = req->rreq.wsize; + int rc; + + if (!wdata->have_xid) { + wdata->xid = get_xid(); + wdata->have_xid = true; + } + + server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses); + wdata->server = server; + +retry: + if (open_file->invalidHandle) { + rc = cifs_reopen_file(open_file, false); + if (rc < 0) { + if (rc == -EAGAIN) + goto retry; + subreq->error = rc; + return netfs_prepare_write_failed(subreq); + } + } + + rc = server->ops->wait_mtu_credits(server, wsize, &wdata->subreq.max_len, + &wdata->credits); + if (rc < 0) { + subreq->error = rc; + return netfs_prepare_write_failed(subreq); + } + +#ifdef CONFIG_CIFS_SMB_DIRECT + if (server->smbd_conn) + subreq->max_nr_segs = server->smbd_conn->max_frmr_depth; +#endif +} + +/* + * Issue a subrequest to upload to the server. 
+ */ +static void cifs_issue_write(struct netfs_io_subrequest *subreq) +{ + struct cifs_io_subrequest *wdata = + container_of(subreq, struct cifs_io_subrequest, subreq); + struct cifs_sb_info *sbi = CIFS_SB(subreq->rreq->inode->i_sb); + int rc; + + if (cifs_forced_shutdown(sbi)) { + rc = -EIO; + goto fail; + } + + rc = adjust_credits(wdata->server, &wdata->credits, wdata->subreq.len); + if (rc) + goto fail; + + rc = -EAGAIN; + if (wdata->req->cfile->invalidHandle) + goto fail; + + wdata->server->ops->async_writev(wdata); +out: + return; + +fail: + if (rc == -EAGAIN) + trace_netfs_sreq(subreq, netfs_sreq_trace_retry); + else + trace_netfs_sreq(subreq, netfs_sreq_trace_fail); + add_credits_and_wake_if(wdata->server, &wdata->credits, 0); + netfs_write_subrequest_terminated(wdata, rc, false); + goto out; +} + +/* + * Split the read up according to how many credits we can get for each piece. + * It's okay to sleep here if we need to wait for more credit to become + * available. + * + * We also choose the server and allocate an operation ID to be cleaned up + * later. + */ +static bool cifs_clamp_length(struct netfs_io_subrequest *subreq) +{ + struct netfs_io_request *rreq = subreq->rreq; + struct TCP_Server_Info *server; + struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq); + struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq); + struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb); + size_t rsize = 0; + int rc; + + rdata->xid = get_xid(); + rdata->have_xid = true; + + server = cifs_pick_channel(tlink_tcon(req->cfile->tlink)->ses); + rdata->server = server; + + if (cifs_sb->ctx->rsize == 0) + cifs_sb->ctx->rsize = + server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink), + cifs_sb->ctx); + + + rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize, + &rdata->credits); + if (rc) { + subreq->error = rc; + return false; + } + + subreq->len = min_t(size_t, subreq->len, rsize); +#ifdef CONFIG_CIFS_SMB_DIRECT + if (server->smbd_conn) + subreq->max_nr_segs = server->smbd_conn->max_frmr_depth; +#endif + return true; +} + +/* + * Issue a read operation on behalf of the netfs helper functions. We're asked + * to make a read of a certain size at a point in the file. We are permitted + * to only read a portion of that, but as long as we read something, the netfs + * helper will call us again so that we can issue another read. + */ +static void cifs_req_issue_read(struct netfs_io_subrequest *subreq) +{ + struct netfs_io_request *rreq = subreq->rreq; + struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq); + struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq); + struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb); + pid_t pid; + int rc = 0; + + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) + pid = req->cfile->pid; + else + pid = current->tgid; // Ummm... 
This may be a workqueue + + cifs_dbg(FYI, "%s: op=%08x[%x] mapping=%p len=%zu/%zu\n", + __func__, rreq->debug_id, subreq->debug_index, rreq->mapping, + subreq->transferred, subreq->len); + + if (req->cfile->invalidHandle) { + do { + rc = cifs_reopen_file(req->cfile, true); + } while (rc == -EAGAIN); + if (rc) + goto out; + } + + __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags); + rdata->pid = pid; + + rc = adjust_credits(rdata->server, &rdata->credits, rdata->subreq.len); + if (!rc) { + if (rdata->req->cfile->invalidHandle) + rc = -EAGAIN; + else + rc = rdata->server->ops->async_readv(rdata); + } + +out: + if (rc) + netfs_subreq_terminated(subreq, rc, false); +} + +/* + * Writeback calls this when it finds a folio that needs uploading. This isn't + * called if writeback only has copy-to-cache to deal with. + */ +static void cifs_begin_writeback(struct netfs_io_request *wreq) +{ + struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq); + int ret; + + ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile); + if (ret) { + cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret); + return; + } + + wreq->io_streams[0].avail = true; +} + +/* + * Initialise a request. + */ +static int cifs_init_request(struct netfs_io_request *rreq, struct file *file) +{ + struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq); + struct cifs_sb_info *cifs_sb = CIFS_SB(rreq->inode->i_sb); + struct cifsFileInfo *open_file = NULL; + + rreq->rsize = cifs_sb->ctx->rsize; + rreq->wsize = cifs_sb->ctx->wsize; + + if (file) { + open_file = file->private_data; + rreq->netfs_priv = file->private_data; + req->cfile = cifsFileInfo_get(open_file); + } else if (rreq->origin != NETFS_WRITEBACK) { + WARN_ON_ONCE(1); + return -EIO; + } + + return 0; +} + +/* + * Expand the size of a readahead to the size of the rsize, if at least as + * large as a page, allowing for the possibility that rsize is not pow-2 + * aligned. + */ +static void cifs_expand_readahead(struct netfs_io_request *rreq) +{ + unsigned int rsize = rreq->rsize; + loff_t misalignment, i_size = i_size_read(rreq->inode); + + if (rsize < PAGE_SIZE) + return; + + if (rsize < INT_MAX) + rsize = roundup_pow_of_two(rsize); + else + rsize = ((unsigned int)INT_MAX + 1) / 2; + + misalignment = rreq->start & (rsize - 1); + if (misalignment) { + rreq->start -= misalignment; + rreq->len += misalignment; + } + + rreq->len = round_up(rreq->len, rsize); + if (rreq->start < i_size && rreq->len > i_size - rreq->start) + rreq->len = i_size - rreq->start; +} + +/* + * Completion of a request operation. 
+ */ +static void cifs_rreq_done(struct netfs_io_request *rreq) +{ + struct timespec64 atime, mtime; + struct inode *inode = rreq->inode; + + /* we do not want atime to be less than mtime, it broke some apps */ + atime = inode_set_atime_to_ts(inode, current_time(inode)); + mtime = inode_get_mtime(inode); + if (timespec64_compare(&atime, &mtime)) + inode_set_atime_to_ts(inode, inode_get_mtime(inode)); +} + +static void cifs_post_modify(struct inode *inode) +{ + /* Indication to update ctime and mtime as close is deferred */ + set_bit(CIFS_INO_MODIFIED_ATTR, &CIFS_I(inode)->flags); +} + +static void cifs_free_request(struct netfs_io_request *rreq) +{ + struct cifs_io_request *req = container_of(rreq, struct cifs_io_request, rreq); + + if (req->cfile) + cifsFileInfo_put(req->cfile); +} + +static void cifs_free_subrequest(struct netfs_io_subrequest *subreq) +{ + struct cifs_io_subrequest *rdata = + container_of(subreq, struct cifs_io_subrequest, subreq); + int rc = subreq->error; + + if (rdata->subreq.source == NETFS_DOWNLOAD_FROM_SERVER) { +#ifdef CONFIG_CIFS_SMB_DIRECT + if (rdata->mr) { + smbd_deregister_mr(rdata->mr); + rdata->mr = NULL; + } +#endif + } + + add_credits_and_wake_if(rdata->server, &rdata->credits, 0); + if (rdata->have_xid) + free_xid(rdata->xid); +} + +const struct netfs_request_ops cifs_req_ops = { + .request_pool = &cifs_io_request_pool, + .subrequest_pool = &cifs_io_subrequest_pool, + .init_request = cifs_init_request, + .free_request = cifs_free_request, + .free_subrequest = cifs_free_subrequest, + .expand_readahead = cifs_expand_readahead, + .clamp_length = cifs_clamp_length, + .issue_read = cifs_req_issue_read, + .done = cifs_rreq_done, + .post_modify = cifs_post_modify, + .begin_writeback = cifs_begin_writeback, + .prepare_write = cifs_prepare_write, + .issue_write = cifs_issue_write, +}; /* * Remove the dirty flags from a span of pages. diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 298552f5122c..f45d06284f2f 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -302,6 +302,7 @@ struct netfs_request_ops { /* Modification handling */ void (*update_i_size)(struct inode *inode, loff_t i_size); + void (*post_modify)(struct inode *inode); /* Write request handling */ void (*begin_writeback)(struct netfs_io_request *wreq); diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index 4ba553a6d71b..da23484268df 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -112,6 +112,7 @@ #define netfs_sreq_ref_traces \ EM(netfs_sreq_trace_get_copy_to_cache, "GET COPY2C ") \ EM(netfs_sreq_trace_get_resubmit, "GET RESUBMIT") \ + EM(netfs_sreq_trace_get_submit, "GET SUBMIT") \ EM(netfs_sreq_trace_get_short_read, "GET SHORTRD") \ EM(netfs_sreq_trace_new, "NEW ") \ EM(netfs_sreq_trace_put_cancel, "PUT CANCEL ") \ -- cgit From 74df22453c51392476117d7330bf02cee6e987cf Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 1 May 2024 16:29:48 -0700 Subject: kunit/fortify: Fix replaced failure path to unbreak __alloc_size The __alloc_size annotation for kmemdup() was getting disabled under KUnit testing because the replaced fortify_panic macro implementation was using "return NULL" as a way to survive the sanity checking. 
But having the chance to return NULL invalidated __alloc_size, so kmemdup was not passing the __builtin_dynamic_object_size() tests any more: [23:26:18] [PASSED] fortify_test_alloc_size_kmalloc_const [23:26:19] # fortify_test_alloc_size_kmalloc_dynamic: EXPECTATION FAILED at lib/fortify_kunit.c:265 [23:26:19] Expected __builtin_dynamic_object_size(p, 1) == expected, but [23:26:19] __builtin_dynamic_object_size(p, 1) == -1 (0xffffffffffffffff) [23:26:19] expected == 11 (0xb) [23:26:19] __alloc_size() not working with __bdos on kmemdup("hello there", len, gfp) [23:26:19] [FAILED] fortify_test_alloc_size_kmalloc_dynamic Normal builds were not affected: __alloc_size continued to work there. Use a zero-sized allocation instead, which allows __alloc_size to behave. Fixes: 4ce615e798a7 ("fortify: Provide KUnit counters for failure testing") Fixes: fa4a3f86d498 ("fortify: Add KUnit tests for runtime overflows") Link: https://lore.kernel.org/r/20240501232937.work.532-kees@kernel.org Signed-off-by: Kees Cook --- include/linux/fortify-string.h | 3 ++- lib/fortify_kunit.c | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h index a0bb13825109..85fc0e6f0f7f 100644 --- a/include/linux/fortify-string.h +++ b/include/linux/fortify-string.h @@ -738,7 +738,8 @@ __FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp if (__compiletime_lessthan(p_size, size)) __read_overflow(); if (p_size < size) - fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size, NULL); + fortify_panic(FORTIFY_FUNC_kmemdup, FORTIFY_READ, p_size, size, + __real_kmemdup(p, 0, gfp)); return __real_kmemdup(p, size, gfp); } diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c index ef3e4c68b759..306522fd0aa2 100644 --- a/lib/fortify_kunit.c +++ b/lib/fortify_kunit.c @@ -1002,19 +1002,19 @@ static void fortify_test_kmemdup(struct kunit *test) /* Out of bounds by 1 byte. */ copy = kmemdup(src, len + 1, GFP_KERNEL); - KUNIT_EXPECT_NULL(test, copy); + KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR); KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1); kfree(copy); /* Way out of bounds. */ copy = kmemdup(src, len * 2, GFP_KERNEL); - KUNIT_EXPECT_NULL(test, copy); + KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR); KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2); kfree(copy); /* Starting offset causing out of bounds. */ copy = kmemdup(src + 1, len, GFP_KERNEL); - KUNIT_EXPECT_NULL(test, copy); + KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR); KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3); kfree(copy); } -- cgit From 0840556e5a3a331b6932ef17dd4bc94445df3297 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Mon, 29 Apr 2024 18:58:12 -0700 Subject: net: Protect dev->name by seqlock. We will convert ioctl(SIOCGARP) to RCU, and then we need to copy dev->name which is currently protected by rtnl_lock(). 
This patch does the following: 1) Add seqlock netdev_rename_lock to protect dev->name 2) Add netdev_copy_name() that copies dev->name to buffer under netdev_rename_lock 3) Use netdev_copy_name() in netdev_get_name() and drop devnet_rename_sem Suggested-by: Eric Dumazet Link: https://lore.kernel.org/netdev/CANn89iJEWs7AYSJqGCUABeVqOCTkErponfZdT5kV-iD=-SajnQ@mail.gmail.com/ Signed-off-by: Kuniyuki Iwashima Link: https://lore.kernel.org/r/20240430015813.71143-7-kuniyu@amazon.com Signed-off-by: Jakub Kicinski --- include/linux/netdevice.h | 1 + net/core/dev.c | 27 +++++++++++++++++++++++---- 2 files changed, 24 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f849e7d110ed..41853424b41d 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3136,6 +3136,7 @@ struct net_device *netdev_get_by_name(struct net *net, const char *name, netdevice_tracker *tracker, gfp_t gfp); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); struct net_device *dev_get_by_napi_id(unsigned int napi_id); +void netdev_copy_name(struct net_device *dev, char *name); static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, diff --git a/net/core/dev.c b/net/core/dev.c index cd7ba50eac15..e02d2363347e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -940,6 +940,18 @@ struct net_device *dev_get_by_napi_id(unsigned int napi_id) } EXPORT_SYMBOL(dev_get_by_napi_id); +static DEFINE_SEQLOCK(netdev_rename_lock); + +void netdev_copy_name(struct net_device *dev, char *name) +{ + unsigned int seq; + + do { + seq = read_seqbegin(&netdev_rename_lock); + strscpy(name, dev->name, IFNAMSIZ); + } while (read_seqretry(&netdev_rename_lock, seq)); +} + /** * netdev_get_name - get a netdevice name, knowing its ifindex. 
* @net: network namespace @@ -951,7 +963,6 @@ int netdev_get_name(struct net *net, char *name, int ifindex) struct net_device *dev; int ret; - down_read(&devnet_rename_sem); rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifindex); @@ -960,12 +971,11 @@ int netdev_get_name(struct net *net, char *name, int ifindex) goto out; } - strcpy(name, dev->name); + netdev_copy_name(dev, name); ret = 0; out: rcu_read_unlock(); - up_read(&devnet_rename_sem); return ret; } @@ -1217,7 +1227,10 @@ int dev_change_name(struct net_device *dev, const char *newname) memcpy(oldname, dev->name, IFNAMSIZ); + write_seqlock(&netdev_rename_lock); err = dev_get_valid_name(net, dev, newname); + write_sequnlock(&netdev_rename_lock); + if (err < 0) { up_write(&devnet_rename_sem); return err; } @@ -1257,7 +1270,9 @@ rollback: if (err >= 0) { err = ret; down_write(&devnet_rename_sem); + write_seqlock(&netdev_rename_lock); memcpy(dev->name, oldname, IFNAMSIZ); + write_sequnlock(&netdev_rename_lock); memcpy(oldname, newname, IFNAMSIZ); WRITE_ONCE(dev->name_assign_type, old_assign_type); old_assign_type = NET_NAME_RENAMED; @@ -11403,8 +11418,12 @@ int __dev_change_net_namespace(struct net_device *dev, struct net *net, dev_net_set(dev, net); dev->ifindex = new_ifindex; - if (new_name[0]) /* Rename the netdev to prepared name */ + if (new_name[0]) { + /* Rename the netdev to prepared name */ + write_seqlock(&netdev_rename_lock); strscpy(dev->name, new_name, IFNAMSIZ); + write_sequnlock(&netdev_rename_lock); + } /* Fixup kobjects */ dev_set_uevent_suppress(&dev->dev, 1); -- cgit From 45751097aeb386eb239f6a8ed0ccfd7dabce068e Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Mon, 22 Apr 2024 12:24:06 +0200 Subject: seq_file: Optimize seq_puts() Most of seq_puts() usages are done with a string literal. In such cases, the length of the string can be computed at compile time in order to save a strlen() call at run-time. seq_putc() or seq_write() can then be used instead. This saves a few cycles. To have an estimation of how often this optimization triggers: $ git grep seq_puts.*\" | wc -l 3436 $ git grep seq_puts.*\".\" | wc -l 84 Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/a8589bffe4830dafcb9111e22acf06603fea7132.1713781332.git.christophe.jaillet@wanadoo.fr Signed-off-by: Christian Brauner The output for seq_putc() generation has also been checked and works.
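As a rough illustration of the effect (not part of the patch; the show routine and strings below are made up), calls like these are now resolved at compile time by the new inline wrapper: a single-character literal collapses to seq_putc(), longer literals go straight to seq_write() with a precomputed length, and only non-constant strings still pay for a strlen() via __seq_puts():

static int example_show(struct seq_file *m, void *v)
{
	const char *runtime_str = v;	/* not a compile-time constant */

	seq_puts(m, "\n");		/* single-char literal -> seq_putc(m, '\n') */
	seq_puts(m, "state: ");		/* literal -> seq_write(m, "state: ", 7), no run-time strlen() */
	seq_puts(m, runtime_str);	/* non-constant -> falls back to __seq_puts() */
	return 0;
}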
--- fs/seq_file.c | 4 ++-- include/linux/seq_file.h | 13 ++++++++++++- 2 files changed, 14 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/fs/seq_file.c b/fs/seq_file.c index f5fdaf3b1572..8ef0a07033ca 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -669,7 +669,7 @@ void seq_putc(struct seq_file *m, char c) } EXPORT_SYMBOL(seq_putc); -void seq_puts(struct seq_file *m, const char *s) +void __seq_puts(struct seq_file *m, const char *s) { int len = strlen(s); @@ -680,7 +680,7 @@ void seq_puts(struct seq_file *m, const char *s) memcpy(m->buf + m->count, s, len); m->count += len; } -EXPORT_SYMBOL(seq_puts); +EXPORT_SYMBOL(__seq_puts); /** * seq_put_decimal_ull_width - A helper routine for putting decimal numbers diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 234bcdb1fba4..8bd4fda6e027 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -118,7 +118,18 @@ void seq_vprintf(struct seq_file *m, const char *fmt, va_list args); __printf(2, 3) void seq_printf(struct seq_file *m, const char *fmt, ...); void seq_putc(struct seq_file *m, char c); -void seq_puts(struct seq_file *m, const char *s); +void __seq_puts(struct seq_file *m, const char *s); + +static __always_inline void seq_puts(struct seq_file *m, const char *s) +{ + if (!__builtin_constant_p(*s)) + __seq_puts(m, s); + else if (s[0] && !s[1]) + seq_putc(m, s[0]); + else + seq_write(m, s, __builtin_strlen(s)); +} + void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter, unsigned long long num, unsigned int width); void seq_put_decimal_ull(struct seq_file *m, const char *delimiter, -- cgit From 82e9c84d8712e8d29278a37aaa9aa767d50a16da Mon Sep 17 00:00:00 2001 From: Venkatesh Srinivas Date: Thu, 4 Apr 2024 23:26:51 +0000 Subject: KVM: Remove kvm_make_all_cpus_request_except() Remove kvm_make_all_cpus_request_except() as it effectively has no users, and arguably should never have been added in the first place. Commit 54163a346d4a ("KVM: Introduce kvm_make_all_cpus_request_except()") added the "except" variation for use in SVM's AVIC update path, which used it to skip sending a request to the current vCPU (commit 7d611233b016 ("KVM: SVM: Disable AVIC before setting V_IRQ")). But the AVIC usage of kvm_make_all_cpus_request_except() was essentially a hack-a-fix that simply squashed the most likely scenario of a racy WARN without addressing the underlying problem(s). Commit f1577ab21442 ("KVM: SVM: svm_set_vintr don't warn if AVIC is active but is about to be deactivated") eventually fixed the WARN itself, and the "except" usage was subsequently dropped by df63202fe52b ("KVM: x86: APICv: drop immediate APICv disablement on current vCPU"). That kvm_make_all_cpus_request_except() hasn't gained any users in the last ~3 years isn't a coincidence. If a VM-wide broadcast *needs* to skip the current vCPU, then odds are very good that there is underlying bug that could be better fixed elsewhere. 
Signed-off-by: Venkatesh Srinivas Link: https://lore.kernel.org/r/20240404232651.1645176-1-venkateshs@chromium.org [sean: rewrite changelog with --verbose] Signed-off-by: Sean Christopherson --- include/linux/kvm_host.h | 2 -- virt/kvm/kvm_main.c | 13 ++----------- 2 files changed, 2 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 48f31dcd318a..e1555d75a203 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -193,8 +193,6 @@ static inline bool is_error_page(struct page *page) bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, unsigned long *vcpu_bitmap); bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); -bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, - struct kvm_vcpu *except); #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 3573c8cd9e11..bd04aafcb3c4 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -311,8 +311,7 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, return called; } -bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, - struct kvm_vcpu *except) +bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) { struct kvm_vcpu *vcpu; struct cpumask *cpus; @@ -325,22 +324,14 @@ bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask); cpumask_clear(cpus); - kvm_for_each_vcpu(i, vcpu, kvm) { - if (vcpu == except) - continue; + kvm_for_each_vcpu(i, vcpu, kvm) kvm_make_vcpu_request(vcpu, req, cpus, me); - } called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT)); put_cpu(); return called; } - -bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) -{ - return kvm_make_all_cpus_request_except(kvm, req, NULL); -} EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request); void kvm_flush_remote_tlbs(struct kvm *kvm) -- cgit From 7d78a77733552092361239b1d8afaf8412f5dffd Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 1 May 2024 16:32:02 -0700 Subject: string: Add additional __realloc_size() annotations for "dup" helpers Several other "dup"-style interfaces could use the __realloc_size() attribute. (As a reminder to myself and others: "realloc" is used here instead of "alloc" because the "alloc_size" attribute implies that the memory contents are uninitialized. Since we're copying contents into the resulting allocation, it must use "realloc_size" to avoid confusing the compiler's optimization passes.) Add KUnit test coverage where possible. (KUnit still does not have the ability to manipulate userspace memory.) 
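As a sketch of the pattern (the helper below is made up for illustration, mirroring memdup_array_user()), __realloc_size(2, 3) tells the compiler that the size of the returned object is the product of arguments 2 and 3, without implying the contents are uninitialized, so __builtin_dynamic_object_size() keeps seeing the size of the copy:

/* Hypothetical dup-style helper; not part of this patch. */
static inline __realloc_size(2, 3)
void *example_dup_array(const void *src, size_t n, size_t size, gfp_t gfp)
{
	size_t nbytes;

	if (check_mul_overflow(n, size, &nbytes))
		return NULL;
	return kmemdup(src, nbytes, gfp);
}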
Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20240502145218.it.729-kees@kernel.org Signed-off-by: Kees Cook --- include/linux/string.h | 13 ++++++++----- lib/fortify_kunit.c | 26 ++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/include/linux/string.h b/include/linux/string.h index 86aa6cd35167..10e5177bb49c 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -14,8 +14,8 @@ #include extern char *strndup_user(const char __user *, long); -extern void *memdup_user(const void __user *, size_t); -extern void *vmemdup_user(const void __user *, size_t); +extern void *memdup_user(const void __user *, size_t) __realloc_size(2); +extern void *vmemdup_user(const void __user *, size_t) __realloc_size(2); extern void *memdup_user_nul(const void __user *, size_t); /** @@ -27,7 +27,8 @@ extern void *memdup_user_nul(const void __user *, size_t); * Return: an ERR_PTR() on failure. Result is physically * contiguous, to be freed by kfree(). */ -static inline void *memdup_array_user(const void __user *src, size_t n, size_t size) +static inline __realloc_size(2, 3) +void *memdup_array_user(const void __user *src, size_t n, size_t size) { size_t nbytes; @@ -46,7 +47,8 @@ static inline void *memdup_array_user(const void __user *src, size_t n, size_t s * Return: an ERR_PTR() on failure. Result may be not * physically contiguous. Use kvfree() to free. */ -static inline void *vmemdup_array_user(const void __user *src, size_t n, size_t size) +static inline __realloc_size(2, 3) +void *vmemdup_array_user(const void __user *src, size_t n, size_t size) { size_t nbytes; @@ -285,7 +287,8 @@ extern char *kstrndup(const char *s, size_t len, gfp_t gfp); extern void *kmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2); extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2); extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp); -extern void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp); +extern void *kmemdup_array(const void *src, size_t element_size, size_t count, gfp_t gfp) + __realloc_size(2, 3); /* lib/argv_split.c */ extern char **argv_split(gfp_t gfp, const char *str, int *argcp); diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c index 306522fd0aa2..d2377e00caab 100644 --- a/lib/fortify_kunit.c +++ b/lib/fortify_kunit.c @@ -363,6 +363,31 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc) } while (0) DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc) +static const char * const test_strs[] = { + "", + "Hello there", + "A longer string, just for variety", +}; + +#define TEST_realloc(checker) do { \ + gfp_t gfp = GFP_KERNEL; \ + size_t len; \ + int i; \ + \ + for (i = 0; i < ARRAY_SIZE(test_strs); i++) { \ + len = strlen(test_strs[i]); \ + KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0); \ + checker(len, kmemdup_array(test_strs[i], len, 1, gfp), \ + kfree(p)); \ + checker(len, kmemdup(test_strs[i], len, gfp), \ + kfree(p)); \ + } \ +} while (0) +static void fortify_test_realloc_size(struct kunit *test) +{ + TEST_realloc(check_dynamic); +} + /* * We can't have an array at the end of a structure or else * builds without -fstrict-flex-arrays=3 will report them as @@ -1046,6 +1071,7 @@ static struct kunit_case fortify_test_cases[] = { KUNIT_CASE(fortify_test_alloc_size_kvmalloc_dynamic), KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_const), KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_dynamic), + KUNIT_CASE(fortify_test_realloc_size), 
KUNIT_CASE(fortify_test_strlen), KUNIT_CASE(fortify_test_strnlen), KUNIT_CASE(fortify_test_strcpy), -- cgit From 9a87907de3597a339cc129229d1a20bc7365ea5f Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Thu, 2 May 2024 20:35:57 +0200 Subject: ovl: implement tmpfile Combine inode creation with opening a file. There are six separate objects that are being set up: the backing inode, dentry and file, and the overlay inode, dentry and file. Cleanup in case of an error is a bit of a challenge and is difficult to test, so careful review is needed. All tmpfile testcases except generic/509 now run/pass, and no regressions are observed with full xfstests. Signed-off-by: Miklos Szeredi Reviewed-by: Amir Goldstein --- fs/backing-file.c | 23 +++++++ fs/internal.h | 3 + fs/namei.c | 6 +- fs/overlayfs/dir.c | 149 +++++++++++++++++++++++++++++++++++++------ fs/overlayfs/file.c | 3 - fs/overlayfs/overlayfs.h | 3 + include/linux/backing-file.h | 3 + 7 files changed, 165 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/fs/backing-file.c b/fs/backing-file.c index 740185198db3..afb557446c27 100644 --- a/fs/backing-file.c +++ b/fs/backing-file.c @@ -52,6 +52,29 @@ struct file *backing_file_open(const struct path *user_path, int flags, } EXPORT_SYMBOL_GPL(backing_file_open); +struct file *backing_tmpfile_open(const struct path *user_path, int flags, + const struct path *real_parentpath, + umode_t mode, const struct cred *cred) +{ + struct mnt_idmap *real_idmap = mnt_idmap(real_parentpath->mnt); + struct file *f; + int error; + + f = alloc_empty_backing_file(flags, cred); + if (IS_ERR(f)) + return f; + + path_get(user_path); + *backing_file_user_path(f) = *user_path; + error = vfs_tmpfile(real_idmap, real_parentpath, f, mode); + if (error) { + fput(f); + f = ERR_PTR(error); + } + return f; +} +EXPORT_SYMBOL(backing_tmpfile_open); + struct backing_aio { struct kiocb iocb; refcount_t ref; diff --git a/fs/internal.h b/fs/internal.h index 7ca738904e34..ab2225136f60 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -62,6 +62,9 @@ int do_mkdirat(int dfd, struct filename *name, umode_t mode); int do_symlinkat(struct filename *from, int newdfd, struct filename *to); int do_linkat(int olddfd, struct filename *old, int newdfd, struct filename *new, int flags); +int vfs_tmpfile(struct mnt_idmap *idmap, + const struct path *parentpath, + struct file *file, umode_t mode); /* * namespace.c diff --git a/fs/namei.c b/fs/namei.c index c5b2a25be7d0..13e50b0a49d2 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -3668,9 +3668,9 @@ static int do_open(struct nameidata *nd, * On non-idmapped mounts or if permission checking is to be performed on the * raw inode simply pass @nop_mnt_idmap. */ -static int vfs_tmpfile(struct mnt_idmap *idmap, - const struct path *parentpath, - struct file *file, umode_t mode) +int vfs_tmpfile(struct mnt_idmap *idmap, + const struct path *parentpath, + struct file *file, umode_t mode) { struct dentry *child; struct inode *dir = d_inode(parentpath->dentry); diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 0f8b4a719237..c614e923b143 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "overlayfs.h" static unsigned short ovl_redirect_max = 256; @@ -260,14 +261,13 @@ static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry) * may not use to instantiate the new dentry. 
*/ static int ovl_instantiate(struct dentry *dentry, struct inode *inode, - struct dentry *newdentry, bool hardlink) + struct dentry *newdentry, bool hardlink, struct file *tmpfile) { struct ovl_inode_params oip = { .upperdentry = newdentry, .newinode = inode, }; - ovl_dir_modified(dentry->d_parent, false); ovl_dentry_set_upper_alias(dentry); ovl_dentry_init_reval(dentry, newdentry, NULL); @@ -295,6 +295,9 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode, inc_nlink(inode); } + if (tmpfile) + d_mark_tmpfile(tmpfile, inode); + d_instantiate(dentry, inode); if (inode != oip.newinode) { pr_warn_ratelimited("newly created inode found in cache (%pd2)\n", @@ -345,7 +348,8 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode, ovl_set_opaque(dentry, newdentry); } - err = ovl_instantiate(dentry, inode, newdentry, !!attr->hardlink); + ovl_dir_modified(dentry->d_parent, false); + err = ovl_instantiate(dentry, inode, newdentry, !!attr->hardlink, NULL); if (err) goto out_cleanup; out_unlock: @@ -529,7 +533,8 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, if (err) goto out_cleanup; } - err = ovl_instantiate(dentry, inode, newdentry, hardlink); + ovl_dir_modified(dentry->d_parent, false); + err = ovl_instantiate(dentry, inode, newdentry, hardlink, NULL); if (err) { ovl_cleanup(ofs, udir, newdentry); dput(newdentry); @@ -551,12 +556,35 @@ out_cleanup: goto out_dput; } +static int ovl_setup_cred_for_create(struct dentry *dentry, struct inode *inode, + umode_t mode, const struct cred *old_cred) +{ + int err; + struct cred *override_cred; + + override_cred = prepare_creds(); + if (!override_cred) + return -ENOMEM; + + override_cred->fsuid = inode->i_uid; + override_cred->fsgid = inode->i_gid; + err = security_dentry_create_files_as(dentry, mode, &dentry->d_name, + old_cred, override_cred); + if (err) { + put_cred(override_cred); + return err; + } + put_cred(override_creds(override_cred)); + put_cred(override_cred); + + return 0; +} + static int ovl_create_or_link(struct dentry *dentry, struct inode *inode, struct ovl_cattr *attr, bool origin) { int err; const struct cred *old_cred; - struct cred *override_cred; struct dentry *parent = dentry->d_parent; old_cred = ovl_override_creds(dentry->d_sb); @@ -572,10 +600,6 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode, } if (!attr->hardlink) { - err = -ENOMEM; - override_cred = prepare_creds(); - if (!override_cred) - goto out_revert_creds; /* * In the creation cases(create, mkdir, mknod, symlink), * ovl should transfer current's fs{u,g}id to underlying @@ -589,17 +613,9 @@ static int ovl_create_or_link(struct dentry *dentry, struct inode *inode, * create a new inode, so just use the ovl mounter's * fs{u,g}id. 
*/ - override_cred->fsuid = inode->i_uid; - override_cred->fsgid = inode->i_gid; - err = security_dentry_create_files_as(dentry, - attr->mode, &dentry->d_name, old_cred, - override_cred); - if (err) { - put_cred(override_cred); + err = ovl_setup_cred_for_create(dentry, inode, attr->mode, old_cred); + if (err) goto out_revert_creds; - } - put_cred(override_creds(override_cred)); - put_cred(override_cred); } if (!ovl_dentry_is_whiteout(dentry)) @@ -1290,6 +1306,100 @@ out: return err; } +static int ovl_create_tmpfile(struct file *file, struct dentry *dentry, + struct inode *inode, umode_t mode) +{ + const struct cred *old_cred; + struct path realparentpath; + struct file *realfile; + struct dentry *newdentry; + /* It's okay to set O_NOATIME, since the owner will be current fsuid */ + int flags = file->f_flags | OVL_OPEN_FLAGS; + int err; + + err = ovl_copy_up(dentry->d_parent); + if (err) + return err; + + old_cred = ovl_override_creds(dentry->d_sb); + err = ovl_setup_cred_for_create(dentry, inode, mode, old_cred); + if (err) + goto out_revert_creds; + + ovl_path_upper(dentry->d_parent, &realparentpath); + realfile = backing_tmpfile_open(&file->f_path, flags, &realparentpath, + mode, current_cred()); + err = PTR_ERR_OR_ZERO(realfile); + pr_debug("tmpfile/open(%pd2, 0%o) = %i\n", realparentpath.dentry, mode, err); + if (err) + goto out_revert_creds; + + /* ovl_instantiate() consumes the newdentry reference on success */ + newdentry = dget(realfile->f_path.dentry); + err = ovl_instantiate(dentry, inode, newdentry, false, file); + if (!err) { + file->private_data = realfile; + } else { + dput(newdentry); + fput(realfile); + } +out_revert_creds: + revert_creds(old_cred); + return err; +} + +static int ovl_dummy_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static int ovl_tmpfile(struct mnt_idmap *idmap, struct inode *dir, + struct file *file, umode_t mode) +{ + int err; + struct dentry *dentry = file->f_path.dentry; + struct inode *inode; + + if (!OVL_FS(dentry->d_sb)->tmpfile) + return -EOPNOTSUPP; + + err = ovl_want_write(dentry); + if (err) + return err; + + err = -ENOMEM; + inode = ovl_new_inode(dentry->d_sb, mode, 0); + if (!inode) + goto drop_write; + + inode_init_owner(&nop_mnt_idmap, inode, dir, mode); + err = ovl_create_tmpfile(file, dentry, inode, inode->i_mode); + if (err) + goto put_inode; + + /* + * Check if the preallocated inode was actually used. Having something + * else assigned to the dentry shouldn't happen as that would indicate + * that the backing tmpfile "leaked" out of overlayfs. 
+ */ + err = -EIO; + if (WARN_ON(inode != d_inode(dentry))) + goto put_realfile; + + /* inode reference was transferred to dentry */ + inode = NULL; + err = finish_open(file, dentry, ovl_dummy_open); +put_realfile: + /* Without FMODE_OPENED ->release() won't be called on @file */ + if (!(file->f_mode & FMODE_OPENED)) + fput(file->private_data); +put_inode: + iput(inode); +drop_write: + ovl_drop_write(dentry); + return err; +} + const struct inode_operations ovl_dir_inode_operations = { .lookup = ovl_lookup, .mkdir = ovl_mkdir, @@ -1310,4 +1420,5 @@ const struct inode_operations ovl_dir_inode_operations = { .update_time = ovl_update_time, .fileattr_get = ovl_fileattr_get, .fileattr_set = ovl_fileattr_set, + .tmpfile = ovl_tmpfile, }; diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c index 05536964d37f..1a411cae57ed 100644 --- a/fs/overlayfs/file.c +++ b/fs/overlayfs/file.c @@ -24,9 +24,6 @@ static char ovl_whatisit(struct inode *inode, struct inode *realinode) return 'm'; } -/* No atime modification on underlying */ -#define OVL_OPEN_FLAGS (O_NOATIME) - static struct file *ovl_open_realfile(const struct file *file, const struct path *realpath) { diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index ee949f3e7c77..0bfe35da4b7b 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -175,6 +175,9 @@ static inline int ovl_metadata_digest_size(const struct ovl_metacopy *metacopy) return (int)metacopy->len - OVL_METACOPY_MIN_SIZE; } +/* No atime modification on underlying */ +#define OVL_OPEN_FLAGS (O_NOATIME) + extern const char *const ovl_xattr_table[][2]; static inline const char *ovl_xattr(struct ovl_fs *ofs, enum ovl_xattr ox) { diff --git a/include/linux/backing-file.h b/include/linux/backing-file.h index 3f1fe1774f1b..4b61b0e57720 100644 --- a/include/linux/backing-file.h +++ b/include/linux/backing-file.h @@ -22,6 +22,9 @@ struct backing_file_ctx { struct file *backing_file_open(const struct path *user_path, int flags, const struct path *real_path, const struct cred *cred); +struct file *backing_tmpfile_open(const struct path *user_path, int flags, + const struct path *real_parentpath, + umode_t mode, const struct cred *cred); ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter, struct kiocb *iocb, int flags, struct backing_file_ctx *ctx); -- cgit From 55111470b4416a6225abfdb4e1f0b8a8c8c52fdc Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Wed, 1 May 2024 11:40:23 -0700 Subject: cxl/cper: Fix non-ACPI-APEI-GHES build If ACPI_APEI_GHES is not configured the [un]register work functions are not properly declared. 0day notices that the cxl_cper_register_work() declaration in the CONFIG_ACPI_APEI_GHES=n is broken, fix it to be typical nop stub. 
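For reference, the usual shape of such a config-off stub is sketched below (CONFIG_SOME_FEATURE and the function name are placeholders, not the real CXL symbols): the =n branch needs a complete inline body rather than a bare "static inline" prototype:

#ifdef CONFIG_SOME_FEATURE
int example_register_work(struct work_struct *work);
#else
static inline int example_register_work(struct work_struct *work)
{
	return 0;
}
#endif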
Reported-by: kernel test robot Closes: http://lore.kernel.org/r/202405012230.6kXItWen-lkp@intel.com Reviewed-by: Dan Williams Signed-off-by: Ira Weiny Link: https://lore.kernel.org/r/20240501-cper-fix-0day-v1-1-c0b0056eafbc@intel.com Signed-off-by: Dave Jiang --- include/linux/cxl-event.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cxl-event.h b/include/linux/cxl-event.h index 855908792ef8..60b25020281f 100644 --- a/include/linux/cxl-event.h +++ b/include/linux/cxl-event.h @@ -164,12 +164,12 @@ int cxl_cper_register_work(struct work_struct *work); int cxl_cper_unregister_work(struct work_struct *work); int cxl_cper_kfifo_get(struct cxl_cper_work_data *wd); #else -static inline int cxl_cper_register_work(struct work_struct *work); +static inline int cxl_cper_register_work(struct work_struct *work) { return 0; } -static inline int cxl_cper_unregister_work(struct work_struct *work); +static inline int cxl_cper_unregister_work(struct work_struct *work) { return 0; } -- cgit From 798cb7f9aec35460c383eab57b9fa474d999a2eb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 17 Apr 2024 18:26:54 -0400 Subject: swapon(2)/swapoff(2): don't bother with block size once upon a time that used to matter; these days we do swap IO for swap devices at the level that doesn't give a damn about block size, buffer_head or anything of that sort - just attach the page to bio, set the location and size (the latter to PAGE_SIZE) and feed into queue. Reviewed-by: Christoph Hellwig Reviewed-by: Christian Brauner Signed-off-by: Al Viro --- include/linux/swap.h | 1 - mm/swapfile.c | 12 +----------- 2 files changed, 1 insertion(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index f53d608daa01..a5b640cca459 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -301,7 +301,6 @@ struct swap_info_struct { struct file *bdev_file; /* open handle of the bdev */ struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ - unsigned int old_block_size; /* seldom referenced */ struct completion comp; /* seldom referenced */ spinlock_t lock; /* * protect map scan related fields like diff --git a/mm/swapfile.c b/mm/swapfile.c index 4919423cce76..304f74d039f3 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2417,7 +2417,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) struct inode *inode; struct filename *pathname; int err, found = 0; - unsigned int old_block_size; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -2529,7 +2528,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) } swap_file = p->swap_file; - old_block_size = p->old_block_size; p->swap_file = NULL; p->max = 0; swap_map = p->swap_map; @@ -2553,7 +2551,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) inode = mapping->host; if (p->bdev_file) { - set_blocksize(p->bdev, old_block_size); fput(p->bdev_file); p->bdev_file = NULL; } @@ -2782,21 +2779,15 @@ static struct swap_info_struct *alloc_swap_info(void) static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) { - int error; - if (S_ISBLK(inode->i_mode)) { p->bdev_file = bdev_file_open_by_dev(inode->i_rdev, BLK_OPEN_READ | BLK_OPEN_WRITE, p, NULL); if (IS_ERR(p->bdev_file)) { - error = PTR_ERR(p->bdev_file); + int error = PTR_ERR(p->bdev_file); p->bdev_file = NULL; return error; } p->bdev = file_bdev(p->bdev_file); - p->old_block_size = block_size(p->bdev); - error = 
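Roughly speaking, the I/O described above looks like the following sketch (simplified and hypothetical, not the actual code in mm/page_io.c): one page, one bio, and the device's block size is never consulted:

static void example_swap_write_page(struct block_device *bdev, struct page *page,
				    sector_t sector)
{
	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SWAP, GFP_NOIO);

	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);
	/* real code sets bio->bi_end_io before submitting */
	submit_bio(bio);
}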
set_blocksize(p->bdev, PAGE_SIZE); - if (error < 0) - return error; /* * Zoned block devices contain zones that have a sequential * write only restriction. Hence zoned block devices are not @@ -3235,7 +3226,6 @@ bad_swap: free_percpu(p->cluster_next_cpu); p->cluster_next_cpu = NULL; if (p->bdev_file) { - set_blocksize(p->bdev, p->old_block_size); fput(p->bdev_file); p->bdev_file = NULL; } -- cgit From 51d908b3db0e588aeb2d06df37e4df3fb1754bb5 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 17 Apr 2024 18:33:34 -0400 Subject: swapon(2): open swap with O_EXCL ... eliminating the need to reopen block devices so they could be exclusively held. Reviewed-by: Christoph Hellwig Reviewed-by: Christian Brauner Signed-off-by: Al Viro --- include/linux/swap.h | 1 - mm/swapfile.c | 19 ++----------------- 2 files changed, 2 insertions(+), 18 deletions(-) (limited to 'include/linux') diff --git a/include/linux/swap.h b/include/linux/swap.h index a5b640cca459..7e61a4aef2fc 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -298,7 +298,6 @@ struct swap_info_struct { unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ struct rb_root swap_extent_root;/* root of the swap extent rbtree */ - struct file *bdev_file; /* open handle of the bdev */ struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ struct completion comp; /* seldom referenced */ diff --git a/mm/swapfile.c b/mm/swapfile.c index 304f74d039f3..7bba0b0d4924 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -2550,10 +2550,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) exit_swap_address_space(p->type); inode = mapping->host; - if (p->bdev_file) { - fput(p->bdev_file); - p->bdev_file = NULL; - } inode_lock(inode); inode->i_flags &= ~S_SWAPFILE; @@ -2780,14 +2776,7 @@ static struct swap_info_struct *alloc_swap_info(void) static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) { if (S_ISBLK(inode->i_mode)) { - p->bdev_file = bdev_file_open_by_dev(inode->i_rdev, - BLK_OPEN_READ | BLK_OPEN_WRITE, p, NULL); - if (IS_ERR(p->bdev_file)) { - int error = PTR_ERR(p->bdev_file); - p->bdev_file = NULL; - return error; - } - p->bdev = file_bdev(p->bdev_file); + p->bdev = I_BDEV(inode); /* * Zoned block devices contain zones that have a sequential * write only restriction. 
Hence zoned block devices are not @@ -3028,7 +3017,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) name = NULL; goto bad_swap; } - swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0); + swap_file = file_open_name(name, O_RDWR | O_LARGEFILE | O_EXCL, 0); if (IS_ERR(swap_file)) { error = PTR_ERR(swap_file); swap_file = NULL; @@ -3225,10 +3214,6 @@ bad_swap: p->percpu_cluster = NULL; free_percpu(p->cluster_next_cpu); p->cluster_next_cpu = NULL; - if (p->bdev_file) { - fput(p->bdev_file); - p->bdev_file = NULL; - } inode = NULL; destroy_swap_extents(p); swap_cgroup_swapoff(p->type); -- cgit From ead083aeeed9df44fab9227e47688f7305c3a233 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 18 Apr 2024 00:34:31 -0400 Subject: set_blocksize(): switch to passing struct file * Signed-off-by: Al Viro --- block/bdev.c | 11 +++++++---- block/ioctl.c | 21 ++++++++++++--------- drivers/block/pktcdvd.c | 2 +- fs/btrfs/dev-replace.c | 2 +- fs/btrfs/volumes.c | 4 ++-- fs/ext4/super.c | 2 +- fs/reiserfs/journal.c | 5 ++--- fs/xfs/xfs_buf.c | 2 +- include/linux/blkdev.h | 2 +- 9 files changed, 28 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/block/bdev.c b/block/bdev.c index b8e32d933a63..a329ff9be11d 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -144,8 +144,11 @@ static void set_init_blocksize(struct block_device *bdev) bdev->bd_inode->i_blkbits = blksize_bits(bsize); } -int set_blocksize(struct block_device *bdev, int size) +int set_blocksize(struct file *file, int size) { + struct inode *inode = file->f_mapping->host; + struct block_device *bdev = I_BDEV(inode); + /* Size must be a power of two, and between 512 and PAGE_SIZE */ if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size)) return -EINVAL; @@ -155,9 +158,9 @@ int set_blocksize(struct block_device *bdev, int size) return -EINVAL; /* Don't change the size if it is same as current */ - if (bdev->bd_inode->i_blkbits != blksize_bits(size)) { + if (inode->i_blkbits != blksize_bits(size)) { sync_blockdev(bdev); - bdev->bd_inode->i_blkbits = blksize_bits(size); + inode->i_blkbits = blksize_bits(size); kill_bdev(bdev); } return 0; @@ -167,7 +170,7 @@ EXPORT_SYMBOL(set_blocksize); int sb_set_blocksize(struct super_block *sb, int size) { - if (set_blocksize(sb->s_bdev, size)) + if (set_blocksize(sb->s_bdev_file, size)) return 0; /* If we get here, we know size is power of two * and it's value is between 512 and PAGE_SIZE */ diff --git a/block/ioctl.c b/block/ioctl.c index a9028a2c2db5..1c800364bc70 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -473,11 +473,14 @@ static int compat_hdio_getgeo(struct block_device *bdev, #endif /* set the logical block size */ -static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode, +static int blkdev_bszset(struct file *file, blk_mode_t mode, int __user *argp) { + // this one might be file_inode(file)->i_rdev - a rare valid + // use of file_inode() for those. 
+ dev_t dev = I_BDEV(file->f_mapping->host)->bd_dev; + struct file *excl_file; int ret, n; - struct file *file; if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -487,13 +490,13 @@ static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode, return -EFAULT; if (mode & BLK_OPEN_EXCL) - return set_blocksize(bdev, n); + return set_blocksize(file, n); - file = bdev_file_open_by_dev(bdev->bd_dev, mode, &bdev, NULL); - if (IS_ERR(file)) + excl_file = bdev_file_open_by_dev(dev, mode, &dev, NULL); + if (IS_ERR(excl_file)) return -EBUSY; - ret = set_blocksize(bdev, n); - fput(file); + ret = set_blocksize(excl_file, n); + fput(excl_file); return ret; } @@ -621,7 +624,7 @@ long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */ return put_int(argp, block_size(bdev)); case BLKBSZSET: - return blkdev_bszset(bdev, mode, argp); + return blkdev_bszset(file, mode, argp); case BLKGETSIZE64: return put_u64(argp, bdev_nr_bytes(bdev)); @@ -681,7 +684,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */ return put_int(argp, bdev_logical_block_size(bdev)); case BLKBSZSET_32: - return blkdev_bszset(bdev, mode, argp); + return blkdev_bszset(file, mode, argp); case BLKGETSIZE64_32: return put_u64(argp, bdev_nr_bytes(bdev)); diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 05933f25b397..8a2ce8070010 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -2215,7 +2215,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, bool write) } dev_info(ddev, "%lukB available on disc\n", lba << 1); } - set_blocksize(file_bdev(bdev_file), CD_FRAMESIZE); + set_blocksize(bdev_file, CD_FRAMESIZE); return 0; diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 7696beec4c21..7130040d92ab 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -316,7 +316,7 @@ static int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); set_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); device->dev_stats_valid = 1; - set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); + set_blocksize(bdev_file, BTRFS_BDEV_BLOCKSIZE); device->fs_devices = fs_devices; ret = btrfs_get_dev_zone_info(device, false); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 43af5a9fb547..65c03ddecc59 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -483,7 +483,7 @@ btrfs_get_bdev_and_sb(const char *device_path, blk_mode_t flags, void *holder, if (flush) sync_blockdev(bdev); if (holder) { - ret = set_blocksize(bdev, BTRFS_BDEV_BLOCKSIZE); + ret = set_blocksize(*bdev_file, BTRFS_BDEV_BLOCKSIZE); if (ret) { fput(*bdev_file); goto error; @@ -2717,7 +2717,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state); clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state); device->dev_stats_valid = 1; - set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE); + set_blocksize(device->bdev_file, BTRFS_BDEV_BLOCKSIZE); if (seeding_dev) { btrfs_clear_sb_rdonly(sb); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 044135796f2b..9988b3a40b42 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -5873,7 +5873,7 @@ static struct file *ext4_get_journal_blkdev(struct super_block *sb, sb_block = EXT4_MIN_BLOCK_SIZE / blocksize; offset = EXT4_MIN_BLOCK_SIZE % blocksize; - 
set_blocksize(bdev, blocksize); + set_blocksize(bdev_file, blocksize); bh = __bread(bdev, sb_block, blocksize); if (!bh) { ext4_msg(sb, KERN_ERR, "couldn't read superblock of " diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index e539ccd39e1e..e477ee0ff35d 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -2626,8 +2626,7 @@ static int journal_init_dev(struct super_block *super, MAJOR(jdev), MINOR(jdev), result); return result; } else if (jdev != super->s_dev) - set_blocksize(file_bdev(journal->j_bdev_file), - super->s_blocksize); + set_blocksize(journal->j_bdev_file, super->s_blocksize); return 0; } @@ -2643,7 +2642,7 @@ static int journal_init_dev(struct super_block *super, return result; } - set_blocksize(file_bdev(journal->j_bdev_file), super->s_blocksize); + set_blocksize(journal->j_bdev_file, super->s_blocksize); reiserfs_info(super, "journal_init_dev: journal device: %pg\n", file_bdev(journal->j_bdev_file)); diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index f0fa02264eda..2dc0eacb0999 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -2043,7 +2043,7 @@ xfs_setsize_buftarg( btp->bt_meta_sectorsize = sectorsize; btp->bt_meta_sectormask = sectorsize - 1; - if (set_blocksize(btp->bt_bdev, sectorsize)) { + if (set_blocksize(btp->bt_bdev_file, sectorsize)) { xfs_warn(btp->bt_mount, "Cannot set_blocksize to %u on device %pg", sectorsize, btp->bt_bdev); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 172c91879999..20c749b2ebc2 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1474,7 +1474,7 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) } int bdev_read_only(struct block_device *bdev); -int set_blocksize(struct block_device *bdev, int size); +int set_blocksize(struct file *file, int size); int lookup_bdev(const char *pathname, dev_t *dev); -- cgit From 3f9b8fb46e5d20eac314f56747e24e1a4e74539d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 12 Apr 2024 00:54:19 -0400 Subject: Use bdev_is_paritition() instead of open-coding it Reviewed-by: Christoph Hellwig Signed-off-by: Al Viro --- block/blk-core.c | 5 +++-- block/blk-mq.c | 2 +- include/linux/part_stat.h | 2 +- lib/vsprintf.c | 2 +- 4 files changed, 6 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index a16b5abdbbf5..20322abc6082 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -759,7 +759,8 @@ void submit_bio_noacct(struct bio *bio) if (!bio_flagged(bio, BIO_REMAPPED)) { if (unlikely(bio_check_eod(bio))) goto end_io; - if (bdev->bd_partno && unlikely(blk_partition_remap(bio))) + if (bdev_is_partition(bdev) && + unlikely(blk_partition_remap(bio))) goto end_io; } @@ -989,7 +990,7 @@ again: if (likely(try_cmpxchg(&part->bd_stamp, &stamp, now))) __part_stat_add(part, io_ticks, end ? 
now - stamp : 1); } - if (part->bd_partno) { + if (bdev_is_partition(part)) { part = bdev_whole(part); goto again; } diff --git a/block/blk-mq.c b/block/blk-mq.c index 32afb87efbd0..43bb8f50a07c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -92,7 +92,7 @@ static bool blk_mq_check_inflight(struct request *rq, void *priv) struct mq_inflight *mi = priv; if (rq->part && blk_do_io_stat(rq) && - (!mi->part->bd_partno || rq->part == mi->part) && + (!bdev_is_partition(mi->part) || rq->part == mi->part) && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) mi->inflight[rq_data_dir(rq)]++; diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h index abeba356bc3f..ac8c44dd8237 100644 --- a/include/linux/part_stat.h +++ b/include/linux/part_stat.h @@ -59,7 +59,7 @@ static inline void part_stat_set_all(struct block_device *part, int value) #define part_stat_add(part, field, addnd) do { \ __part_stat_add((part), field, addnd); \ - if ((part)->bd_partno) \ + if (bdev_is_partition(part)) \ __part_stat_add(bdev_whole(part), field, addnd); \ } while (0) diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 552738f14275..3f9f1b959ef0 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -966,7 +966,7 @@ char *bdev_name(char *buf, char *end, struct block_device *bdev, hd = bdev->bd_disk; buf = string(buf, end, hd->disk_name, spec); - if (bdev->bd_partno) { + if (bdev_is_partition(bdev)) { if (isdigit(hd->disk_name[strlen(hd->disk_name)-1])) { if (buf < end) *buf = 'p'; -- cgit From b8c873edbf35570b93edfeddad9e85da54defa52 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 12 Apr 2024 01:01:36 -0400 Subject: wrapper for access to ->bd_partno On the next step it's going to get folded into a field where flags will go. Signed-off-by: Al Viro --- block/early-lookup.c | 2 +- block/partitions/core.c | 12 ++++++------ include/linux/blkdev.h | 7 ++++++- lib/vsprintf.c | 2 +- 4 files changed, 14 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/block/early-lookup.c b/block/early-lookup.c index 3effbd0d35e9..3fb57f7d2b12 100644 --- a/block/early-lookup.c +++ b/block/early-lookup.c @@ -78,7 +78,7 @@ static int __init devt_from_partuuid(const char *uuid_str, dev_t *devt) * to the partition number found by UUID. 
*/ *devt = part_devt(dev_to_disk(dev), - dev_to_bdev(dev)->bd_partno + offset); + bdev_partno(dev_to_bdev(dev)) + offset); } else { *devt = dev->devt; } diff --git a/block/partitions/core.c b/block/partitions/core.c index b11e88c82c8c..edd5309dc4ba 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -173,7 +173,7 @@ static struct parsed_partitions *check_partition(struct gendisk *hd) static ssize_t part_partition_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_partno); + return sprintf(buf, "%d\n", bdev_partno(dev_to_bdev(dev))); } static ssize_t part_start_show(struct device *dev, @@ -250,7 +250,7 @@ static int part_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct block_device *part = dev_to_bdev(dev); - add_uevent_var(env, "PARTN=%u", part->bd_partno); + add_uevent_var(env, "PARTN=%u", bdev_partno(part)); if (part->bd_meta_info && part->bd_meta_info->volname[0]) add_uevent_var(env, "PARTNAME=%s", part->bd_meta_info->volname); return 0; @@ -267,7 +267,7 @@ void drop_partition(struct block_device *part) { lockdep_assert_held(&part->bd_disk->open_mutex); - xa_erase(&part->bd_disk->part_tbl, part->bd_partno); + xa_erase(&part->bd_disk->part_tbl, bdev_partno(part)); kobject_put(part->bd_holder_dir); device_del(&part->bd_device); @@ -338,8 +338,8 @@ static struct block_device *add_partition(struct gendisk *disk, int partno, pdev->parent = ddev; /* in consecutive minor range? */ - if (bdev->bd_partno < disk->minors) { - devt = MKDEV(disk->major, disk->first_minor + bdev->bd_partno); + if (bdev_partno(bdev) < disk->minors) { + devt = MKDEV(disk->major, disk->first_minor + bdev_partno(bdev)); } else { err = blk_alloc_ext_minor(); if (err < 0) @@ -404,7 +404,7 @@ static bool partition_overlaps(struct gendisk *disk, sector_t start, rcu_read_lock(); xa_for_each_start(&disk->part_tbl, idx, part, 1) { - if (part->bd_partno != skip_partno && + if (bdev_partno(part) != skip_partno && start < part->bd_start_sect + bdev_nr_sectors(part) && start + length > part->bd_start_sect) { overlap = true; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index c3e8f7cf96be..32549d675955 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -720,6 +720,11 @@ void invalidate_disk(struct gendisk *disk); void set_disk_ro(struct gendisk *disk, bool read_only); void disk_uevent(struct gendisk *disk, enum kobject_action action); +static inline u8 bdev_partno(const struct block_device *bdev) +{ + return bdev->bd_partno; +} + static inline int get_disk_ro(struct gendisk *disk) { return disk->part0->bd_read_only || @@ -1095,7 +1100,7 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, static inline bool bdev_is_partition(struct block_device *bdev) { - return bdev->bd_partno; + return bdev_partno(bdev) != 0; } enum blk_default_limits { diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 3f9f1b959ef0..cdd4e2314bfc 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -972,7 +972,7 @@ char *bdev_name(char *buf, char *end, struct block_device *bdev, *buf = 'p'; buf++; } - buf = number(buf, end, bdev->bd_partno, spec); + buf = number(buf, end, bdev_partno(bdev), spec); } return buf; } -- cgit From b478e162f227d17d6a29f40ffd464af358334971 Mon Sep 17 00:00:00 2001 From: Ilpo Järvinen Date: Fri, 22 Mar 2024 14:39:51 +0200 Subject: PCI/ASPM: Consolidate link state defines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The 
linux/pci.h and aspm.c files define their own sets of link state related defines which are almost the same. Consolidate the use of defines into those defined by linux/pci.h and expand PCIE_LINK_STATE_L0S to match earlier ASPM_STATE_L0S that includes both upstream and downstream bits. Rename also the defines that are internal to aspm.c to start with PCIE_LINK_STATE for consistency. While the PCIE_LINK_STATE_L0S BIT(0) -> (BIT(0) | BIT(1)) transformation is not 1:1, in practice aspm.c already used ASPM_STATE_L0S that has both bits enabled except during mapping. While at it, place the PCIE_LINK_STATE_CLKPM define last to have more logical grouping. Use static_assert() to ensure PCIE_LINK_STATE_L0S is strictly equal to the combination of PCIE_LINK_STATE_L0S_UP/DW. Link: https://lore.kernel.org/r/20240322123952.6384-2-ilpo.jarvinen@linux.intel.com Signed-off-by: Ilpo Järvinen Signed-off-by: Bjorn Helgaas --- drivers/pci/pcie/aspm.c | 155 ++++++++++++++++++++++++------------------------ include/linux/pci.h | 24 ++++---- 2 files changed, 91 insertions(+), 88 deletions(-) (limited to 'include/linux') diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 2428d278e015..994b283b6a0f 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -8,6 +8,8 @@ */ #include +#include +#include #include #include #include @@ -189,21 +191,18 @@ void pci_restore_aspm_l1ss_state(struct pci_dev *pdev) #endif #define MODULE_PARAM_PREFIX "pcie_aspm." -/* Note: those are not register definitions */ -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */ -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */ -#define ASPM_STATE_L1 (4) /* L1 state */ -#define ASPM_STATE_L1_1 (8) /* ASPM L1.1 state */ -#define ASPM_STATE_L1_2 (0x10) /* ASPM L1.2 state */ -#define ASPM_STATE_L1_1_PCIPM (0x20) /* PCI PM L1.1 state */ -#define ASPM_STATE_L1_2_PCIPM (0x40) /* PCI PM L1.2 state */ -#define ASPM_STATE_L1_SS_PCIPM (ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1_2_PCIPM) -#define ASPM_STATE_L1_2_MASK (ASPM_STATE_L1_2 | ASPM_STATE_L1_2_PCIPM) -#define ASPM_STATE_L1SS (ASPM_STATE_L1_1 | ASPM_STATE_L1_1_PCIPM |\ - ASPM_STATE_L1_2_MASK) -#define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW) -#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1 | \ - ASPM_STATE_L1SS) +/* Note: these are not register definitions */ +#define PCIE_LINK_STATE_L0S_UP BIT(0) /* Upstream direction L0s state */ +#define PCIE_LINK_STATE_L0S_DW BIT(1) /* Downstream direction L0s state */ +static_assert(PCIE_LINK_STATE_L0S == (PCIE_LINK_STATE_L0S_UP | PCIE_LINK_STATE_L0S_DW)); + +#define PCIE_LINK_STATE_L1_SS_PCIPM (PCIE_LINK_STATE_L1_1_PCIPM |\ + PCIE_LINK_STATE_L1_2_PCIPM) +#define PCIE_LINK_STATE_L1_2_MASK (PCIE_LINK_STATE_L1_2 |\ + PCIE_LINK_STATE_L1_2_PCIPM) +#define PCIE_LINK_STATE_L1SS (PCIE_LINK_STATE_L1_1 |\ + PCIE_LINK_STATE_L1_1_PCIPM |\ + PCIE_LINK_STATE_L1_2_MASK) struct pcie_link_state { struct pci_dev *pdev; /* Upstream component of the Link */ @@ -275,10 +274,10 @@ static int policy_to_aspm_state(struct pcie_link_state *link) return 0; case POLICY_POWERSAVE: /* Enable ASPM L0s/L1 */ - return (ASPM_STATE_L0S | ASPM_STATE_L1); + return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1; case POLICY_POWER_SUPERSAVE: /* Enable Everything */ - return ASPM_STATE_ALL; + return PCIE_LINK_STATE_ASPM_ALL; case POLICY_DEFAULT: return link->aspm_default; } @@ -581,14 +580,14 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint) latency_dw_l1 = calc_l1_latency(lnkcap_dw); /* Check upstream direction L0s 
latency */ - if ((link->aspm_capable & ASPM_STATE_L0S_UP) && + if ((link->aspm_capable & PCIE_LINK_STATE_L0S_UP) && (latency_up_l0s > acceptable_l0s)) - link->aspm_capable &= ~ASPM_STATE_L0S_UP; + link->aspm_capable &= ~PCIE_LINK_STATE_L0S_UP; /* Check downstream direction L0s latency */ - if ((link->aspm_capable & ASPM_STATE_L0S_DW) && + if ((link->aspm_capable & PCIE_LINK_STATE_L0S_DW) && (latency_dw_l0s > acceptable_l0s)) - link->aspm_capable &= ~ASPM_STATE_L0S_DW; + link->aspm_capable &= ~PCIE_LINK_STATE_L0S_DW; /* * Check L1 latency. * Every switch on the path to root complex need 1 @@ -603,9 +602,9 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint) * substate latencies (and hence do not do any check). */ latency = max_t(u32, latency_up_l1, latency_dw_l1); - if ((link->aspm_capable & ASPM_STATE_L1) && + if ((link->aspm_capable & PCIE_LINK_STATE_L1) && (latency + l1_switch_latency > acceptable_l1)) - link->aspm_capable &= ~ASPM_STATE_L1; + link->aspm_capable &= ~PCIE_LINK_STATE_L1; l1_switch_latency += NSEC_PER_USEC; link = link->parent; @@ -741,13 +740,13 @@ static void aspm_l1ss_init(struct pcie_link_state *link) child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2; if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1) - link->aspm_support |= ASPM_STATE_L1_1; + link->aspm_support |= PCIE_LINK_STATE_L1_1; if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2) - link->aspm_support |= ASPM_STATE_L1_2; + link->aspm_support |= PCIE_LINK_STATE_L1_2; if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1) - link->aspm_support |= ASPM_STATE_L1_1_PCIPM; + link->aspm_support |= PCIE_LINK_STATE_L1_1_PCIPM; if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2) - link->aspm_support |= ASPM_STATE_L1_2_PCIPM; + link->aspm_support |= PCIE_LINK_STATE_L1_2_PCIPM; if (parent_l1ss_cap) pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, @@ -757,15 +756,15 @@ static void aspm_l1ss_init(struct pcie_link_state *link) &child_l1ss_ctl1); if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1) - link->aspm_enabled |= ASPM_STATE_L1_1; + link->aspm_enabled |= PCIE_LINK_STATE_L1_1; if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2) - link->aspm_enabled |= ASPM_STATE_L1_2; + link->aspm_enabled |= PCIE_LINK_STATE_L1_2; if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1) - link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM; + link->aspm_enabled |= PCIE_LINK_STATE_L1_1_PCIPM; if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2) - link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM; + link->aspm_enabled |= PCIE_LINK_STATE_L1_2_PCIPM; - if (link->aspm_support & ASPM_STATE_L1_2_MASK) + if (link->aspm_support & PCIE_LINK_STATE_L1_2_MASK) aspm_calc_l12_info(link, parent_l1ss_cap, child_l1ss_cap); } @@ -778,8 +777,8 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) if (blacklist) { /* Set enabled/disable so that we will disable ASPM later */ - link->aspm_enabled = ASPM_STATE_ALL; - link->aspm_disable = ASPM_STATE_ALL; + link->aspm_enabled = PCIE_LINK_STATE_ASPM_ALL; + link->aspm_disable = PCIE_LINK_STATE_ASPM_ALL; return; } @@ -814,19 +813,19 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist) * support L0s. 
*/ if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S) - link->aspm_support |= ASPM_STATE_L0S; + link->aspm_support |= PCIE_LINK_STATE_L0S; if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S) - link->aspm_enabled |= ASPM_STATE_L0S_UP; + link->aspm_enabled |= PCIE_LINK_STATE_L0S_UP; if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S) - link->aspm_enabled |= ASPM_STATE_L0S_DW; + link->aspm_enabled |= PCIE_LINK_STATE_L0S_DW; /* Setup L1 state */ if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1) - link->aspm_support |= ASPM_STATE_L1; + link->aspm_support |= PCIE_LINK_STATE_L1; if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1) - link->aspm_enabled |= ASPM_STATE_L1; + link->aspm_enabled |= PCIE_LINK_STATE_L1; aspm_l1ss_init(link); @@ -876,7 +875,7 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) * If needed, disable L1, and it gets enabled later * in pcie_config_aspm_link(). */ - if (enable_req & (ASPM_STATE_L1_1 | ASPM_STATE_L1_2)) { + if (enable_req & (PCIE_LINK_STATE_L1_1 | PCIE_LINK_STATE_L1_2)) { pcie_capability_clear_word(child, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPM_L1); pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, @@ -884,13 +883,13 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) } val = 0; - if (state & ASPM_STATE_L1_1) + if (state & PCIE_LINK_STATE_L1_1) val |= PCI_L1SS_CTL1_ASPM_L1_1; - if (state & ASPM_STATE_L1_2) + if (state & PCIE_LINK_STATE_L1_2) val |= PCI_L1SS_CTL1_ASPM_L1_2; - if (state & ASPM_STATE_L1_1_PCIPM) + if (state & PCIE_LINK_STATE_L1_1_PCIPM) val |= PCI_L1SS_CTL1_PCIPM_L1_1; - if (state & ASPM_STATE_L1_2_PCIPM) + if (state & PCIE_LINK_STATE_L1_2_PCIPM) val |= PCI_L1SS_CTL1_PCIPM_L1_2; /* Enable what we need to enable */ @@ -916,29 +915,29 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state) state &= (link->aspm_capable & ~link->aspm_disable); /* Can't enable any substates if L1 is not enabled */ - if (!(state & ASPM_STATE_L1)) - state &= ~ASPM_STATE_L1SS; + if (!(state & PCIE_LINK_STATE_L1)) + state &= ~PCIE_LINK_STATE_L1SS; /* Spec says both ports must be in D0 before enabling PCI PM substates*/ if (parent->current_state != PCI_D0 || child->current_state != PCI_D0) { - state &= ~ASPM_STATE_L1_SS_PCIPM; - state |= (link->aspm_enabled & ASPM_STATE_L1_SS_PCIPM); + state &= ~PCIE_LINK_STATE_L1_SS_PCIPM; + state |= (link->aspm_enabled & PCIE_LINK_STATE_L1_SS_PCIPM); } /* Nothing to do if the link is already in the requested state */ if (link->aspm_enabled == state) return; /* Convert ASPM state to upstream/downstream ASPM register state */ - if (state & ASPM_STATE_L0S_UP) + if (state & PCIE_LINK_STATE_L0S_UP) dwstream |= PCI_EXP_LNKCTL_ASPM_L0S; - if (state & ASPM_STATE_L0S_DW) + if (state & PCIE_LINK_STATE_L0S_DW) upstream |= PCI_EXP_LNKCTL_ASPM_L0S; - if (state & ASPM_STATE_L1) { + if (state & PCIE_LINK_STATE_L1) { upstream |= PCI_EXP_LNKCTL_ASPM_L1; dwstream |= PCI_EXP_LNKCTL_ASPM_L1; } - if (link->aspm_capable & ASPM_STATE_L1SS) + if (link->aspm_capable & PCIE_LINK_STATE_L1SS) pcie_config_aspm_l1ss(link, state); /* @@ -947,11 +946,11 @@ static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state) * upstream component first and then downstream, and vice * versa for disabling ASPM L1. Spec doesn't mention L0S. 
*/ - if (state & ASPM_STATE_L1) + if (state & PCIE_LINK_STATE_L1) pcie_config_aspm_dev(parent, upstream); list_for_each_entry(child, &linkbus->devices, bus_list) pcie_config_aspm_dev(child, dwstream); - if (!(state & ASPM_STATE_L1)) + if (!(state & PCIE_LINK_STATE_L1)) pcie_config_aspm_dev(parent, upstream); link->aspm_enabled = state; @@ -1347,18 +1346,18 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool locked down_read(&pci_bus_sem); mutex_lock(&aspm_lock); if (state & PCIE_LINK_STATE_L0S) - link->aspm_disable |= ASPM_STATE_L0S; + link->aspm_disable |= PCIE_LINK_STATE_L0S; if (state & PCIE_LINK_STATE_L1) /* L1 PM substates require L1 */ - link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS; + link->aspm_disable |= PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L1SS; if (state & PCIE_LINK_STATE_L1_1) - link->aspm_disable |= ASPM_STATE_L1_1; + link->aspm_disable |= PCIE_LINK_STATE_L1_1; if (state & PCIE_LINK_STATE_L1_2) - link->aspm_disable |= ASPM_STATE_L1_2; + link->aspm_disable |= PCIE_LINK_STATE_L1_2; if (state & PCIE_LINK_STATE_L1_1_PCIPM) - link->aspm_disable |= ASPM_STATE_L1_1_PCIPM; + link->aspm_disable |= PCIE_LINK_STATE_L1_1_PCIPM; if (state & PCIE_LINK_STATE_L1_2_PCIPM) - link->aspm_disable |= ASPM_STATE_L1_2_PCIPM; + link->aspm_disable |= PCIE_LINK_STATE_L1_2_PCIPM; pcie_config_aspm_link(link, policy_to_aspm_state(link)); if (state & PCIE_LINK_STATE_CLKPM) @@ -1416,18 +1415,18 @@ static int __pci_enable_link_state(struct pci_dev *pdev, int state, bool locked) mutex_lock(&aspm_lock); link->aspm_default = 0; if (state & PCIE_LINK_STATE_L0S) - link->aspm_default |= ASPM_STATE_L0S; + link->aspm_default |= PCIE_LINK_STATE_L0S; if (state & PCIE_LINK_STATE_L1) - link->aspm_default |= ASPM_STATE_L1; + link->aspm_default |= PCIE_LINK_STATE_L1; /* L1 PM substates require L1 */ if (state & PCIE_LINK_STATE_L1_1) - link->aspm_default |= ASPM_STATE_L1_1 | ASPM_STATE_L1; + link->aspm_default |= PCIE_LINK_STATE_L1_1 | PCIE_LINK_STATE_L1; if (state & PCIE_LINK_STATE_L1_2) - link->aspm_default |= ASPM_STATE_L1_2 | ASPM_STATE_L1; + link->aspm_default |= PCIE_LINK_STATE_L1_2 | PCIE_LINK_STATE_L1; if (state & PCIE_LINK_STATE_L1_1_PCIPM) - link->aspm_default |= ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1; + link->aspm_default |= PCIE_LINK_STATE_L1_1_PCIPM | PCIE_LINK_STATE_L1; if (state & PCIE_LINK_STATE_L1_2_PCIPM) - link->aspm_default |= ASPM_STATE_L1_2_PCIPM | ASPM_STATE_L1; + link->aspm_default |= PCIE_LINK_STATE_L1_2_PCIPM | PCIE_LINK_STATE_L1; pcie_config_aspm_link(link, policy_to_aspm_state(link)); link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 
1 : 0; @@ -1563,12 +1562,12 @@ static ssize_t aspm_attr_store_common(struct device *dev, if (state_enable) { link->aspm_disable &= ~state; /* need to enable L1 for substates */ - if (state & ASPM_STATE_L1SS) - link->aspm_disable &= ~ASPM_STATE_L1; + if (state & PCIE_LINK_STATE_L1SS) + link->aspm_disable &= ~PCIE_LINK_STATE_L1; } else { link->aspm_disable |= state; - if (state & ASPM_STATE_L1) - link->aspm_disable |= ASPM_STATE_L1SS; + if (state & PCIE_LINK_STATE_L1) + link->aspm_disable |= PCIE_LINK_STATE_L1SS; } pcie_config_aspm_link(link, policy_to_aspm_state(link)); @@ -1582,12 +1581,12 @@ static ssize_t aspm_attr_store_common(struct device *dev, #define ASPM_ATTR(_f, _s) \ static ssize_t _f##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ -{ return aspm_attr_show_common(dev, attr, buf, ASPM_STATE_##_s); } \ +{ return aspm_attr_show_common(dev, attr, buf, PCIE_LINK_STATE_##_s); } \ \ static ssize_t _f##_store(struct device *dev, \ struct device_attribute *attr, \ const char *buf, size_t len) \ -{ return aspm_attr_store_common(dev, attr, buf, len, ASPM_STATE_##_s); } +{ return aspm_attr_store_common(dev, attr, buf, len, PCIE_LINK_STATE_##_s); } ASPM_ATTR(l0s_aspm, L0S) ASPM_ATTR(l1_aspm, L1) @@ -1654,12 +1653,12 @@ static umode_t aspm_ctrl_attrs_are_visible(struct kobject *kobj, struct pci_dev *pdev = to_pci_dev(dev); struct pcie_link_state *link = pcie_aspm_get_link(pdev); static const u8 aspm_state_map[] = { - ASPM_STATE_L0S, - ASPM_STATE_L1, - ASPM_STATE_L1_1, - ASPM_STATE_L1_2, - ASPM_STATE_L1_1_PCIPM, - ASPM_STATE_L1_2_PCIPM, + PCIE_LINK_STATE_L0S, + PCIE_LINK_STATE_L1, + PCIE_LINK_STATE_L1_1, + PCIE_LINK_STATE_L1_2, + PCIE_LINK_STATE_L1_1_PCIPM, + PCIE_LINK_STATE_L1_2_PCIPM, }; if (aspm_disabled || !link) diff --git a/include/linux/pci.h b/include/linux/pci.h index 16493426a04f..08c68d80c01a 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1821,17 +1821,21 @@ extern bool pcie_ports_native; #define pcie_ports_native false #endif -#define PCIE_LINK_STATE_L0S BIT(0) -#define PCIE_LINK_STATE_L1 BIT(1) -#define PCIE_LINK_STATE_CLKPM BIT(2) -#define PCIE_LINK_STATE_L1_1 BIT(3) -#define PCIE_LINK_STATE_L1_2 BIT(4) -#define PCIE_LINK_STATE_L1_1_PCIPM BIT(5) -#define PCIE_LINK_STATE_L1_2_PCIPM BIT(6) -#define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |\ - PCIE_LINK_STATE_CLKPM | PCIE_LINK_STATE_L1_1 |\ - PCIE_LINK_STATE_L1_2 | PCIE_LINK_STATE_L1_1_PCIPM |\ +#define PCIE_LINK_STATE_L0S (BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */ +#define PCIE_LINK_STATE_L1 BIT(2) /* L1 state */ +#define PCIE_LINK_STATE_L1_1 BIT(3) /* ASPM L1.1 state */ +#define PCIE_LINK_STATE_L1_2 BIT(4) /* ASPM L1.2 state */ +#define PCIE_LINK_STATE_L1_1_PCIPM BIT(5) /* PCI-PM L1.1 state */ +#define PCIE_LINK_STATE_L1_2_PCIPM BIT(6) /* PCI-PM L1.2 state */ +#define PCIE_LINK_STATE_ASPM_ALL (PCIE_LINK_STATE_L0S |\ + PCIE_LINK_STATE_L1 |\ + PCIE_LINK_STATE_L1_1 |\ + PCIE_LINK_STATE_L1_2 |\ + PCIE_LINK_STATE_L1_1_PCIPM |\ PCIE_LINK_STATE_L1_2_PCIPM) +#define PCIE_LINK_STATE_CLKPM BIT(7) +#define PCIE_LINK_STATE_ALL (PCIE_LINK_STATE_ASPM_ALL |\ + PCIE_LINK_STATE_CLKPM) #ifdef CONFIG_PCIEASPM int pci_disable_link_state(struct pci_dev *pdev, int state); -- cgit From 1116b9fa15c09748ae05d2365a305fa22671eb1e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 12 Apr 2024 01:07:29 -0400 Subject: bdev: infrastructure for flags Replace bd_partno with a 32bit field (__bd_flags). The lower 8 bits contain the partition number, the upper 24 are for flags. 
Helpers: bdev_{test,set,clear}_flag(bdev, flag), with atomic_or() and atomic_andnot() used to set/clear. NOTE: this commit does not actually move any flags over there - they are still bool fields. As the result, it shifts the fields wrt cacheline boundaries; that's going to be restored once the first 3 flags are dealt with. Signed-off-by: Al Viro --- block/bdev.c | 2 +- include/linux/blk_types.h | 3 ++- include/linux/blkdev.h | 17 ++++++++++++++++- 3 files changed, 19 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/block/bdev.c b/block/bdev.c index 7a5f611c3d2e..2ec223315500 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -411,7 +411,7 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno) mutex_init(&bdev->bd_fsfreeze_mutex); spin_lock_init(&bdev->bd_size_lock); mutex_init(&bdev->bd_holder_lock); - bdev->bd_partno = partno; + atomic_set(&bdev->__bd_flags, partno); bdev->bd_inode = inode; bdev->bd_queue = disk->queue; if (partno) diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index cb1526ec44b5..04f92737ab08 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -45,8 +45,9 @@ struct block_device { struct request_queue * bd_queue; struct disk_stats __percpu *bd_stats; unsigned long bd_stamp; + atomic_t __bd_flags; // partition number + flags +#define BD_PARTNO 255 // lower 8 bits; assign-once bool bd_read_only; /* read-only policy */ - u8 bd_partno; bool bd_write_holder; bool bd_has_submit_bio; dev_t bd_dev; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 32549d675955..99917e5860fd 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -722,7 +722,22 @@ void disk_uevent(struct gendisk *disk, enum kobject_action action); static inline u8 bdev_partno(const struct block_device *bdev) { - return bdev->bd_partno; + return atomic_read(&bdev->__bd_flags) & BD_PARTNO; +} + +static inline bool bdev_test_flag(const struct block_device *bdev, unsigned flag) +{ + return atomic_read(&bdev->__bd_flags) & flag; +} + +static inline void bdev_set_flag(struct block_device *bdev, unsigned flag) +{ + atomic_or(flag, &bdev->__bd_flags); +} + +static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag) +{ + atomic_andnot(flag, &bdev->__bd_flags); } static inline int get_disk_ro(struct gendisk *disk) -- cgit From 01e198f01d55880b79d8f5ac73064ff30a89d98a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 12 Apr 2024 01:15:55 -0400 Subject: bdev: move ->bd_read_only to ->__bd_flags Signed-off-by: Al Viro --- block/ioctl.c | 5 ++++- include/linux/blk_types.h | 2 +- include/linux/blkdev.h | 4 ++-- 3 files changed, 7 insertions(+), 4 deletions(-) (limited to 'include/linux') diff --git a/block/ioctl.c b/block/ioctl.c index 0c76137adcaa..be173e4ff43d 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -402,7 +402,10 @@ static int blkdev_roset(struct block_device *bdev, unsigned cmd, if (ret) return ret; } - bdev->bd_read_only = n; + if (n) + bdev_set_flag(bdev, BD_READ_ONLY); + else + bdev_clear_flag(bdev, BD_READ_ONLY); return 0; } diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 04f92737ab08..f70dd31cbcd1 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -47,7 +47,7 @@ struct block_device { unsigned long bd_stamp; atomic_t __bd_flags; // partition number + flags #define BD_PARTNO 255 // lower 8 bits; assign-once - bool bd_read_only; /* read-only policy */ +#define BD_READ_ONLY (1u<<8) // read-only policy bool bd_write_holder; bool bd_has_submit_bio; 
dev_t bd_dev; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 99917e5860fd..1fe91231f85b 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -742,13 +742,13 @@ static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag) static inline int get_disk_ro(struct gendisk *disk) { - return disk->part0->bd_read_only || + return bdev_test_flag(disk->part0, BD_READ_ONLY) || test_bit(GD_READ_ONLY, &disk->state); } static inline int bdev_read_only(struct block_device *bdev) { - return bdev->bd_read_only || get_disk_ro(bdev->bd_disk); + return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk); } bool set_capacity_and_notify(struct gendisk *disk, sector_t size); -- cgit From 4c80105e3909b3aff634a40daa9c29059ce03d6a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 12 Apr 2024 01:18:24 -0400 Subject: bdev: move ->bd_write_holder into ->__bd_flags Signed-off-by: Al Viro --- block/bdev.c | 9 +++++---- include/linux/blk_types.h | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/block/bdev.c b/block/bdev.c index 2ec223315500..9df9a59f0900 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -624,7 +624,7 @@ static void bd_end_claim(struct block_device *bdev, void *holder) bdev->bd_holder = NULL; bdev->bd_holder_ops = NULL; mutex_unlock(&bdev->bd_holder_lock); - if (bdev->bd_write_holder) + if (bdev_test_flag(bdev, BD_WRITE_HOLDER)) unblock = true; } if (!whole->bd_holders) @@ -640,7 +640,7 @@ static void bd_end_claim(struct block_device *bdev, void *holder) */ if (unblock) { disk_unblock_events(bdev->bd_disk); - bdev->bd_write_holder = false; + bdev_clear_flag(bdev, BD_WRITE_HOLDER); } } @@ -892,9 +892,10 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder, * writeable reference is too fragile given the way @mode is * used in blkdev_get/put(). */ - if ((mode & BLK_OPEN_WRITE) && !bdev->bd_write_holder && + if ((mode & BLK_OPEN_WRITE) && + !bdev_test_flag(bdev, BD_WRITE_HOLDER) && (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) { - bdev->bd_write_holder = true; + bdev_set_flag(bdev, BD_WRITE_HOLDER); unblock_events = false; } } diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index f70dd31cbcd1..e45a490d488e 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -48,7 +48,7 @@ struct block_device { atomic_t __bd_flags; // partition number + flags #define BD_PARTNO 255 // lower 8 bits; assign-once #define BD_READ_ONLY (1u<<8) // read-only policy - bool bd_write_holder; +#define BD_WRITE_HOLDER (1u<<9) bool bd_has_submit_bio; dev_t bd_dev; struct inode *bd_inode; /* will die */ -- cgit From ac2b6f9dee8f41d5e9162959a8bbd2c8047ee382 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 12 Apr 2024 01:21:45 -0400 Subject: bdev: move ->bd_has_subit_bio to ->__bd_flags In bdev_alloc() we have all flags initialized to false, so assignment to ->bh_has_submit_bio n there is a no-op unless we have partno != 0 and flag already set on entire device. In device_add_disk() we have just allocated the block_device in question and it had been a full-device one, so the flag is guaranteed to be still clear when we get to assignment. 
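Illustration (not part of the patch series): the pattern these commits build on is a single atomic word whose low 8 bits carry the assign-once partition number and whose upper bits carry boolean flags, set and cleared with atomic OR / AND-NOT. The standalone C11 sketch below mimics that layout; struct fake_bdev and the printf harness are invented for illustration, and atomic_fetch_or()/atomic_fetch_and() stand in for the kernel's atomic_or()/atomic_andnot().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BD_PARTNO		255u		/* low 8 bits: partition number */
#define BD_READ_ONLY		(1u << 8)	/* flag bits live above it */
#define BD_WRITE_HOLDER		(1u << 9)

struct fake_bdev {
	_Atomic unsigned int __bd_flags;	/* partition number + flags */
};

static unsigned int bdev_partno(struct fake_bdev *b)
{
	return atomic_load(&b->__bd_flags) & BD_PARTNO;
}

static bool bdev_test_flag(struct fake_bdev *b, unsigned int flag)
{
	return atomic_load(&b->__bd_flags) & flag;
}

static void bdev_set_flag(struct fake_bdev *b, unsigned int flag)
{
	atomic_fetch_or(&b->__bd_flags, flag);		/* kernel: atomic_or() */
}

static void bdev_clear_flag(struct fake_bdev *b, unsigned int flag)
{
	atomic_fetch_and(&b->__bd_flags, ~flag);	/* kernel: atomic_andnot() */
}

int main(void)
{
	struct fake_bdev b = { .__bd_flags = 3 };	/* partition 3, no flags */

	bdev_set_flag(&b, BD_READ_ONLY);
	printf("partno=%u ro=%d holder=%d\n", bdev_partno(&b),
	       (int)bdev_test_flag(&b, BD_READ_ONLY),
	       (int)bdev_test_flag(&b, BD_WRITE_HOLDER));

	bdev_clear_flag(&b, BD_READ_ONLY);
	printf("ro after clear=%d\n", (int)bdev_test_flag(&b, BD_READ_ONLY));
	return 0;
}

Because the partition number is assign-once (set in bdev_alloc() and never changed), packing it with mutable flag bits is safe: atomic OR / AND-NOT on the upper bits cannot disturb the lower 8.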
Signed-off-by: Al Viro --- block/bdev.c | 6 ++---- block/blk-core.c | 4 ++-- block/genhd.c | 3 ++- include/linux/blk_types.h | 2 +- 4 files changed, 7 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/block/bdev.c b/block/bdev.c index 9df9a59f0900..fae30eae7741 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -414,10 +414,8 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno) atomic_set(&bdev->__bd_flags, partno); bdev->bd_inode = inode; bdev->bd_queue = disk->queue; - if (partno) - bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio; - else - bdev->bd_has_submit_bio = false; + if (partno && bdev_test_flag(disk->part0, BD_HAS_SUBMIT_BIO)) + bdev_set_flag(bdev, BD_HAS_SUBMIT_BIO); bdev->bd_stats = alloc_percpu(struct disk_stats); if (!bdev->bd_stats) { iput(inode); diff --git a/block/blk-core.c b/block/blk-core.c index 20322abc6082..f61460b65408 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -615,7 +615,7 @@ static void __submit_bio(struct bio *bio) if (unlikely(!blk_crypto_bio_prep(&bio))) return; - if (!bio->bi_bdev->bd_has_submit_bio) { + if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) { blk_mq_submit_bio(bio); } else if (likely(bio_queue_enter(bio) == 0)) { struct gendisk *disk = bio->bi_bdev->bd_disk; @@ -723,7 +723,7 @@ void submit_bio_noacct_nocheck(struct bio *bio) */ if (current->bio_list) bio_list_add(¤t->bio_list[0], bio); - else if (!bio->bi_bdev->bd_has_submit_bio) + else if (!bdev_test_flag(bio->bi_bdev, BD_HAS_SUBMIT_BIO)) __submit_bio_noacct_mq(bio); else __submit_bio_noacct(bio); diff --git a/block/genhd.c b/block/genhd.c index bb29a68e1d67..19cd1a31fa80 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -413,7 +413,8 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk, elevator_init_mq(disk->queue); /* Mark bdev as having a submit_bio, if needed */ - disk->part0->bd_has_submit_bio = disk->fops->submit_bio != NULL; + if (disk->fops->submit_bio) + bdev_set_flag(disk->part0, BD_HAS_SUBMIT_BIO); /* * If the driver provides an explicit major number it also must provide diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index e45a490d488e..11b9e8eeb79f 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -49,7 +49,7 @@ struct block_device { #define BD_PARTNO 255 // lower 8 bits; assign-once #define BD_READ_ONLY (1u<<8) // read-only policy #define BD_WRITE_HOLDER (1u<<9) - bool bd_has_submit_bio; +#define BD_HAS_SUBMIT_BIO (1u<<10) dev_t bd_dev; struct inode *bd_inode; /* will die */ -- cgit From 49a43dae93c8b0ee5c9797f7f407d1244dea8213 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 12 Apr 2024 01:24:27 -0400 Subject: bdev: move ->bd_ro_warned to ->__bd_flags Signed-off-by: Al Viro --- block/blk-core.c | 5 +++-- include/linux/blk_types.h | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index f61460b65408..1be49be9fac4 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -514,10 +514,11 @@ static inline void bio_check_ro(struct bio *bio) if (op_is_flush(bio->bi_opf) && !bio_sectors(bio)) return; - if (bio->bi_bdev->bd_ro_warned) + if (bdev_test_flag(bio->bi_bdev, BD_RO_WARNED)) return; - bio->bi_bdev->bd_ro_warned = true; + bdev_set_flag(bio->bi_bdev, BD_RO_WARNED); + /* * Use ioctl to set underlying disk of raid/dm to read-only * will trigger this. 
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 11b9e8eeb79f..4e0c8785090c 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -50,6 +50,7 @@ struct block_device { #define BD_READ_ONLY (1u<<8) // read-only policy #define BD_WRITE_HOLDER (1u<<9) #define BD_HAS_SUBMIT_BIO (1u<<10) +#define BD_RO_WARNED (1u<<11) dev_t bd_dev; struct inode *bd_inode; /* will die */ @@ -69,7 +70,6 @@ struct block_device { #ifdef CONFIG_FAIL_MAKE_REQUEST bool bd_make_it_fail; #endif - bool bd_ro_warned; int bd_writers; /* * keep this out-of-line as it's both big and not needed in the fast -- cgit From 811ba89a8838e7c43ff46b6210ba1878bfe4437e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 28 Apr 2024 00:15:07 -0400 Subject: bdev: move ->bd_make_it_fail to ->__bd_flags Signed-off-by: Al Viro --- block/blk-core.c | 3 ++- block/genhd.c | 12 ++++++++---- include/linux/blk_types.h | 6 +++--- 3 files changed, 13 insertions(+), 8 deletions(-) (limited to 'include/linux') diff --git a/block/blk-core.c b/block/blk-core.c index 1be49be9fac4..1076336dd620 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -494,7 +494,8 @@ __setup("fail_make_request=", setup_fail_make_request); bool should_fail_request(struct block_device *part, unsigned int bytes) { - return part->bd_make_it_fail && should_fail(&fail_make_request, bytes); + return bdev_test_flag(part, BD_MAKE_IT_FAIL) && + should_fail(&fail_make_request, bytes); } static int __init fail_make_request_debugfs(void) diff --git a/block/genhd.c b/block/genhd.c index 19cd1a31fa80..0cce461952f6 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -1066,7 +1066,8 @@ static DEVICE_ATTR(diskseq, 0444, diskseq_show, NULL); ssize_t part_fail_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_make_it_fail); + return sprintf(buf, "%d\n", + bdev_test_flag(dev_to_bdev(dev), BD_MAKE_IT_FAIL)); } ssize_t part_fail_store(struct device *dev, @@ -1075,9 +1076,12 @@ ssize_t part_fail_store(struct device *dev, { int i; - if (count > 0 && sscanf(buf, "%d", &i) > 0) - dev_to_bdev(dev)->bd_make_it_fail = i; - + if (count > 0 && sscanf(buf, "%d", &i) > 0) { + if (i) + bdev_set_flag(dev_to_bdev(dev), BD_MAKE_IT_FAIL); + else + bdev_clear_flag(dev_to_bdev(dev), BD_MAKE_IT_FAIL); + } return count; } diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 4e0c8785090c..5bb7805927ac 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -51,6 +51,9 @@ struct block_device { #define BD_WRITE_HOLDER (1u<<9) #define BD_HAS_SUBMIT_BIO (1u<<10) #define BD_RO_WARNED (1u<<11) +#ifdef CONFIG_FAIL_MAKE_REQUEST +#define BD_MAKE_IT_FAIL (1u<<12) +#endif dev_t bd_dev; struct inode *bd_inode; /* will die */ @@ -67,9 +70,6 @@ struct block_device { struct mutex bd_fsfreeze_mutex; /* serialize freeze/thaw */ struct partition_meta_info *bd_meta_info; -#ifdef CONFIG_FAIL_MAKE_REQUEST - bool bd_make_it_fail; -#endif int bd_writers; /* * keep this out-of-line as it's both big and not needed in the fast -- cgit From b250c20b64290808aa4b5cc6d68819a7ee28237f Mon Sep 17 00:00:00 2001 From: David Lechner Date: Mon, 29 Apr 2024 18:40:09 -0500 Subject: regulator: devres: add API for reference voltage supplies A common use case for regulators is to supply a reference voltage to an analog input or output device. This adds a new devres API to get, enable, and get the voltage in a single call. 
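For illustration, a consumer driver's probe path can then be reduced to roughly the sketch below (hypothetical driver: struct foo_adc, FOO_ADC_INTERNAL_VREF_UV and the "vref" supply name are invented; only devm_regulator_get_enable_read_voltage() and its documented return convention — microvolts on success, -ENODEV if the optional supply is absent — come from this patch):

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>

#define FOO_ADC_INTERNAL_VREF_UV	2500000	/* hypothetical fallback reference */

struct foo_adc {
	int vref_uv;
};

static int foo_adc_probe(struct platform_device *pdev)
{
	struct foo_adc *st;
	int ret;

	st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	/* get + enable the "vref" supply and read its voltage in one call */
	ret = devm_regulator_get_enable_read_voltage(&pdev->dev, "vref");
	if (ret == -ENODEV)
		st->vref_uv = FOO_ADC_INTERNAL_VREF_UV;	/* optional supply absent */
	else if (ret < 0)
		return dev_err_probe(&pdev->dev, ret, "failed to get vref\n");
	else
		st->vref_uv = ret;

	platform_set_drvdata(pdev, st);
	return 0;
}
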
This allows eliminating boilerplate code in drivers that use reference supplies in this way. Signed-off-by: David Lechner Link: https://lore.kernel.org/r/20240429-regulator-get-enable-get-votlage-v2-1-b1f11ab766c1@baylibre.com Signed-off-by: Mark Brown --- Documentation/driver-api/driver-model/devres.rst | 1 + drivers/regulator/devres.c | 59 ++++++++++++++++++++++++ include/linux/regulator/consumer.h | 7 +++ 3 files changed, 67 insertions(+) (limited to 'include/linux') diff --git a/Documentation/driver-api/driver-model/devres.rst b/Documentation/driver-api/driver-model/devres.rst index 7be8b8dd5f00..18caebad7376 100644 --- a/Documentation/driver-api/driver-model/devres.rst +++ b/Documentation/driver-api/driver-model/devres.rst @@ -433,6 +433,7 @@ REGULATOR devm_regulator_bulk_put() devm_regulator_get() devm_regulator_get_enable() + devm_regulator_get_enable_read_voltage() devm_regulator_get_enable_optional() devm_regulator_get_exclusive() devm_regulator_get_optional() diff --git a/drivers/regulator/devres.c b/drivers/regulator/devres.c index 90bb0d178885..4f290b9b559b 100644 --- a/drivers/regulator/devres.c +++ b/drivers/regulator/devres.c @@ -145,6 +145,65 @@ struct regulator *devm_regulator_get_optional(struct device *dev, } EXPORT_SYMBOL_GPL(devm_regulator_get_optional); +/** + * devm_regulator_get_enable_read_voltage - Resource managed regulator get and + * enable that returns the voltage + * @dev: device to supply + * @id: supply name or regulator ID. + * + * Get and enable regulator for duration of the device life-time. + * regulator_disable() and regulator_put() are automatically called on driver + * detach. See regulator_get_optional(), regulator_enable(), and + * regulator_get_voltage() for more information. + * + * This is a convenience function for supplies that provide a reference voltage + * where the consumer driver just needs to know the voltage and keep the + * regulator enabled. + * + * In cases where the supply is not strictly required, callers can check for + * -ENODEV error and handle it accordingly. + * + * Returns: voltage in microvolts on success, or an error code on failure. + */ +int devm_regulator_get_enable_read_voltage(struct device *dev, const char *id) +{ + struct regulator *r; + int ret; + + /* + * Since we need a real voltage, we use devm_regulator_get_optional() + * rather than getting a dummy regulator with devm_regulator_get() and + * then letting regulator_get_voltage() fail with -EINVAL. This way, the + * caller can handle the -ENODEV error code if needed instead of the + * ambiguous -EINVAL. 
+ */ + r = devm_regulator_get_optional(dev, id); + if (IS_ERR(r)) + return PTR_ERR(r); + + ret = regulator_enable(r); + if (ret) + goto err_regulator_put; + + ret = devm_add_action_or_reset(dev, regulator_action_disable, r); + if (ret) + goto err_regulator_put; + + ret = regulator_get_voltage(r); + if (ret < 0) + goto err_release_action; + + return 0; + +err_release_action: + devm_release_action(dev, regulator_action_disable, r); +err_regulator_put: + devm_regulator_put(r); + + return ret; +} +EXPORT_SYMBOL_GPL(devm_regulator_get_enable_read_voltage); + static int devm_regulator_match(struct device *dev, void *res, void *data) { struct regulator **r = res; diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 4660582a3302..6f41a11c92e7 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -164,6 +164,7 @@ struct regulator *__must_check devm_regulator_get_optional(struct device *dev, const char *id); int devm_regulator_get_enable(struct device *dev, const char *id); int devm_regulator_get_enable_optional(struct device *dev, const char *id); +int devm_regulator_get_enable_read_voltage(struct device *dev, const char *id); void regulator_put(struct regulator *regulator); void devm_regulator_put(struct regulator *regulator); @@ -329,6 +330,12 @@ static inline int devm_regulator_get_enable_optional(struct device *dev, return -ENODEV; } +static inline int devm_regulator_get_enable_read_voltage(struct device *dev, + const char *id) +{ + return -ENODEV; +} + static inline struct regulator *__must_check regulator_get_optional(struct device *dev, const char *id) { -- cgit From 2a45166938f145294b73445b0af997b3100f02b4 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 17 Apr 2024 13:54:30 +0300 Subject: spi: pxa2xx: Move contents of linux/spi/pxa2xx_spi.h to a local one There is no user of the linux/spi/pxa2xx_spi.h. Move its contents to the drivers/spi/spi-pxa2xx.h. 
Suggested-by: Arnd Bergmann Signed-off-by: Andy Shevchenko Reviewed-by: Linus Walleij Link: https://lore.kernel.org/r/20240417110334.2671228-4-andriy.shevchenko@linux.intel.com Signed-off-by: Mark Brown --- drivers/spi/spi-pxa2xx-dma.c | 1 - drivers/spi/spi-pxa2xx-pci.c | 4 ++-- drivers/spi/spi-pxa2xx.c | 1 - drivers/spi/spi-pxa2xx.h | 36 ++++++++++++++++++++++++++++++- include/linux/spi/pxa2xx_spi.h | 48 ------------------------------------------ 5 files changed, 37 insertions(+), 53 deletions(-) delete mode 100644 include/linux/spi/pxa2xx_spi.h (limited to 'include/linux') diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c index be563f0dd03a..26416ced6505 100644 --- a/drivers/spi/spi-pxa2xx-dma.c +++ b/drivers/spi/spi-pxa2xx-dma.c @@ -12,7 +12,6 @@ #include #include -#include #include #include "spi-pxa2xx.h" diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index 861b21c63504..e11a613bc340 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c @@ -10,11 +10,11 @@ #include #include -#include - #include #include +#include "spi-pxa2xx.h" + #define PCI_DEVICE_ID_INTEL_QUARK_X1000 0x0935 #define PCI_DEVICE_ID_INTEL_BYT 0x0f0e #define PCI_DEVICE_ID_INTEL_MRFLD 0x1194 diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index ab6fd55237cd..82633682b581 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -24,7 +24,6 @@ #include #include -#include #include #include "spi-pxa2xx.h" diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h index 45cdbbc71c4b..08296729ea80 100644 --- a/drivers/spi/spi-pxa2xx.h +++ b/drivers/spi/spi-pxa2xx.h @@ -7,6 +7,7 @@ #ifndef SPI_PXA2XX_H #define SPI_PXA2XX_H +#include #include #include #include @@ -15,7 +16,40 @@ #include struct gpio_desc; -struct pxa2xx_spi_controller; + +/* + * The platform data for SSP controller devices + * (resides in device.platform_data). + */ +struct pxa2xx_spi_controller { + u8 num_chipselect; + u8 enable_dma; + u8 dma_burst_size; + bool is_target; + + /* DMA engine specific config */ + dma_filter_fn dma_filter; + void *tx_param; + void *rx_param; + + /* For non-PXA arches */ + struct ssp_device ssp; +}; + +/* + * The controller specific data for SPI target devices + * (resides in spi_board_info.controller_data), + * copied to spi_device.platform_data ... mostly for + * DMA tuning. + */ +struct pxa2xx_spi_chip { + u8 tx_threshold; + u8 tx_hi_threshold; + u8 rx_threshold; + u8 dma_burst_size; + u32 timeout; +}; + struct spi_controller; struct spi_device; struct spi_transfer; diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h deleted file mode 100644 index e5a4a045fb67..000000000000 --- a/include/linux/spi/pxa2xx_spi.h +++ /dev/null @@ -1,48 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs - */ -#ifndef __LINUX_SPI_PXA2XX_SPI_H -#define __LINUX_SPI_PXA2XX_SPI_H - -#include -#include - -#include - -struct dma_chan; - -/* - * The platform data for SSP controller devices - * (resides in device.platform_data). - */ -struct pxa2xx_spi_controller { - u8 num_chipselect; - u8 enable_dma; - u8 dma_burst_size; - bool is_target; - - /* DMA engine specific config */ - dma_filter_fn dma_filter; - void *tx_param; - void *rx_param; - - /* For non-PXA arches */ - struct ssp_device ssp; -}; - -/* - * The controller specific data for SPI target devices - * (resides in spi_board_info.controller_data), - * copied to spi_device.platform_data ... 
mostly for - * DMA tuning. - */ -struct pxa2xx_spi_chip { - u8 tx_threshold; - u8 tx_hi_threshold; - u8 rx_threshold; - u8 dma_burst_size; - u32 timeout; -}; - -#endif /* __LINUX_SPI_PXA2XX_SPI_H */ -- cgit From 6d0ca4a2a7e25f9ad07c1f335f20b4d9e048cdd5 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Tue, 30 Apr 2024 09:49:11 +0100 Subject: nvmem: layouts: store owner from modules with nvmem_layout_driver_register() Modules registering driver with nvmem_layout_driver_register() might forget to set .owner field. The field is used by some of other kernel parts for reference counting (try_module_get()), so it is expected that drivers will set it. Solve the problem by moving this task away from the drivers to the core code, just like we did for platform_driver in commit 9447057eaff8 ("platform_device: use a macro instead of platform_driver_register"). Signed-off-by: Krzysztof Kozlowski Reviewed-by: Michael Walle Reviewed-by: Miquel Raynal Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20240430084921.33387-2-srinivas.kandagatla@linaro.org Signed-off-by: Greg Kroah-Hartman --- drivers/nvmem/layouts.c | 6 ++++-- include/linux/nvmem-provider.h | 5 ++++- 2 files changed, 8 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/nvmem/layouts.c b/drivers/nvmem/layouts.c index 8b5e2de138eb..64dc7013a098 100644 --- a/drivers/nvmem/layouts.c +++ b/drivers/nvmem/layouts.c @@ -52,13 +52,15 @@ static const struct bus_type nvmem_layout_bus_type = { .remove = nvmem_layout_bus_remove, }; -int nvmem_layout_driver_register(struct nvmem_layout_driver *drv) +int __nvmem_layout_driver_register(struct nvmem_layout_driver *drv, + struct module *owner) { drv->driver.bus = &nvmem_layout_bus_type; + drv->driver.owner = owner; return driver_register(&drv->driver); } -EXPORT_SYMBOL_GPL(nvmem_layout_driver_register); +EXPORT_SYMBOL_GPL(__nvmem_layout_driver_register); void nvmem_layout_driver_unregister(struct nvmem_layout_driver *drv) { diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index f0ba0e03218f..3ebeaa0ded00 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -199,7 +199,10 @@ int nvmem_add_one_cell(struct nvmem_device *nvmem, int nvmem_layout_register(struct nvmem_layout *layout); void nvmem_layout_unregister(struct nvmem_layout *layout); -int nvmem_layout_driver_register(struct nvmem_layout_driver *drv); +#define nvmem_layout_driver_register(drv) \ + __nvmem_layout_driver_register(drv, THIS_MODULE) +int __nvmem_layout_driver_register(struct nvmem_layout_driver *drv, + struct module *owner); void nvmem_layout_driver_unregister(struct nvmem_layout_driver *drv); #define module_nvmem_layout_driver(__nvmem_layout_driver) \ module_driver(__nvmem_layout_driver, nvmem_layout_driver_register, \ -- cgit From 186ddac2072a8134798d72635d1ed0f29889369d Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Thu, 11 Apr 2024 15:53:43 +0100 Subject: block: move two helpers into bdev.c disk_live() and block_size() access bd_inode directly, prepare to remove the field bd_inode from block_device, and only access bd_inode in block layer. 
Signed-off-by: Yu Kuai Reviewed-by: Christoph Hellwig Reviewed-by: Jan Kara Signed-off-by: Al Viro Link: https://lore.kernel.org/r/20240411145346.2516848-8-viro@zeniv.linux.org.uk Signed-off-by: Christian Brauner --- block/bdev.c | 12 ++++++++++++ include/linux/blkdev.h | 12 ++---------- 2 files changed, 14 insertions(+), 10 deletions(-) (limited to 'include/linux') diff --git a/block/bdev.c b/block/bdev.c index a89bce368b64..536233ac3e99 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -1257,6 +1257,18 @@ void bdev_statx_dioalign(struct inode *inode, struct kstat *stat) blkdev_put_no_open(bdev); } +bool disk_live(struct gendisk *disk) +{ + return !inode_unhashed(disk->part0->bd_inode); +} +EXPORT_SYMBOL_GPL(disk_live); + +unsigned int block_size(struct block_device *bdev) +{ + return 1 << bdev->bd_inode->i_blkbits; +} +EXPORT_SYMBOL_GPL(block_size); + static int __init setup_bdev_allow_write_mounted(char *str) { if (kstrtobool(str, &bdev_allow_write_mounted)) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 20c749b2ebc2..99ac98ed9548 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -211,11 +211,6 @@ struct gendisk { struct blk_independent_access_ranges *ia_ranges; }; -static inline bool disk_live(struct gendisk *disk) -{ - return !inode_unhashed(disk->part0->bd_inode); -} - /** * disk_openers - returns how many openers are there for a disk * @disk: disk to check @@ -1364,11 +1359,6 @@ static inline unsigned int blksize_bits(unsigned int size) return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT; } -static inline unsigned int block_size(struct block_device *bdev) -{ - return 1 << bdev->bd_inode->i_blkbits; -} - int kblockd_schedule_work(struct work_struct *work); int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); @@ -1536,6 +1526,8 @@ void blkdev_put_no_open(struct block_device *bdev); struct block_device *I_BDEV(struct inode *inode); struct block_device *file_bdev(struct file *bdev_file); +bool disk_live(struct gendisk *disk); +unsigned int block_size(struct block_device *bdev); #ifdef CONFIG_BLOCK void invalidate_bdev(struct block_device *bdev); -- cgit From e33aef2c58577f51ec22736843a652576ce0ef7a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 11 Apr 2024 15:53:36 +0100 Subject: block_device: add a pointer to struct address_space (page cache of bdev) points to ->i_data of coallocated inode. 
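Illustration (not part of the patch): for a block device the co-allocated inode's ->i_mapping already points at its own ->i_data, so caching &inode->i_data in the new ->bd_mapping field hands callers the same address_space without the extra hop through ->bd_inode. A toy userspace sketch of that relationship, with invented stand-in struct definitions:

#include <stdio.h>

/* minimal stand-ins for the kernel structures involved */
struct address_space { int dummy; };

struct inode {
	struct address_space *i_mapping;	/* points at the i_data below */
	struct address_space i_data;
};

struct block_device {
	struct inode *bd_inode;			/* "will die" per this series */
	struct address_space *bd_mapping;	/* new cached page-cache pointer */
};

int main(void)
{
	struct inode inode = { .i_mapping = &inode.i_data };
	struct block_device bdev = {
		.bd_inode	= &inode,
		.bd_mapping	= &inode.i_data,	/* what bdev_alloc() now stores */
	};

	/* the old path and the new field name the same mapping */
	printf("same mapping: %d\n", bdev.bd_inode->i_mapping == bdev.bd_mapping);
	return 0;
}

The follow-up commit below then converts callers from bdev->bd_inode->i_mapping to bdev->bd_mapping.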
Signed-off-by: Al Viro Link: https://lore.kernel.org/r/20240411145346.2516848-1-viro@zeniv.linux.org.uk Signed-off-by: Christian Brauner --- block/bdev.c | 1 + include/linux/blk_types.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include/linux') diff --git a/block/bdev.c b/block/bdev.c index 28e6f0423857..8e19101cbbb0 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -419,6 +419,7 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno) mutex_init(&bdev->bd_holder_lock); bdev->bd_partno = partno; bdev->bd_inode = inode; + bdev->bd_mapping = &inode->i_data; bdev->bd_queue = disk->queue; if (partno) bdev->bd_has_submit_bio = disk->part0->bd_has_submit_bio; diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index cb1526ec44b5..6438c75cbb35 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -51,6 +51,7 @@ struct block_device { bool bd_has_submit_bio; dev_t bd_dev; struct inode *bd_inode; /* will die */ + struct address_space *bd_mapping; /* page cache */ atomic_t bd_openers; spinlock_t bd_size_lock; /* for bd_inode->i_size updates */ -- cgit From 224941e8379a0de8652ffec768cc8394f0b1cb95 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 11 Apr 2024 15:53:37 +0100 Subject: use ->bd_mapping instead of ->bd_inode->i_mapping Just the low-hanging fruit... Signed-off-by: Al Viro Link: https://lore.kernel.org/r/20240411145346.2516848-2-viro@zeniv.linux.org.uk Signed-off-by: Christian Brauner --- block/bdev.c | 18 +++++++++--------- block/blk-zoned.c | 4 ++-- block/genhd.c | 2 +- block/ioctl.c | 4 ++-- block/partitions/core.c | 2 +- drivers/md/bcache/super.c | 2 +- drivers/scsi/scsicam.c | 2 +- fs/btrfs/disk-io.c | 6 +++--- fs/btrfs/volumes.c | 2 +- fs/btrfs/zoned.c | 2 +- fs/buffer.c | 2 +- fs/cramfs/inode.c | 2 +- fs/erofs/data.c | 2 +- fs/ext4/dir.c | 2 +- fs/ext4/ext4_jbd2.c | 2 +- fs/ext4/super.c | 6 +++--- fs/jbd2/journal.c | 2 +- include/linux/buffer_head.h | 4 ++-- include/linux/jbd2.h | 4 ++-- 19 files changed, 35 insertions(+), 35 deletions(-) (limited to 'include/linux') diff --git a/block/bdev.c b/block/bdev.c index 8e19101cbbb0..00017af92a51 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -76,7 +76,7 @@ static void bdev_write_inode(struct block_device *bdev) /* Kill _all_ buffers and pagecache , dirty or not.. */ static void kill_bdev(struct block_device *bdev) { - struct address_space *mapping = bdev->bd_inode->i_mapping; + struct address_space *mapping = bdev->bd_mapping; if (mapping_empty(mapping)) return; @@ -88,7 +88,7 @@ static void kill_bdev(struct block_device *bdev) /* Invalidate clean unused buffers and pagecache. */ void invalidate_bdev(struct block_device *bdev) { - struct address_space *mapping = bdev->bd_inode->i_mapping; + struct address_space *mapping = bdev->bd_mapping; if (mapping->nrpages) { invalidate_bh_lrus(); @@ -116,7 +116,7 @@ int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode, goto invalidate; } - truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend); + truncate_inode_pages_range(bdev->bd_mapping, lstart, lend); if (!(mode & BLK_OPEN_EXCL)) bd_abort_claiming(bdev, truncate_bdev_range); return 0; @@ -126,7 +126,7 @@ invalidate: * Someone else has handle exclusively open. Try invalidating instead. * The 'end' argument is inclusive so the rounding is safe. 
*/ - return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping, + return invalidate_inode_pages2_range(bdev->bd_mapping, lstart >> PAGE_SHIFT, lend >> PAGE_SHIFT); } @@ -198,7 +198,7 @@ int sync_blockdev_nowait(struct block_device *bdev) { if (!bdev) return 0; - return filemap_flush(bdev->bd_inode->i_mapping); + return filemap_flush(bdev->bd_mapping); } EXPORT_SYMBOL_GPL(sync_blockdev_nowait); @@ -210,13 +210,13 @@ int sync_blockdev(struct block_device *bdev) { if (!bdev) return 0; - return filemap_write_and_wait(bdev->bd_inode->i_mapping); + return filemap_write_and_wait(bdev->bd_mapping); } EXPORT_SYMBOL(sync_blockdev); int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend) { - return filemap_write_and_wait_range(bdev->bd_inode->i_mapping, + return filemap_write_and_wait_range(bdev->bd_mapping, lstart, lend); } EXPORT_SYMBOL(sync_blockdev_range); @@ -445,7 +445,7 @@ void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors) void bdev_add(struct block_device *bdev, dev_t dev) { if (bdev_stable_writes(bdev)) - mapping_set_stable_writes(bdev->bd_inode->i_mapping); + mapping_set_stable_writes(bdev->bd_mapping); bdev->bd_dev = dev; bdev->bd_inode->i_rdev = dev; bdev->bd_inode->i_ino = dev; @@ -925,7 +925,7 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder, bdev_file->f_mode |= FMODE_NOWAIT; if (mode & BLK_OPEN_RESTRICT_WRITES) bdev_file->f_mode |= FMODE_WRITE_RESTRICTED; - bdev_file->f_mapping = bdev->bd_inode->i_mapping; + bdev_file->f_mapping = bdev->bd_mapping; bdev_file->f_wb_err = filemap_sample_wb_err(bdev_file->f_mapping); bdev_file->private_data = holder; diff --git a/block/blk-zoned.c b/block/blk-zoned.c index da0f4b2a8fa0..b008bcd4889c 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -398,7 +398,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode, op = REQ_OP_ZONE_RESET; /* Invalidate the page cache, including dirty pages. 
*/ - filemap_invalidate_lock(bdev->bd_inode->i_mapping); + filemap_invalidate_lock(bdev->bd_mapping); ret = blkdev_truncate_zone_range(bdev, mode, &zrange); if (ret) goto fail; @@ -420,7 +420,7 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode, fail: if (cmd == BLKRESETZONE) - filemap_invalidate_unlock(bdev->bd_inode->i_mapping); + filemap_invalidate_unlock(bdev->bd_mapping); return ret; } diff --git a/block/genhd.c b/block/genhd.c index 93f5118b7d41..2bf05bd0f071 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -745,7 +745,7 @@ void invalidate_disk(struct gendisk *disk) struct block_device *bdev = disk->part0; invalidate_bdev(bdev); - bdev->bd_inode->i_mapping->wb_err = 0; + bdev->bd_mapping->wb_err = 0; set_capacity(disk, 0); } EXPORT_SYMBOL(invalidate_disk); diff --git a/block/ioctl.c b/block/ioctl.c index 1c800364bc70..7c13d8bed453 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -152,12 +152,12 @@ static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode, if (start + len > bdev_nr_bytes(bdev)) return -EINVAL; - filemap_invalidate_lock(bdev->bd_inode->i_mapping); + filemap_invalidate_lock(bdev->bd_mapping); err = truncate_bdev_range(bdev, mode, start, start + len - 1); if (!err) err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9, GFP_KERNEL); - filemap_invalidate_unlock(bdev->bd_inode->i_mapping); + filemap_invalidate_unlock(bdev->bd_mapping); return err; } diff --git a/block/partitions/core.c b/block/partitions/core.c index 2b75e325c63b..63ee317dfff0 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -704,7 +704,7 @@ EXPORT_SYMBOL_GPL(bdev_disk_changed); void *read_part_sector(struct parsed_partitions *state, sector_t n, Sector *p) { - struct address_space *mapping = state->disk->part0->bd_inode->i_mapping; + struct address_space *mapping = state->disk->part0->bd_mapping; struct folio *folio; if (n >= get_capacity(state->disk)) { diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 0ee5e17ae2dd..277ad958cf53 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -171,7 +171,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, struct page *page; unsigned int i; - page = read_cache_page_gfp(bdev->bd_inode->i_mapping, + page = read_cache_page_gfp(bdev->bd_mapping, SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL); if (IS_ERR(page)) return "IO error"; diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c index e2c7d8ef205f..dd69342bbe78 100644 --- a/drivers/scsi/scsicam.c +++ b/drivers/scsi/scsicam.c @@ -32,7 +32,7 @@ */ unsigned char *scsi_bios_ptable(struct block_device *dev) { - struct address_space *mapping = bdev_whole(dev)->bd_inode->i_mapping; + struct address_space *mapping = bdev_whole(dev)->bd_mapping; unsigned char *res = NULL; struct folio *folio; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 3df5477d48a8..f10e894b0bf5 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3651,7 +3651,7 @@ struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev, struct btrfs_super_block *super; struct page *page; u64 bytenr, bytenr_orig; - struct address_space *mapping = bdev->bd_inode->i_mapping; + struct address_space *mapping = bdev->bd_mapping; int ret; bytenr_orig = btrfs_sb_offset(copy_num); @@ -3738,7 +3738,7 @@ static int write_dev_supers(struct btrfs_device *device, struct btrfs_super_block *sb, int max_mirrors) { struct btrfs_fs_info *fs_info = device->fs_info; - struct address_space *mapping = 
device->bdev->bd_inode->i_mapping; + struct address_space *mapping = device->bdev->bd_mapping; SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); int i; int errors = 0; @@ -3855,7 +3855,7 @@ static int wait_dev_supers(struct btrfs_device *device, int max_mirrors) device->commit_total_bytes) break; - page = find_get_page(device->bdev->bd_inode->i_mapping, + page = find_get_page(device->bdev->bd_mapping, bytenr >> PAGE_SHIFT); if (!page) { errors++; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 65c03ddecc59..33d357e5f9c5 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1290,7 +1290,7 @@ static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev return ERR_PTR(-EINVAL); /* pull in the page with our super */ - page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL); + page = read_cache_page_gfp(bdev->bd_mapping, index, GFP_KERNEL); if (IS_ERR(page)) return ERR_CAST(page); diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index 4cba80b34387..9b43fa493219 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -118,7 +118,7 @@ static int sb_write_pointer(struct block_device *bdev, struct blk_zone *zones, return -ENOENT; } else if (full[0] && full[1]) { /* Compare two super blocks */ - struct address_space *mapping = bdev->bd_inode->i_mapping; + struct address_space *mapping = bdev->bd_mapping; struct page *page[BTRFS_NR_SB_LOG_ZONES]; struct btrfs_super_block *super[BTRFS_NR_SB_LOG_ZONES]; int i; diff --git a/fs/buffer.c b/fs/buffer.c index 4f73d23c2c46..d5a0932ae68d 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -1463,7 +1463,7 @@ __bread_gfp(struct block_device *bdev, sector_t block, { struct buffer_head *bh; - gfp |= mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS); + gfp |= mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS); /* * Prefer looping in the allocator rather than here, at least that diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index 9901057a15ba..460690ca0174 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c @@ -183,7 +183,7 @@ static int next_buffer; static void *cramfs_blkdev_read(struct super_block *sb, unsigned int offset, unsigned int len) { - struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; + struct address_space *mapping = sb->s_bdev->bd_mapping; struct file_ra_state ra = {}; struct page *pages[BLKS_PER_BUF]; unsigned i, blocknr, buffer; diff --git a/fs/erofs/data.c b/fs/erofs/data.c index e1a170e45c70..5fc03c1e2757 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -68,7 +68,7 @@ void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb) if (erofs_is_fscache_mode(sb)) buf->mapping = EROFS_SB(sb)->s_fscache->inode->i_mapping; else - buf->mapping = sb->s_bdev->bd_inode->i_mapping; + buf->mapping = sb->s_bdev->bd_mapping; } void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb, diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 3985f8c33f95..ff4514e4626b 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -192,7 +192,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) (PAGE_SHIFT - inode->i_blkbits); if (!ra_has_index(&file->f_ra, index)) page_cache_sync_readahead( - sb->s_bdev->bd_inode->i_mapping, + sb->s_bdev->bd_mapping, &file->f_ra, file, index, 1); file->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT; diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 5d8055161acd..da4a82456383 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c @@ -206,7 +206,7 @@ static void ext4_journal_abort_handle(const char *caller, 
unsigned int line, static void ext4_check_bdev_write_error(struct super_block *sb) { - struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; + struct address_space *mapping = sb->s_bdev->bd_mapping; struct ext4_sb_info *sbi = EXT4_SB(sb); int err; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index b255f798f449..02042eb91806 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -244,7 +244,7 @@ static struct buffer_head *__ext4_sb_bread_gfp(struct super_block *sb, struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block, blk_opf_t op_flags) { - gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_inode->i_mapping, + gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping, ~__GFP_FS) | __GFP_MOVABLE; return __ext4_sb_bread_gfp(sb, block, op_flags, gfp); @@ -253,7 +253,7 @@ struct buffer_head *ext4_sb_bread(struct super_block *sb, sector_t block, struct buffer_head *ext4_sb_bread_unmovable(struct super_block *sb, sector_t block) { - gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_inode->i_mapping, + gfp_t gfp = mapping_gfp_constraint(sb->s_bdev->bd_mapping, ~__GFP_FS); return __ext4_sb_bread_gfp(sb, block, 0, gfp); @@ -5556,7 +5556,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) * used to detect the metadata async write error. */ spin_lock_init(&sbi->s_bdev_wb_lock); - errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err, + errseq_check_and_advance(&sb->s_bdev->bd_mapping->wb_err, &sbi->s_bdev_wb_err); EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; ext4_orphan_cleanup(sb, es); diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index b6c114c11b97..03c4b9214f56 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -2009,7 +2009,7 @@ static int __jbd2_journal_erase(journal_t *journal, unsigned int flags) byte_count = (block_stop - block_start + 1) * journal->j_blocksize; - truncate_inode_pages_range(journal->j_dev->bd_inode->i_mapping, + truncate_inode_pages_range(journal->j_dev->bd_mapping, byte_start, byte_stop); if (flags & JBD2_JOURNAL_FLUSH_DISCARD) { diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index d78454a4dd1f..e58a0d63409a 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -338,7 +338,7 @@ static inline struct buffer_head *getblk_unmovable(struct block_device *bdev, { gfp_t gfp; - gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS); + gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS); gfp |= __GFP_NOFAIL; return bdev_getblk(bdev, block, size, gfp); @@ -349,7 +349,7 @@ static inline struct buffer_head *__getblk(struct block_device *bdev, { gfp_t gfp; - gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS); + gfp = mapping_gfp_constraint(bdev->bd_mapping, ~__GFP_FS); gfp |= __GFP_MOVABLE | __GFP_NOFAIL; return bdev_getblk(bdev, block, size, gfp); diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 971f3e826e15..ac31c37816f7 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1696,7 +1696,7 @@ static inline void jbd2_journal_abort_handle(handle_t *handle) static inline void jbd2_init_fs_dev_write_error(journal_t *journal) { - struct address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping; + struct address_space *mapping = journal->j_fs_dev->bd_mapping; /* * Save the original wb_err value of client fs's bdev mapping which @@ -1707,7 +1707,7 @@ static inline void jbd2_init_fs_dev_write_error(journal_t *journal) static inline int jbd2_check_fs_dev_write_error(journal_t *journal) { - struct 
address_space *mapping = journal->j_fs_dev->bd_inode->i_mapping; + struct address_space *mapping = journal->j_fs_dev->bd_mapping; return errseq_check(&mapping->wb_err, READ_ONCE(journal->j_fs_dev_wb_err)); -- cgit From 203c1ce0bb063d1620698e39637b64f2d09c1368 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 28 Apr 2024 20:47:14 -0400 Subject: RIP ->bd_inode Signed-off-by: Al Viro --- block/bdev.c | 1 - include/linux/blk_types.h | 1 - 2 files changed, 2 deletions(-) (limited to 'include/linux') diff --git a/block/bdev.c b/block/bdev.c index a8c66cc1d6b8..0849a9cfa2b6 100644 --- a/block/bdev.c +++ b/block/bdev.c @@ -423,7 +423,6 @@ struct block_device *bdev_alloc(struct gendisk *disk, u8 partno) spin_lock_init(&bdev->bd_size_lock); mutex_init(&bdev->bd_holder_lock); bdev->bd_partno = partno; - bdev->bd_inode = inode; bdev->bd_mapping = &inode->i_data; bdev->bd_queue = disk->queue; if (partno) diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 6438c75cbb35..5616d059cb23 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -50,7 +50,6 @@ struct block_device { bool bd_write_holder; bool bd_has_submit_bio; dev_t bd_dev; - struct inode *bd_inode; /* will die */ struct address_space *bd_mapping; /* page cache */ atomic_t bd_openers; -- cgit From e9006f81faf8e438ea83626db578610e49f31576 Mon Sep 17 00:00:00 2001 From: Alex Bee Date: Tue, 16 Apr 2024 18:12:34 +0200 Subject: mfd: rk8xx: Add RK816 support This integrates RK816 support into the existing rk8xx mfd driver. This version has unaligned interrupt registers, which requires defining a separate get_irq_reg callback for the regmap. Apart from that the integration is straightforward and the existing structures can be used as is. The initialization sequence has been taken from the vendor kernel. Signed-off-by: Alex Bee Link: https://lore.kernel.org/r/20240416161237.2500037-3-knaerzche@gmail.com Signed-off-by: Lee Jones --- drivers/mfd/Kconfig | 4 +- drivers/mfd/rk8xx-core.c | 104 +++++++++++++++++++++++++++++++++ drivers/mfd/rk8xx-i2c.c | 45 ++++++++++++++- include/linux/mfd/rk808.h | 144 ++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 294 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 4b023ee229cf..2e7286cc98e4 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1225,7 +1225,7 @@ config MFD_RK8XX select MFD_CORE config MFD_RK8XX_I2C - tristate "Rockchip RK805/RK808/RK809/RK817/RK818 Power Management Chip" + tristate "Rockchip RK805/RK808/RK809/RK816/RK817/RK818 Power Management Chip" depends on I2C && OF select MFD_CORE select REGMAP_I2C @@ -1233,7 +1233,7 @@ config MFD_RK8XX_I2C select MFD_RK8XX help If you say yes here you get support for the RK805, RK808, RK809, - RK817 and RK818 Power Management chips. + RK816, RK817 and RK818 Power Management chips. This driver provides common support for accessing the device through I2C interface. The device supports multiple sub-devices including interrupts, RTC, LDO & DCDC regulators, and onkey. 
diff --git a/drivers/mfd/rk8xx-core.c b/drivers/mfd/rk8xx-core.c index e2261b68b844..5eda3c0dbbdf 100644 --- a/drivers/mfd/rk8xx-core.c +++ b/drivers/mfd/rk8xx-core.c @@ -28,6 +28,10 @@ static const struct resource rtc_resources[] = { DEFINE_RES_IRQ(RK808_IRQ_RTC_ALARM), }; +static const struct resource rk816_rtc_resources[] = { + DEFINE_RES_IRQ(RK816_IRQ_RTC_ALARM), +}; + static const struct resource rk817_rtc_resources[] = { DEFINE_RES_IRQ(RK817_IRQ_RTC_ALARM), }; @@ -87,6 +91,22 @@ static const struct mfd_cell rk808s[] = { }, }; +static const struct mfd_cell rk816s[] = { + { .name = "rk805-pinctrl", }, + { .name = "rk808-clkout", }, + { .name = "rk808-regulator", }, + { + .name = "rk805-pwrkey", + .num_resources = ARRAY_SIZE(rk805_key_resources), + .resources = rk805_key_resources, + }, + { + .name = "rk808-rtc", + .num_resources = ARRAY_SIZE(rk816_rtc_resources), + .resources = rk816_rtc_resources, + }, +}; + static const struct mfd_cell rk817s[] = { { .name = "rk808-clkout", }, { .name = "rk808-regulator", }, @@ -148,6 +168,17 @@ static const struct rk808_reg_data rk808_pre_init_reg[] = { VB_LO_SEL_3500MV }, }; +static const struct rk808_reg_data rk816_pre_init_reg[] = { + { RK818_BUCK1_CONFIG_REG, RK817_RAMP_RATE_MASK, + RK817_RAMP_RATE_12_5MV_PER_US }, + { RK818_BUCK2_CONFIG_REG, RK817_RAMP_RATE_MASK, + RK817_RAMP_RATE_12_5MV_PER_US }, + { RK818_BUCK4_CONFIG_REG, BUCK_ILMIN_MASK, BUCK_ILMIN_250MA }, + { RK808_THERMAL_REG, TEMP_HOTDIE_MSK, TEMP105C}, + { RK808_VB_MON_REG, VBAT_LOW_VOL_MASK | VBAT_LOW_ACT_MASK, + RK808_VBAT_LOW_3V0 | EN_VABT_LOW_SHUT_DOWN }, +}; + static const struct rk808_reg_data rk817_pre_init_reg[] = { {RK817_RTC_CTRL_REG, RTC_STOP, RTC_STOP}, /* Codec specific registers */ @@ -350,6 +381,59 @@ static const struct regmap_irq rk808_irqs[] = { }, }; +static const unsigned int rk816_irq_status_offsets[] = { + RK816_IRQ_STS_OFFSET(RK816_INT_STS_REG1), + RK816_IRQ_STS_OFFSET(RK816_INT_STS_REG2), + RK816_IRQ_STS_OFFSET(RK816_INT_STS_REG3), +}; + +static const unsigned int rk816_irq_mask_offsets[] = { + RK816_IRQ_MSK_OFFSET(RK816_INT_STS_MSK_REG1), + RK816_IRQ_MSK_OFFSET(RK816_INT_STS_MSK_REG2), + RK816_IRQ_MSK_OFFSET(RK816_INT_STS_MSK_REG3), +}; + +static unsigned int rk816_get_irq_reg(struct regmap_irq_chip_data *data, + unsigned int base, int index) +{ + unsigned int irq_reg = base; + + switch (base) { + case RK816_INT_STS_REG1: + irq_reg += rk816_irq_status_offsets[index]; + break; + case RK816_INT_STS_MSK_REG1: + irq_reg += rk816_irq_mask_offsets[index]; + break; + } + + return irq_reg; +}; + +static const struct regmap_irq rk816_irqs[] = { + /* INT_STS_REG1 IRQs */ + REGMAP_IRQ_REG(RK816_IRQ_PWRON_FALL, 0, RK816_INT_STS_PWRON_FALL), + REGMAP_IRQ_REG(RK816_IRQ_PWRON_RISE, 0, RK816_INT_STS_PWRON_RISE), + + /* INT_STS_REG2 IRQs */ + REGMAP_IRQ_REG(RK816_IRQ_VB_LOW, 1, RK816_INT_STS_VB_LOW), + REGMAP_IRQ_REG(RK816_IRQ_PWRON, 1, RK816_INT_STS_PWRON), + REGMAP_IRQ_REG(RK816_IRQ_PWRON_LP, 1, RK816_INT_STS_PWRON_LP), + REGMAP_IRQ_REG(RK816_IRQ_HOTDIE, 1, RK816_INT_STS_HOTDIE), + REGMAP_IRQ_REG(RK816_IRQ_RTC_ALARM, 1, RK816_INT_STS_RTC_ALARM), + REGMAP_IRQ_REG(RK816_IRQ_RTC_PERIOD, 1, RK816_INT_STS_RTC_PERIOD), + REGMAP_IRQ_REG(RK816_IRQ_USB_OV, 1, RK816_INT_STS_USB_OV), + + /* INT_STS3 IRQs */ + REGMAP_IRQ_REG(RK816_IRQ_PLUG_IN, 2, RK816_INT_STS_PLUG_IN), + REGMAP_IRQ_REG(RK816_IRQ_PLUG_OUT, 2, RK816_INT_STS_PLUG_OUT), + REGMAP_IRQ_REG(RK816_IRQ_CHG_OK, 2, RK816_INT_STS_CHG_OK), + REGMAP_IRQ_REG(RK816_IRQ_CHG_TE, 2, RK816_INT_STS_CHG_TE), + REGMAP_IRQ_REG(RK816_IRQ_CHG_TS, 
2, RK816_INT_STS_CHG_TS), + REGMAP_IRQ_REG(RK816_IRQ_CHG_CVTLIM, 2, RK816_INT_STS_CHG_CVTLIM), + REGMAP_IRQ_REG(RK816_IRQ_DISCHG_ILIM, 2, RK816_INT_STS_DISCHG_ILIM), +}; + static const struct regmap_irq rk818_irqs[] = { /* INT_STS */ [RK818_IRQ_VOUT_LO] = { @@ -482,6 +566,18 @@ static const struct regmap_irq_chip rk808_irq_chip = { .init_ack_masked = true, }; +static const struct regmap_irq_chip rk816_irq_chip = { + .name = "rk816", + .irqs = rk816_irqs, + .num_irqs = ARRAY_SIZE(rk816_irqs), + .num_regs = 3, + .get_irq_reg = rk816_get_irq_reg, + .status_base = RK816_INT_STS_REG1, + .mask_base = RK816_INT_STS_MSK_REG1, + .ack_base = RK816_INT_STS_REG1, + .init_ack_masked = true, +}; + static struct regmap_irq_chip rk817_irq_chip = { .name = "rk817", .irqs = rk817_irqs, @@ -530,6 +626,7 @@ static int rk808_power_off(struct sys_off_data *data) reg = RK817_SYS_CFG(3); bit = DEV_OFF; break; + case RK816_ID: case RK818_ID: reg = RK818_DEVCTRL_REG; bit = DEV_OFF; @@ -637,6 +734,13 @@ int rk8xx_probe(struct device *dev, int variant, unsigned int irq, struct regmap cells = rk808s; nr_cells = ARRAY_SIZE(rk808s); break; + case RK816_ID: + rk808->regmap_irq_chip = &rk816_irq_chip; + pre_init_reg = rk816_pre_init_reg; + nr_pre_init_regs = ARRAY_SIZE(rk816_pre_init_reg); + cells = rk816s; + nr_cells = ARRAY_SIZE(rk816s); + break; case RK818_ID: rk808->regmap_irq_chip = &rk818_irq_chip; pre_init_reg = rk818_pre_init_reg; diff --git a/drivers/mfd/rk8xx-i2c.c b/drivers/mfd/rk8xx-i2c.c index 75b5cf09d5a0..69a6b297d723 100644 --- a/drivers/mfd/rk8xx-i2c.c +++ b/drivers/mfd/rk8xx-i2c.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Rockchip RK808/RK818 Core (I2C) driver + * Rockchip RK805/RK808/RK816/RK817/RK818 Core (I2C) driver * * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd * Copyright (C) 2016 PHYTEC Messtechnik GmbH @@ -49,6 +49,35 @@ static bool rk808_is_volatile_reg(struct device *dev, unsigned int reg) return false; } +static bool rk816_is_volatile_reg(struct device *dev, unsigned int reg) +{ + /* + * Technically the ROUND_30s bit makes RTC_CTRL_REG volatile, but + * we don't use that feature. It's better to cache. + */ + + switch (reg) { + case RK808_SECONDS_REG ... RK808_WEEKS_REG: + case RK808_RTC_STATUS_REG: + case RK808_VB_MON_REG: + case RK808_THERMAL_REG: + case RK816_DCDC_EN_REG1: + case RK816_DCDC_EN_REG2: + case RK816_INT_STS_REG1: + case RK816_INT_STS_REG2: + case RK816_INT_STS_REG3: + case RK808_DEVCTRL_REG: + case RK816_SUP_STS_REG: + case RK816_GGSTS_REG: + case RK816_ZERO_CUR_ADC_REGH: + case RK816_ZERO_CUR_ADC_REGL: + case RK816_GASCNT_REG(0) ... 
RK816_BAT_VOL_REGL: + return true; + } + + return false; +} + static bool rk817_is_volatile_reg(struct device *dev, unsigned int reg) { /* @@ -100,6 +129,14 @@ static const struct regmap_config rk808_regmap_config = { .volatile_reg = rk808_is_volatile_reg, }; +static const struct regmap_config rk816_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = RK816_DATA_REG(18), + .cache_type = REGCACHE_MAPLE, + .volatile_reg = rk816_is_volatile_reg, +}; + static const struct regmap_config rk817_regmap_config = { .reg_bits = 8, .val_bits = 8, @@ -123,6 +160,11 @@ static const struct rk8xx_i2c_platform_data rk809_data = { .variant = RK809_ID, }; +static const struct rk8xx_i2c_platform_data rk816_data = { + .regmap_cfg = &rk816_regmap_config, + .variant = RK816_ID, +}; + static const struct rk8xx_i2c_platform_data rk817_data = { .regmap_cfg = &rk817_regmap_config, .variant = RK817_ID, @@ -161,6 +203,7 @@ static const struct of_device_id rk8xx_i2c_of_match[] = { { .compatible = "rockchip,rk805", .data = &rk805_data }, { .compatible = "rockchip,rk808", .data = &rk808_data }, { .compatible = "rockchip,rk809", .data = &rk809_data }, + { .compatible = "rockchip,rk816", .data = &rk816_data }, { .compatible = "rockchip,rk817", .data = &rk817_data }, { .compatible = "rockchip,rk818", .data = &rk818_data }, { }, diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h index 78e167a92483..69cbea78b430 100644 --- a/include/linux/mfd/rk808.h +++ b/include/linux/mfd/rk808.h @@ -113,6 +113,148 @@ enum rk808_reg { #define RK808_INT_STS_MSK_REG2 0x4f #define RK808_IO_POL_REG 0x50 +/* RK816 */ +enum rk816_reg { + RK816_ID_DCDC1, + RK816_ID_DCDC2, + RK816_ID_DCDC3, + RK816_ID_DCDC4, + RK816_ID_LDO1, + RK816_ID_LDO2, + RK816_ID_LDO3, + RK816_ID_LDO4, + RK816_ID_LDO5, + RK816_ID_LDO6, + RK816_ID_BOOST, + RK816_ID_OTG_SW, +}; + +enum rk816_irqs { + /* INT_STS_REG1 */ + RK816_IRQ_PWRON_FALL, + RK816_IRQ_PWRON_RISE, + + /* INT_STS_REG2 */ + RK816_IRQ_VB_LOW, + RK816_IRQ_PWRON, + RK816_IRQ_PWRON_LP, + RK816_IRQ_HOTDIE, + RK816_IRQ_RTC_ALARM, + RK816_IRQ_RTC_PERIOD, + RK816_IRQ_USB_OV, + + /* INT_STS_REG3 */ + RK816_IRQ_PLUG_IN, + RK816_IRQ_PLUG_OUT, + RK816_IRQ_CHG_OK, + RK816_IRQ_CHG_TE, + RK816_IRQ_CHG_TS, + RK816_IRQ_CHG_CVTLIM, + RK816_IRQ_DISCHG_ILIM, +}; + +/* power channel registers */ +#define RK816_DCDC_EN_REG1 0x23 + +#define RK816_DCDC_EN_REG2 0x24 +#define RK816_BOOST_EN BIT(1) +#define RK816_OTG_EN BIT(2) +#define RK816_BOOST_EN_MSK BIT(5) +#define RK816_OTG_EN_MSK BIT(6) +#define RK816_BUCK_DVS_CONFIRM BIT(7) + +#define RK816_LDO_EN_REG1 0x27 + +#define RK816_LDO_EN_REG2 0x28 + +/* interrupt registers and irq definitions */ +#define RK816_INT_STS_REG1 0x49 +#define RK816_INT_STS_MSK_REG1 0x4a +#define RK816_INT_STS_PWRON_FALL BIT(5) +#define RK816_INT_STS_PWRON_RISE BIT(6) + +#define RK816_INT_STS_REG2 0x4c +#define RK816_INT_STS_MSK_REG2 0x4d +#define RK816_INT_STS_VB_LOW BIT(1) +#define RK816_INT_STS_PWRON BIT(2) +#define RK816_INT_STS_PWRON_LP BIT(3) +#define RK816_INT_STS_HOTDIE BIT(4) +#define RK816_INT_STS_RTC_ALARM BIT(5) +#define RK816_INT_STS_RTC_PERIOD BIT(6) +#define RK816_INT_STS_USB_OV BIT(7) + +#define RK816_INT_STS_REG3 0x4e +#define RK816_INT_STS_MSK_REG3 0x4f +#define RK816_INT_STS_PLUG_IN BIT(0) +#define RK816_INT_STS_PLUG_OUT BIT(1) +#define RK816_INT_STS_CHG_OK BIT(2) +#define RK816_INT_STS_CHG_TE BIT(3) +#define RK816_INT_STS_CHG_TS BIT(4) +#define RK816_INT_STS_CHG_CVTLIM BIT(6) +#define RK816_INT_STS_DISCHG_ILIM BIT(7) + +#define RK816_IRQ_STS_OFFSET(x) ((x) - 
RK816_INT_STS_REG1) +#define RK816_IRQ_MSK_OFFSET(x) ((x) - RK816_INT_STS_MSK_REG1) + +/* charger, boost and OTG registers */ +#define RK816_OTG_BUCK_LDO_CONFIG_REG 0x2a +#define RK816_CHRG_CONFIG_REG 0x2b +#define RK816_BOOST_ON_VESL_REG 0x54 +#define RK816_BOOST_SLP_VSEL_REG 0x55 +#define RK816_CHRG_BOOST_CONFIG_REG 0x9a +#define RK816_SUP_STS_REG 0xa0 +#define RK816_USB_CTRL_REG 0xa1 +#define RK816_CHRG_CTRL(x) (0xa3 + (x)) +#define RK816_BAT_CTRL_REG 0xa6 +#define RK816_BAT_HTS_TS_REG 0xa8 +#define RK816_BAT_LTS_TS_REG 0xa9 + +/* adc and fuel gauge registers */ +#define RK816_TS_CTRL_REG 0xac +#define RK816_ADC_CTRL_REG 0xad +#define RK816_GGCON_REG 0xb0 +#define RK816_GGSTS_REG 0xb1 +#define RK816_ZERO_CUR_ADC_REGH 0xb2 +#define RK816_ZERO_CUR_ADC_REGL 0xb3 +#define RK816_GASCNT_CAL_REG(x) (0xb7 - (x)) +#define RK816_GASCNT_REG(x) (0xbb - (x)) +#define RK816_BAT_CUR_AVG_REGH 0xbc +#define RK816_BAT_CUR_AVG_REGL 0xbd +#define RK816_TS_ADC_REGH 0xbe +#define RK816_TS_ADC_REGL 0xbf +#define RK816_USB_ADC_REGH 0xc0 +#define RK816_USB_ADC_REGL 0xc1 +#define RK816_BAT_OCV_REGH 0xc2 +#define RK816_BAT_OCV_REGL 0xc3 +#define RK816_BAT_VOL_REGH 0xc4 +#define RK816_BAT_VOL_REGL 0xc5 +#define RK816_RELAX_ENTRY_THRES_REGH 0xc6 +#define RK816_RELAX_ENTRY_THRES_REGL 0xc7 +#define RK816_RELAX_EXIT_THRES_REGH 0xc8 +#define RK816_RELAX_EXIT_THRES_REGL 0xc9 +#define RK816_RELAX_VOL1_REGH 0xca +#define RK816_RELAX_VOL1_REGL 0xcb +#define RK816_RELAX_VOL2_REGH 0xcc +#define RK816_RELAX_VOL2_REGL 0xcd +#define RK816_RELAX_CUR1_REGH 0xce +#define RK816_RELAX_CUR1_REGL 0xcf +#define RK816_RELAX_CUR2_REGH 0xd0 +#define RK816_RELAX_CUR2_REGL 0xd1 +#define RK816_CAL_OFFSET_REGH 0xd2 +#define RK816_CAL_OFFSET_REGL 0xd3 +#define RK816_NON_ACT_TIMER_CNT_REG 0xd4 +#define RK816_VCALIB0_REGH 0xd5 +#define RK816_VCALIB0_REGL 0xd6 +#define RK816_VCALIB1_REGH 0xd7 +#define RK816_VCALIB1_REGL 0xd8 +#define RK816_FCC_GASCNT_REG(x) (0xdc - (x)) +#define RK816_IOFFSET_REGH 0xdd +#define RK816_IOFFSET_REGL 0xde +#define RK816_SLEEP_CON_SAMP_CUR_REG 0xdf + +/* general purpose data registers 0xe0 ~ 0xf2 */ +#define RK816_DATA_REG(x) (0xe0 + (x)) + /* RK818 */ #define RK818_DCDC1 0 #define RK818_LDO1 4 @@ -791,6 +933,7 @@ enum rk806_dvs_mode { #define VOUT_LO_INT BIT(0) #define CLK32KOUT2_EN BIT(0) +#define TEMP105C 0x08 #define TEMP115C 0x0c #define TEMP_HOTDIE_MSK 0x0c #define SLP_SD_MSK (0x3 << 2) @@ -1191,6 +1334,7 @@ enum { RK806_ID = 0x8060, RK808_ID = 0x0000, RK809_ID = 0x8090, + RK816_ID = 0x8160, RK817_ID = 0x8170, RK818_ID = 0x8180, }; -- cgit From 84ccfaee29fe46e305244a69c4471e83629ad5d1 Mon Sep 17 00:00:00 2001 From: Nirmala Devi Mal Nadar Date: Tue, 30 Apr 2024 13:16:34 +0000 Subject: mfd: tps6594: Add register definitions for TI TPS65224 PMIC Extend TPS6594 PMIC register and field definitions to support TPS65224 power management IC. TPS65224 is software compatible to TPS6594 and can re-use many of the same definitions, new definitions are added to support additional controls available on TPS65224. 
Signed-off-by: Nirmala Devi Mal Nadar Signed-off-by: Bhargav Raviprakash Link: https://lore.kernel.org/r/0109018f2f265d30-a87711fa-31d9-48db-b8cb-7109d0213e2e-000000@ap-south-1.amazonses.com Signed-off-by: Lee Jones --- include/linux/mfd/tps6594.h | 347 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 335 insertions(+), 12 deletions(-) (limited to 'include/linux') diff --git a/include/linux/mfd/tps6594.h b/include/linux/mfd/tps6594.h index 3f7c5e23cd4c..e754c01acc64 100644 --- a/include/linux/mfd/tps6594.h +++ b/include/linux/mfd/tps6594.h @@ -18,12 +18,13 @@ enum pmic_id { TPS6594, TPS6593, LP8764, + TPS65224, }; /* Macro to get page index from register address */ #define TPS6594_REG_TO_PAGE(reg) ((reg) >> 8) -/* Registers for page 0 of TPS6594 */ +/* Registers for page 0 */ #define TPS6594_REG_DEV_REV 0x01 #define TPS6594_REG_NVM_CODE_1 0x02 @@ -56,9 +57,6 @@ enum pmic_id { #define TPS6594_REG_GPIOX_OUT(gpio_inst) (TPS6594_REG_GPIO_OUT_1 + (gpio_inst) / 8) #define TPS6594_REG_GPIOX_IN(gpio_inst) (TPS6594_REG_GPIO_IN_1 + (gpio_inst) / 8) -#define TPS6594_REG_GPIO_IN_1 0x3f -#define TPS6594_REG_GPIO_IN_2 0x40 - #define TPS6594_REG_RAIL_SEL_1 0x41 #define TPS6594_REG_RAIL_SEL_2 0x42 #define TPS6594_REG_RAIL_SEL_3 0x43 @@ -70,13 +68,15 @@ enum pmic_id { #define TPS6594_REG_FSM_TRIG_MASK_3 0x48 #define TPS6594_REG_MASK_BUCK1_2 0x49 +#define TPS65224_REG_MASK_BUCKS 0x49 #define TPS6594_REG_MASK_BUCK3_4 0x4a #define TPS6594_REG_MASK_BUCK5 0x4b #define TPS6594_REG_MASK_LDO1_2 0x4c +#define TPS65224_REG_MASK_LDOS 0x4c #define TPS6594_REG_MASK_LDO3_4 0x4d #define TPS6594_REG_MASK_VMON 0x4e -#define TPS6594_REG_MASK_GPIO1_8_FALL 0x4f -#define TPS6594_REG_MASK_GPIO1_8_RISE 0x50 +#define TPS6594_REG_MASK_GPIO_FALL 0x4f +#define TPS6594_REG_MASK_GPIO_RISE 0x50 #define TPS6594_REG_MASK_GPIO9_11 0x51 #define TPS6594_REG_MASK_STARTUP 0x52 #define TPS6594_REG_MASK_MISC 0x53 @@ -174,6 +174,10 @@ enum pmic_id { #define TPS6594_REG_REGISTER_LOCK 0xa1 +#define TPS65224_REG_SRAM_ACCESS_1 0xa2 +#define TPS65224_REG_SRAM_ACCESS_2 0xa3 +#define TPS65224_REG_SRAM_ADDR_CTRL 0xa4 +#define TPS65224_REG_RECOV_CNT_PFSM_INCR 0xa5 #define TPS6594_REG_MANUFACTURING_VER 0xa6 #define TPS6594_REG_CUSTOMER_NVM_ID_REG 0xa7 @@ -182,6 +186,9 @@ enum pmic_id { #define TPS6594_REG_SOFT_REBOOT_REG 0xab +#define TPS65224_REG_ADC_CTRL 0xac +#define TPS65224_REG_ADC_RESULT_REG_1 0xad +#define TPS65224_REG_ADC_RESULT_REG_2 0xae #define TPS6594_REG_RTC_SECONDS 0xb5 #define TPS6594_REG_RTC_MINUTES 0xb6 #define TPS6594_REG_RTC_HOURS 0xb7 @@ -199,6 +206,7 @@ enum pmic_id { #define TPS6594_REG_RTC_CTRL_1 0xc2 #define TPS6594_REG_RTC_CTRL_2 0xc3 +#define TPS65224_REG_STARTUP_CTRL 0xc3 #define TPS6594_REG_RTC_STATUS 0xc4 #define TPS6594_REG_RTC_INTERRUPTS 0xc5 #define TPS6594_REG_RTC_COMP_LSB 0xc6 @@ -214,13 +222,17 @@ enum pmic_id { #define TPS6594_REG_PFSM_DELAY_REG_2 0xce #define TPS6594_REG_PFSM_DELAY_REG_3 0xcf #define TPS6594_REG_PFSM_DELAY_REG_4 0xd0 +#define TPS65224_REG_ADC_GAIN_COMP_REG 0xd0 +#define TPS65224_REG_CRC_CALC_CONTROL 0xef +#define TPS65224_REG_REGMAP_USER_CRC_LOW 0xf0 +#define TPS65224_REG_REGMAP_USER_CRC_HIGH 0xf1 -/* Registers for page 1 of TPS6594 */ +/* Registers for page 1 */ #define TPS6594_REG_SERIAL_IF_CONFIG 0x11a #define TPS6594_REG_I2C1_ID 0x122 #define TPS6594_REG_I2C2_ID 0x123 -/* Registers for page 4 of TPS6594 */ +/* Registers for page 4 */ #define TPS6594_REG_WD_ANSWER_REG 0x401 #define TPS6594_REG_WD_QUESTION_ANSW_CNT 0x402 #define TPS6594_REG_WD_WIN1_CFG 0x403 @@ -241,16 +253,26 @@ enum 
pmic_id { #define TPS6594_BIT_BUCK_PLDN BIT(5) #define TPS6594_BIT_BUCK_RV_SEL BIT(7) -/* BUCKX_CONF register field definition */ +/* TPS6594 BUCKX_CONF register field definition */ #define TPS6594_MASK_BUCK_SLEW_RATE GENMASK(2, 0) #define TPS6594_MASK_BUCK_ILIM GENMASK(5, 3) -/* BUCKX_PG_WINDOW register field definition */ +/* TPS65224 BUCKX_CONF register field definition */ +#define TPS65224_MASK_BUCK_SLEW_RATE GENMASK(1, 0) + +/* TPS6594 BUCKX_PG_WINDOW register field definition */ #define TPS6594_MASK_BUCK_OV_THR GENMASK(2, 0) #define TPS6594_MASK_BUCK_UV_THR GENMASK(5, 3) -/* BUCKX VSET */ -#define TPS6594_MASK_BUCKS_VSET GENMASK(7, 0) +/* TPS65224 BUCKX_PG_WINDOW register field definition */ +#define TPS65224_MASK_BUCK_VMON_THR GENMASK(1, 0) + +/* TPS6594 BUCKX_VOUT register field definition */ +#define TPS6594_MASK_BUCKS_VSET GENMASK(7, 0) + +/* TPS65224 BUCKX_VOUT register field definition */ +#define TPS65224_MASK_BUCK1_VSET GENMASK(7, 0) +#define TPS65224_MASK_BUCKS_VSET GENMASK(6, 0) /* LDOX_CTRL register field definition */ #define TPS6594_BIT_LDO_EN BIT(0) @@ -258,6 +280,7 @@ enum pmic_id { #define TPS6594_BIT_LDO_VMON_EN BIT(4) #define TPS6594_MASK_LDO_PLDN GENMASK(6, 5) #define TPS6594_BIT_LDO_RV_SEL BIT(7) +#define TPS65224_BIT_LDO_DISCHARGE_EN BIT(5) /* LDORTC_CTRL register field definition */ #define TPS6594_BIT_LDORTC_DIS BIT(0) @@ -271,6 +294,9 @@ enum pmic_id { #define TPS6594_MASK_LDO_OV_THR GENMASK(2, 0) #define TPS6594_MASK_LDO_UV_THR GENMASK(5, 3) +/* LDOX_PG_WINDOW register field definition */ +#define TPS65224_MASK_LDO_VMON_THR GENMASK(1, 0) + /* VCCA_VMON_CTRL register field definition */ #define TPS6594_BIT_VMON_EN BIT(0) #define TPS6594_BIT_VMON1_EN BIT(1) @@ -278,10 +304,12 @@ enum pmic_id { #define TPS6594_BIT_VMON2_EN BIT(3) #define TPS6594_BIT_VMON2_RV_SEL BIT(4) #define TPS6594_BIT_VMON_DEGLITCH_SEL BIT(5) +#define TPS65224_BIT_VMON_DEGLITCH_SEL GENMASK(7, 5) /* VCCA_PG_WINDOW register field definition */ #define TPS6594_MASK_VCCA_OV_THR GENMASK(2, 0) #define TPS6594_MASK_VCCA_UV_THR GENMASK(5, 3) +#define TPS65224_MASK_VCCA_VMON_THR GENMASK(1, 0) #define TPS6594_BIT_VCCA_PG_SET BIT(6) /* VMONX_PG_WINDOW register field definition */ @@ -289,6 +317,9 @@ enum pmic_id { #define TPS6594_MASK_VMONX_UV_THR GENMASK(5, 3) #define TPS6594_BIT_VMONX_RANGE BIT(6) +/* VMONX_PG_WINDOW register field definition */ +#define TPS65224_MASK_VMONX_THR GENMASK(1, 0) + /* GPIOX_CONF register field definition */ #define TPS6594_BIT_GPIO_DIR BIT(0) #define TPS6594_BIT_GPIO_OD BIT(1) @@ -296,6 +327,8 @@ enum pmic_id { #define TPS6594_BIT_GPIO_PU_PD_EN BIT(3) #define TPS6594_BIT_GPIO_DEGLITCH_EN BIT(4) #define TPS6594_MASK_GPIO_SEL GENMASK(7, 5) +#define TPS65224_MASK_GPIO_SEL GENMASK(6, 5) +#define TPS65224_MASK_GPIO_SEL_GPIO6 GENMASK(7, 5) /* NPWRON_CONF register field definition */ #define TPS6594_BIT_NRSTOUT_OD BIT(0) @@ -305,6 +338,12 @@ enum pmic_id { #define TPS6594_BIT_ENABLE_POL BIT(5) #define TPS6594_MASK_NPWRON_SEL GENMASK(7, 6) +/* POWER_ON_CONFIG register field definition */ +#define TPS65224_BIT_NINT_ENDRV_PU_SEL BIT(0) +#define TPS65224_BIT_NINT_ENDRV_SEL BIT(1) +#define TPS65224_BIT_EN_PB_DEGL BIT(5) +#define TPS65224_MASK_EN_PB_VSENSE_CONFIG GENMASK(7, 6) + /* GPIO_OUT_X register field definition */ #define TPS6594_BIT_GPIOX_OUT(gpio_inst) BIT((gpio_inst) % 8) @@ -312,6 +351,12 @@ enum pmic_id { #define TPS6594_BIT_GPIOX_IN(gpio_inst) BIT((gpio_inst) % 8) #define TPS6594_BIT_NPWRON_IN BIT(3) +/* GPIO_OUT_X register field definition */ +#define 
TPS65224_BIT_GPIOX_OUT(gpio_inst) BIT((gpio_inst)) + +/* GPIO_IN_X register field definition */ +#define TPS65224_BIT_GPIOX_IN(gpio_inst) BIT((gpio_inst)) + /* RAIL_SEL_1 register field definition */ #define TPS6594_MASK_BUCK1_GRP_SEL GENMASK(1, 0) #define TPS6594_MASK_BUCK2_GRP_SEL GENMASK(3, 2) @@ -343,6 +388,9 @@ enum pmic_id { #define TPS6594_BIT_GPIOX_FSM_MASK(gpio_inst) BIT(((gpio_inst) << 1) % 8) #define TPS6594_BIT_GPIOX_FSM_MASK_POL(gpio_inst) BIT(((gpio_inst) << 1) % 8 + 1) +#define TPS65224_BIT_GPIOX_FSM_MASK(gpio_inst) BIT(((gpio_inst) << 1) % 6) +#define TPS65224_BIT_GPIOX_FSM_MASK_POL(gpio_inst) BIT(((gpio_inst) << 1) % 6 + 1) + /* MASK_BUCKX register field definition */ #define TPS6594_BIT_BUCKX_OV_MASK(buck_inst) BIT(((buck_inst) << 2) % 8) #define TPS6594_BIT_BUCKX_UV_MASK(buck_inst) BIT(((buck_inst) << 2) % 8 + 1) @@ -361,22 +409,46 @@ enum pmic_id { #define TPS6594_BIT_VMON2_OV_MASK BIT(5) #define TPS6594_BIT_VMON2_UV_MASK BIT(6) +/* MASK_BUCK Register field definition */ +#define TPS65224_BIT_BUCK1_UVOV_MASK BIT(0) +#define TPS65224_BIT_BUCK2_UVOV_MASK BIT(1) +#define TPS65224_BIT_BUCK3_UVOV_MASK BIT(2) +#define TPS65224_BIT_BUCK4_UVOV_MASK BIT(4) + +/* MASK_LDO_VMON register field definition */ +#define TPS65224_BIT_LDO1_UVOV_MASK BIT(0) +#define TPS65224_BIT_LDO2_UVOV_MASK BIT(1) +#define TPS65224_BIT_LDO3_UVOV_MASK BIT(2) +#define TPS65224_BIT_VCCA_UVOV_MASK BIT(4) +#define TPS65224_BIT_VMON1_UVOV_MASK BIT(5) +#define TPS65224_BIT_VMON2_UVOV_MASK BIT(6) + /* MASK_GPIOX register field definition */ #define TPS6594_BIT_GPIOX_FALL_MASK(gpio_inst) BIT((gpio_inst) < 8 ? \ (gpio_inst) : (gpio_inst) % 8) #define TPS6594_BIT_GPIOX_RISE_MASK(gpio_inst) BIT((gpio_inst) < 8 ? \ (gpio_inst) : (gpio_inst) % 8 + 3) +/* MASK_GPIOX register field definition */ +#define TPS65224_BIT_GPIOX_FALL_MASK(gpio_inst) BIT((gpio_inst)) +#define TPS65224_BIT_GPIOX_RISE_MASK(gpio_inst) BIT((gpio_inst)) /* MASK_STARTUP register field definition */ #define TPS6594_BIT_NPWRON_START_MASK BIT(0) #define TPS6594_BIT_ENABLE_MASK BIT(1) #define TPS6594_BIT_FSD_MASK BIT(4) #define TPS6594_BIT_SOFT_REBOOT_MASK BIT(5) +#define TPS65224_BIT_VSENSE_MASK BIT(0) +#define TPS65224_BIT_PB_SHORT_MASK BIT(2) /* MASK_MISC register field definition */ #define TPS6594_BIT_BIST_PASS_MASK BIT(0) #define TPS6594_BIT_EXT_CLK_MASK BIT(1) +#define TPS65224_BIT_REG_UNLOCK_MASK BIT(2) #define TPS6594_BIT_TWARN_MASK BIT(3) +#define TPS65224_BIT_PB_LONG_MASK BIT(4) +#define TPS65224_BIT_PB_FALL_MASK BIT(5) +#define TPS65224_BIT_PB_RISE_MASK BIT(6) +#define TPS65224_BIT_ADC_CONV_READY_MASK BIT(7) /* MASK_MODERATE_ERR register field definition */ #define TPS6594_BIT_BIST_FAIL_MASK BIT(1) @@ -391,6 +463,8 @@ enum pmic_id { #define TPS6594_BIT_ORD_SHUTDOWN_MASK BIT(1) #define TPS6594_BIT_MCU_PWR_ERR_MASK BIT(2) #define TPS6594_BIT_SOC_PWR_ERR_MASK BIT(3) +#define TPS65224_BIT_COMM_ERR_MASK BIT(4) +#define TPS65224_BIT_I2C2_ERR_MASK BIT(5) /* MASK_COMM_ERR register field definition */ #define TPS6594_BIT_COMM_FRM_ERR_MASK BIT(0) @@ -426,6 +500,12 @@ enum pmic_id { #define TPS6594_BIT_BUCK3_4_INT BIT(1) #define TPS6594_BIT_BUCK5_INT BIT(2) +/* INT_BUCK register field definition */ +#define TPS65224_BIT_BUCK1_UVOV_INT BIT(0) +#define TPS65224_BIT_BUCK2_UVOV_INT BIT(1) +#define TPS65224_BIT_BUCK3_UVOV_INT BIT(2) +#define TPS65224_BIT_BUCK4_UVOV_INT BIT(3) + /* INT_BUCKX register field definition */ #define TPS6594_BIT_BUCKX_OV_INT(buck_inst) BIT(((buck_inst) << 2) % 8) #define TPS6594_BIT_BUCKX_UV_INT(buck_inst) BIT(((buck_inst) << 2) 
% 8 + 1) @@ -437,6 +517,14 @@ enum pmic_id { #define TPS6594_BIT_LDO3_4_INT BIT(1) #define TPS6594_BIT_VCCA_INT BIT(4) +/* INT_LDO_VMON register field definition */ +#define TPS65224_BIT_LDO1_UVOV_INT BIT(0) +#define TPS65224_BIT_LDO2_UVOV_INT BIT(1) +#define TPS65224_BIT_LDO3_UVOV_INT BIT(2) +#define TPS65224_BIT_VCCA_UVOV_INT BIT(4) +#define TPS65224_BIT_VMON1_UVOV_INT BIT(5) +#define TPS65224_BIT_VMON2_UVOV_INT BIT(6) + /* INT_LDOX register field definition */ #define TPS6594_BIT_LDOX_OV_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8) #define TPS6594_BIT_LDOX_UV_INT(ldo_inst) BIT(((ldo_inst) << 2) % 8 + 1) @@ -462,17 +550,32 @@ enum pmic_id { /* INT_GPIOX register field definition */ #define TPS6594_BIT_GPIOX_INT(gpio_inst) BIT(gpio_inst) +/* INT_GPIO register field definition */ +#define TPS65224_BIT_GPIO1_INT BIT(0) +#define TPS65224_BIT_GPIO2_INT BIT(1) +#define TPS65224_BIT_GPIO3_INT BIT(2) +#define TPS65224_BIT_GPIO4_INT BIT(3) +#define TPS65224_BIT_GPIO5_INT BIT(4) +#define TPS65224_BIT_GPIO6_INT BIT(5) + /* INT_STARTUP register field definition */ #define TPS6594_BIT_NPWRON_START_INT BIT(0) +#define TPS65224_BIT_VSENSE_INT BIT(0) #define TPS6594_BIT_ENABLE_INT BIT(1) #define TPS6594_BIT_RTC_INT BIT(2) +#define TPS65224_BIT_PB_SHORT_INT BIT(2) #define TPS6594_BIT_FSD_INT BIT(4) #define TPS6594_BIT_SOFT_REBOOT_INT BIT(5) /* INT_MISC register field definition */ #define TPS6594_BIT_BIST_PASS_INT BIT(0) #define TPS6594_BIT_EXT_CLK_INT BIT(1) +#define TPS65224_BIT_REG_UNLOCK_INT BIT(2) #define TPS6594_BIT_TWARN_INT BIT(3) +#define TPS65224_BIT_PB_LONG_INT BIT(4) +#define TPS65224_BIT_PB_FALL_INT BIT(5) +#define TPS65224_BIT_PB_RISE_INT BIT(6) +#define TPS65224_BIT_ADC_CONV_READY_INT BIT(7) /* INT_MODERATE_ERR register field definition */ #define TPS6594_BIT_TSD_ORD_INT BIT(0) @@ -488,6 +591,7 @@ enum pmic_id { #define TPS6594_BIT_TSD_IMM_INT BIT(0) #define TPS6594_BIT_VCCA_OVP_INT BIT(1) #define TPS6594_BIT_PFSM_ERR_INT BIT(2) +#define TPS65224_BIT_BG_XMON_INT BIT(3) /* INT_FSM_ERR register field definition */ #define TPS6594_BIT_IMM_SHUTDOWN_INT BIT(0) @@ -496,6 +600,7 @@ enum pmic_id { #define TPS6594_BIT_SOC_PWR_ERR_INT BIT(3) #define TPS6594_BIT_COMM_ERR_INT BIT(4) #define TPS6594_BIT_READBACK_ERR_INT BIT(5) +#define TPS65224_BIT_I2C2_ERR_INT BIT(5) #define TPS6594_BIT_ESM_INT BIT(6) #define TPS6594_BIT_WD_INT BIT(7) @@ -536,8 +641,18 @@ enum pmic_id { #define TPS6594_BIT_VMON2_OV_STAT BIT(5) #define TPS6594_BIT_VMON2_UV_STAT BIT(6) +/* STAT_LDO_VMON register field definition */ +#define TPS65224_BIT_LDO1_UVOV_STAT BIT(0) +#define TPS65224_BIT_LDO2_UVOV_STAT BIT(1) +#define TPS65224_BIT_LDO3_UVOV_STAT BIT(2) +#define TPS65224_BIT_VCCA_UVOV_STAT BIT(4) +#define TPS65224_BIT_VMON1_UVOV_STAT BIT(5) +#define TPS65224_BIT_VMON2_UVOV_STAT BIT(6) + /* STAT_STARTUP register field definition */ +#define TPS65224_BIT_VSENSE_STAT BIT(0) #define TPS6594_BIT_ENABLE_STAT BIT(1) +#define TPS65224_BIT_PB_LEVEL_STAT BIT(2) /* STAT_MISC register field definition */ #define TPS6594_BIT_EXT_CLK_STAT BIT(1) @@ -549,6 +664,7 @@ enum pmic_id { /* STAT_SEVERE_ERR register field definition */ #define TPS6594_BIT_TSD_IMM_STAT BIT(0) #define TPS6594_BIT_VCCA_OVP_STAT BIT(1) +#define TPS65224_BIT_BG_XMON_STAT BIT(3) /* STAT_READBACK_ERR register field definition */ #define TPS6594_BIT_EN_DRV_READBACK_STAT BIT(0) @@ -597,6 +713,8 @@ enum pmic_id { #define TPS6594_BIT_BB_CHARGER_EN BIT(0) #define TPS6594_BIT_BB_ICHR BIT(1) #define TPS6594_MASK_BB_VEOC GENMASK(3, 2) +#define TPS65224_BIT_I2C1_SPI_CRC_EN BIT(4) 
+#define TPS65224_BIT_I2C2_CRC_EN BIT(5) #define TPS6594_BB_EOC_RDY BIT(7) /* ENABLE_DRV_REG register field definition */ @@ -617,6 +735,7 @@ enum pmic_id { #define TPS6594_BIT_NRSTOUT_SOC_IN BIT(2) #define TPS6594_BIT_FORCE_EN_DRV_LOW BIT(3) #define TPS6594_BIT_SPMI_LPM_EN BIT(4) +#define TPS65224_BIT_TSD_DISABLE BIT(5) /* RECOV_CNT_REG_1 register field definition */ #define TPS6594_MASK_RECOV_CNT GENMASK(3, 0) @@ -671,15 +790,27 @@ enum pmic_id { /* ESM_SOC_START_REG register field definition */ #define TPS6594_BIT_ESM_SOC_START BIT(0) +/* ESM_MCU_START_REG register field definition */ +#define TPS65224_BIT_ESM_MCU_START BIT(0) + /* ESM_SOC_MODE_CFG register field definition */ #define TPS6594_MASK_ESM_SOC_ERR_CNT_TH GENMASK(3, 0) #define TPS6594_BIT_ESM_SOC_ENDRV BIT(5) #define TPS6594_BIT_ESM_SOC_EN BIT(6) #define TPS6594_BIT_ESM_SOC_MODE BIT(7) +/* ESM_MCU_MODE_CFG register field definition */ +#define TPS65224_MASK_ESM_MCU_ERR_CNT_TH GENMASK(3, 0) +#define TPS65224_BIT_ESM_MCU_ENDRV BIT(5) +#define TPS65224_BIT_ESM_MCU_EN BIT(6) +#define TPS65224_BIT_ESM_MCU_MODE BIT(7) + /* ESM_SOC_ERR_CNT_REG register field definition */ #define TPS6594_MASK_ESM_SOC_ERR_CNT GENMASK(4, 0) +/* ESM_MCU_ERR_CNT_REG register field definition */ +#define TPS6594_MASK_ESM_MCU_ERR_CNT GENMASK(4, 0) + /* REGISTER_LOCK register field definition */ #define TPS6594_BIT_REGISTER_LOCK_STATUS BIT(0) @@ -687,6 +818,29 @@ enum pmic_id { #define TPS6594_MASK_VMON1_SLEW_RATE GENMASK(2, 0) #define TPS6594_MASK_VMON2_SLEW_RATE GENMASK(5, 3) +/* SRAM_ACCESS_1 Register field definition */ +#define TPS65224_MASk_SRAM_UNLOCK_SEQ GENMASK(7, 0) + +/* SRAM_ACCESS_2 Register field definition */ +#define TPS65224_BIT_SRAM_WRITE_MODE BIT(0) +#define TPS65224_BIT_OTP_PROG_USER BIT(1) +#define TPS65224_BIT_OTP_PROG_PFSM BIT(2) +#define TPS65224_BIT_OTP_PROG_STATUS BIT(3) +#define TPS65224_BIT_SRAM_UNLOCKED BIT(6) +#define TPS65224_USER_PROG_ALLOWED BIT(7) + +/* SRAM_ADDR_CTRL Register field definition */ +#define TPS65224_MASk_SRAM_SEL GENMASK(1, 0) + +/* RECOV_CNT_PFSM_INCR Register field definition */ +#define TPS65224_BIT_INCREMENT_RECOV_CNT BIT(0) + +/* MANUFACTURING_VER Register field definition */ +#define TPS65224_MASK_SILICON_REV GENMASK(7, 0) + +/* CUSTOMER_NVM_ID_REG Register field definition */ +#define TPS65224_MASK_CUSTOMER_NVM_ID GENMASK(7, 0) + /* SOFT_REBOOT_REG register field definition */ #define TPS6594_BIT_SOFT_REBOOT BIT(0) @@ -755,14 +909,83 @@ enum pmic_id { #define TPS6594_BIT_I2C2_CRC_EN BIT(2) #define TPS6594_MASK_T_CRC GENMASK(7, 3) +/* ADC_CTRL Register field definition */ +#define TPS65224_BIT_ADC_START BIT(0) +#define TPS65224_BIT_ADC_CONT_CONV BIT(1) +#define TPS65224_BIT_ADC_THERMAL_SEL BIT(2) +#define TPS65224_BIT_ADC_RDIV_EN BIT(3) +#define TPS65224_BIT_ADC_STATUS BIT(7) + +/* ADC_RESULT_REG_1 Register field definition */ +#define TPS65224_MASK_ADC_RESULT_11_4 GENMASK(7, 0) + +/* ADC_RESULT_REG_2 Register field definition */ +#define TPS65224_MASK_ADC_RESULT_3_0 GENMASK(7, 4) + +/* STARTUP_CTRL Register field definition */ +#define TPS65224_MASK_STARTUP_DEST GENMASK(6, 5) +#define TPS65224_BIT_FIRST_STARTUP_DONE BIT(7) + +/* SCRATCH_PAD_REG_1 Register field definition */ +#define TPS6594_MASK_SCRATCH_PAD_1 GENMASK(7, 0) + +/* SCRATCH_PAD_REG_2 Register field definition */ +#define TPS6594_MASK_SCRATCH_PAD_2 GENMASK(7, 0) + +/* SCRATCH_PAD_REG_3 Register field definition */ +#define TPS6594_MASK_SCRATCH_PAD_3 GENMASK(7, 0) + +/* SCRATCH_PAD_REG_4 Register field definition */ +#define 
TPS6594_MASK_SCRATCH_PAD_4 GENMASK(7, 0) + +/* PFSM_DELAY_REG_1 Register field definition */ +#define TPS6594_MASK_PFSM_DELAY1 GENMASK(7, 0) + +/* PFSM_DELAY_REG_2 Register field definition */ +#define TPS6594_MASK_PFSM_DELAY2 GENMASK(7, 0) + +/* PFSM_DELAY_REG_3 Register field definition */ +#define TPS6594_MASK_PFSM_DELAY3 GENMASK(7, 0) + +/* PFSM_DELAY_REG_4 Register field definition */ +#define TPS6594_MASK_PFSM_DELAY4 GENMASK(7, 0) + +/* CRC_CALC_CONTROL Register field definition */ +#define TPS65224_BIT_RUN_CRC_BIST BIT(0) +#define TPS65224_BIT_RUN_CRC_UPDATE BIT(1) + +/* ADC_GAIN_COMP_REG Register field definition */ +#define TPS65224_MASK_ADC_GAIN_COMP GENMASK(7, 0) + +/* REGMAP_USER_CRC_LOW Register field definition */ +#define TPS65224_MASK_REGMAP_USER_CRC16_LOW GENMASK(7, 0) + +/* REGMAP_USER_CRC_HIGH Register field definition */ +#define TPS65224_MASK_REGMAP_USER_CRC16_HIGH GENMASK(7, 0) + +/* WD_ANSWER_REG Register field definition */ +#define TPS6594_MASK_WD_ANSWER GENMASK(7, 0) + /* WD_QUESTION_ANSW_CNT register field definition */ #define TPS6594_MASK_WD_QUESTION GENMASK(3, 0) #define TPS6594_MASK_WD_ANSW_CNT GENMASK(5, 4) +#define TPS65224_BIT_INT_TOP_STATUS BIT(7) + +/* WD WIN1_CFG register field definition */ +#define TPS6594_MASK_WD_WIN1_CFG GENMASK(6, 0) + +/* WD WIN2_CFG register field definition */ +#define TPS6594_MASK_WD_WIN2_CFG GENMASK(6, 0) + +/* WD LongWin register field definition */ +#define TPS6594_MASK_WD_LONGWIN_CFG GENMASK(7, 0) /* WD_MODE_REG register field definition */ #define TPS6594_BIT_WD_RETURN_LONGWIN BIT(0) #define TPS6594_BIT_WD_MODE_SELECT BIT(1) #define TPS6594_BIT_WD_PWRHOLD BIT(2) +#define TPS65224_BIT_WD_ENDRV_SEL BIT(6) +#define TPS65224_BIT_WD_CNT_SEL BIT(7) /* WD_QA_CFG register field definition */ #define TPS6594_MASK_WD_QUESTION_SEED GENMASK(3, 0) @@ -993,6 +1216,106 @@ enum tps6594_irqs { #define TPS6594_IRQ_NAME_ALARM "alarm" #define TPS6594_IRQ_NAME_POWERUP "powerup" +/* IRQs */ +enum tps65224_irqs { + /* INT_BUCK register */ + TPS65224_IRQ_BUCK1_UVOV, + TPS65224_IRQ_BUCK2_UVOV, + TPS65224_IRQ_BUCK3_UVOV, + TPS65224_IRQ_BUCK4_UVOV, + /* INT_LDO_VMON register */ + TPS65224_IRQ_LDO1_UVOV, + TPS65224_IRQ_LDO2_UVOV, + TPS65224_IRQ_LDO3_UVOV, + TPS65224_IRQ_VCCA_UVOV, + TPS65224_IRQ_VMON1_UVOV, + TPS65224_IRQ_VMON2_UVOV, + /* INT_GPIO register */ + TPS65224_IRQ_GPIO1, + TPS65224_IRQ_GPIO2, + TPS65224_IRQ_GPIO3, + TPS65224_IRQ_GPIO4, + TPS65224_IRQ_GPIO5, + TPS65224_IRQ_GPIO6, + /* INT_STARTUP register */ + TPS65224_IRQ_VSENSE, + TPS65224_IRQ_ENABLE, + TPS65224_IRQ_PB_SHORT, + TPS65224_IRQ_FSD, + TPS65224_IRQ_SOFT_REBOOT, + /* INT_MISC register */ + TPS65224_IRQ_BIST_PASS, + TPS65224_IRQ_EXT_CLK, + TPS65224_IRQ_REG_UNLOCK, + TPS65224_IRQ_TWARN, + TPS65224_IRQ_PB_LONG, + TPS65224_IRQ_PB_FALL, + TPS65224_IRQ_PB_RISE, + TPS65224_IRQ_ADC_CONV_READY, + /* INT_MODERATE_ERR register */ + TPS65224_IRQ_TSD_ORD, + TPS65224_IRQ_BIST_FAIL, + TPS65224_IRQ_REG_CRC_ERR, + TPS65224_IRQ_RECOV_CNT, + /* INT_SEVERE_ERR register */ + TPS65224_IRQ_TSD_IMM, + TPS65224_IRQ_VCCA_OVP, + TPS65224_IRQ_PFSM_ERR, + TPS65224_IRQ_BG_XMON, + /* INT_FSM_ERR register */ + TPS65224_IRQ_IMM_SHUTDOWN, + TPS65224_IRQ_ORD_SHUTDOWN, + TPS65224_IRQ_MCU_PWR_ERR, + TPS65224_IRQ_SOC_PWR_ERR, + TPS65224_IRQ_COMM_ERR, + TPS65224_IRQ_I2C2_ERR, +}; + +#define TPS65224_IRQ_NAME_BUCK1_UVOV "buck1_uvov" +#define TPS65224_IRQ_NAME_BUCK2_UVOV "buck2_uvov" +#define TPS65224_IRQ_NAME_BUCK3_UVOV "buck3_uvov" +#define TPS65224_IRQ_NAME_BUCK4_UVOV "buck4_uvov" +#define 
TPS65224_IRQ_NAME_LDO1_UVOV "ldo1_uvov" +#define TPS65224_IRQ_NAME_LDO2_UVOV "ldo2_uvov" +#define TPS65224_IRQ_NAME_LDO3_UVOV "ldo3_uvov" +#define TPS65224_IRQ_NAME_VCCA_UVOV "vcca_uvov" +#define TPS65224_IRQ_NAME_VMON1_UVOV "vmon1_uvov" +#define TPS65224_IRQ_NAME_VMON2_UVOV "vmon2_uvov" +#define TPS65224_IRQ_NAME_GPIO1 "gpio1" +#define TPS65224_IRQ_NAME_GPIO2 "gpio2" +#define TPS65224_IRQ_NAME_GPIO3 "gpio3" +#define TPS65224_IRQ_NAME_GPIO4 "gpio4" +#define TPS65224_IRQ_NAME_GPIO5 "gpio5" +#define TPS65224_IRQ_NAME_GPIO6 "gpio6" +#define TPS65224_IRQ_NAME_VSENSE "vsense" +#define TPS65224_IRQ_NAME_ENABLE "enable" +#define TPS65224_IRQ_NAME_PB_SHORT "pb_short" +#define TPS65224_IRQ_NAME_FSD "fsd" +#define TPS65224_IRQ_NAME_SOFT_REBOOT "soft_reboot" +#define TPS65224_IRQ_NAME_BIST_PASS "bist_pass" +#define TPS65224_IRQ_NAME_EXT_CLK "ext_clk" +#define TPS65224_IRQ_NAME_REG_UNLOCK "reg_unlock" +#define TPS65224_IRQ_NAME_TWARN "twarn" +#define TPS65224_IRQ_NAME_PB_LONG "pb_long" +#define TPS65224_IRQ_NAME_PB_FALL "pb_fall" +#define TPS65224_IRQ_NAME_PB_RISE "pb_rise" +#define TPS65224_IRQ_NAME_ADC_CONV_READY "adc_conv_ready" +#define TPS65224_IRQ_NAME_TSD_ORD "tsd_ord" +#define TPS65224_IRQ_NAME_BIST_FAIL "bist_fail" +#define TPS65224_IRQ_NAME_REG_CRC_ERR "reg_crc_err" +#define TPS65224_IRQ_NAME_RECOV_CNT "recov_cnt" +#define TPS65224_IRQ_NAME_TSD_IMM "tsd_imm" +#define TPS65224_IRQ_NAME_VCCA_OVP "vcca_ovp" +#define TPS65224_IRQ_NAME_PFSM_ERR "pfsm_err" +#define TPS65224_IRQ_NAME_BG_XMON "bg_xmon" +#define TPS65224_IRQ_NAME_IMM_SHUTDOWN "imm_shutdown" +#define TPS65224_IRQ_NAME_ORD_SHUTDOWN "ord_shutdown" +#define TPS65224_IRQ_NAME_MCU_PWR_ERR "mcu_pwr_err" +#define TPS65224_IRQ_NAME_SOC_PWR_ERR "soc_pwr_err" +#define TPS65224_IRQ_NAME_COMM_ERR "comm_err" +#define TPS65224_IRQ_NAME_I2C2_ERR "i2c2_err" +#define TPS65224_IRQ_NAME_POWERUP "powerup" + /** * struct tps6594 - device private data structure * -- cgit From 436250638b6d8e6cf8dceed82cdbbfc90ce3a775 Mon Sep 17 00:00:00 2001 From: Bhargav Raviprakash Date: Tue, 30 Apr 2024 13:16:43 +0000 Subject: mfd: tps6594: Use volatile_table instead of volatile_reg In regmap_config use volatile_table instead of volatile_reg. This change makes it easier to add support for TPS65224 PMIC. 
Signed-off-by: Bhargav Raviprakash Acked-by: Julien Panis Link: https://lore.kernel.org/r/0109018f2f267f6e-3121fa42-4816-45f7-a96d-0d6b4678da5a-000000@ap-south-1.amazonses.com Signed-off-by: Lee Jones --- drivers/mfd/tps6594-core.c | 16 ++++++++++------ drivers/mfd/tps6594-i2c.c | 2 +- drivers/mfd/tps6594-spi.c | 2 +- include/linux/mfd/tps6594.h | 4 +++- 4 files changed, 15 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/mfd/tps6594-core.c b/drivers/mfd/tps6594-core.c index 783ee59901e8..089ab8cc8664 100644 --- a/drivers/mfd/tps6594-core.c +++ b/drivers/mfd/tps6594-core.c @@ -319,12 +319,16 @@ static struct regmap_irq_chip tps6594_irq_chip = { .handle_post_irq = tps6594_handle_post_irq, }; -bool tps6594_is_volatile_reg(struct device *dev, unsigned int reg) -{ - return (reg >= TPS6594_REG_INT_TOP && reg <= TPS6594_REG_STAT_READBACK_ERR) || - reg == TPS6594_REG_RTC_STATUS; -} -EXPORT_SYMBOL_GPL(tps6594_is_volatile_reg); +static const struct regmap_range tps6594_volatile_ranges[] = { + regmap_reg_range(TPS6594_REG_INT_TOP, TPS6594_REG_STAT_READBACK_ERR), + regmap_reg_range(TPS6594_REG_RTC_STATUS, TPS6594_REG_RTC_STATUS), +}; + +const struct regmap_access_table tps6594_volatile_table = { + .yes_ranges = tps6594_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(tps6594_volatile_ranges), +}; +EXPORT_SYMBOL_GPL(tps6594_volatile_table); static int tps6594_check_crc_mode(struct tps6594 *tps, bool primary_pmic) { diff --git a/drivers/mfd/tps6594-i2c.c b/drivers/mfd/tps6594-i2c.c index 899c88c0fe77..c125b474ba8a 100644 --- a/drivers/mfd/tps6594-i2c.c +++ b/drivers/mfd/tps6594-i2c.c @@ -187,7 +187,7 @@ static const struct regmap_config tps6594_i2c_regmap_config = { .reg_bits = 16, .val_bits = 8, .max_register = TPS6594_REG_DWD_FAIL_CNT_REG, - .volatile_reg = tps6594_is_volatile_reg, + .volatile_table = &tps6594_volatile_table, .read = tps6594_i2c_read, .write = tps6594_i2c_write, }; diff --git a/drivers/mfd/tps6594-spi.c b/drivers/mfd/tps6594-spi.c index 24b72847e3f5..5afb1736f0ae 100644 --- a/drivers/mfd/tps6594-spi.c +++ b/drivers/mfd/tps6594-spi.c @@ -70,7 +70,7 @@ static const struct regmap_config tps6594_spi_regmap_config = { .reg_bits = 16, .val_bits = 8, .max_register = TPS6594_REG_DWD_FAIL_CNT_REG, - .volatile_reg = tps6594_is_volatile_reg, + .volatile_table = &tps6594_volatile_table, .reg_read = tps6594_spi_reg_read, .reg_write = tps6594_spi_reg_write, .use_single_read = true, diff --git a/include/linux/mfd/tps6594.h b/include/linux/mfd/tps6594.h index e754c01acc64..16543fd4d83e 100644 --- a/include/linux/mfd/tps6594.h +++ b/include/linux/mfd/tps6594.h @@ -1337,7 +1337,9 @@ struct tps6594 { struct regmap_irq_chip_data *irq_data; }; -bool tps6594_is_volatile_reg(struct device *dev, unsigned int reg); +extern const struct regmap_access_table tps6594_volatile_table; +extern const struct regmap_access_table tps65224_volatile_table; + int tps6594_device_init(struct tps6594 *tps, bool enable_crc); #endif /* __LINUX_MFD_TPS6594_H */ -- cgit From a2e25c8165f93ca8a2d54bf230e1bb7e0319b46c Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Wed, 24 Apr 2024 08:33:27 +0200 Subject: backlight: lcd: Constify lcd_ops 'struct lcd_ops' passed in lcd_device_register() is not modified by core backlight code, so it can be made const for code safety. This allows drivers to also define the structure as const. 
Signed-off-by: Krzysztof Kozlowski Reviewed-by: Daniel Thompson Link: https://lore.kernel.org/r/20240424-video-backlight-lcd-ops-v2-1-1aaa82b07bc6@kernel.org Signed-off-by: Lee Jones --- drivers/video/backlight/lcd.c | 4 ++-- include/linux/lcd.h | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include/linux') diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c index 77c5cb2a44e2..ba3e37b5b584 100644 --- a/drivers/video/backlight/lcd.c +++ b/drivers/video/backlight/lcd.c @@ -188,7 +188,7 @@ ATTRIBUTE_GROUPS(lcd_device); * or a pointer to the newly allocated device. */ struct lcd_device *lcd_device_register(const char *name, struct device *parent, - void *devdata, struct lcd_ops *ops) + void *devdata, const struct lcd_ops *ops) { struct lcd_device *new_ld; int rc; @@ -276,7 +276,7 @@ static int devm_lcd_device_match(struct device *dev, void *res, void *data) */ struct lcd_device *devm_lcd_device_register(struct device *dev, const char *name, struct device *parent, - void *devdata, struct lcd_ops *ops) + void *devdata, const struct lcd_ops *ops) { struct lcd_device **ptr, *lcd; diff --git a/include/linux/lcd.h b/include/linux/lcd.h index 238fb1dfed98..68703a51dc53 100644 --- a/include/linux/lcd.h +++ b/include/linux/lcd.h @@ -61,7 +61,7 @@ struct lcd_device { points to something in the body of that driver, it is also invalid. */ struct mutex ops_lock; /* If this is NULL, the backing module is unloaded */ - struct lcd_ops *ops; + const struct lcd_ops *ops; /* Serialise access to set_power method */ struct mutex update_lock; /* The framebuffer notifier block */ @@ -102,10 +102,10 @@ static inline void lcd_set_power(struct lcd_device *ld, int power) } extern struct lcd_device *lcd_device_register(const char *name, - struct device *parent, void *devdata, struct lcd_ops *ops); + struct device *parent, void *devdata, const struct lcd_ops *ops); extern struct lcd_device *devm_lcd_device_register(struct device *dev, const char *name, struct device *parent, - void *devdata, struct lcd_ops *ops); + void *devdata, const struct lcd_ops *ops); extern void lcd_device_unregister(struct lcd_device *ld); extern void devm_lcd_device_unregister(struct device *dev, struct lcd_device *ld); -- cgit From bf8367b00c33c64a9391c262bb2e11d274c9f2a4 Mon Sep 17 00:00:00 2001 From: Jean-Baptiste Maneyrol Date: Fri, 26 Apr 2024 09:48:35 +0000 Subject: iio: invensense: fix timestamp glitches when switching frequency When a sensor is running and the FIFO frequency changes because another sensor is turned on or off, there are glitches in the timestamps. Fix that by using the interrupt timestamp only when the corresponding sensor data is present in the FIFO. Delete FIFO period handling and simplify internal functions. Update integration inside inv_mpu6050 and inv_icm42600 drivers. 
Fixes: 0ecc363ccea7 ("iio: make invensense timestamp module generic") Cc: Stable@vger.kernel.org Signed-off-by: Jean-Baptiste Maneyrol Link: https://lore.kernel.org/r/20240426094835.138389-1-inv.git-commit@tdk.com Signed-off-by: Jonathan Cameron --- .../iio/common/inv_sensors/inv_sensors_timestamp.c | 24 ++++++++++------------ drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c | 20 ++++++++---------- drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c | 2 +- include/linux/iio/common/inv_sensors_timestamp.h | 3 +-- 4 files changed, 21 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c index 4b8ec16240b5..fa205f17bd90 100644 --- a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c +++ b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c @@ -70,13 +70,13 @@ int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts, } EXPORT_SYMBOL_NS_GPL(inv_sensors_timestamp_update_odr, IIO_INV_SENSORS_TIMESTAMP); -static bool inv_validate_period(struct inv_sensors_timestamp *ts, uint32_t period, uint32_t mult) +static bool inv_validate_period(struct inv_sensors_timestamp *ts, uint32_t period) { uint32_t period_min, period_max; /* check that period is acceptable */ - period_min = ts->min_period * mult; - period_max = ts->max_period * mult; + period_min = ts->min_period * ts->mult; + period_max = ts->max_period * ts->mult; if (period > period_min && period < period_max) return true; else @@ -84,15 +84,15 @@ static bool inv_validate_period(struct inv_sensors_timestamp *ts, uint32_t perio } static bool inv_update_chip_period(struct inv_sensors_timestamp *ts, - uint32_t mult, uint32_t period) + uint32_t period) { uint32_t new_chip_period; - if (!inv_validate_period(ts, period, mult)) + if (!inv_validate_period(ts, period)) return false; /* update chip internal period estimation */ - new_chip_period = period / mult; + new_chip_period = period / ts->mult; inv_update_acc(&ts->chip_period, new_chip_period); ts->period = ts->mult * ts->chip_period.val; @@ -125,16 +125,14 @@ static void inv_align_timestamp_it(struct inv_sensors_timestamp *ts) } void inv_sensors_timestamp_interrupt(struct inv_sensors_timestamp *ts, - uint32_t fifo_period, size_t fifo_nb, - size_t sensor_nb, int64_t timestamp) + size_t sample_nb, int64_t timestamp) { struct inv_sensors_timestamp_interval *it; int64_t delta, interval; - const uint32_t fifo_mult = fifo_period / ts->chip.clock_period; uint32_t period; bool valid = false; - if (fifo_nb == 0) + if (sample_nb == 0) return; /* update interrupt timestamp and compute chip and sensor periods */ @@ -144,14 +142,14 @@ void inv_sensors_timestamp_interrupt(struct inv_sensors_timestamp *ts, delta = it->up - it->lo; if (it->lo != 0) { /* compute period: delta time divided by number of samples */ - period = div_s64(delta, fifo_nb); - valid = inv_update_chip_period(ts, fifo_mult, period); + period = div_s64(delta, sample_nb); + valid = inv_update_chip_period(ts, period); } /* no previous data, compute theoritical value from interrupt */ if (ts->timestamp == 0) { /* elapsed time: sensor period * sensor samples number */ - interval = (int64_t)ts->period * (int64_t)sensor_nb; + interval = (int64_t)ts->period * (int64_t)sample_nb; ts->timestamp = it->up - interval; return; } diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c index cfb4a41ab7c1..63b85ec88c13 100644 --- 
a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c @@ -512,20 +512,20 @@ int inv_icm42600_buffer_fifo_parse(struct inv_icm42600_state *st) return 0; /* handle gyroscope timestamp and FIFO data parsing */ - ts = &gyro_st->ts; - inv_sensors_timestamp_interrupt(ts, st->fifo.period, st->fifo.nb.total, - st->fifo.nb.gyro, st->timestamp.gyro); if (st->fifo.nb.gyro > 0) { + ts = &gyro_st->ts; + inv_sensors_timestamp_interrupt(ts, st->fifo.nb.gyro, + st->timestamp.gyro); ret = inv_icm42600_gyro_parse_fifo(st->indio_gyro); if (ret) return ret; } /* handle accelerometer timestamp and FIFO data parsing */ - ts = &accel_st->ts; - inv_sensors_timestamp_interrupt(ts, st->fifo.period, st->fifo.nb.total, - st->fifo.nb.accel, st->timestamp.accel); if (st->fifo.nb.accel > 0) { + ts = &accel_st->ts; + inv_sensors_timestamp_interrupt(ts, st->fifo.nb.accel, + st->timestamp.accel); ret = inv_icm42600_accel_parse_fifo(st->indio_accel); if (ret) return ret; @@ -555,9 +555,7 @@ int inv_icm42600_buffer_hwfifo_flush(struct inv_icm42600_state *st, if (st->fifo.nb.gyro > 0) { ts = &gyro_st->ts; - inv_sensors_timestamp_interrupt(ts, st->fifo.period, - st->fifo.nb.total, st->fifo.nb.gyro, - gyro_ts); + inv_sensors_timestamp_interrupt(ts, st->fifo.nb.gyro, gyro_ts); ret = inv_icm42600_gyro_parse_fifo(st->indio_gyro); if (ret) return ret; @@ -565,9 +563,7 @@ int inv_icm42600_buffer_hwfifo_flush(struct inv_icm42600_state *st, if (st->fifo.nb.accel > 0) { ts = &accel_st->ts; - inv_sensors_timestamp_interrupt(ts, st->fifo.period, - st->fifo.nb.total, st->fifo.nb.accel, - accel_ts); + inv_sensors_timestamp_interrupt(ts, st->fifo.nb.accel, accel_ts); ret = inv_icm42600_accel_parse_fifo(st->indio_accel); if (ret) return ret; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c index 86465226f7e1..0dc0f22a5582 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_ring.c @@ -100,7 +100,7 @@ irqreturn_t inv_mpu6050_read_fifo(int irq, void *p) goto end_session; /* Each FIFO data contains all sensors, so same number for FIFO and sensor data */ fifo_period = NSEC_PER_SEC / INV_MPU6050_DIVIDER_TO_FIFO_RATE(st->chip_config.divider); - inv_sensors_timestamp_interrupt(&st->timestamp, fifo_period, nb, nb, pf->timestamp); + inv_sensors_timestamp_interrupt(&st->timestamp, nb, pf->timestamp); inv_sensors_timestamp_apply_odr(&st->timestamp, fifo_period, nb, 0); /* clear internal data buffer for avoiding kernel data leak */ diff --git a/include/linux/iio/common/inv_sensors_timestamp.h b/include/linux/iio/common/inv_sensors_timestamp.h index a47d304d1ba7..8d506f1e9df2 100644 --- a/include/linux/iio/common/inv_sensors_timestamp.h +++ b/include/linux/iio/common/inv_sensors_timestamp.h @@ -71,8 +71,7 @@ int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts, uint32_t period, bool fifo); void inv_sensors_timestamp_interrupt(struct inv_sensors_timestamp *ts, - uint32_t fifo_period, size_t fifo_nb, - size_t sensor_nb, int64_t timestamp); + size_t sample_nb, int64_t timestamp); static inline int64_t inv_sensors_timestamp_pop(struct inv_sensors_timestamp *ts) { -- cgit From c56436ef17520c5fb0f9c2fc47aa961a7946895f Mon Sep 17 00:00:00 2001 From: Oreoluwa Babatunde Date: Thu, 2 May 2024 12:24:03 -0700 Subject: of: reserved_mem: Remove the use of phandle from the reserved_mem APIs The __find_rmem() function is the only place that references the phandle field of the reserved_mem struct. 
__find_rmem() is used to match a device_node object to its corresponding entry in the reserved_mem array using its phandle value. However, there is already a function, of_reserved_mem_lookup(), which carries out the same action using the name of the node. Using the of_reserved_mem_lookup() function is more reliable because every node is guaranteed to have a name, but not all nodes will have a phandle. Nodes are only assigned a phandle if they are explicitly defined in the DT using "phandle = ", or if they are referenced by another node in the DT. Hence, if the phandle field is empty, __find_rmem() will return a false negative. Therefore, delete the __find_rmem() function and switch to using the of_reserved_mem_lookup() function to find the corresponding entry of a device_node in the reserved_mem array. Since the phandle field of the reserved_mem struct is now unused, delete that as well. Signed-off-by: Oreoluwa Babatunde Link: https://lore.kernel.org/r/20240502192403.3307277-1-quic_obabatun@quicinc.com Signed-off-by: Rob Herring (Arm) --- drivers/of/of_reserved_mem.c | 22 +--------------------- include/linux/of_reserved_mem.h | 1 - 2 files changed, 1 insertion(+), 22 deletions(-) (limited to 'include/linux') diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 8236ecae2953..46e1c3fbc769 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -437,17 +437,10 @@ void __init fdt_init_reserved_mem(void) for (i = 0; i < reserved_mem_count; i++) { struct reserved_mem *rmem = &reserved_mem[i]; unsigned long node = rmem->fdt_node; - int len; - const __be32 *prop; int err = 0; bool nomap; nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; - prop = of_get_flat_dt_prop(node, "phandle", &len); - if (!prop) - prop = of_get_flat_dt_prop(node, "linux,phandle", &len); - if (prop) - rmem->phandle = of_read_number(prop, len/4); if (rmem->size == 0) err = __reserved_mem_alloc_size(node, rmem->name, @@ -477,19 +470,6 @@ void __init fdt_init_reserved_mem(void) } } -static inline struct reserved_mem *__find_rmem(struct device_node *node) -{ - unsigned int i; - - if (!node->phandle) - return NULL; - - for (i = 0; i < reserved_mem_count; i++) - if (reserved_mem[i].phandle == node->phandle) - return &reserved_mem[i]; - return NULL; -} - struct rmem_assigned_device { struct device *dev; struct reserved_mem *rmem; @@ -534,7 +514,7 @@ int of_reserved_mem_device_init_by_idx(struct device *dev, return 0; } - rmem = __find_rmem(target); + rmem = of_reserved_mem_lookup(target); of_node_put(target); if (!rmem || !rmem->ops || !rmem->ops->device_init) diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index 4de2a24cadc9..e338282da652 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h @@ -11,7 +11,6 @@ struct reserved_mem_ops; struct reserved_mem { const char *name; unsigned long fdt_node; - unsigned long phandle; const struct reserved_mem_ops *ops; phys_addr_t base; phys_addr_t size; -- cgit From 140ce28dd3bee8e53acc27f123ae474d69ef66f0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 2 May 2024 15:00:32 +0200 Subject: block: add a disk_has_partscan helper Add a helper to check if partition scanning is enabled instead of open coding the check in a few places. This now always checks for the hidden flag even if all but one of the callers are never reachable for hidden gendisks.
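A minimal usage sketch, assuming a caller that already holds a gendisk pointer; the error-handling convention shown is the one used by disk_scan_partitions() in the patch below, other callers simply skip partition scanning instead of failing:

	if (!disk_has_partscan(disk))
		return -EINVAL;	/* partition scanning is disabled for this disk */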
Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20240502130033.1958492-2-hch@lst.de Signed-off-by: Jens Axboe --- block/genhd.c | 7 ++----- block/partitions/core.c | 5 +---- include/linux/blkdev.h | 13 +++++++++++++ 3 files changed, 16 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/block/genhd.c b/block/genhd.c index eb893df56d51..4b85963d09db 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -345,9 +345,7 @@ int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode) struct file *file; int ret = 0; - if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) - return -EINVAL; - if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state)) + if (!disk_has_partscan(disk)) return -EINVAL; if (disk->open_partitions) return -EBUSY; @@ -503,8 +501,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk, goto out_unregister_bdi; /* Make sure the first partition scan will be proceed */ - if (get_capacity(disk) && !(disk->flags & GENHD_FL_NO_PART) && - !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state)) + if (get_capacity(disk) && disk_has_partscan(disk)) set_bit(GD_NEED_PART_SCAN, &disk->state); bdev_add(disk->part0, ddev->devt); diff --git a/block/partitions/core.c b/block/partitions/core.c index b11e88c82c8c..37b5f92d07fe 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -573,10 +573,7 @@ static int blk_add_partitions(struct gendisk *disk) struct parsed_partitions *state; int ret = -EAGAIN, p; - if (disk->flags & GENHD_FL_NO_PART) - return 0; - - if (test_bit(GD_SUPPRESS_PART_SCAN, &disk->state)) + if (!disk_has_partscan(disk)) return 0; state = check_partition(disk); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 040a22e0eda0..3b18a40a1fc1 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -230,6 +230,19 @@ static inline unsigned int disk_openers(struct gendisk *disk) return atomic_read(&disk->part0->bd_openers); } +/** + * disk_has_partscan - return %true if partition scanning is enabled on a disk + * @disk: disk to check + * + * Returns %true if partitions scanning is enabled for @disk, or %false if + * partition scanning is disabled either permanently or temporarily. + */ +static inline bool disk_has_partscan(struct gendisk *disk) +{ + return !(disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN)) && + !test_bit(GD_SUPPRESS_PART_SCAN, &disk->state); +} + /* * The gendisk is refcounted by the part0 block_device, and the bd_device * therein is also used for device model presentation in sysfs. -- cgit From 9c313ccdfc079f71f16c9b3d3a6c6d60996b9367 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Sun, 21 Apr 2024 00:38:37 +0200 Subject: bitops: Change function return types from long to int Change the return types of bitops functions (ffs, fls, and fns) from long to int. The expected return values are in the range [0, 64], for which int is sufficient. Additionally, int aligns well with the return types of the corresponding __builtin_* functions, potentially reducing overall type conversions. Many of the existing bitops functions already return an int and don't need to be changed. The bitops functions in arch/ should be considered separately. Adjust some return variables to match the function return types. With GCC 13 and defconfig, these changes reduced the size of a test kernel image by 5,432 bytes on arm64 and by 248 bytes on riscv; there were no changes in size on x86_64, powerpc, or m68k. 
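As a hedged illustration of the caller-side effect (kernel context assumed, the mask value is made up), the results now fit naturally in an int-sized variable:

	unsigned long mask = 0x00f0;
	unsigned int first = __ffs(mask);	/* 4: index of the lowest set bit, mask must be non-zero */
	unsigned int last  = __fls(mask);	/* 7: index of the highest set bit, mask must be non-zero */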
Signed-off-by: Thorsten Blum Signed-off-by: Arnd Bergmann --- include/asm-generic/bitops/__ffs.h | 4 ++-- include/asm-generic/bitops/__fls.h | 4 ++-- include/asm-generic/bitops/builtin-__ffs.h | 2 +- include/asm-generic/bitops/builtin-__fls.h | 2 +- include/linux/bitops.h | 6 +++--- tools/include/asm-generic/bitops/__ffs.h | 4 ++-- tools/include/asm-generic/bitops/__fls.h | 4 ++-- tools/include/linux/bitops.h | 2 +- 8 files changed, 14 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h index 446fea6dda78..2d08c750c8a7 100644 --- a/include/asm-generic/bitops/__ffs.h +++ b/include/asm-generic/bitops/__ffs.h @@ -10,9 +10,9 @@ * * Undefined if no bit exists, so code should check against 0 first. */ -static __always_inline unsigned long generic___ffs(unsigned long word) +static __always_inline unsigned int generic___ffs(unsigned long word) { - int num = 0; + unsigned int num = 0; #if BITS_PER_LONG == 64 if ((word & 0xffffffff) == 0) { diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h index 54ccccf96e21..e974ec932ec1 100644 --- a/include/asm-generic/bitops/__fls.h +++ b/include/asm-generic/bitops/__fls.h @@ -10,9 +10,9 @@ * * Undefined if no set bit exists, so code should check against 0 first. */ -static __always_inline unsigned long generic___fls(unsigned long word) +static __always_inline unsigned int generic___fls(unsigned long word) { - int num = BITS_PER_LONG - 1; + unsigned int num = BITS_PER_LONG - 1; #if BITS_PER_LONG == 64 if (!(word & (~0ul << 32))) { diff --git a/include/asm-generic/bitops/builtin-__ffs.h b/include/asm-generic/bitops/builtin-__ffs.h index 87024da44d10..cf4b3d33bf96 100644 --- a/include/asm-generic/bitops/builtin-__ffs.h +++ b/include/asm-generic/bitops/builtin-__ffs.h @@ -8,7 +8,7 @@ * * Undefined if no bit exists, so code should check against 0 first. */ -static __always_inline unsigned long __ffs(unsigned long word) +static __always_inline unsigned int __ffs(unsigned long word) { return __builtin_ctzl(word); } diff --git a/include/asm-generic/bitops/builtin-__fls.h b/include/asm-generic/bitops/builtin-__fls.h index 43a5aa9afbdb..6d72fc8a5259 100644 --- a/include/asm-generic/bitops/builtin-__fls.h +++ b/include/asm-generic/bitops/builtin-__fls.h @@ -8,7 +8,7 @@ * * Undefined if no set bit exists, so code should check against 0 first. */ -static __always_inline unsigned long __fls(unsigned long word) +static __always_inline unsigned int __fls(unsigned long word) { return (sizeof(word) * 8) - 1 - __builtin_clzl(word); } diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 2ba557e067fe..f60220f119e2 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -200,7 +200,7 @@ static __always_inline __s64 sign_extend64(__u64 value, int index) return (__s64)(value << shift) >> shift; } -static inline unsigned fls_long(unsigned long l) +static inline unsigned int fls_long(unsigned long l) { if (sizeof(l) == 4) return fls(l); @@ -236,7 +236,7 @@ static inline int get_count_order_long(unsigned long l) * The result is not defined if no bits are set, so check that @word * is non-zero before calling this. 
*/ -static inline unsigned long __ffs64(u64 word) +static inline unsigned int __ffs64(u64 word) { #if BITS_PER_LONG == 32 if (((u32)word) == 0UL) @@ -252,7 +252,7 @@ static inline unsigned long __ffs64(u64 word) * @word: The word to search * @n: Bit to find */ -static inline unsigned long fns(unsigned long word, unsigned int n) +static inline unsigned int fns(unsigned long word, unsigned int n) { unsigned int bit; diff --git a/tools/include/asm-generic/bitops/__ffs.h b/tools/include/asm-generic/bitops/__ffs.h index 9d1310519497..2d94c1e9b2f3 100644 --- a/tools/include/asm-generic/bitops/__ffs.h +++ b/tools/include/asm-generic/bitops/__ffs.h @@ -11,9 +11,9 @@ * * Undefined if no bit exists, so code should check against 0 first. */ -static __always_inline unsigned long __ffs(unsigned long word) +static __always_inline unsigned int __ffs(unsigned long word) { - int num = 0; + unsigned int num = 0; #if __BITS_PER_LONG == 64 if ((word & 0xffffffff) == 0) { diff --git a/tools/include/asm-generic/bitops/__fls.h b/tools/include/asm-generic/bitops/__fls.h index 54ccccf96e21..e974ec932ec1 100644 --- a/tools/include/asm-generic/bitops/__fls.h +++ b/tools/include/asm-generic/bitops/__fls.h @@ -10,9 +10,9 @@ * * Undefined if no set bit exists, so code should check against 0 first. */ -static __always_inline unsigned long generic___fls(unsigned long word) +static __always_inline unsigned int generic___fls(unsigned long word) { - int num = BITS_PER_LONG - 1; + unsigned int num = BITS_PER_LONG - 1; #if BITS_PER_LONG == 64 if (!(word & (~0ul << 32))) { diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h index 7319f6ced108..8e073a111d2a 100644 --- a/tools/include/linux/bitops.h +++ b/tools/include/linux/bitops.h @@ -70,7 +70,7 @@ static inline unsigned long hweight_long(unsigned long w) return sizeof(w) == 4 ? hweight32(w) : hweight64(w); } -static inline unsigned fls_long(unsigned long l) +static inline unsigned int fls_long(unsigned long l) { if (sizeof(l) == 4) return fls(l); -- cgit From 2fd001cd36005846caa6456fff1008c6f5bae9d4 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 29 Mar 2024 21:32:12 +0100 Subject: arch: Rename fbdev header and source files The per-architecture fbdev code has no dependencies on fbdev and can be used for any video-related subsystem. Rename the files to 'video'. Use video-sti.c on parisc as the source file depends on CONFIG_STI_CORE. On arc, arm, arm64, sh, and um the asm header file is an empty wrapper around the file in asm-generic. Let Kbuild generate the file. The build system does this automatically. Only um needs to generate video.h explicitly, so that it overrides the host architecture's header. The latter would otherwise interfere with the build. Further update all includes statements, include guards, and Makefiles. Also update a few strings and comments to refer to video instead of fbdev. v3: - arc, arm, arm64, sh: generate asm header via build system (Sam, Helge, Arnd) - um: rename fb.h to video.h - fix typo in commit message (Sam) Signed-off-by: Thomas Zimmermann Reviewed-by: Sam Ravnborg Cc: Vineet Gupta Cc: Catalin Marinas Cc: Will Deacon Cc: Huacai Chen Cc: WANG Xuerui Cc: Geert Uytterhoeven Cc: Thomas Bogendoerfer Cc: "James E.J. Bottomley" Cc: Helge Deller Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Yoshinori Sato Cc: Rich Felker Cc: John Paul Adrian Glaubitz Cc: "David S. Miller" Cc: Andreas Larsson Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: x86@kernel.org Cc: "H. 
Peter Anvin" Signed-off-by: Arnd Bergmann --- arch/arc/include/asm/fb.h | 8 --- arch/arm/include/asm/fb.h | 6 -- arch/arm64/include/asm/fb.h | 10 --- arch/loongarch/include/asm/fb.h | 31 --------- arch/loongarch/include/asm/video.h | 31 +++++++++ arch/m68k/include/asm/fb.h | 32 --------- arch/m68k/include/asm/video.h | 32 +++++++++ arch/mips/include/asm/fb.h | 38 ----------- arch/mips/include/asm/video.h | 38 +++++++++++ arch/parisc/include/asm/fb.h | 16 ----- arch/parisc/include/asm/video.h | 16 +++++ arch/parisc/video/Makefile | 2 +- arch/parisc/video/fbdev.c | 27 -------- arch/parisc/video/video-sti.c | 27 ++++++++ arch/powerpc/include/asm/fb.h | 17 ----- arch/powerpc/include/asm/video.h | 17 +++++ arch/powerpc/kernel/pci-common.c | 2 +- arch/sh/include/asm/fb.h | 7 -- arch/sparc/include/asm/fb.h | 45 ------------ arch/sparc/include/asm/video.h | 45 ++++++++++++ arch/sparc/video/Makefile | 2 +- arch/sparc/video/fbdev.c | 25 ------- arch/sparc/video/video.c | 25 +++++++ arch/um/include/asm/Kbuild | 2 +- arch/x86/include/asm/fb.h | 21 ------ arch/x86/include/asm/video.h | 21 ++++++ arch/x86/video/Makefile | 2 +- arch/x86/video/fbdev.c | 40 ----------- arch/x86/video/video.c | 41 +++++++++++ include/asm-generic/Kbuild | 2 +- include/asm-generic/fb.h | 136 ------------------------------------- include/asm-generic/video.h | 136 +++++++++++++++++++++++++++++++++++++ include/linux/fb.h | 2 +- 33 files changed, 436 insertions(+), 466 deletions(-) delete mode 100644 arch/arc/include/asm/fb.h delete mode 100644 arch/arm/include/asm/fb.h delete mode 100644 arch/arm64/include/asm/fb.h delete mode 100644 arch/loongarch/include/asm/fb.h create mode 100644 arch/loongarch/include/asm/video.h delete mode 100644 arch/m68k/include/asm/fb.h create mode 100644 arch/m68k/include/asm/video.h delete mode 100644 arch/mips/include/asm/fb.h create mode 100644 arch/mips/include/asm/video.h delete mode 100644 arch/parisc/include/asm/fb.h create mode 100644 arch/parisc/include/asm/video.h delete mode 100644 arch/parisc/video/fbdev.c create mode 100644 arch/parisc/video/video-sti.c delete mode 100644 arch/powerpc/include/asm/fb.h create mode 100644 arch/powerpc/include/asm/video.h delete mode 100644 arch/sh/include/asm/fb.h delete mode 100644 arch/sparc/include/asm/fb.h create mode 100644 arch/sparc/include/asm/video.h delete mode 100644 arch/sparc/video/fbdev.c create mode 100644 arch/sparc/video/video.c delete mode 100644 arch/x86/include/asm/fb.h create mode 100644 arch/x86/include/asm/video.h delete mode 100644 arch/x86/video/fbdev.c create mode 100644 arch/x86/video/video.c delete mode 100644 include/asm-generic/fb.h create mode 100644 include/asm-generic/video.h (limited to 'include/linux') diff --git a/arch/arc/include/asm/fb.h b/arch/arc/include/asm/fb.h deleted file mode 100644 index 9c2383d29cbb..000000000000 --- a/arch/arc/include/asm/fb.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#ifndef _ASM_FB_H_ -#define _ASM_FB_H_ - -#include - -#endif /* _ASM_FB_H_ */ diff --git a/arch/arm/include/asm/fb.h b/arch/arm/include/asm/fb.h deleted file mode 100644 index ce20a43c3033..000000000000 --- a/arch/arm/include/asm/fb.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_FB_H_ -#define _ASM_FB_H_ - -#include - -#endif /* _ASM_FB_H_ */ diff --git a/arch/arm64/include/asm/fb.h b/arch/arm64/include/asm/fb.h deleted file mode 100644 index 1a495d8fb2ce..000000000000 --- a/arch/arm64/include/asm/fb.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 
2012 ARM Ltd. - */ -#ifndef __ASM_FB_H_ -#define __ASM_FB_H_ - -#include - -#endif /* __ASM_FB_H_ */ diff --git a/arch/loongarch/include/asm/fb.h b/arch/loongarch/include/asm/fb.h deleted file mode 100644 index 0b218b10a9ec..000000000000 --- a/arch/loongarch/include/asm/fb.h +++ /dev/null @@ -1,31 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Copyright (C) 2020-2022 Loongson Technology Corporation Limited - */ -#ifndef _ASM_FB_H_ -#define _ASM_FB_H_ - -#include -#include - -static inline void fb_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) -{ - memcpy(to, (void __force *)from, n); -} -#define fb_memcpy_fromio fb_memcpy_fromio - -static inline void fb_memcpy_toio(volatile void __iomem *to, const void *from, size_t n) -{ - memcpy((void __force *)to, from, n); -} -#define fb_memcpy_toio fb_memcpy_toio - -static inline void fb_memset_io(volatile void __iomem *addr, int c, size_t n) -{ - memset((void __force *)addr, c, n); -} -#define fb_memset fb_memset_io - -#include - -#endif /* _ASM_FB_H_ */ diff --git a/arch/loongarch/include/asm/video.h b/arch/loongarch/include/asm/video.h new file mode 100644 index 000000000000..9f76845f2d4f --- /dev/null +++ b/arch/loongarch/include/asm/video.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2022 Loongson Technology Corporation Limited + */ +#ifndef _ASM_VIDEO_H_ +#define _ASM_VIDEO_H_ + +#include +#include + +static inline void fb_memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) +{ + memcpy(to, (void __force *)from, n); +} +#define fb_memcpy_fromio fb_memcpy_fromio + +static inline void fb_memcpy_toio(volatile void __iomem *to, const void *from, size_t n) +{ + memcpy((void __force *)to, from, n); +} +#define fb_memcpy_toio fb_memcpy_toio + +static inline void fb_memset_io(volatile void __iomem *addr, int c, size_t n) +{ + memset((void __force *)addr, c, n); +} +#define fb_memset fb_memset_io + +#include + +#endif /* _ASM_VIDEO_H_ */ diff --git a/arch/m68k/include/asm/fb.h b/arch/m68k/include/asm/fb.h deleted file mode 100644 index 9941b7434b69..000000000000 --- a/arch/m68k/include/asm/fb.h +++ /dev/null @@ -1,32 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_FB_H_ -#define _ASM_FB_H_ - -#include -#include - -static inline pgprot_t pgprot_framebuffer(pgprot_t prot, - unsigned long vm_start, unsigned long vm_end, - unsigned long offset) -{ -#ifdef CONFIG_MMU -#ifdef CONFIG_SUN3 - pgprot_val(prot) |= SUN3_PAGE_NOCACHE; -#else - if (CPU_IS_020_OR_030) - pgprot_val(prot) |= _PAGE_NOCACHE030; - if (CPU_IS_040_OR_060) { - pgprot_val(prot) &= _CACHEMASK040; - /* Use no-cache mode, serialized */ - pgprot_val(prot) |= _PAGE_NOCACHE_S; - } -#endif /* CONFIG_SUN3 */ -#endif /* CONFIG_MMU */ - - return prot; -} -#define pgprot_framebuffer pgprot_framebuffer - -#include - -#endif /* _ASM_FB_H_ */ diff --git a/arch/m68k/include/asm/video.h b/arch/m68k/include/asm/video.h new file mode 100644 index 000000000000..6cf2194c413d --- /dev/null +++ b/arch/m68k/include/asm/video.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_VIDEO_H_ +#define _ASM_VIDEO_H_ + +#include +#include + +static inline pgprot_t pgprot_framebuffer(pgprot_t prot, + unsigned long vm_start, unsigned long vm_end, + unsigned long offset) +{ +#ifdef CONFIG_MMU +#ifdef CONFIG_SUN3 + pgprot_val(prot) |= SUN3_PAGE_NOCACHE; +#else + if (CPU_IS_020_OR_030) + pgprot_val(prot) |= _PAGE_NOCACHE030; + if (CPU_IS_040_OR_060) { + pgprot_val(prot) &= _CACHEMASK040; + /* Use no-cache mode, 
serialized */ + pgprot_val(prot) |= _PAGE_NOCACHE_S; + } +#endif /* CONFIG_SUN3 */ +#endif /* CONFIG_MMU */ + + return prot; +} +#define pgprot_framebuffer pgprot_framebuffer + +#include + +#endif /* _ASM_VIDEO_H_ */ diff --git a/arch/mips/include/asm/fb.h b/arch/mips/include/asm/fb.h deleted file mode 100644 index d98d6681d64e..000000000000 --- a/arch/mips/include/asm/fb.h +++ /dev/null @@ -1,38 +0,0 @@ -#ifndef _ASM_FB_H_ -#define _ASM_FB_H_ - -#include - -static inline pgprot_t pgprot_framebuffer(pgprot_t prot, - unsigned long vm_start, unsigned long vm_end, - unsigned long offset) -{ - return pgprot_noncached(prot); -} -#define pgprot_framebuffer pgprot_framebuffer - -/* - * MIPS doesn't define __raw_ I/O macros, so the helpers - * in don't generate fb_readq() and - * fb_write(). We have to provide them here. - * - * TODO: Convert MIPS to generic I/O. The helpers below can - * then be removed. - */ -#ifdef CONFIG_64BIT -static inline u64 fb_readq(const volatile void __iomem *addr) -{ - return __raw_readq(addr); -} -#define fb_readq fb_readq - -static inline void fb_writeq(u64 b, volatile void __iomem *addr) -{ - __raw_writeq(b, addr); -} -#define fb_writeq fb_writeq -#endif - -#include - -#endif /* _ASM_FB_H_ */ diff --git a/arch/mips/include/asm/video.h b/arch/mips/include/asm/video.h new file mode 100644 index 000000000000..007c106d980f --- /dev/null +++ b/arch/mips/include/asm/video.h @@ -0,0 +1,38 @@ +#ifndef _ASM_VIDEO_H_ +#define _ASM_VIDEO_H_ + +#include + +static inline pgprot_t pgprot_framebuffer(pgprot_t prot, + unsigned long vm_start, unsigned long vm_end, + unsigned long offset) +{ + return pgprot_noncached(prot); +} +#define pgprot_framebuffer pgprot_framebuffer + +/* + * MIPS doesn't define __raw_ I/O macros, so the helpers + * in don't generate fb_readq() and + * fb_writeq(). We have to provide them here. + * + * TODO: Convert MIPS to generic I/O. The helpers below can + * then be removed. 
+ */ +#ifdef CONFIG_64BIT +static inline u64 fb_readq(const volatile void __iomem *addr) +{ + return __raw_readq(addr); +} +#define fb_readq fb_readq + +static inline void fb_writeq(u64 b, volatile void __iomem *addr) +{ + __raw_writeq(b, addr); +} +#define fb_writeq fb_writeq +#endif + +#include + +#endif /* _ASM_VIDEO_H_ */ diff --git a/arch/parisc/include/asm/fb.h b/arch/parisc/include/asm/fb.h deleted file mode 100644 index ed2a195a3e76..000000000000 --- a/arch/parisc/include/asm/fb.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_FB_H_ -#define _ASM_FB_H_ - -#include - -struct device; - -#if defined(CONFIG_STI_CORE) -bool video_is_primary_device(struct device *dev); -#define video_is_primary_device video_is_primary_device -#endif - -#include - -#endif /* _ASM_FB_H_ */ diff --git a/arch/parisc/include/asm/video.h b/arch/parisc/include/asm/video.h new file mode 100644 index 000000000000..c5dff3223194 --- /dev/null +++ b/arch/parisc/include/asm/video.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_VIDEO_H_ +#define _ASM_VIDEO_H_ + +#include + +struct device; + +#if defined(CONFIG_STI_CORE) +bool video_is_primary_device(struct device *dev); +#define video_is_primary_device video_is_primary_device +#endif + +#include + +#endif /* _ASM_VIDEO_H_ */ diff --git a/arch/parisc/video/Makefile b/arch/parisc/video/Makefile index 16a73cce4661..b5db5b42880f 100644 --- a/arch/parisc/video/Makefile +++ b/arch/parisc/video/Makefile @@ -1,3 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_STI_CORE) += fbdev.o +obj-$(CONFIG_STI_CORE) += video-sti.o diff --git a/arch/parisc/video/fbdev.c b/arch/parisc/video/fbdev.c deleted file mode 100644 index 540fa0c919d5..000000000000 --- a/arch/parisc/video/fbdev.c +++ /dev/null @@ -1,27 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2000 Philipp Rumpf - * Copyright (C) 2001-2020 Helge Deller - * Copyright (C) 2001-2002 Thomas Bogendoerfer - */ - -#include - -#include