Diffstat (limited to 'kernel')
-rw-r--r--  kernel/debug/debug_core.c   |  3
-rw-r--r--  kernel/debug/kdb/kdb_main.c |  7
-rw-r--r--  kernel/printk/printk.c      | 12
-rw-r--r--  kernel/rcu/rcu.h            |  2
-rw-r--r--  kernel/rcu/tasks.h          | 32
-rw-r--r--  kernel/rcu/tree.c           | 43
-rw-r--r--  kernel/resource.c           | 51
7 files changed, 103 insertions, 47 deletions
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 621037a0aa87..ce1bb2301c06 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -1006,6 +1006,9 @@ void kgdb_panic(const char *msg)
 	if (panic_timeout)
 		return;
 
+	debug_locks_off();
+	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
+
 	if (dbg_kdb_mode)
 		kdb_printf("PANIC: %s\n", msg);
 
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 438b868cbfa9..6b213c8252d6 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -272,11 +272,10 @@ char *kdbgetenv(const char *match)
  * kdballocenv - This function is used to allocate bytes for
  *	environment entries.
  * Parameters:
- *	match	A character string representing a numeric value
- * Outputs:
- *	*value  the unsigned long representation of the env variable 'match'
+ *	bytes	The number of bytes to allocate in the static buffer.
  * Returns:
- *	Zero on success, a kdb diagnostic on failure.
+ *	A pointer to the allocated space in the buffer on success.
+ *	NULL if bytes > size available in the envbuffer.
  * Remarks:
  *	We use a static environment buffer (envbuffer) to hold the values
  *	of dynamically generated environment variables (see kdb_set). Buffer
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index be95a6851164..f2444b581e16 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2384,13 +2384,21 @@ static void set_user_specified(struct console_cmdline *c, bool user_specified)
 	console_set_on_cmdline = 1;
 }
 
-static int __add_preferred_console(char *name, int idx, char *options,
+static int __add_preferred_console(const char *name, const short idx, char *options,
 				   char *brl_options, bool user_specified)
 {
 	struct console_cmdline *c;
 	int i;
 
 	/*
+	 * We use a signed short index for struct console for device drivers to
+	 * indicate a not yet assigned index or port. However, a negative index
+	 * value is not valid for preferred console.
+	 */
+	if (idx < 0)
+		return -EINVAL;
+
+	/*
 	 * See if this tty is not yet registered, and
 	 * if we have a slot free.
 	 */
@@ -2493,7 +2501,7 @@ __setup("console=", console_setup);
  * commonly to provide a default console (ie from PROM variables) when
  * the user has not supplied one.
  */
-int add_preferred_console(char *name, int idx, char *options)
+int add_preferred_console(const char *name, const short idx, char *options)
 {
 	return __add_preferred_console(name, idx, options, NULL, false);
 }
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 0d866eaa4cc8..b531c33e9545 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -500,6 +500,7 @@ static inline void rcu_expedite_gp(void) { }
 static inline void rcu_unexpedite_gp(void) { }
 static inline void rcu_async_hurry(void) { }
 static inline void rcu_async_relax(void) { }
+static inline bool rcu_cpu_online(int cpu) { return true; }
 #else /* #ifdef CONFIG_TINY_RCU */
 bool rcu_gp_is_normal(void);     /* Internal RCU use. */
 bool rcu_gp_is_expedited(void);  /* Internal RCU use. */
@@ -509,6 +510,7 @@ void rcu_unexpedite_gp(void);
 void rcu_async_hurry(void);
 void rcu_async_relax(void);
 void rcupdate_announce_bootup_oddness(void);
+bool rcu_cpu_online(int cpu);
 #ifdef CONFIG_TASKS_RCU_GENERIC
 void show_rcu_tasks_gp_kthreads(void);
 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
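The kdb_main.c hunk above corrects kdballocenv()'s kernel-doc, which had described another function's parameters. The contract the new comment documents — carve each allocation out of a static buffer and return NULL once the remaining room is too small — is a plain bump allocator. A minimal userspace sketch of that contract, assuming an illustrative buffer size and an invented envalloc() name (this is not the kernel's code):

#include <stddef.h>
#include <stdio.h>

#define ENVBUFSIZE 512

static char envbuffer[ENVBUFSIZE];
static int envbufsize;			/* bytes handed out so far */

static char *envalloc(size_t bytes)
{
	char *ep;

	if (bytes > ENVBUFSIZE - envbufsize)
		return NULL;		/* not enough room left */
	ep = &envbuffer[envbufsize];
	envbufsize += bytes;
	return ep;
}

int main(void)
{
	char *first = envalloc(16);

	/* Second request exceeds the remaining space and yields NULL. */
	printf("first: %s, oversized: %s\n",
	       first ? "ok" : "NULL",
	       envalloc(ENVBUFSIZE) ? "ok" : "NULL");
	return 0;
}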
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 1fa631168594..f54d5782eca0 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -895,10 +895,36 @@ static void rcu_tasks_pregp_step(struct list_head *hop)
 	synchronize_rcu();
 }
 
+/* Check for quiescent states since the pregp's synchronize_rcu() */
+static bool rcu_tasks_is_holdout(struct task_struct *t)
+{
+	int cpu;
+
+	/* Has the task been seen voluntarily sleeping? */
+	if (!READ_ONCE(t->on_rq))
+		return false;
+
+	/*
+	 * Idle tasks (or idle injection) within the idle loop are RCU-tasks
+	 * quiescent states. But CPU boot code performed by the idle task
+	 * isn't a quiescent state.
+	 */
+	if (is_idle_task(t))
+		return false;
+
+	cpu = task_cpu(t);
+
+	/* Idle tasks on offline CPUs are RCU-tasks quiescent states. */
+	if (t == idle_task(cpu) && !rcu_cpu_online(cpu))
+		return false;
+
+	return true;
+}
+
 /* Per-task initial processing. */
 static void rcu_tasks_pertask(struct task_struct *t, struct list_head *hop)
 {
-	if (t != current && READ_ONCE(t->on_rq) && !is_idle_task(t)) {
+	if (t != current && rcu_tasks_is_holdout(t)) {
 		get_task_struct(t);
 		t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
 		WRITE_ONCE(t->rcu_tasks_holdout, true);
@@ -947,7 +973,7 @@ static void check_holdout_task(struct task_struct *t,
 
 	if (!READ_ONCE(t->rcu_tasks_holdout) ||
 	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
-	    !READ_ONCE(t->on_rq) ||
+	    !rcu_tasks_is_holdout(t) ||
 	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
 	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
 		WRITE_ONCE(t->rcu_tasks_holdout, false);
@@ -1525,7 +1551,7 @@ static int trc_inspect_reader(struct task_struct *t, void *bhp_in)
 	} else {
 		// The task is not running, so C-language access is safe.
 		nesting = t->trc_reader_nesting;
-		WARN_ON_ONCE(ofl && task_curr(t) && !is_idle_task(t));
+		WARN_ON_ONCE(ofl && task_curr(t) && (t != idle_task(task_cpu(t))));
 		if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) && ofl)
 			n_heavy_reader_ofl_updates++;
 	}
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index d3a97e129020..3ac3c846105f 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -755,14 +755,19 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
 }
 
 /*
- * Return true if the specified CPU has passed through a quiescent
- * state by virtue of being in or having passed through an dynticks
- * idle state since the last call to dyntick_save_progress_counter()
- * for this same CPU, or by virtue of having been offline.
+ * Returns positive if the specified CPU has passed through a quiescent state
+ * by virtue of being in or having passed through an dynticks idle state since
+ * the last call to dyntick_save_progress_counter() for this same CPU, or by
+ * virtue of having been offline.
+ *
+ * Returns negative if the specified CPU needs a force resched.
+ *
+ * Returns zero otherwise.
  */
 static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 {
 	unsigned long jtsq;
+	int ret = 0;
 	struct rcu_node *rnp = rdp->mynode;
 
 	/*
@@ -848,8 +853,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	    (time_after(jiffies, READ_ONCE(rdp->last_fqs_resched) + jtsq * 3) ||
 	     rcu_state.cbovld)) {
 		WRITE_ONCE(rdp->rcu_urgent_qs, true);
-		resched_cpu(rdp->cpu);
 		WRITE_ONCE(rdp->last_fqs_resched, jiffies);
+		ret = -1;
 	}
 
 	/*
@@ -862,8 +867,8 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 	if (time_after(jiffies, rcu_state.jiffies_resched)) {
 		if (time_after(jiffies,
 			       READ_ONCE(rdp->last_fqs_resched) + jtsq)) {
-			resched_cpu(rdp->cpu);
 			WRITE_ONCE(rdp->last_fqs_resched, jiffies);
+			ret = -1;
 		}
 		if (IS_ENABLED(CONFIG_IRQ_WORK) &&
 		    !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq &&
@@ -892,7 +897,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
 		}
 	}
 
-	return 0;
+	return ret;
 }
 
 /* Trace-event wrapper function for trace_rcu_future_grace_period. */
@@ -2271,15 +2276,15 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 {
 	int cpu;
 	unsigned long flags;
-	unsigned long mask;
-	struct rcu_data *rdp;
 	struct rcu_node *rnp;
 
 	rcu_state.cbovld = rcu_state.cbovldnext;
 	rcu_state.cbovldnext = false;
 	rcu_for_each_leaf_node(rnp) {
+		unsigned long mask = 0;
+		unsigned long rsmask = 0;
+
 		cond_resched_tasks_rcu_qs();
-		mask = 0;
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		rcu_state.cbovldnext |= !!rnp->cbovldmask;
 		if (rnp->qsmask == 0) {
@@ -2297,11 +2302,17 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 			continue;
 		}
 		for_each_leaf_node_cpu_mask(rnp, cpu, rnp->qsmask) {
+			struct rcu_data *rdp;
+			int ret;
+
 			rdp = per_cpu_ptr(&rcu_data, cpu);
-			if (f(rdp)) {
+			ret = f(rdp);
+			if (ret > 0) {
 				mask |= rdp->grpmask;
 				rcu_disable_urgency_upon_qs(rdp);
 			}
+			if (ret < 0)
+				rsmask |= rdp->grpmask;
 		}
 		if (mask != 0) {
 			/* Idle/offline CPUs, report (releases rnp->lock). */
@@ -2310,6 +2321,9 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 			/* Nothing to do here, so just drop the lock. */
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 		}
+
+		for_each_leaf_node_cpu_mask(rnp, cpu, rsmask)
+			resched_cpu(cpu);
 	}
 }
 
@@ -4195,6 +4209,13 @@ static bool rcu_rdp_cpu_online(struct rcu_data *rdp)
 	return !!(rdp->grpmask & rcu_rnp_online_cpus(rdp->mynode));
 }
 
+bool rcu_cpu_online(int cpu)
+{
+	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
+
+	return rcu_rdp_cpu_online(rdp);
+}
+
 #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)
 
 /*
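Taken together, the tree.c hunks above change rcu_implicit_dynticks_qs() from kicking CPUs directly to reporting them: positive means a quiescent state was observed, negative means the CPU needs a forced resched, and force_qs_rnp() collects the negative cases in rsmask so that resched_cpu() runs only after the rcu_node lock has been dropped. A minimal userspace sketch of that collect-then-kick pattern, with a pthread mutex standing in for the rnp lock and invented inspect_cpu()/kick_cpu() helpers:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;

static int inspect_cpu(int cpu)
{
	if (cpu == 2)
		return 1;	/* quiescent state observed */
	if (cpu == 3)
		return -1;	/* still holding things up: kick it later */
	return 0;
}

static void kick_cpu(int cpu)
{
	printf("resched for CPU %d issued outside the lock\n", cpu);
}

int main(void)
{
	unsigned long qsmask = 0, rsmask = 0;
	int cpu;

	pthread_mutex_lock(&node_lock);
	for (cpu = 0; cpu < 4; cpu++) {
		int ret = inspect_cpu(cpu);

		if (ret > 0)
			qsmask |= 1UL << cpu;	/* report quiescent state */
		if (ret < 0)
			rsmask |= 1UL << cpu;	/* defer the resched */
	}
	pthread_mutex_unlock(&node_lock);

	/* Kicks happen only after the critical section ends. */
	for (cpu = 0; cpu < 4; cpu++)
		if (rsmask & (1UL << cpu))
			kick_cpu(cpu);
	printf("qsmask=%#lx\n", qsmask);
	return 0;
}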
diff --git a/kernel/resource.c b/kernel/resource.c
index b1763b2fd7ef..866ef3663a0b 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -56,33 +56,17 @@ struct resource_constraint {
 
 static DEFINE_RWLOCK(resource_lock);
 
-static struct resource *next_resource(struct resource *p)
+static struct resource *next_resource(struct resource *p, bool skip_children)
 {
-	if (p->child)
+	if (!skip_children && p->child)
 		return p->child;
 	while (!p->sibling && p->parent)
 		p = p->parent;
 	return p->sibling;
 }
 
-static struct resource *next_resource_skip_children(struct resource *p)
-{
-	while (!p->sibling && p->parent)
-		p = p->parent;
-	return p->sibling;
-}
-
 #define for_each_resource(_root, _p, _skip_children) \
-	for ((_p) = (_root)->child; (_p); \
-	     (_p) = (_skip_children) ? next_resource_skip_children(_p) : \
-				       next_resource(_p))
-
-static void *r_next(struct seq_file *m, void *v, loff_t *pos)
-{
-	struct resource *p = v;
-	(*pos)++;
-	return (void *)next_resource(p);
-}
+	for ((_p) = (_root)->child; (_p); (_p) = next_resource(_p, _skip_children))
 
 #ifdef CONFIG_PROC_FS
 
@@ -91,14 +75,28 @@ enum { MAX_IORES_LEVEL = 5 };
 static void *r_start(struct seq_file *m, loff_t *pos)
 	__acquires(resource_lock)
 {
-	struct resource *p = pde_data(file_inode(m->file));
-	loff_t l = 0;
+	struct resource *root = pde_data(file_inode(m->file));
+	struct resource *p;
+	loff_t l = *pos;
+
 	read_lock(&resource_lock);
-	for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
-		;
+	for_each_resource(root, p, false) {
+		if (l-- == 0)
+			break;
+	}
+
 	return p;
 }
 
+static void *r_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct resource *p = v;
+
+	(*pos)++;
+
+	return (void *)next_resource(p, false);
+}
+
 static void r_stop(struct seq_file *m, void *v)
 	__releases(resource_lock)
 {
@@ -336,7 +334,7 @@ static int find_next_iomem_res(resource_size_t start, resource_size_t end,
 
 	read_lock(&resource_lock);
 
-	for (p = iomem_resource.child; p; p = next_resource(p)) {
+	for_each_resource(&iomem_resource, p, false) {
 		/* If we passed the resource we are looking for, stop */
 		if (p->start > end) {
 			p = NULL;
@@ -1641,13 +1639,12 @@ __setup("reserve=", reserve_setup);
  */
 int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
 {
-	struct resource *p = &iomem_resource;
 	resource_size_t end = addr + size - 1;
+	struct resource *p;
 	int err = 0;
-	loff_t l;
 
 	read_lock(&resource_lock);
-	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
+	for_each_resource(&iomem_resource, p, false) {
 		/*
 		 * We can probably skip the resources without
 		 * IORESOURCE_IO attribute?
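For reference, the traversal that the reworked next_resource() and for_each_resource() perform above is a pre-order walk of a child/sibling/parent-linked tree, optionally skipping the children of the current node. A self-contained userspace sketch under that assumption; struct node mirrors only struct resource's linkage, and all names are illustrative:

#include <stdio.h>

struct node {
	const char *name;
	struct node *parent, *sibling, *child;
};

static struct node *next_node(struct node *p, int skip_children)
{
	if (!skip_children && p->child)
		return p->child;		/* descend first */
	while (!p->sibling && p->parent)
		p = p->parent;			/* climb until a sibling exists */
	return p->sibling;
}

#define for_each_node(root, p, skip) \
	for ((p) = (root)->child; (p); (p) = next_node((p), (skip)))

int main(void)
{
	struct node root = { "root" }, a = { "a" }, a1 = { "a1" }, b = { "b" };
	struct node *p;

	/* root -> a (child a1), a -> sibling b */
	root.child = &a;  a.parent = &root;  a.sibling = &b;
	a.child = &a1;    a1.parent = &a;    b.parent = &root;

	for_each_node(&root, p, 0)
		printf("%s\n", p->name);	/* prints: a a1 b */
	return 0;
}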