Diffstat (limited to 'mm')
 mm/kasan/hw_tags.c |  8 ++------
 mm/memcontrol.c    |  5 ++---
 mm/mremap.c        |  5 +++--
 mm/slub.c          | 18 ++++++++++++++++--
 4 files changed, 23 insertions(+), 13 deletions(-)
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index e529428e7a11..d558799b25b3 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -134,12 +134,8 @@ void __init kasan_init_hw_tags(void)
switch (kasan_arg_stacktrace) {
case KASAN_ARG_STACKTRACE_DEFAULT:
- /*
- * Default to enabling stack trace collection for
- * debug kernels.
- */
- if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
- static_branch_enable(&kasan_flag_stacktrace);
+ /* Default to enabling stack trace collection. */
+ static_branch_enable(&kasan_flag_stacktrace);
break;
case KASAN_ARG_STACKTRACE_OFF:
/* Do nothing, kasan_flag_stacktrace keeps its default value. */
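For context, kasan_flag_stacktrace is a static key, so the default chosen
here decides whether every later allocation pays for stack trace
collection. A minimal sketch of the pattern, assuming the key is defined
with DEFINE_STATIC_KEY_FALSE as elsewhere in mm/kasan (save_alloc_stack()
below is a hypothetical helper, not the kernel's):

#include <linux/jump_label.h>

/* Default-off; flipped on by static_branch_enable() during init. */
DEFINE_STATIC_KEY_FALSE(kasan_flag_stacktrace);

static void save_alloc_stack(void)
{
        /* Patched jump/no-op, so the disabled case is nearly free. */
        if (!static_branch_unlikely(&kasan_flag_stacktrace))
                return;
        /* ... collect and store the stack trace ... */
}

Because the check is a patched branch rather than a memory load, enabling
collection by default (not just on CONFIG_DEBUG_KERNEL builds) costs
nothing on kernels where it is later turned off via the boot parameter.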
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e2de77b5bcc2..913c2b9e5c72 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -6271,6 +6271,8 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
if (err)
return err;
+ page_counter_set_high(&memcg->memory, high);
+
for (;;) {
unsigned long nr_pages = page_counter_read(&memcg->memory);
unsigned long reclaimed;
@@ -6294,10 +6296,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of,
break;
}
- page_counter_set_high(&memcg->memory, high);
-
memcg_wb_domain_size_changed(memcg);
-
return nbytes;
}
diff --git a/mm/mremap.c b/mm/mremap.c
index f554320281cc..aa63bfd3cad2 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -336,8 +336,9 @@ enum pgt_entry {
* valid. Else returns a smaller extent bounded by the end of the source and
* destination pgt_entry.
*/
-static unsigned long get_extent(enum pgt_entry entry, unsigned long old_addr,
- unsigned long old_end, unsigned long new_addr)
+static __always_inline unsigned long get_extent(enum pgt_entry entry,
+ unsigned long old_addr, unsigned long old_end,
+ unsigned long new_addr)
{
unsigned long next, extent, mask, size;
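get_extent() is dispatched on a compile-time-constant pgt_entry at every
call site, so forcing inlining lets the compiler fold the switch on
'entry' and discard the dead branches. A reduced, hypothetical
illustration of the effect (step_size() is not a real kernel function;
NORMAL_PMD and NORMAL_PUD are the real enum pgt_entry values):

static __always_inline unsigned long step_size(enum pgt_entry entry)
{
        switch (entry) {
        case NORMAL_PUD:
                return PUD_SIZE;
        case NORMAL_PMD:
                return PMD_SIZE;
        default:
                return PAGE_SIZE;
        }
}

/*
 * With the constant propagated into the inlined body, this compiles
 * down to a plain use of PMD_SIZE, no branch at all.
 */
unsigned long step = step_size(NORMAL_PMD);

Plain 'static inline' is only a hint; __always_inline guarantees the
constant folding happens even when the compiler would otherwise decide
the function is too large to inline.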
diff --git a/mm/slub.c b/mm/slub.c
index 7ecbbbe5bc0c..b22a4b101c84 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3423,6 +3423,7 @@ static inline int calculate_order(unsigned int size)
unsigned int order;
unsigned int min_objects;
unsigned int max_objects;
+ unsigned int nr_cpus;
/*
* Attempt to find best configuration for a slab. This
@@ -3433,8 +3434,21 @@ static inline int calculate_order(unsigned int size)
* we reduce the minimum objects required in a slab.
*/
min_objects = slub_min_objects;
- if (!min_objects)
- min_objects = 4 * (fls(num_online_cpus()) + 1);
+ if (!min_objects) {
+ /*
+ * Some architectures will only update present cpus when
+ * onlining them, so don't trust the number if it's just 1. But
+ * we also don't want to use nr_cpu_ids always, as on some other
+ * architectures, there can be many possible cpus, but never
+ * onlined. Here we compromise between trying to avoid too high
+ * order on systems that appear larger than they are, and too
+ * low order on systems that appear smaller than they are.
+ */
+ nr_cpus = num_present_cpus();
+ if (nr_cpus <= 1)
+ nr_cpus = nr_cpu_ids;
+ min_objects = 4 * (fls(nr_cpus) + 1);
+ }
max_objects = order_objects(slub_max_order, size);
min_objects = min(min_objects, max_objects);
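To see the shape of the fallback heuristic, here is a standalone
userspace sketch (not kernel code; fls() is rebuilt from __builtin_clz()
since userspace lacks the kernel helper):

#include <stdio.h>

/* Userspace stand-in for the kernel's fls(): highest set bit, 1-based. */
static unsigned int fls_u32(unsigned int x)
{
        return x ? 32 - (unsigned int)__builtin_clz(x) : 0;
}

int main(void)
{
        /* Mirrors min_objects = 4 * (fls(nr_cpus) + 1) from calculate_order(). */
        for (unsigned int nr_cpus = 1; nr_cpus <= 256; nr_cpus *= 4)
                printf("%3u cpus -> min_objects = %u\n",
                       nr_cpus, 4 * (fls_u32(nr_cpus) + 1));
        return 0;
}

This prints 8 objects for 1 cpu, 16 for 4, 24 for 16, 32 for 64, and 40
for 256: growth is logarithmic in cpu count, which is why falling back to
nr_cpu_ids on architectures that report a single present cpu inflates the
slab order only modestly even when most possible cpus never come online.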