-rw-r--r--   arch/x86/events/amd/uncore.c        |  44
-rw-r--r--   arch/x86/include/asm/perf_event.h   |  15
-rw-r--r--   include/linux/min_heap.h            | 134
-rw-r--r--   include/linux/perf_event.h          |   7
-rw-r--r--   kernel/events/core.c                | 327
-rw-r--r--   lib/Kconfig.debug                   |  10
-rw-r--r--   lib/Makefile                        |   1
-rw-r--r--   lib/test_min_heap.c                 | 194
8 files changed, 627 insertions(+), 105 deletions(-)
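For orientation: the hunks below add a generic min-heap library (include/linux/min_heap.h) plus a boot/module-time self-test (lib/test_min_heap.c), use the heap in kernel/events/core.c to merge per-cgroup event iterators in visit_groups_merge(), and rework the AMD uncore driver's L3 ThreadMask/SliceMask handling for Family 19h. As a quick reference, here is a minimal usage sketch of the new min_heap API, modeled on lib/test_min_heap.c; the example() function and its integer values are illustrative only and are not part of the patch.

#include <linux/kernel.h>
#include <linux/min_heap.h>

static bool less_than(const void *lhs, const void *rhs)
{
	return *(const int *)lhs < *(const int *)rhs;
}

static void swap_ints(void *lhs, void *rhs)
{
	int tmp = *(int *)lhs;

	*(int *)lhs = *(int *)rhs;
	*(int *)rhs = tmp;
}

/* Illustrative only: heapify an int array and drain it in ascending order. */
static void example(void)
{
	int values[] = { 5, 2, 8, 1 };
	struct min_heap heap = {
		.data = values,
		.nr = ARRAY_SIZE(values),
		.size = ARRAY_SIZE(values),
	};
	const struct min_heap_callbacks funcs = {
		.elem_size = sizeof(int),
		.less = less_than,
		.swp = swap_ints,
	};

	min_heapify_all(&heap, &funcs);		/* O(n) heap construction */
	while (heap.nr > 0) {
		/* values[0] is the current minimum: 1, then 2, 5, 8 */
		min_heap_pop(&heap, &funcs);	/* O(log n) */
	}
}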
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index a6ea07f2aa84..46018e515fe2 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -180,6 +180,31 @@ static void amd_uncore_del(struct perf_event *event, int flags) hwc->idx = -1; } +/* + * Convert logical CPU number to L3 PMC Config ThreadMask format + */ +static u64 l3_thread_slice_mask(int cpu) +{ + u64 thread_mask, core = topology_core_id(cpu); + unsigned int shift, thread = 0; + + if (topology_smt_supported() && !topology_is_primary_thread(cpu)) + thread = 1; + + if (boot_cpu_data.x86 <= 0x18) { + shift = AMD64_L3_THREAD_SHIFT + 2 * (core % 4) + thread; + thread_mask = BIT_ULL(shift); + + return AMD64_L3_SLICE_MASK | thread_mask; + } + + core = (core << AMD64_L3_COREID_SHIFT) & AMD64_L3_COREID_MASK; + shift = AMD64_L3_THREAD_SHIFT + thread; + thread_mask = BIT_ULL(shift); + + return AMD64_L3_EN_ALL_SLICES | core | thread_mask; +} + static int amd_uncore_event_init(struct perf_event *event) { struct amd_uncore *uncore; @@ -206,18 +231,11 @@ static int amd_uncore_event_init(struct perf_event *event) return -EINVAL; /* - * SliceMask and ThreadMask need to be set for certain L3 events in - * Family 17h. For other events, the two fields do not affect the count. + * SliceMask and ThreadMask need to be set for certain L3 events. + * For other events, the two fields do not affect the count. */ - if (l3_mask && is_llc_event(event)) { - int thread = 2 * (cpu_data(event->cpu).cpu_core_id % 4); - - if (smp_num_siblings > 1) - thread += cpu_data(event->cpu).apicid & 1; - - hwc->config |= (1ULL << (AMD64_L3_THREAD_SHIFT + thread) & - AMD64_L3_THREAD_MASK) | AMD64_L3_SLICE_MASK; - } + if (l3_mask && is_llc_event(event)) + hwc->config |= l3_thread_slice_mask(event->cpu); uncore = event_to_amd_uncore(event); if (!uncore) @@ -523,9 +541,9 @@ static int __init amd_uncore_init(void) if (!boot_cpu_has(X86_FEATURE_TOPOEXT)) return -ENODEV; - if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) { + if (boot_cpu_data.x86 >= 0x17) { /* - * For F17h or F18h, the Northbridge counters are + * For F17h and above, the Northbridge counters are * repurposed as Data Fabric counters. Also, L3 * counters are supported too. The PMUs are exported * based on family as either L2 or L3 and NB or DF. 
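To make the new ThreadMask/SliceMask encoding concrete, here is a small standalone userspace sketch that recomputes what l3_thread_slice_mask() returns, using the AMD64_L3_* constants added in the arch/x86/include/asm/perf_event.h hunk that follows; the family, core and thread inputs in main() are made-up example values, not anything derived from this patch.

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)		(1ULL << (n))
#define AMD64_L3_SLICE_SHIFT	48
#define AMD64_L3_SLICE_MASK	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_THREAD_SHIFT	56
#define AMD64_L3_EN_ALL_SLICES	BIT_ULL(46)
#define AMD64_L3_COREID_SHIFT	42
#define AMD64_L3_COREID_MASK	(0x7ULL << AMD64_L3_COREID_SHIFT)

/*
 * Mirror of l3_thread_slice_mask(): family <= 0x18 uses per-core thread
 * bit pairs, family 19h+ uses an explicit CoreId field plus EnAllSlices.
 */
static uint64_t l3_mask(unsigned int family, uint64_t core, unsigned int thread)
{
	if (family <= 0x18)
		return AMD64_L3_SLICE_MASK |
		       BIT_ULL(AMD64_L3_THREAD_SHIFT + 2 * (core % 4) + thread);

	return AMD64_L3_EN_ALL_SLICES |
	       ((core << AMD64_L3_COREID_SHIFT) & AMD64_L3_COREID_MASK) |
	       BIT_ULL(AMD64_L3_THREAD_SHIFT + thread);
}

int main(void)
{
	/* Example: core 5, SMT sibling (thread 1) on Family 17h and 19h. */
	printf("F17h: 0x%016llx\n", (unsigned long long)l3_mask(0x17, 5, 1));
	printf("F19h: 0x%016llx\n", (unsigned long long)l3_mask(0x19, 5, 1));
	return 0;
}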
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 29964b0e1075..e855e9cf2c37 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -50,11 +50,22 @@ #define AMD64_L3_SLICE_SHIFT 48 #define AMD64_L3_SLICE_MASK \ - ((0xFULL) << AMD64_L3_SLICE_SHIFT) + (0xFULL << AMD64_L3_SLICE_SHIFT) +#define AMD64_L3_SLICEID_MASK \ + (0x7ULL << AMD64_L3_SLICE_SHIFT) #define AMD64_L3_THREAD_SHIFT 56 #define AMD64_L3_THREAD_MASK \ - ((0xFFULL) << AMD64_L3_THREAD_SHIFT) + (0xFFULL << AMD64_L3_THREAD_SHIFT) +#define AMD64_L3_F19H_THREAD_MASK \ + (0x3ULL << AMD64_L3_THREAD_SHIFT) + +#define AMD64_L3_EN_ALL_CORES BIT_ULL(47) +#define AMD64_L3_EN_ALL_SLICES BIT_ULL(46) + +#define AMD64_L3_COREID_SHIFT 42 +#define AMD64_L3_COREID_MASK \ + (0x7ULL << AMD64_L3_COREID_SHIFT) #define X86_RAW_EVENT_MASK \ (ARCH_PERFMON_EVENTSEL_EVENT | \ diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h new file mode 100644 index 000000000000..44077837385f --- /dev/null +++ b/include/linux/min_heap.h @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_MIN_HEAP_H +#define _LINUX_MIN_HEAP_H + +#include <linux/bug.h> +#include <linux/string.h> +#include <linux/types.h> + +/** + * struct min_heap - Data structure to hold a min-heap. + * @data: Start of array holding the heap elements. + * @nr: Number of elements currently in the heap. + * @size: Maximum number of elements that can be held in current storage. + */ +struct min_heap { + void *data; + int nr; + int size; +}; + +/** + * struct min_heap_callbacks - Data/functions to customise the min_heap. + * @elem_size: The nr of each element in bytes. + * @less: Partial order function for this heap. + * @swp: Swap elements function. + */ +struct min_heap_callbacks { + int elem_size; + bool (*less)(const void *lhs, const void *rhs); + void (*swp)(void *lhs, void *rhs); +}; + +/* Sift the element at pos down the heap. */ +static __always_inline +void min_heapify(struct min_heap *heap, int pos, + const struct min_heap_callbacks *func) +{ + void *left, *right, *parent, *smallest; + void *data = heap->data; + + for (;;) { + if (pos * 2 + 1 >= heap->nr) + break; + + left = data + ((pos * 2 + 1) * func->elem_size); + parent = data + (pos * func->elem_size); + smallest = parent; + if (func->less(left, smallest)) + smallest = left; + + if (pos * 2 + 2 < heap->nr) { + right = data + ((pos * 2 + 2) * func->elem_size); + if (func->less(right, smallest)) + smallest = right; + } + if (smallest == parent) + break; + func->swp(smallest, parent); + if (smallest == left) + pos = (pos * 2) + 1; + else + pos = (pos * 2) + 2; + } +} + +/* Floyd's approach to heapification that is O(nr). */ +static __always_inline +void min_heapify_all(struct min_heap *heap, + const struct min_heap_callbacks *func) +{ + int i; + + for (i = heap->nr / 2; i >= 0; i--) + min_heapify(heap, i, func); +} + +/* Remove minimum element from the heap, O(log2(nr)). */ +static __always_inline +void min_heap_pop(struct min_heap *heap, + const struct min_heap_callbacks *func) +{ + void *data = heap->data; + + if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap")) + return; + + /* Place last element at the root (position 0) and then sift down. */ + heap->nr--; + memcpy(data, data + (heap->nr * func->elem_size), func->elem_size); + min_heapify(heap, 0, func); +} + +/* + * Remove the minimum element and then push the given element. 
The + * implementation performs 1 sift (O(log2(nr))) and is therefore more + * efficient than a pop followed by a push that does 2. + */ +static __always_inline +void min_heap_pop_push(struct min_heap *heap, + const void *element, + const struct min_heap_callbacks *func) +{ + memcpy(heap->data, element, func->elem_size); + min_heapify(heap, 0, func); +} + +/* Push an element on to the heap, O(log2(nr)). */ +static __always_inline +void min_heap_push(struct min_heap *heap, const void *element, + const struct min_heap_callbacks *func) +{ + void *data = heap->data; + void *child, *parent; + int pos; + + if (WARN_ONCE(heap->nr >= heap->size, "Pushing on a full heap")) + return; + + /* Place at the end of data. */ + pos = heap->nr; + memcpy(data + (pos * func->elem_size), element, func->elem_size); + heap->nr++; + + /* Sift child at pos up. */ + for (; pos > 0; pos = (pos - 1) / 2) { + child = data + (pos * func->elem_size); + parent = data + ((pos - 1) / 2) * func->elem_size; + if (func->less(parent, child)) + break; + func->swp(parent, child); + } +} + +#endif /* _LINUX_MIN_HEAP_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 68e21e828893..8768a39b5258 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -862,6 +862,13 @@ struct perf_cpu_context { int sched_cb_usage; int online; + /* + * Per-CPU storage for iterators used in visit_groups_merge. The default + * storage is of size 2 to hold the CPU and any CPU event iterators. + */ + int heap_size; + struct perf_event **heap; + struct perf_event *heap_default[2]; }; struct perf_output_handle { diff --git a/kernel/events/core.c b/kernel/events/core.c index 3f1f77de7247..ccf8d4fc6374 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -49,6 +49,7 @@ #include <linux/sched/mm.h> #include <linux/proc_ns.h> #include <linux/mount.h> +#include <linux/min_heap.h> #include "internal.h" @@ -891,6 +892,47 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev, rcu_read_unlock(); } +static int perf_cgroup_ensure_storage(struct perf_event *event, + struct cgroup_subsys_state *css) +{ + struct perf_cpu_context *cpuctx; + struct perf_event **storage; + int cpu, heap_size, ret = 0; + + /* + * Allow storage to have sufficient space for an iterator for each + * possibly nested cgroup plus an iterator for events with no cgroup. 
+ */ + for (heap_size = 1; css; css = css->parent) + heap_size++; + + for_each_possible_cpu(cpu) { + cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu); + if (heap_size <= cpuctx->heap_size) + continue; + + storage = kmalloc_node(heap_size * sizeof(struct perf_event *), + GFP_KERNEL, cpu_to_node(cpu)); + if (!storage) { + ret = -ENOMEM; + break; + } + + raw_spin_lock_irq(&cpuctx->ctx.lock); + if (cpuctx->heap_size < heap_size) { + swap(cpuctx->heap, storage); + if (storage == cpuctx->heap_default) + storage = NULL; + cpuctx->heap_size = heap_size; + } + raw_spin_unlock_irq(&cpuctx->ctx.lock); + + kfree(storage); + } + + return ret; +} + static inline int perf_cgroup_connect(int fd, struct perf_event *event, struct perf_event_attr *attr, struct perf_event *group_leader) @@ -910,6 +952,10 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event, goto out; } + ret = perf_cgroup_ensure_storage(event, css); + if (ret) + goto out; + cgrp = container_of(css, struct perf_cgroup, css); event->cgrp = cgrp; @@ -1531,6 +1577,30 @@ perf_event_groups_less(struct perf_event *left, struct perf_event *right) if (left->cpu > right->cpu) return false; +#ifdef CONFIG_CGROUP_PERF + if (left->cgrp != right->cgrp) { + if (!left->cgrp || !left->cgrp->css.cgroup) { + /* + * Left has no cgroup but right does, no cgroups come + * first. + */ + return true; + } + if (!right->cgrp || !right->cgrp->css.cgroup) { + /* + * Right has no cgroup but left does, no cgroups come + * first. + */ + return false; + } + /* Two dissimilar cgroups, order by id. */ + if (left->cgrp->css.cgroup->kn->id < right->cgrp->css.cgroup->kn->id) + return true; + + return false; + } +#endif + if (left->group_index < right->group_index) return true; if (left->group_index > right->group_index) @@ -1610,25 +1680,48 @@ del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) } /* - * Get the leftmost event in the @cpu subtree. + * Get the leftmost event in the cpu/cgroup subtree. 
*/ static struct perf_event * -perf_event_groups_first(struct perf_event_groups *groups, int cpu) +perf_event_groups_first(struct perf_event_groups *groups, int cpu, + struct cgroup *cgrp) { struct perf_event *node_event = NULL, *match = NULL; struct rb_node *node = groups->tree.rb_node; +#ifdef CONFIG_CGROUP_PERF + u64 node_cgrp_id, cgrp_id = 0; + + if (cgrp) + cgrp_id = cgrp->kn->id; +#endif while (node) { node_event = container_of(node, struct perf_event, group_node); if (cpu < node_event->cpu) { node = node->rb_left; - } else if (cpu > node_event->cpu) { + continue; + } + if (cpu > node_event->cpu) { node = node->rb_right; - } else { - match = node_event; + continue; + } +#ifdef CONFIG_CGROUP_PERF + node_cgrp_id = 0; + if (node_event->cgrp && node_event->cgrp->css.cgroup) + node_cgrp_id = node_event->cgrp->css.cgroup->kn->id; + + if (cgrp_id < node_cgrp_id) { node = node->rb_left; + continue; + } + if (cgrp_id > node_cgrp_id) { + node = node->rb_right; + continue; } +#endif + match = node_event; + node = node->rb_left; } return match; @@ -1641,12 +1734,26 @@ static struct perf_event * perf_event_groups_next(struct perf_event *event) { struct perf_event *next; +#ifdef CONFIG_CGROUP_PERF + u64 curr_cgrp_id = 0; + u64 next_cgrp_id = 0; +#endif next = rb_entry_safe(rb_next(&event->group_node), typeof(*event), group_node); - if (next && next->cpu == event->cpu) - return next; + if (next == NULL || next->cpu != event->cpu) + return NULL; - return NULL; +#ifdef CONFIG_CGROUP_PERF + if (event->cgrp && event->cgrp->css.cgroup) + curr_cgrp_id = event->cgrp->css.cgroup->kn->id; + + if (next->cgrp && next->cgrp->css.cgroup) + next_cgrp_id = next->cgrp->css.cgroup->kn->id; + + if (curr_cgrp_id != next_cgrp_id) + return NULL; +#endif + return next; } /* @@ -1986,6 +2093,12 @@ static int perf_get_aux_event(struct perf_event *event, return 1; } +static inline struct list_head *get_event_list(struct perf_event *event) +{ + struct perf_event_context *ctx = event->ctx; + return event->attr.pinned ? &ctx->pinned_active : &ctx->flexible_active; +} + static void perf_group_detach(struct perf_event *event) { struct perf_event *sibling, *tmp; @@ -2028,12 +2141,8 @@ static void perf_group_detach(struct perf_event *event) if (!RB_EMPTY_NODE(&event->group_node)) { add_event_to_groups(sibling, event->ctx); - if (sibling->state == PERF_EVENT_STATE_ACTIVE) { - struct list_head *list = sibling->attr.pinned ? 
- &ctx->pinned_active : &ctx->flexible_active; - - list_add_tail(&sibling->active_list, list); - } + if (sibling->state == PERF_EVENT_STATE_ACTIVE) + list_add_tail(&sibling->active_list, get_event_list(sibling)); } WARN_ON_ONCE(sibling->ctx != event->ctx); @@ -2350,6 +2459,8 @@ event_sched_in(struct perf_event *event, { int ret = 0; + WARN_ON_ONCE(event->ctx != ctx); + lockdep_assert_held(&ctx->lock); if (event->state <= PERF_EVENT_STATE_OFF) @@ -3388,71 +3499,103 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx, ctx_sched_out(&cpuctx->ctx, cpuctx, event_type); } -static int visit_groups_merge(struct perf_event_groups *groups, int cpu, - int (*func)(struct perf_event *, void *), void *data) +static bool perf_less_group_idx(const void *l, const void *r) { - struct perf_event **evt, *evt1, *evt2; - int ret; - - evt1 = perf_event_groups_first(groups, -1); - evt2 = perf_event_groups_first(groups, cpu); + const struct perf_event *le = l, *re = r; - while (evt1 || evt2) { - if (evt1 && evt2) { - if (evt1->group_index < evt2->group_index) - evt = &evt1; - else - evt = &evt2; - } else if (evt1) { - evt = &evt1; - } else { - evt = &evt2; - } - - ret = func(*evt, data); - if (ret) - return ret; + return le->group_index < re->group_index; +} - *evt = perf_event_groups_next(*evt); - } +static void swap_ptr(void *l, void *r) +{ + void **lp = l, **rp = r; - return 0; + swap(*lp, *rp); } -struct sched_in_data { - struct perf_event_context *ctx; - struct perf_cpu_context *cpuctx; - int can_add_hw; +static const struct min_heap_callbacks perf_min_heap = { + .elem_size = sizeof(struct perf_event *), + .less = perf_less_group_idx, + .swp = swap_ptr, }; -static int pinned_sched_in(struct perf_event *event, void *data) +static void __heap_add(struct min_heap *heap, struct perf_event *event) { - struct sched_in_data *sid = data; + struct perf_event **itrs = heap->data; - if (event->state <= PERF_EVENT_STATE_OFF) - return 0; + if (event) { + itrs[heap->nr] = event; + heap->nr++; + } +} - if (!event_filter_match(event)) - return 0; +static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx, + struct perf_event_groups *groups, int cpu, + int (*func)(struct perf_event *, void *), + void *data) +{ +#ifdef CONFIG_CGROUP_PERF + struct cgroup_subsys_state *css = NULL; +#endif + /* Space for per CPU and/or any CPU event iterators. */ + struct perf_event *itrs[2]; + struct min_heap event_heap; + struct perf_event **evt; + int ret; - if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) { - if (!group_sched_in(event, sid->cpuctx, sid->ctx)) - list_add_tail(&event->active_list, &sid->ctx->pinned_active); + if (cpuctx) { + event_heap = (struct min_heap){ + .data = cpuctx->heap, + .nr = 0, + .size = cpuctx->heap_size, + }; + + lockdep_assert_held(&cpuctx->ctx.lock); + +#ifdef CONFIG_CGROUP_PERF + if (cpuctx->cgrp) + css = &cpuctx->cgrp->css; +#endif + } else { + event_heap = (struct min_heap){ + .data = itrs, + .nr = 0, + .size = ARRAY_SIZE(itrs), + }; + /* Events not within a CPU context may be on any CPU. */ + __heap_add(&event_heap, perf_event_groups_first(groups, -1, NULL)); } + evt = event_heap.data; - /* - * If this pinned group hasn't been scheduled, - * put it in error state. 
- */ - if (event->state == PERF_EVENT_STATE_INACTIVE) - perf_event_set_state(event, PERF_EVENT_STATE_ERROR); + __heap_add(&event_heap, perf_event_groups_first(groups, cpu, NULL)); + +#ifdef CONFIG_CGROUP_PERF + for (; css; css = css->parent) + __heap_add(&event_heap, perf_event_groups_first(groups, cpu, css->cgroup)); +#endif + + min_heapify_all(&event_heap, &perf_min_heap); + + while (event_heap.nr) { + ret = func(*evt, data); + if (ret) + return ret; + + *evt = perf_event_groups_next(*evt); + if (*evt) + min_heapify(&event_heap, 0, &perf_min_heap); + else + min_heap_pop(&event_heap, &perf_min_heap); + } return 0; } -static int flexible_sched_in(struct perf_event *event, void *data) +static int merge_sched_in(struct perf_event *event, void *data) { - struct sched_in_data *sid = data; + struct perf_event_context *ctx = event->ctx; + struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); + int *can_add_hw = data; if (event->state <= PERF_EVENT_STATE_OFF) return 0; @@ -3460,14 +3603,17 @@ static int flexible_sched_in(struct perf_event *event, void *data) if (!event_filter_match(event)) return 0; - if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) { - int ret = group_sched_in(event, sid->cpuctx, sid->ctx); - if (ret) { - sid->can_add_hw = 0; - sid->ctx->rotate_necessary = 1; - return 0; - } - list_add_tail(&event->active_list, &sid->ctx->flexible_active); + if (group_can_go_on(event, cpuctx, *can_add_hw)) { + if (!group_sched_in(event, cpuctx, ctx)) + list_add_tail(&event->active_list, get_event_list(event)); + } + + if (event->state == PERF_EVENT_STATE_INACTIVE) { + if (event->attr.pinned) + perf_event_set_state(event, PERF_EVENT_STATE_ERROR); + + *can_add_hw = 0; + ctx->rotate_necessary = 1; } return 0; @@ -3477,30 +3623,28 @@ static void ctx_pinned_sched_in(struct perf_event_context *ctx, struct perf_cpu_context *cpuctx) { - struct sched_in_data sid = { - .ctx = ctx, - .cpuctx = cpuctx, - .can_add_hw = 1, - }; + int can_add_hw = 1; + + if (ctx != &cpuctx->ctx) + cpuctx = NULL; - visit_groups_merge(&ctx->pinned_groups, + visit_groups_merge(cpuctx, &ctx->pinned_groups, smp_processor_id(), - pinned_sched_in, &sid); + merge_sched_in, &can_add_hw); } static void ctx_flexible_sched_in(struct perf_event_context *ctx, struct perf_cpu_context *cpuctx) { - struct sched_in_data sid = { - .ctx = ctx, - .cpuctx = cpuctx, - .can_add_hw = 1, - }; + int can_add_hw = 1; + + if (ctx != &cpuctx->ctx) + cpuctx = NULL; - visit_groups_merge(&ctx->flexible_groups, + visit_groups_merge(cpuctx, &ctx->flexible_groups, smp_processor_id(), - flexible_sched_in, &sid); + merge_sched_in, &can_add_hw); } static void @@ -10359,6 +10503,9 @@ skip_type: cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask); __perf_mux_hrtimer_init(cpuctx, cpu); + + cpuctx->heap_size = ARRAY_SIZE(cpuctx->heap_default); + cpuctx->heap = cpuctx->heap_default; } got_cpu_context: @@ -10804,12 +10951,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, if (!has_branch_stack(event)) event->attr.branch_sample_type = 0; - if (cgroup_fd != -1) { - err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); - if (err) - goto err_ns; - } - pmu = perf_init_event(event); if (IS_ERR(pmu)) { err = PTR_ERR(pmu); @@ -10831,6 +10972,12 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, goto err_pmu; } + if (cgroup_fd != -1) { + err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); + if (err) + goto err_pmu; + } + err = exclusive_event_init(event); if (err) goto err_pmu; @@ -10891,12 +11038,12 @@ err_per_task: 
exclusive_event_destroy(event); err_pmu: + if (is_cgroup_event(event)) + perf_detach_cgroup(event); if (event->destroy) event->destroy(event); module_put(pmu->module); err_ns: - if (is_cgroup_event(event)) - perf_detach_cgroup(event); if (event->ns) put_pid_ns(event->ns); if (event->hw.target) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 69def4a9df00..f04b61c1a1cc 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1769,6 +1769,16 @@ config TEST_LIST_SORT If unsure, say N. +config TEST_MIN_HEAP + tristate "Min heap test" + depends on DEBUG_KERNEL || m + help + Enable this to turn on min heap function tests. This test is + executed only once during system boot (so affects only boot time), + or at module load time. + + If unsure, say N. + config TEST_SORT tristate "Array-based sort test" depends on DEBUG_KERNEL || m diff --git a/lib/Makefile b/lib/Makefile index 611872c06926..09a8acb0cf92 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -67,6 +67,7 @@ CFLAGS_test_ubsan.o += $(call cc-disable-warning, vla) UBSAN_SANITIZE_test_ubsan.o := y obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o obj-$(CONFIG_TEST_LIST_SORT) += test_list_sort.o +obj-$(CONFIG_TEST_MIN_HEAP) += test_min_heap.o obj-$(CONFIG_TEST_LKM) += test_module.o obj-$(CONFIG_TEST_VMALLOC) += test_vmalloc.o obj-$(CONFIG_TEST_OVERFLOW) += test_overflow.o diff --git a/lib/test_min_heap.c b/lib/test_min_heap.c new file mode 100644 index 000000000000..d19c8080fd4d --- /dev/null +++ b/lib/test_min_heap.c @@ -0,0 +1,194 @@ +// SPDX-License-Identifier: GPL-2.0-only +#define pr_fmt(fmt) "min_heap_test: " fmt + +/* + * Test cases for the min max heap. + */ + +#include <linux/log2.h> +#include <linux/min_heap.h> +#include <linux/module.h> +#include <linux/printk.h> +#include <linux/random.h> + +static __init bool less_than(const void *lhs, const void *rhs) +{ + return *(int *)lhs < *(int *)rhs; +} + +static __init bool greater_than(const void *lhs, const void *rhs) +{ + return *(int *)lhs > *(int *)rhs; +} + +static __init void swap_ints(void *lhs, void *rhs) +{ + int temp = *(int *)lhs; + + *(int *)lhs = *(int *)rhs; + *(int *)rhs = temp; +} + +static __init int pop_verify_heap(bool min_heap, + struct min_heap *heap, + const struct min_heap_callbacks *funcs) +{ + int *values = heap->data; + int err = 0; + int last; + + last = values[0]; + min_heap_pop(heap, funcs); + while (heap->nr > 0) { + if (min_heap) { + if (last > values[0]) { + pr_err("error: expected %d <= %d\n", last, + values[0]); + err++; + } + } else { + if (last < values[0]) { + pr_err("error: expected %d >= %d\n", last, + values[0]); + err++; + } + } + last = values[0]; + min_heap_pop(heap, funcs); + } + return err; +} + +static __init int test_heapify_all(bool min_heap) +{ + int values[] = { 3, 1, 2, 4, 0x8000000, 0x7FFFFFF, 0, + -3, -1, -2, -4, 0x8000000, 0x7FFFFFF }; + struct min_heap heap = { + .data = values, + .nr = ARRAY_SIZE(values), + .size = ARRAY_SIZE(values), + }; + struct min_heap_callbacks funcs = { + .elem_size = sizeof(int), + .less = min_heap ? less_than : greater_than, + .swp = swap_ints, + }; + int i, err; + + /* Test with known set of values. */ + min_heapify_all(&heap, &funcs); + err = pop_verify_heap(min_heap, &heap, &funcs); + + + /* Test with randomly generated values. 
*/ + heap.nr = ARRAY_SIZE(values); + for (i = 0; i < heap.nr; i++) + values[i] = get_random_int(); + + min_heapify_all(&heap, &funcs); + err += pop_verify_heap(min_heap, &heap, &funcs); + + return err; +} + +static __init int test_heap_push(bool min_heap) +{ + const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0, + -3, -1, -2, -4, 0x80000000, 0x7FFFFFFF }; + int values[ARRAY_SIZE(data)]; + struct min_heap heap = { + .data = values, + .nr = 0, + .size = ARRAY_SIZE(values), + }; + struct min_heap_callbacks funcs = { + .elem_size = sizeof(int), + .less = min_heap ? less_than : greater_than, + .swp = swap_ints, + }; + int i, temp, err; + + /* Test with known set of values copied from data. */ + for (i = 0; i < ARRAY_SIZE(data); i++) + min_heap_push(&heap, &data[i], &funcs); + + err = pop_verify_heap(min_heap, &heap, &funcs); + + /* Test with randomly generated values. */ + while (heap.nr < heap.size) { + temp = get_random_int(); + min_heap_push(&heap, &temp, &funcs); + } + err += pop_verify_heap(min_heap, &heap, &funcs); + + return err; +} + +static __init int test_heap_pop_push(bool min_heap) +{ + const int data[] = { 3, 1, 2, 4, 0x80000000, 0x7FFFFFFF, 0, + -3, -1, -2, -4, 0x80000000, 0x7FFFFFFF }; + int values[ARRAY_SIZE(data)]; + struct min_heap heap = { + .data = values, + .nr = 0, + .size = ARRAY_SIZE(values), + }; + struct min_heap_callbacks funcs = { + .elem_size = sizeof(int), + .less = min_heap ? less_than : greater_than, + .swp = swap_ints, + }; + int i, temp, err; + + /* Fill values with data to pop and replace. */ + temp = min_heap ? 0x80000000 : 0x7FFFFFFF; + for (i = 0; i < ARRAY_SIZE(data); i++) + min_heap_push(&heap, &temp, &funcs); + + /* Test with known set of values copied from data. */ + for (i = 0; i < ARRAY_SIZE(data); i++) + min_heap_pop_push(&heap, &data[i], &funcs); + + err = pop_verify_heap(min_heap, &heap, &funcs); + + heap.nr = 0; + for (i = 0; i < ARRAY_SIZE(data); i++) + min_heap_push(&heap, &temp, &funcs); + + /* Test with randomly generated values. */ + for (i = 0; i < ARRAY_SIZE(data); i++) { + temp = get_random_int(); + min_heap_pop_push(&heap, &temp, &funcs); + } + err += pop_verify_heap(min_heap, &heap, &funcs); + + return err; +} + +static int __init test_min_heap_init(void) +{ + int err = 0; + + err += test_heapify_all(true); + err += test_heapify_all(false); + err += test_heap_push(true); + err += test_heap_push(false); + err += test_heap_pop_push(true); + err += test_heap_pop_push(false); + if (err) { + pr_err("test failed with %d errors\n", err); + return -EINVAL; + } + pr_info("test passed\n"); + return 0; +} +module_init(test_min_heap_init); + +static void __exit test_min_heap_exit(void) +{ + /* do nothing */ +} +module_exit(test_min_heap_exit); + +MODULE_LICENSE("GPL"); |
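Closing note: the heap-of-iterators pattern that visit_groups_merge() relies on above generalizes to any k-way merge. Below is a hedged sketch of that pattern over plain sorted int arrays, written against the new min_heap API; struct iter, merge_visit() and the visit callback are hypothetical helpers for illustration, not part of this commit, and callers are assumed to pass only non-empty iterators (pos < end).

#include <linux/min_heap.h>
#include <linux/types.h>

struct iter {
	const int *pos;	/* next element to visit */
	const int *end;	/* one past the last element */
};

static bool iter_less(const void *l, const void *r)
{
	const struct iter *li = l, *ri = r;

	return *li->pos < *ri->pos;
}

static void iter_swap(void *l, void *r)
{
	struct iter tmp = *(struct iter *)l;

	*(struct iter *)l = *(struct iter *)r;
	*(struct iter *)r = tmp;
}

static const struct min_heap_callbacks iter_heap_cb = {
	.elem_size	= sizeof(struct iter),
	.less		= iter_less,
	.swp		= iter_swap,
};

/* Visit the union of several sorted arrays in ascending order. */
static void merge_visit(struct iter *iters, int nr_iters, void (*visit)(int))
{
	struct min_heap heap = {
		.data = iters,
		.nr = nr_iters,
		.size = nr_iters,
	};
	struct iter *top;

	min_heapify_all(&heap, &iter_heap_cb);

	while (heap.nr) {
		top = heap.data;	/* iterator with the smallest head */
		visit(*top->pos);

		if (++top->pos < top->end)
			min_heapify(&heap, 0, &iter_heap_cb);	/* advance */
		else
			min_heap_pop(&heap, &iter_heap_cb);	/* exhausted */
	}
}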