Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/crash_core.c | 1
-rw-r--r-- | kernel/events/core.c | 14
-rw-r--r-- | kernel/irq/affinity.c | 162
-rw-r--r-- | kernel/kexec_file.c | 619
-rw-r--r-- | kernel/livepatch/shadow.c | 108
-rw-r--r-- | kernel/resource.c | 3
-rw-r--r-- | kernel/sched/core.c | 2
-rw-r--r-- | kernel/sched/cpufreq_schedutil.c | 3
-rw-r--r-- | kernel/sched/deadline.c | 2
-rw-r--r-- | kernel/sched/fair.c | 2
-rw-r--r-- | kernel/sched/rt.c | 4
-rw-r--r-- | kernel/sched/sched.h | 17
-rw-r--r-- | kernel/sys_ni.c | 10
-rw-r--r-- | kernel/time/posix-stubs.c | 10
-rw-r--r-- | kernel/trace/trace_event_perf.c | 4
-rw-r--r-- | kernel/trace/trace_events_filter.c | 26
-rw-r--r-- | kernel/trace/trace_uprobe.c | 25
17 files changed, 666 insertions, 346 deletions
diff --git a/kernel/crash_core.c b/kernel/crash_core.c index a93590cdd9e1..f7674d676889 100644 --- a/kernel/crash_core.c +++ b/kernel/crash_core.c @@ -454,6 +454,7 @@ static int __init crash_save_vmcoreinfo_init(void) VMCOREINFO_NUMBER(PG_lru); VMCOREINFO_NUMBER(PG_private); VMCOREINFO_NUMBER(PG_swapcache); + VMCOREINFO_NUMBER(PG_swapbacked); VMCOREINFO_NUMBER(PG_slab); #ifdef CONFIG_MEMORY_FAILURE VMCOREINFO_NUMBER(PG_hwpoison); diff --git a/kernel/events/core.c b/kernel/events/core.c index fc1c330c6bd6..2d5fe26551f8 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4447,6 +4447,9 @@ static void _free_event(struct perf_event *event) if (event->ctx) put_ctx(event->ctx); + if (event->hw.target) + put_task_struct(event->hw.target); + exclusive_event_destroy(event); module_put(event->pmu->module); @@ -8397,6 +8400,10 @@ static int perf_kprobe_event_init(struct perf_event *event) if (event->attr.type != perf_kprobe.type) return -ENOENT; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + /* * no branch sampling for probe events */ @@ -8434,6 +8441,10 @@ static int perf_uprobe_event_init(struct perf_event *event) if (event->attr.type != perf_uprobe.type) return -ENOENT; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + /* * no branch sampling for probe events */ @@ -9955,6 +9966,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, * and we cannot use the ctx information because we need the * pmu before we get a ctx. */ + get_task_struct(task); event->hw.target = task; } @@ -10070,6 +10082,8 @@ err_ns: perf_detach_cgroup(event); if (event->ns) put_pid_ns(event->ns); + if (event->hw.target) + put_task_struct(event->hw.target); kfree(event); return ERR_PTR(err); diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c index a37a3b4b6342..f4f29b9d90ee 100644 --- a/kernel/irq/affinity.c +++ b/kernel/irq/affinity.c @@ -39,7 +39,7 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk, } } -static cpumask_var_t *alloc_node_to_possible_cpumask(void) +static cpumask_var_t *alloc_node_to_cpumask(void) { cpumask_var_t *masks; int node; @@ -62,7 +62,7 @@ out_unwind: return NULL; } -static void free_node_to_possible_cpumask(cpumask_var_t *masks) +static void free_node_to_cpumask(cpumask_var_t *masks) { int node; @@ -71,7 +71,7 @@ static void free_node_to_possible_cpumask(cpumask_var_t *masks) kfree(masks); } -static void build_node_to_possible_cpumask(cpumask_var_t *masks) +static void build_node_to_cpumask(cpumask_var_t *masks) { int cpu; @@ -79,14 +79,14 @@ static void build_node_to_possible_cpumask(cpumask_var_t *masks) cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]); } -static int get_nodes_in_cpumask(cpumask_var_t *node_to_possible_cpumask, +static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask, const struct cpumask *mask, nodemask_t *nodemsk) { int n, nodes = 0; /* Calculate the number of nodes in the supplied affinity mask */ for_each_node(n) { - if (cpumask_intersects(mask, node_to_possible_cpumask[n])) { + if (cpumask_intersects(mask, node_to_cpumask[n])) { node_set(n, *nodemsk); nodes++; } @@ -94,73 +94,46 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_possible_cpumask, return nodes; } -/** - * irq_create_affinity_masks - Create affinity masks for multiqueue spreading - * @nvecs: The total number of vectors - * @affd: Description of the affinity requirements - * - * Returns the masks pointer or NULL if allocation failed. 
- */ -struct cpumask * -irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) +static int irq_build_affinity_masks(const struct irq_affinity *affd, + int startvec, int numvecs, + cpumask_var_t *node_to_cpumask, + const struct cpumask *cpu_mask, + struct cpumask *nmsk, + struct cpumask *masks) { - int n, nodes, cpus_per_vec, extra_vecs, curvec; - int affv = nvecs - affd->pre_vectors - affd->post_vectors; - int last_affv = affv + affd->pre_vectors; + int n, nodes, cpus_per_vec, extra_vecs, done = 0; + int last_affv = affd->pre_vectors + numvecs; + int curvec = startvec; nodemask_t nodemsk = NODE_MASK_NONE; - struct cpumask *masks; - cpumask_var_t nmsk, *node_to_possible_cpumask; - - /* - * If there aren't any vectors left after applying the pre/post - * vectors don't bother with assigning affinity. - */ - if (!affv) - return NULL; - - if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL)) - return NULL; - - masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL); - if (!masks) - goto out; - node_to_possible_cpumask = alloc_node_to_possible_cpumask(); - if (!node_to_possible_cpumask) - goto out; + if (!cpumask_weight(cpu_mask)) + return 0; - /* Fill out vectors at the beginning that don't need affinity */ - for (curvec = 0; curvec < affd->pre_vectors; curvec++) - cpumask_copy(masks + curvec, irq_default_affinity); - - /* Stabilize the cpumasks */ - get_online_cpus(); - build_node_to_possible_cpumask(node_to_possible_cpumask); - nodes = get_nodes_in_cpumask(node_to_possible_cpumask, cpu_possible_mask, - &nodemsk); + nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk); /* * If the number of nodes in the mask is greater than or equal the * number of vectors we just spread the vectors across the nodes. */ - if (affv <= nodes) { + if (numvecs <= nodes) { for_each_node_mask(n, nodemsk) { - cpumask_copy(masks + curvec, - node_to_possible_cpumask[n]); - if (++curvec == last_affv) + cpumask_copy(masks + curvec, node_to_cpumask[n]); + if (++done == numvecs) break; + if (++curvec == last_affv) + curvec = affd->pre_vectors; } - goto done; + goto out; } for_each_node_mask(n, nodemsk) { int ncpus, v, vecs_to_assign, vecs_per_node; /* Spread the vectors per node */ - vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes; + vecs_per_node = (numvecs - (curvec - affd->pre_vectors)) / nodes; /* Get the cpus on this node which are in the mask */ - cpumask_and(nmsk, cpu_possible_mask, node_to_possible_cpumask[n]); + cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]); /* Calculate the number of cpus per vector */ ncpus = cpumask_weight(nmsk); @@ -181,19 +154,96 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec); } - if (curvec >= last_affv) + done += v; + if (done >= numvecs) break; + if (curvec >= last_affv) + curvec = affd->pre_vectors; --nodes; } -done: +out: + return done; +} + +/** + * irq_create_affinity_masks - Create affinity masks for multiqueue spreading + * @nvecs: The total number of vectors + * @affd: Description of the affinity requirements + * + * Returns the masks pointer or NULL if allocation failed. + */ +struct cpumask * +irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) +{ + int affvecs = nvecs - affd->pre_vectors - affd->post_vectors; + int curvec, usedvecs; + cpumask_var_t nmsk, npresmsk, *node_to_cpumask; + struct cpumask *masks = NULL; + + /* + * If there aren't any vectors left after applying the pre/post + * vectors don't bother with assigning affinity. 
+ */ + if (nvecs == affd->pre_vectors + affd->post_vectors) + return NULL; + + if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL)) + return NULL; + + if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL)) + goto outcpumsk; + + node_to_cpumask = alloc_node_to_cpumask(); + if (!node_to_cpumask) + goto outnpresmsk; + + masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL); + if (!masks) + goto outnodemsk; + + /* Fill out vectors at the beginning that don't need affinity */ + for (curvec = 0; curvec < affd->pre_vectors; curvec++) + cpumask_copy(masks + curvec, irq_default_affinity); + + /* Stabilize the cpumasks */ + get_online_cpus(); + build_node_to_cpumask(node_to_cpumask); + + /* Spread on present CPUs starting from affd->pre_vectors */ + usedvecs = irq_build_affinity_masks(affd, curvec, affvecs, + node_to_cpumask, cpu_present_mask, + nmsk, masks); + + /* + * Spread on non present CPUs starting from the next vector to be + * handled. If the spreading of present CPUs already exhausted the + * vector space, assign the non present CPUs to the already spread + * out vectors. + */ + if (usedvecs >= affvecs) + curvec = affd->pre_vectors; + else + curvec = affd->pre_vectors + usedvecs; + cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask); + usedvecs += irq_build_affinity_masks(affd, curvec, affvecs, + node_to_cpumask, npresmsk, + nmsk, masks); put_online_cpus(); /* Fill out vectors at the end that don't need affinity */ + if (usedvecs >= affvecs) + curvec = affd->pre_vectors + affvecs; + else + curvec = affd->pre_vectors + usedvecs; for (; curvec < nvecs; curvec++) cpumask_copy(masks + curvec, irq_default_affinity); - free_node_to_possible_cpumask(node_to_possible_cpumask); -out: + +outnodemsk: + free_node_to_cpumask(node_to_cpumask); +outnpresmsk: + free_cpumask_var(npresmsk); +outcpumsk: free_cpumask_var(nmsk); return masks; } diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c index e5bcd94c1efb..75d8e7cf040e 100644 --- a/kernel/kexec_file.c +++ b/kernel/kexec_file.c @@ -22,50 +22,123 @@ #include <linux/ima.h> #include <crypto/hash.h> #include <crypto/sha.h> +#include <linux/elf.h> +#include <linux/elfcore.h> +#include <linux/kernel.h> +#include <linux/kexec.h> +#include <linux/slab.h> #include <linux/syscalls.h> #include <linux/vmalloc.h> #include "kexec_internal.h" static int kexec_calculate_store_digests(struct kimage *image); +/* + * Currently this is the only default function that is exported as some + * architectures need it to do additional handlings. + * In the future, other default functions may be exported too if required. 
+ */ +int kexec_image_probe_default(struct kimage *image, void *buf, + unsigned long buf_len) +{ + const struct kexec_file_ops * const *fops; + int ret = -ENOEXEC; + + for (fops = &kexec_file_loaders[0]; *fops && (*fops)->probe; ++fops) { + ret = (*fops)->probe(buf, buf_len); + if (!ret) { + image->fops = *fops; + return ret; + } + } + + return ret; +} + /* Architectures can provide this probe function */ int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len) { - return -ENOEXEC; + return kexec_image_probe_default(image, buf, buf_len); +} + +static void *kexec_image_load_default(struct kimage *image) +{ + if (!image->fops || !image->fops->load) + return ERR_PTR(-ENOEXEC); + + return image->fops->load(image, image->kernel_buf, + image->kernel_buf_len, image->initrd_buf, + image->initrd_buf_len, image->cmdline_buf, + image->cmdline_buf_len); } void * __weak arch_kexec_kernel_image_load(struct kimage *image) { - return ERR_PTR(-ENOEXEC); + return kexec_image_load_default(image); +} + +static int kexec_image_post_load_cleanup_default(struct kimage *image) +{ + if (!image->fops || !image->fops->cleanup) + return 0; + + return image->fops->cleanup(image->image_loader_data); } int __weak arch_kimage_file_post_load_cleanup(struct kimage *image) { - return -EINVAL; + return kexec_image_post_load_cleanup_default(image); } #ifdef CONFIG_KEXEC_VERIFY_SIG +static int kexec_image_verify_sig_default(struct kimage *image, void *buf, + unsigned long buf_len) +{ + if (!image->fops || !image->fops->verify_sig) { + pr_debug("kernel loader does not support signature verification.\n"); + return -EKEYREJECTED; + } + + return image->fops->verify_sig(buf, buf_len); +} + int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf, unsigned long buf_len) { - return -EKEYREJECTED; + return kexec_image_verify_sig_default(image, buf, buf_len); } #endif -/* Apply relocations of type RELA */ +/* + * arch_kexec_apply_relocations_add - apply relocations of type RELA + * @pi: Purgatory to be relocated. + * @section: Section relocations applying to. + * @relsec: Section containing RELAs. + * @symtab: Corresponding symtab. + * + * Return: 0 on success, negative errno on error. + */ int __weak -arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, - unsigned int relsec) +arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section, + const Elf_Shdr *relsec, const Elf_Shdr *symtab) { pr_err("RELA relocation unsupported.\n"); return -ENOEXEC; } -/* Apply relocations of type REL */ +/* + * arch_kexec_apply_relocations - apply relocations of type REL + * @pi: Purgatory to be relocated. + * @section: Section relocations applying to. + * @relsec: Section containing RELs. + * @symtab: Corresponding symtab. + * + * Return: 0 on success, negative errno on error. + */ int __weak -arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, - unsigned int relsec) +arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section, + const Elf_Shdr *relsec, const Elf_Shdr *symtab) { pr_err("REL relocation unsupported.\n"); return -ENOEXEC; @@ -532,6 +605,9 @@ static int kexec_calculate_store_digests(struct kimage *image) struct kexec_sha_region *sha_regions; struct purgatory_info *pi = &image->purgatory_info; + if (!IS_ENABLED(CONFIG_ARCH_HAS_KEXEC_PURGATORY)) + return 0; + zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT); zero_buf_sz = PAGE_SIZE; @@ -633,87 +709,29 @@ out: return ret; } -/* Actually load purgatory. 
Lot of code taken from kexec-tools */ -static int __kexec_load_purgatory(struct kimage *image, unsigned long min, - unsigned long max, int top_down) +#ifdef CONFIG_ARCH_HAS_KEXEC_PURGATORY +/* + * kexec_purgatory_setup_kbuf - prepare buffer to load purgatory. + * @pi: Purgatory to be loaded. + * @kbuf: Buffer to setup. + * + * Allocates the memory needed for the buffer. Caller is responsible to free + * the memory after use. + * + * Return: 0 on success, negative errno on error. + */ +static int kexec_purgatory_setup_kbuf(struct purgatory_info *pi, + struct kexec_buf *kbuf) { - struct purgatory_info *pi = &image->purgatory_info; - unsigned long align, bss_align, bss_sz, bss_pad; - unsigned long entry, load_addr, curr_load_addr, bss_addr, offset; - unsigned char *buf_addr, *src; - int i, ret = 0, entry_sidx = -1; - const Elf_Shdr *sechdrs_c; - Elf_Shdr *sechdrs = NULL; - struct kexec_buf kbuf = { .image = image, .bufsz = 0, .buf_align = 1, - .buf_min = min, .buf_max = max, - .top_down = top_down }; - - /* - * sechdrs_c points to section headers in purgatory and are read - * only. No modifications allowed. - */ - sechdrs_c = (void *)pi->ehdr + pi->ehdr->e_shoff; - - /* - * We can not modify sechdrs_c[] and its fields. It is read only. - * Copy it over to a local copy where one can store some temporary - * data and free it at the end. We need to modify ->sh_addr and - * ->sh_offset fields to keep track of permanent and temporary - * locations of sections. - */ - sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr)); - if (!sechdrs) - return -ENOMEM; - - memcpy(sechdrs, sechdrs_c, pi->ehdr->e_shnum * sizeof(Elf_Shdr)); - - /* - * We seem to have multiple copies of sections. First copy is which - * is embedded in kernel in read only section. Some of these sections - * will be copied to a temporary buffer and relocated. And these - * sections will finally be copied to their final destination at - * segment load time. - * - * Use ->sh_offset to reflect section address in memory. It will - * point to original read only copy if section is not allocatable. - * Otherwise it will point to temporary copy which will be relocated. - * - * Use ->sh_addr to contain final address of the section where it - * will go during execution time. - */ - for (i = 0; i < pi->ehdr->e_shnum; i++) { - if (sechdrs[i].sh_type == SHT_NOBITS) - continue; - - sechdrs[i].sh_offset = (unsigned long)pi->ehdr + - sechdrs[i].sh_offset; - } - - /* - * Identify entry point section and make entry relative to section - * start. - */ - entry = pi->ehdr->e_entry; - for (i = 0; i < pi->ehdr->e_shnum; i++) { - if (!(sechdrs[i].sh_flags & SHF_ALLOC)) - continue; - - if (!(sechdrs[i].sh_flags & SHF_EXECINSTR)) - continue; - - /* Make entry section relative */ - if (sechdrs[i].sh_addr <= pi->ehdr->e_entry && - ((sechdrs[i].sh_addr + sechdrs[i].sh_size) > - pi->ehdr->e_entry)) { - entry_sidx = i; - entry -= sechdrs[i].sh_addr; - break; - } - } + const Elf_Shdr *sechdrs; + unsigned long bss_align; + unsigned long bss_sz; + unsigned long align; + int i, ret; - /* Determine how much memory is needed to load relocatable object. 
*/ - bss_align = 1; - bss_sz = 0; + sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff; + kbuf->buf_align = bss_align = 1; + kbuf->bufsz = bss_sz = 0; for (i = 0; i < pi->ehdr->e_shnum; i++) { if (!(sechdrs[i].sh_flags & SHF_ALLOC)) @@ -721,111 +739,124 @@ static int __kexec_load_purgatory(struct kimage *image, unsigned long min, align = sechdrs[i].sh_addralign; if (sechdrs[i].sh_type != SHT_NOBITS) { - if (kbuf.buf_align < align) - kbuf.buf_align = align; - kbuf.bufsz = ALIGN(kbuf.bufsz, align); - kbuf.bufsz += sechdrs[i].sh_size; + if (kbuf->buf_align < align) + kbuf->buf_align = align; + kbuf->bufsz = ALIGN(kbuf->bufsz, align); + kbuf->bufsz += sechdrs[i].sh_size; } else { - /* bss section */ if (bss_align < align) bss_align = align; bss_sz = ALIGN(bss_sz, align); bss_sz += sechdrs[i].sh_size; } } + kbuf->bufsz = ALIGN(kbuf->bufsz, bss_align); + kbuf->memsz = kbuf->bufsz + bss_sz; + if (kbuf->buf_align < bss_align) + kbuf->buf_align = bss_align; - /* Determine the bss padding required to align bss properly */ - bss_pad = 0; - if (kbuf.bufsz & (bss_align - 1)) - bss_pad = bss_align - (kbuf.bufsz & (bss_align - 1)); - - kbuf.memsz = kbuf.bufsz + bss_pad + bss_sz; + kbuf->buffer = vzalloc(kbuf->bufsz); + if (!kbuf->buffer) + return -ENOMEM; + pi->purgatory_buf = kbuf->buffer; - /* Allocate buffer for purgatory */ - kbuf.buffer = vzalloc(kbuf.bufsz); - if (!kbuf.buffer) { - ret = -ENOMEM; + ret = kexec_add_buffer(kbuf); + if (ret) goto out; - } - if (kbuf.buf_align < bss_align) - kbuf.buf_align = bss_align; + return 0; +out: + vfree(pi->purgatory_buf); + pi->purgatory_buf = NULL; + return ret; +} - /* Add buffer to segment list */ - ret = kexec_add_buffer(&kbuf); - if (ret) - goto out; - pi->purgatory_load_addr = kbuf.mem; +/* + * kexec_purgatory_setup_sechdrs - prepares the pi->sechdrs buffer. + * @pi: Purgatory to be loaded. + * @kbuf: Buffer prepared to store purgatory. + * + * Allocates the memory needed for the buffer. Caller is responsible to free + * the memory after use. + * + * Return: 0 on success, negative errno on error. + */ +static int kexec_purgatory_setup_sechdrs(struct purgatory_info *pi, + struct kexec_buf *kbuf) +{ + unsigned long bss_addr; + unsigned long offset; + Elf_Shdr *sechdrs; + int i; + + /* + * The section headers in kexec_purgatory are read-only. In order to + * have them modifiable make a temporary copy. + */ + sechdrs = vzalloc(pi->ehdr->e_shnum * sizeof(Elf_Shdr)); + if (!sechdrs) + return -ENOMEM; + memcpy(sechdrs, (void *)pi->ehdr + pi->ehdr->e_shoff, + pi->ehdr->e_shnum * sizeof(Elf_Shdr)); + pi->sechdrs = sechdrs; - /* Load SHF_ALLOC sections */ - buf_addr = kbuf.buffer; - load_addr = curr_load_addr = pi->purgatory_load_addr; - bss_addr = load_addr + kbuf.bufsz + bss_pad; + offset = 0; + bss_addr = kbuf->mem + kbuf->bufsz; + kbuf->image->start = pi->ehdr->e_entry; for (i = 0; i < pi->ehdr->e_shnum; i++) { + unsigned long align; + void *src, *dst; + if (!(sechdrs[i].sh_flags & SHF_ALLOC)) continue; align = sechdrs[i].sh_addralign; - if (sechdrs[i].sh_type != SHT_NOBITS) { - curr_load_addr = ALIGN(curr_load_addr, align); - offset = curr_load_addr - load_addr; - /* We already modifed ->sh_offset to keep src addr */ - src = (char *) sechdrs[i].sh_offset; - memcpy(buf_addr + offset, src, sechdrs[i].sh_size); - - /* Store load address and source address of section */ - sechdrs[i].sh_addr = curr_load_addr; - - /* - * This section got copied to temporary buffer. Update - * ->sh_offset accordingly. 
- */ - sechdrs[i].sh_offset = (unsigned long)(buf_addr + offset); - - /* Advance to the next address */ - curr_load_addr += sechdrs[i].sh_size; - } else { + if (sechdrs[i].sh_type == SHT_NOBITS) { bss_addr = ALIGN(bss_addr, align); sechdrs[i].sh_addr = bss_addr; bss_addr += sechdrs[i].sh_size; + continue; } - } - /* Update entry point based on load address of text section */ - if (entry_sidx >= 0) - entry += sechdrs[entry_sidx].sh_addr; + offset = ALIGN(offset, align); + if (sechdrs[i].sh_flags & SHF_EXECINSTR && + pi->ehdr->e_entry >= sechdrs[i].sh_addr && + pi->ehdr->e_entry < (sechdrs[i].sh_addr + + sechdrs[i].sh_size)) { + kbuf->image->start -= sechdrs[i].sh_addr; + kbuf->image->start += kbuf->mem + offset; + } - /* Make kernel jump to purgatory after shutdown */ - image->start = entry; + src = (void *)pi->ehdr + sechdrs[i].sh_offset; + dst = pi->purgatory_buf + offset; + memcpy(dst, src, sechdrs[i].sh_size); - /* Used later to get/set symbol values */ - pi->sechdrs = sechdrs; + sechdrs[i].sh_addr = kbuf->mem + offset; + sechdrs[i].sh_offset = offset; + offset += sechdrs[i].sh_size; + } - /* - * Used later to identify which section is purgatory and skip it - * from checksumming. - */ - pi->purgatory_buf = kbuf.buffer; - return ret; -out: - vfree(sechdrs); - vfree(kbuf.buffer); - return ret; + return 0; } static int kexec_apply_relocations(struct kimage *image) { int i, ret; struct purgatory_info *pi = &image->purgatory_info; - Elf_Shdr *sechdrs = pi->sechdrs; + const Elf_Shdr *sechdrs; + + sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff; - /* Apply relocations */ for (i = 0; i < pi->ehdr->e_shnum; i++) { - Elf_Shdr *section, *symtab; + const Elf_Shdr *relsec; + const Elf_Shdr *symtab; + Elf_Shdr *section; + + relsec = sechdrs + i; - if (sechdrs[i].sh_type != SHT_RELA && - sechdrs[i].sh_type != SHT_REL) + if (relsec->sh_type != SHT_RELA && + relsec->sh_type != SHT_REL) continue; /* @@ -834,12 +865,12 @@ static int kexec_apply_relocations(struct kimage *image) * symbol table. And ->sh_info contains section header * index of section to which relocations apply. */ - if (sechdrs[i].sh_info >= pi->ehdr->e_shnum || - sechdrs[i].sh_link >= pi->ehdr->e_shnum) + if (relsec->sh_info >= pi->ehdr->e_shnum || + relsec->sh_link >= pi->ehdr->e_shnum) return -ENOEXEC; - section = &sechdrs[sechdrs[i].sh_info]; - symtab = &sechdrs[sechdrs[i].sh_link]; + section = pi->sechdrs + relsec->sh_info; + symtab = sechdrs + relsec->sh_link; if (!(section->sh_flags & SHF_ALLOC)) continue; @@ -856,12 +887,12 @@ static int kexec_apply_relocations(struct kimage *image) * Respective architecture needs to provide support for applying * relocations of type SHT_RELA/SHT_REL. */ - if (sechdrs[i].sh_type == SHT_RELA) - ret = arch_kexec_apply_relocations_add(pi->ehdr, - sechdrs, i); - else if (sechdrs[i].sh_type == SHT_REL) - ret = arch_kexec_apply_relocations(pi->ehdr, - sechdrs, i); + if (relsec->sh_type == SHT_RELA) + ret = arch_kexec_apply_relocations_add(pi, section, + relsec, symtab); + else if (relsec->sh_type == SHT_REL) + ret = arch_kexec_apply_relocations(pi, section, + relsec, symtab); if (ret) return ret; } @@ -869,10 +900,18 @@ static int kexec_apply_relocations(struct kimage *image) return 0; } -/* Load relocatable purgatory object and relocate it appropriately */ -int kexec_load_purgatory(struct kimage *image, unsigned long min, - unsigned long max, int top_down, - unsigned long *load_addr) +/* + * kexec_load_purgatory - Load and relocate the purgatory object. + * @image: Image to add the purgatory to. 
+ * @kbuf: Memory parameters to use. + * + * Allocates the memory needed for image->purgatory_info.sechdrs and + * image->purgatory_info.purgatory_buf/kbuf->buffer. Caller is responsible + * to free the memory after use. + * + * Return: 0 on success, negative errno on error. + */ +int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf) { struct purgatory_info *pi = &image->purgatory_info; int ret; @@ -880,55 +919,51 @@ int kexec_load_purgatory(struct kimage *image, unsigned long min, if (kexec_purgatory_size <= 0) return -EINVAL; - if (kexec_purgatory_size < sizeof(Elf_Ehdr)) - return -ENOEXEC; - - pi->ehdr = (Elf_Ehdr *)kexec_purgatory; - - if (memcmp(pi->ehdr->e_ident, ELFMAG, SELFMAG) != 0 - || pi->ehdr->e_type != ET_REL - || !elf_check_arch(pi->ehdr) - || pi->ehdr->e_shentsize != sizeof(Elf_Shdr)) - return -ENOEXEC; - - if (pi->ehdr->e_shoff >= kexec_purgatory_size - || (pi->ehdr->e_shnum * sizeof(Elf_Shdr) > - kexec_purgatory_size - pi->ehdr->e_shoff)) - return -ENOEXEC; + pi->ehdr = (const Elf_Ehdr *)kexec_purgatory; - ret = __kexec_load_purgatory(image, min, max, top_down); + ret = kexec_purgatory_setup_kbuf(pi, kbuf); if (ret) return ret; + ret = kexec_purgatory_setup_sechdrs(pi, kbuf); + if (ret) + goto out_free_kbuf; + ret = kexec_apply_relocations(image); if (ret) goto out; - *load_addr = pi->purgatory_load_addr; return 0; out: vfree(pi->sechdrs); pi->sechdrs = NULL; - +out_free_kbuf: vfree(pi->purgatory_buf); pi->purgatory_buf = NULL; return ret; } -static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi, - const char *name) +/* + * kexec_purgatory_find_symbol - find a symbol in the purgatory + * @pi: Purgatory to search in. + * @name: Name of the symbol. + * + * Return: pointer to symbol in read-only symtab on success, NULL on error. 
+ */ +static const Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi, + const char *name) { - Elf_Sym *syms; - Elf_Shdr *sechdrs; - Elf_Ehdr *ehdr; - int i, k; + const Elf_Shdr *sechdrs; + const Elf_Ehdr *ehdr; + const Elf_Sym *syms; const char *strtab; + int i, k; - if (!pi->sechdrs || !pi->ehdr) + if (!pi->ehdr) return NULL; - sechdrs = pi->sechdrs; ehdr = pi->ehdr; + sechdrs = (void *)ehdr + ehdr->e_shoff; for (i = 0; i < ehdr->e_shnum; i++) { if (sechdrs[i].sh_type != SHT_SYMTAB) @@ -937,8 +972,8 @@ static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi, if (sechdrs[i].sh_link >= ehdr->e_shnum) /* Invalid strtab section number */ continue; - strtab = (char *)sechdrs[sechdrs[i].sh_link].sh_offset; - syms = (Elf_Sym *)sechdrs[i].sh_offset; + strtab = (void *)ehdr + sechdrs[sechdrs[i].sh_link].sh_offset; + syms = (void *)ehdr + sechdrs[i].sh_offset; /* Go through symbols for a match */ for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) { @@ -966,7 +1001,7 @@ static Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi, void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name) { struct purgatory_info *pi = &image->purgatory_info; - Elf_Sym *sym; + const Elf_Sym *sym; Elf_Shdr *sechdr; sym = kexec_purgatory_find_symbol(pi, name); @@ -989,9 +1024,9 @@ void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name) int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, void *buf, unsigned int size, bool get_value) { - Elf_Sym *sym; - Elf_Shdr *sechdrs; struct purgatory_info *pi = &image->purgatory_info; + const Elf_Sym *sym; + Elf_Shdr *sec; char *sym_buf; sym = kexec_purgatory_find_symbol(pi, name); @@ -1004,16 +1039,15 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, return -EINVAL; } - sechdrs = pi->sechdrs; + sec = pi->sechdrs + sym->st_shndx; - if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) { + if (sec->sh_type == SHT_NOBITS) { pr_err("symbol %s is in a bss section. Cannot %s\n", name, get_value ? 
"get" : "set"); return -EINVAL; } - sym_buf = (unsigned char *)sechdrs[sym->st_shndx].sh_offset + - sym->st_value; + sym_buf = (char *)pi->purgatory_buf + sec->sh_offset + sym->st_value; if (get_value) memcpy((void *)buf, sym_buf, size); @@ -1022,3 +1056,174 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name, return 0; } +#endif /* CONFIG_ARCH_HAS_KEXEC_PURGATORY */ + +int crash_exclude_mem_range(struct crash_mem *mem, + unsigned long long mstart, unsigned long long mend) +{ + int i, j; + unsigned long long start, end; + struct crash_mem_range temp_range = {0, 0}; + + for (i = 0; i < mem->nr_ranges; i++) { + start = mem->ranges[i].start; + end = mem->ranges[i].end; + + if (mstart > end || mend < start) + continue; + + /* Truncate any area outside of range */ + if (mstart < start) + mstart = start; + if (mend > end) + mend = end; + + /* Found completely overlapping range */ + if (mstart == start && mend == end) { + mem->ranges[i].start = 0; + mem->ranges[i].end = 0; + if (i < mem->nr_ranges - 1) { + /* Shift rest of the ranges to left */ + for (j = i; j < mem->nr_ranges - 1; j++) { + mem->ranges[j].start = + mem->ranges[j+1].start; + mem->ranges[j].end = + mem->ranges[j+1].end; + } + } + mem->nr_ranges--; + return 0; + } + + if (mstart > start && mend < end) { + /* Split original range */ + mem->ranges[i].end = mstart - 1; + temp_range.start = mend + 1; + temp_range.end = end; + } else if (mstart != start) + mem->ranges[i].end = mstart - 1; + else + mem->ranges[i].start = mend + 1; + break; + } + + /* If a split happened, add the split to array */ + if (!temp_range.end) + return 0; + + /* Split happened */ + if (i == mem->max_nr_ranges - 1) + return -ENOMEM; + + /* Location where new range should go */ + j = i + 1; + if (j < mem->nr_ranges) { + /* Move over all ranges one slot towards the end */ + for (i = mem->nr_ranges - 1; i >= j; i--) + mem->ranges[i + 1] = mem->ranges[i]; + } + + mem->ranges[j].start = temp_range.start; + mem->ranges[j].end = temp_range.end; + mem->nr_ranges++; + return 0; +} + +int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map, + void **addr, unsigned long *sz) +{ + Elf64_Ehdr *ehdr; + Elf64_Phdr *phdr; + unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz; + unsigned char *buf; + unsigned int cpu, i; + unsigned long long notes_addr; + unsigned long mstart, mend; + + /* extra phdr for vmcoreinfo elf note */ + nr_phdr = nr_cpus + 1; + nr_phdr += mem->nr_ranges; + + /* + * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping + * area (for example, ffffffff80000000 - ffffffffa0000000 on x86_64). + * I think this is required by tools like gdb. So same physical + * memory will be mapped in two elf headers. One will contain kernel + * text virtual addresses and other will have __va(physical) addresses. 
+ */ + + nr_phdr++; + elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr); + elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN); + + buf = vzalloc(elf_sz); + if (!buf) + return -ENOMEM; + + ehdr = (Elf64_Ehdr *)buf; + phdr = (Elf64_Phdr *)(ehdr + 1); + memcpy(ehdr->e_ident, ELFMAG, SELFMAG); + ehdr->e_ident[EI_CLASS] = ELFCLASS64; + ehdr->e_ident[EI_DATA] = ELFDATA2LSB; + ehdr->e_ident[EI_VERSION] = EV_CURRENT; + ehdr->e_ident[EI_OSABI] = ELF_OSABI; + memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD); + ehdr->e_type = ET_CORE; + ehdr->e_machine = ELF_ARCH; + ehdr->e_version = EV_CURRENT; + ehdr->e_phoff = sizeof(Elf64_Ehdr); + ehdr->e_ehsize = sizeof(Elf64_Ehdr); + ehdr->e_phentsize = sizeof(Elf64_Phdr); + + /* Prepare one phdr of type PT_NOTE for each present cpu */ + for_each_present_cpu(cpu) { + phdr->p_type = PT_NOTE; + notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu)); + phdr->p_offset = phdr->p_paddr = notes_addr; + phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t); + (ehdr->e_phnum)++; + phdr++; + } + + /* Prepare one PT_NOTE header for vmcoreinfo */ + phdr->p_type = PT_NOTE; + phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note(); + phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE; + (ehdr->e_phnum)++; + phdr++; + + /* Prepare PT_LOAD type program header for kernel text region */ + if (kernel_map) { + phdr->p_type = PT_LOAD; + phdr->p_flags = PF_R|PF_W|PF_X; + phdr->p_vaddr = (Elf64_Addr)_text; + phdr->p_filesz = phdr->p_memsz = _end - _text; + phdr->p_offset = phdr->p_paddr = __pa_symbol(_text); + ehdr->e_phnum++; + phdr++; + } + + /* Go through all the ranges in mem->ranges[] and prepare phdr */ + for (i = 0; i < mem->nr_ranges; i++) { + mstart = mem->ranges[i].start; + mend = mem->ranges[i].end; + + phdr->p_type = PT_LOAD; + phdr->p_flags = PF_R|PF_W|PF_X; + phdr->p_offset = mstart; + + phdr->p_paddr = mstart; + phdr->p_vaddr = (unsigned long long) __va(mstart); + phdr->p_filesz = phdr->p_memsz = mend - mstart + 1; + phdr->p_align = 0; + ehdr->e_phnum++; + phdr++; + pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n", + phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz, + ehdr->e_phnum, phdr->p_offset); + } + + *addr = buf; + *sz = elf_sz; + return 0; +} diff --git a/kernel/livepatch/shadow.c b/kernel/livepatch/shadow.c index fdac27588d60..83958c814439 100644 --- a/kernel/livepatch/shadow.c +++ b/kernel/livepatch/shadow.c @@ -113,8 +113,10 @@ void *klp_shadow_get(void *obj, unsigned long id) } EXPORT_SYMBOL_GPL(klp_shadow_get); -static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, - size_t size, gfp_t gfp_flags, bool warn_on_exist) +static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, + size_t size, gfp_t gfp_flags, + klp_shadow_ctor_t ctor, void *ctor_data, + bool warn_on_exist) { struct klp_shadow *new_shadow; void *shadow_data; @@ -125,18 +127,15 @@ static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, if (shadow_data) goto exists; - /* Allocate a new shadow variable for use inside the lock below */ + /* + * Allocate a new shadow variable. Fill it with zeroes by default. + * More complex setting can be done by @ctor function. But it is + * called only when the buffer is really used (under klp_shadow_lock). 
+ */ new_shadow = kzalloc(size + sizeof(*new_shadow), gfp_flags); if (!new_shadow) return NULL; - new_shadow->obj = obj; - new_shadow->id = id; - - /* Initialize the shadow variable if data provided */ - if (data) - memcpy(new_shadow->data, data, size); - /* Look for <obj, id> again under the lock */ spin_lock_irqsave(&klp_shadow_lock, flags); shadow_data = klp_shadow_get(obj, id); @@ -150,6 +149,22 @@ static void *__klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, goto exists; } + new_shadow->obj = obj; + new_shadow->id = id; + + if (ctor) { + int err; + + err = ctor(obj, new_shadow->data, ctor_data); + if (err) { + spin_unlock_irqrestore(&klp_shadow_lock, flags); + kfree(new_shadow); + pr_err("Failed to construct shadow variable <%p, %lx> (%d)\n", + obj, id, err); + return NULL; + } + } + /* No <obj, id> found, so attach the newly allocated one */ hash_add_rcu(klp_shadow_hash, &new_shadow->node, (unsigned long)new_shadow->obj); @@ -170,26 +185,32 @@ exists: * klp_shadow_alloc() - allocate and add a new shadow variable * @obj: pointer to parent object * @id: data identifier - * @data: pointer to data to attach to parent * @size: size of attached data * @gfp_flags: GFP mask for allocation + * @ctor: custom constructor to initialize the shadow data (optional) + * @ctor_data: pointer to any data needed by @ctor (optional) + * + * Allocates @size bytes for new shadow variable data using @gfp_flags. + * The data are zeroed by default. They are further initialized by @ctor + * function if it is not NULL. The new shadow variable is then added + * to the global hashtable. * - * Allocates @size bytes for new shadow variable data using @gfp_flags - * and copies @size bytes from @data into the new shadow variable's own - * data space. If @data is NULL, @size bytes are still allocated, but - * no copy is performed. The new shadow variable is then added to the - * global hashtable. + * If an existing <obj, id> shadow variable can be found, this routine will + * issue a WARN, exit early and return NULL. * - * If an existing <obj, id> shadow variable can be found, this routine - * will issue a WARN, exit early and return NULL. + * This function guarantees that the constructor function is called only when + * the variable did not exist before. The cost is that @ctor is called + * in atomic context under a spin lock. * * Return: the shadow variable data element, NULL on duplicate or * failure. */ -void *klp_shadow_alloc(void *obj, unsigned long id, void *data, - size_t size, gfp_t gfp_flags) +void *klp_shadow_alloc(void *obj, unsigned long id, + size_t size, gfp_t gfp_flags, + klp_shadow_ctor_t ctor, void *ctor_data) { - return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, true); + return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags, + ctor, ctor_data, true); } EXPORT_SYMBOL_GPL(klp_shadow_alloc); @@ -197,37 +218,51 @@ EXPORT_SYMBOL_GPL(klp_shadow_alloc); * klp_shadow_get_or_alloc() - get existing or allocate a new shadow variable * @obj: pointer to parent object * @id: data identifier - * @data: pointer to data to attach to parent * @size: size of attached data * @gfp_flags: GFP mask for allocation + * @ctor: custom constructor to initialize the shadow data (optional) + * @ctor_data: pointer to any data needed by @ctor (optional) * * Returns a pointer to existing shadow data if an <obj, id> shadow * variable is already present. Otherwise, it creates a new shadow * variable like klp_shadow_alloc(). 
* - * This function guarantees that only one shadow variable exists with - * the given @id for the given @obj. It also guarantees that the shadow - * variable will be initialized by the given @data only when it did not - * exist before. + * This function guarantees that only one shadow variable exists with the given + * @id for the given @obj. It also guarantees that the constructor function + * will be called only when the variable did not exist before. The cost is + * that @ctor is called in atomic context under a spin lock. * * Return: the shadow variable data element, NULL on failure. */ -void *klp_shadow_get_or_alloc(void *obj, unsigned long id, void *data, - size_t size, gfp_t gfp_flags) +void *klp_shadow_get_or_alloc(void *obj, unsigned long id, + size_t size, gfp_t gfp_flags, + klp_shadow_ctor_t ctor, void *ctor_data) { - return __klp_shadow_get_or_alloc(obj, id, data, size, gfp_flags, false); + return __klp_shadow_get_or_alloc(obj, id, size, gfp_flags, + ctor, ctor_data, false); } EXPORT_SYMBOL_GPL(klp_shadow_get_or_alloc); +static void klp_shadow_free_struct(struct klp_shadow *shadow, + klp_shadow_dtor_t dtor) +{ + hash_del_rcu(&shadow->node); + if (dtor) + dtor(shadow->obj, shadow->data); + kfree_rcu(shadow, rcu_head); +} + /** * klp_shadow_free() - detach and free a <obj, id> shadow variable * @obj: pointer to parent object * @id: data identifier + * @dtor: custom callback that can be used to unregister the variable + * and/or free data that the shadow variable points to (optional) * * This function releases the memory for this <obj, id> shadow variable * instance, callers should stop referencing it accordingly. */ -void klp_shadow_free(void *obj, unsigned long id) +void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor) { struct klp_shadow *shadow; unsigned long flags; @@ -239,8 +274,7 @@ void klp_shadow_free(void *obj, unsigned long id) (unsigned long)obj) { if (klp_shadow_match(shadow, obj, id)) { - hash_del_rcu(&shadow->node); - kfree_rcu(shadow, rcu_head); + klp_shadow_free_struct(shadow, dtor); break; } } @@ -252,11 +286,13 @@ EXPORT_SYMBOL_GPL(klp_shadow_free); /** * klp_shadow_free_all() - detach and free all <*, id> shadow variables * @id: data identifier + * @dtor: custom callback that can be used to unregister the variable + * and/or free data that the shadow variable points to (optional) * * This function releases the memory for all <*, id> shadow variable * instances, callers should stop referencing them accordingly. 
*/ -void klp_shadow_free_all(unsigned long id) +void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor) { struct klp_shadow *shadow; unsigned long flags; @@ -266,10 +302,8 @@ void klp_shadow_free_all(unsigned long id) /* Delete all <*, id> from hash */ hash_for_each(klp_shadow_hash, i, shadow, node) { - if (klp_shadow_match(shadow, shadow->obj, id)) { - hash_del_rcu(&shadow->node); - kfree_rcu(shadow, rcu_head); - } + if (klp_shadow_match(shadow, shadow->obj, id)) + klp_shadow_free_struct(shadow, dtor); } spin_unlock_irqrestore(&klp_shadow_lock, flags); diff --git a/kernel/resource.c b/kernel/resource.c index e270b5048988..2af6c03858b9 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -651,7 +651,8 @@ static int __find_resource(struct resource *root, struct resource *old, alloc.start = constraint->alignf(constraint->alignf_data, &avail, size, constraint->align); alloc.end = alloc.start + size - 1; - if (resource_contains(&avail, &alloc)) { + if (alloc.start <= alloc.end && + resource_contains(&avail, &alloc)) { new->start = alloc.start; new->end = alloc.end; return 0; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e8afd6086f23..5e10aaeebfcc 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -874,7 +874,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * this case, we can save a useless back to back clock update. */ if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) - rq_clock_skip_update(rq, true); + rq_clock_skip_update(rq); } #ifdef CONFIG_SMP diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index 2b124811947d..d2c6083304b4 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -631,10 +631,9 @@ fail: stop_kthread: sugov_kthread_stop(sg_policy); - -free_sg_policy: mutex_unlock(&global_tunables_lock); +free_sg_policy: sugov_policy_free(sg_policy); disable_fast_switch: diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index d1c7bf7c7e5b..e7b3008b85bb 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1560,7 +1560,7 @@ static void yield_task_dl(struct rq *rq) * so we don't do microscopic update in schedule() * and double the fastpath cost. */ - rq_clock_skip_update(rq, true); + rq_clock_skip_update(rq); } #ifdef CONFIG_SMP diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 0951d1c58d2f..54dc31e7ab9b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -7089,7 +7089,7 @@ static void yield_task_fair(struct rq *rq) * so we don't do microscopic update in schedule() * and double the fastpath cost. */ - rq_clock_skip_update(rq, true); + rq_clock_skip_update(rq); } set_skip_buddy(se); diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 86b77987435e..7aef6b4e885a 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -839,6 +839,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) continue; raw_spin_lock(&rq->lock); + update_rq_clock(rq); + if (rt_rq->rt_time) { u64 runtime; @@ -859,7 +861,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) * 'runtime'. 
*/ if (rt_rq->rt_nr_running && rq->curr == rq->idle) - rq_clock_skip_update(rq, false); + rq_clock_cancel_skipupdate(rq); } if (rt_rq->rt_time || rt_rq->rt_nr_running) idle = 0; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c3deaee7a7a2..15750c222ca2 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -976,13 +976,20 @@ static inline u64 rq_clock_task(struct rq *rq) return rq->clock_task; } -static inline void rq_clock_skip_update(struct rq *rq, bool skip) +static inline void rq_clock_skip_update(struct rq *rq) { lockdep_assert_held(&rq->lock); - if (skip) - rq->clock_update_flags |= RQCF_REQ_SKIP; - else - rq->clock_update_flags &= ~RQCF_REQ_SKIP; + rq->clock_update_flags |= RQCF_REQ_SKIP; +} + +/* + * See rt task throttoling, which is the only time a skip + * request is cancelled. + */ +static inline void rq_clock_cancel_skipupdate(struct rq *rq) +{ + lockdep_assert_held(&rq->lock); + rq->clock_update_flags &= ~RQCF_REQ_SKIP; } struct rq_flags { diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 6cafc008f6db..9791364925dc 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -5,6 +5,11 @@ #include <asm/unistd.h> +#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER +/* Architectures may override COND_SYSCALL and COND_SYSCALL_COMPAT */ +#include <asm/syscall_wrapper.h> +#endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ + /* we can't #include <linux/syscalls.h> here, but tell gcc to not warn with -Wmissing-prototypes */ asmlinkage long sys_ni_syscall(void); @@ -17,8 +22,13 @@ asmlinkage long sys_ni_syscall(void) return -ENOSYS; } +#ifndef COND_SYSCALL #define COND_SYSCALL(name) cond_syscall(sys_##name) +#endif /* COND_SYSCALL */ + +#ifndef COND_SYSCALL_COMPAT #define COND_SYSCALL_COMPAT(name) cond_syscall(compat_sys_##name) +#endif /* COND_SYSCALL_COMPAT */ /* * This list is kept in the same order as include/uapi/asm-generic/unistd.h. 
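[Editor's note] The kernel/sys_ni.c hunk above only adds #ifndef guards around the generic COND_SYSCALL()/COND_SYSCALL_COMPAT() fallbacks; the actual overrides come from the architecture's <asm/syscall_wrapper.h>, which is pulled in when CONFIG_ARCH_HAS_SYSCALL_WRAPPER is set. As a rough sketch of the mechanism only (not the real x86 header; the __arch_sys_ prefix and the weak-stub bodies are illustrative assumptions), such a header could look like this:

/*
 * Hypothetical <asm/syscall_wrapper.h> fragment -- illustrative sketch.
 * Assumes the architecture emits prefixed syscall entry points
 * (__arch_sys_<name>) instead of plain sys_<name>.
 */
#ifndef _ASM_SYSCALL_WRAPPER_H
#define _ASM_SYSCALL_WRAPPER_H

#include <linux/linkage.h>

struct pt_regs;
asmlinkage long sys_ni_syscall(void);

/*
 * The generic cond_syscall() alias in kernel/sys_ni.c would bind to the
 * wrong (unprefixed) symbol, so provide weak "not implemented" stubs for
 * the prefixed names instead; a real SYSCALL_DEFINE*() overrides them.
 */
#define COND_SYSCALL(name)						\
	asmlinkage long __weak __arch_sys_##name(const struct pt_regs *regs)	\
	{								\
		return sys_ni_syscall();				\
	}

#define COND_SYSCALL_COMPAT(name)					\
	asmlinkage long __weak __arch_compat_sys_##name(const struct pt_regs *regs)	\
	{								\
		return sys_ni_syscall();				\
	}

#endif /* _ASM_SYSCALL_WRAPPER_H */

The same guard pattern is what the kernel/time/posix-stubs.c hunk below applies to SYS_NI()/COMPAT_SYS_NI().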
diff --git a/kernel/time/posix-stubs.c b/kernel/time/posix-stubs.c index 6259dbc0191a..e0dbae98db9d 100644 --- a/kernel/time/posix-stubs.c +++ b/kernel/time/posix-stubs.c @@ -19,6 +19,11 @@ #include <linux/posix-timers.h> #include <linux/compat.h> +#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER +/* Architectures may override SYS_NI and COMPAT_SYS_NI */ +#include <asm/syscall_wrapper.h> +#endif + asmlinkage long sys_ni_posix_timers(void) { pr_err_once("process %d (%s) attempted a POSIX timer syscall " @@ -27,8 +32,13 @@ asmlinkage long sys_ni_posix_timers(void) return -ENOSYS; } +#ifndef SYS_NI #define SYS_NI(name) SYSCALL_ALIAS(sys_##name, sys_ni_posix_timers) +#endif + +#ifndef COMPAT_SYS_NI #define COMPAT_SYS_NI(name) SYSCALL_ALIAS(compat_sys_##name, sys_ni_posix_timers) +#endif SYS_NI(timer_create); SYS_NI(timer_gettime); diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 2c416509b834..c79193e598f5 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c @@ -252,6 +252,8 @@ int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe) ret = strncpy_from_user( func, u64_to_user_ptr(p_event->attr.kprobe_func), KSYM_NAME_LEN); + if (ret == KSYM_NAME_LEN) + ret = -E2BIG; if (ret < 0) goto out; @@ -300,6 +302,8 @@ int perf_uprobe_init(struct perf_event *p_event, bool is_retprobe) return -ENOMEM; ret = strncpy_from_user( path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX); + if (ret == PATH_MAX) + return -E2BIG; if (ret < 0) goto out; if (path[0] == '\0') { diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 1bda4ec95e18..9b4716bb8bb0 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c @@ -1704,18 +1704,16 @@ static int create_filter(struct trace_event_call *call, struct event_filter **filterp) { struct filter_parse_error *pe = NULL; - struct event_filter *filter = NULL; int err; - err = create_filter_start(filter_string, set_str, &pe, &filter); + err = create_filter_start(filter_string, set_str, &pe, filterp); if (err) return err; - err = process_preds(call, filter_string, filter, pe); + err = process_preds(call, filter_string, *filterp, pe); if (err && set_str) - append_filter_err(pe, filter); + append_filter_err(pe, *filterp); - *filterp = filter; return err; } @@ -1739,24 +1737,22 @@ static int create_system_filter(struct trace_subsystem_dir *dir, struct trace_array *tr, char *filter_str, struct event_filter **filterp) { - struct event_filter *filter = NULL; struct filter_parse_error *pe = NULL; int err; - err = create_filter_start(filter_str, true, &pe, &filter); + err = create_filter_start(filter_str, true, &pe, filterp); if (!err) { err = process_system_preds(dir, tr, pe, filter_str); if (!err) { /* System filters just show a default message */ - kfree(filter->filter_string); - filter->filter_string = NULL; + kfree((*filterp)->filter_string); + (*filterp)->filter_string = NULL; } else { - append_filter_err(pe, filter); + append_filter_err(pe, *filterp); } } create_filter_finish(pe); - *filterp = filter; return err; } @@ -1764,7 +1760,7 @@ static int create_system_filter(struct trace_subsystem_dir *dir, int apply_event_filter(struct trace_event_file *file, char *filter_string) { struct trace_event_call *call = file->event_call; - struct event_filter *filter; + struct event_filter *filter = NULL; int err; if (!strcmp(strstrip(filter_string), "0")) { @@ -1817,7 +1813,7 @@ int apply_subsystem_event_filter(struct trace_subsystem_dir *dir, { struct 
event_subsystem *system = dir->subsystem; struct trace_array *tr = dir->tr; - struct event_filter *filter; + struct event_filter *filter = NULL; int err = 0; mutex_lock(&event_mutex); @@ -2024,7 +2020,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id, char *filter_str) { int err; - struct event_filter *filter; + struct event_filter *filter = NULL; struct trace_event_call *call; mutex_lock(&event_mutex); @@ -2140,7 +2136,7 @@ static struct test_filter_data_t { #undef YES #undef NO -#define DATA_CNT (sizeof(test_filter_data)/sizeof(struct test_filter_data_t)) +#define DATA_CNT ARRAY_SIZE(test_filter_data) static int test_pred_visited; diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 2014f4351ae0..34fd0e0ec51d 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -151,6 +151,8 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, return; ret = strncpy_from_user(dst, src, maxlen); + if (ret == maxlen) + dst[--ret] = '\0'; if (ret < 0) { /* Failed to fetch string */ ((u8 *)get_rloc_data(dest))[0] = '\0'; @@ -446,7 +448,7 @@ static int create_trace_uprobe(int argc, char **argv) if (ret) goto fail_address_parse; - inode = igrab(d_inode(path.dentry)); + inode = igrab(d_real_inode(path.dentry)); path_put(&path); if (!inode || !S_ISREG(inode->i_mode)) { @@ -602,24 +604,9 @@ static int probes_seq_show(struct seq_file *m, void *v) char c = is_ret_probe(tu) ? 'r' : 'p'; int i; - seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, - trace_event_name(&tu->tp.call)); - seq_printf(m, " %s:", tu->filename); - - /* Don't print "0x (null)" when offset is 0 */ - if (tu->offset) { - seq_printf(m, "0x%px", (void *)tu->offset); - } else { - switch (sizeof(void *)) { - case 4: - seq_printf(m, "0x00000000"); - break; - case 8: - default: - seq_printf(m, "0x0000000000000000"); - break; - } - } + seq_printf(m, "%c:%s/%s %s:0x%0*lx", c, tu->tp.call.class->system, + trace_event_name(&tu->tp.call), tu->filename, + (int)(sizeof(void *) * 2), tu->offset); for (i = 0; i < tu->tp.nr_args; i++) seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm); |
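[Editor's note] The livepatch shadow-variable hunks above replace the @data argument of klp_shadow_alloc()/klp_shadow_get_or_alloc() with constructor/destructor callbacks, so initialization happens only when the variable is really created, and klp_shadow_free()/klp_shadow_free_all() gain a destructor hook. A minimal usage sketch against the new signatures follows; the callback prototypes are inferred from how the patch invokes ctor(obj, new_shadow->data, ctor_data) and dtor(shadow->obj, shadow->data), and the SV_LOCK_ID/sv_lock_* names are made up for illustration:

#include <linux/livepatch.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/printk.h>

#define SV_LOCK_ID	1	/* arbitrary shadow variable id for this example */

/* Constructor: runs under klp_shadow_lock (a spinlock), so it must not sleep. */
static int sv_lock_ctor(void *obj, void *shadow_data, void *ctor_data)
{
	spinlock_t *lock = shadow_data;	/* shadow data is pre-zeroed */

	spin_lock_init(lock);
	return 0;	/* non-zero makes klp_shadow_alloc() fail and return NULL */
}

/* Destructor: called by klp_shadow_free() just before the variable is freed. */
static void sv_lock_dtor(void *obj, void *shadow_data)
{
	pr_debug("dropping shadow lock attached to %p\n", obj);
}

static int attach_shadow_lock(void *obj)
{
	spinlock_t *lock;

	lock = klp_shadow_alloc(obj, SV_LOCK_ID, sizeof(*lock), GFP_KERNEL,
				sv_lock_ctor, NULL);
	return lock ? 0 : -ENOMEM;
}

static void detach_shadow_lock(void *obj)
{
	klp_shadow_free(obj, SV_LOCK_ID, sv_lock_dtor);
}

Note the split the patch establishes: the allocation itself happens before klp_shadow_lock is taken, so GFP_KERNEL is still usable, but the constructor body runs in atomic context and must not sleep.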