Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig             |   2
-rw-r--r--  lib/Kconfig.debug       |  14
-rw-r--r--  lib/Makefile            |   5
-rw-r--r--  lib/bucket_locks.c      |  54
-rw-r--r--  lib/chacha20.c          |  71
-rw-r--r--  lib/dma-direct.c        | 156
-rw-r--r--  lib/dma-noop.c          |  68
-rw-r--r--  lib/error-inject.c      | 242
-rw-r--r--  lib/errseq.c            |  37
-rw-r--r--  lib/kobject.c           |   6
-rw-r--r--  lib/kobject_uevent.c    |   3
-rw-r--r--  lib/rhashtable.c        | 160
-rw-r--r--  lib/smp_processor_id.c  |   3
-rw-r--r--  lib/string.c            |   2
-rw-r--r--  lib/swiotlb.c           | 205
-rw-r--r--  lib/test_bpf.c          | 112
-rw-r--r--  lib/test_firmware.c     |  17
-rw-r--r--  lib/test_kmod.c         |  14
-rw-r--r--  lib/test_rhashtable.c   |   6
-rw-r--r--  lib/usercopy.c          |   2
-rw-r--r--  lib/uuid.c              |  34
-rw-r--r--  lib/vsprintf.c          |  92
22 files changed, 974 insertions, 331 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 4dd5c11366f9..e96089499371 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -413,7 +413,7 @@ config SGL_ALLOC
bool
default n
-config DMA_NOOP_OPS
+config DMA_DIRECT_OPS
bool
depends on HAS_DMA && (!64BIT || ARCH_DMA_ADDR_T_64BIT)
default n
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 2a1f28c183d3..64d7c19d3167 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1500,6 +1500,10 @@ config FAULT_INJECTION
Provide fault-injection framework.
For more details, see Documentation/fault-injection/.
+config FUNCTION_ERROR_INJECTION
+ def_bool y
+ depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES
+
config FAILSLAB
bool "Fault-injection capability for kmalloc"
depends on FAULT_INJECTION
@@ -1547,6 +1551,16 @@ config FAIL_FUTEX
help
Provide fault-injection capability for futexes.
+config FAIL_FUNCTION
+ bool "Fault-injection capability for functions"
+ depends on FAULT_INJECTION_DEBUG_FS && FUNCTION_ERROR_INJECTION
+ help
+ Provide function-based fault-injection capability.
+ This allows you to override a specific function so that it
+ immediately returns a given value. As a result, the caller of the
+ function sees an error value and has to handle it. This is useful
+ for testing the error handling in various subsystems.
+
config FAULT_INJECTION_DEBUG_FS
bool "Debugfs entries for fault-injection capabilities"
depends on FAULT_INJECTION && SYSFS && DEBUG_FS
diff --git a/lib/Makefile b/lib/Makefile
index d11c48ec8ffd..7adb066692b3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -28,7 +28,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
-lib-$(CONFIG_DMA_NOOP_OPS) += dma-noop.o
+lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
lib-y += kobject.o klist.o
@@ -39,7 +39,7 @@ obj-y += bcd.o div64.o sort.o parser.o debug_locks.o random32.o \
gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
- once.o refcount.o usercopy.o errseq.o
+ once.o refcount.o usercopy.o errseq.o bucket_locks.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
obj-y += string_helpers.o
obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
@@ -149,6 +149,7 @@ obj-$(CONFIG_NETDEV_NOTIFIER_ERROR_INJECT) += netdev-notifier-error-inject.o
obj-$(CONFIG_MEMORY_NOTIFIER_ERROR_INJECT) += memory-notifier-error-inject.o
obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \
of-reconfig-notifier-error-inject.o
+obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
lib-$(CONFIG_GENERIC_BUG) += bug.o
diff --git a/lib/bucket_locks.c b/lib/bucket_locks.c
new file mode 100644
index 000000000000..266a97c5708b
--- /dev/null
+++ b/lib/bucket_locks.c
@@ -0,0 +1,54 @@
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/* Allocate an array of spinlocks to be accessed by a hash. Two arguments
+ * control the number of locks allocated: max_size gives the maximum
+ * number of locks to allocate, and cpu_mult gives the number of locks
+ * to allocate per CPU. The size is rounded up to a power of 2 to be
+ * suitable as a hash table.
+ */
+
+int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
+ size_t max_size, unsigned int cpu_mult, gfp_t gfp)
+{
+ spinlock_t *tlocks = NULL;
+ unsigned int i, size;
+#if defined(CONFIG_PROVE_LOCKING)
+ unsigned int nr_pcpus = 2;
+#else
+ unsigned int nr_pcpus = num_possible_cpus();
+#endif
+
+ if (cpu_mult) {
+ nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
+ size = min_t(unsigned int, nr_pcpus * cpu_mult, max_size);
+ } else {
+ size = max_size;
+ }
+
+ if (sizeof(spinlock_t) != 0) {
+ if (gfpflags_allow_blocking(gfp))
+ tlocks = kvmalloc(size * sizeof(spinlock_t), gfp);
+ else
+ tlocks = kmalloc_array(size, sizeof(spinlock_t), gfp);
+ if (!tlocks)
+ return -ENOMEM;
+ for (i = 0; i < size; i++)
+ spin_lock_init(&tlocks[i]);
+ }
+
+ *locks = tlocks;
+ *locks_mask = size - 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(alloc_bucket_spinlocks);
+
+void free_bucket_spinlocks(spinlock_t *locks)
+{
+ kvfree(locks);
+}
+EXPORT_SYMBOL(free_bucket_spinlocks);
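
A minimal usage sketch of the new helpers (hypothetical caller; struct my_table and the sizes are illustrative, not part of this patch):

	#include <linux/spinlock.h>	/* declarations added by this series */

	/* Hash objects into a fixed array of bucket locks. */
	struct my_table {
		spinlock_t *locks;
		unsigned int locks_mask;
	};

	static int my_table_init(struct my_table *t)
	{
		/* At most 128 locks, 4 per possible CPU; GFP_KERNEL may block. */
		return alloc_bucket_spinlocks(&t->locks, &t->locks_mask,
					      128, 4, GFP_KERNEL);
	}

	static void my_table_destroy(struct my_table *t)
	{
		free_bucket_spinlocks(t->locks);
	}

A bucket's lock is then taken as spin_lock(&t->locks[hash & t->locks_mask]).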
diff --git a/lib/chacha20.c b/lib/chacha20.c
index 250ceed9ec9a..c1cc50fb68c9 100644
--- a/lib/chacha20.c
+++ b/lib/chacha20.c
@@ -16,12 +16,7 @@
#include <asm/unaligned.h>
#include <crypto/chacha20.h>
-static inline u32 rotl32(u32 v, u8 n)
-{
- return (v << n) | (v >> (sizeof(v) * 8 - n));
-}
-
-extern void chacha20_block(u32 *state, void *stream)
+void chacha20_block(u32 *state, u32 *stream)
{
u32 x[16], *out = stream;
int i;
@@ -30,45 +25,45 @@ extern void chacha20_block(u32 *state, void *stream)
x[i] = state[i];
for (i = 0; i < 20; i += 2) {
- x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 16);
- x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 16);
- x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 16);
- x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 16);
+ x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16);
+ x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16);
+ x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16);
+ x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 16);
- x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 12);
- x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 12);
- x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 12);
- x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 12);
+ x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 12);
+ x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 12);
+ x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 12);
+ x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 12);
- x[0] += x[4]; x[12] = rotl32(x[12] ^ x[0], 8);
- x[1] += x[5]; x[13] = rotl32(x[13] ^ x[1], 8);
- x[2] += x[6]; x[14] = rotl32(x[14] ^ x[2], 8);
- x[3] += x[7]; x[15] = rotl32(x[15] ^ x[3], 8);
+ x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 8);
+ x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 8);
+ x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 8);
+ x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 8);
- x[8] += x[12]; x[4] = rotl32(x[4] ^ x[8], 7);
- x[9] += x[13]; x[5] = rotl32(x[5] ^ x[9], 7);
- x[10] += x[14]; x[6] = rotl32(x[6] ^ x[10], 7);
- x[11] += x[15]; x[7] = rotl32(x[7] ^ x[11], 7);
+ x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 7);
+ x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 7);
+ x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 7);
+ x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 7);
- x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 16);
- x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 16);
- x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 16);
- x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 16);
+ x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 16);
+ x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 16);
+ x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 16);
+ x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 16);
- x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 12);
- x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 12);
- x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 12);
- x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 12);
+ x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 12);
+ x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 12);
+ x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 12);
+ x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 12);
- x[0] += x[5]; x[15] = rotl32(x[15] ^ x[0], 8);
- x[1] += x[6]; x[12] = rotl32(x[12] ^ x[1], 8);
- x[2] += x[7]; x[13] = rotl32(x[13] ^ x[2], 8);
- x[3] += x[4]; x[14] = rotl32(x[14] ^ x[3], 8);
+ x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 8);
+ x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 8);
+ x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 8);
+ x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 8);
- x[10] += x[15]; x[5] = rotl32(x[5] ^ x[10], 7);
- x[11] += x[12]; x[6] = rotl32(x[6] ^ x[11], 7);
- x[8] += x[13]; x[7] = rotl32(x[7] ^ x[8], 7);
- x[9] += x[14]; x[4] = rotl32(x[4] ^ x[9], 7);
+ x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 7);
+ x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 7);
+ x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7);
+ x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7);
}
for (i = 0; i < ARRAY_SIZE(x); i++)
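
The patch drops the file-local rotl32() in favor of the kernel-wide rol32() from <linux/bitops.h>. A sketch of the rotate semantics both share (the exact in-tree definition may differ slightly):

	/* Rotate a 32-bit word left by shift bits (sketch of rol32()). */
	static inline u32 rol32_sketch(u32 word, unsigned int shift)
	{
		return (word << shift) | (word >> ((-shift) & 31));
	}

	/* e.g. rol32_sketch(0x80000001, 1) == 0x00000003, same as the old rotl32(). */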
diff --git a/lib/dma-direct.c b/lib/dma-direct.c
new file mode 100644
index 000000000000..40b1f92f2214
--- /dev/null
+++ b/lib/dma-direct.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMA operations that map physical memory directly without using an IOMMU or
+ * flushing caches.
+ */
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/dma-direct.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-contiguous.h>
+#include <linux/pfn.h>
+
+#define DIRECT_MAPPING_ERROR 0
+
+/*
+ * Most architectures use ZONE_DMA for the first 16 Megabytes, but
+ * some use it for entirely different regions:
+ */
+#ifndef ARCH_ZONE_DMA_BITS
+#define ARCH_ZONE_DMA_BITS 24
+#endif
+
+static bool
+check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
+ const char *caller)
+{
+ if (unlikely(dev && !dma_capable(dev, dma_addr, size))) {
+ if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
+ dev_err(dev,
+ "%s: overflow %pad+%zu of device mask %llx\n",
+ caller, &dma_addr, size, *dev->dma_mask);
+ }
+ return false;
+ }
+ return true;
+}
+
+static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
+{
+ return phys_to_dma(dev, phys) + size - 1 <= dev->coherent_dma_mask;
+}
+
+void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
+{
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ int page_order = get_order(size);
+ struct page *page = NULL;
+
+ /* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
+ if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+ gfp |= GFP_DMA;
+ if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
+ gfp |= GFP_DMA32;
+
+again:
+ /* CMA can be used only in the context which permits sleeping */
+ if (gfpflags_allow_blocking(gfp)) {
+ page = dma_alloc_from_contiguous(dev, count, page_order, gfp);
+ if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+ dma_release_from_contiguous(dev, page, count);
+ page = NULL;
+ }
+ }
+ if (!page)
+ page = alloc_pages_node(dev_to_node(dev), gfp, page_order);
+
+ if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+ __free_pages(page, page_order);
+ page = NULL;
+
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
+ !(gfp & GFP_DMA)) {
+ gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
+ goto again;
+ }
+ }
+
+ if (!page)
+ return NULL;
+
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
+ memset(page_address(page), 0, size);
+ return page_address(page);
+}
+
+void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs)
+{
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ if (!dma_release_from_contiguous(dev, virt_to_page(cpu_addr), count))
+ free_pages((unsigned long)cpu_addr, get_order(size));
+}
+
+static dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ dma_addr_t dma_addr = phys_to_dma(dev, page_to_phys(page)) + offset;
+
+ if (!check_addr(dev, dma_addr, size, __func__))
+ return DIRECT_MAPPING_ERROR;
+ return dma_addr;
+}
+
+static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(sgl, sg, nents, i) {
+ BUG_ON(!sg_page(sg));
+
+ sg_dma_address(sg) = phys_to_dma(dev, sg_phys(sg));
+ if (!check_addr(dev, sg_dma_address(sg), sg->length, __func__))
+ return 0;
+ sg_dma_len(sg) = sg->length;
+ }
+
+ return nents;
+}
+
+int dma_direct_supported(struct device *dev, u64 mask)
+{
+#ifdef CONFIG_ZONE_DMA
+ if (mask < DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+ return 0;
+#else
+ /*
+ * Because 32-bit DMA masks are so common we expect every architecture
+ * to be able to satisfy them - either by not supporting more physical
+ * memory, or by providing a ZONE_DMA32. If neither is the case, the
+ * architecture needs to use an IOMMU instead of the direct mapping.
+ */
+ if (mask < DMA_BIT_MASK(32))
+ return 0;
+#endif
+ return 1;
+}
+
+static int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return dma_addr == DIRECT_MAPPING_ERROR;
+}
+
+const struct dma_map_ops dma_direct_ops = {
+ .alloc = dma_direct_alloc,
+ .free = dma_direct_free,
+ .map_page = dma_direct_map_page,
+ .map_sg = dma_direct_map_sg,
+ .dma_supported = dma_direct_supported,
+ .mapping_error = dma_direct_mapping_error,
+};
+EXPORT_SYMBOL(dma_direct_ops);
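
Once an architecture selects CONFIG_DMA_DIRECT_OPS and wires dma_direct_ops up as its default struct dma_map_ops, drivers keep using the generic DMA API unchanged. A minimal sketch (hypothetical driver; names illustrative):

	#include <linux/dma-mapping.h>

	/* On such an architecture this ends up in dma_direct_alloc(). */
	static int my_dev_alloc_ring(struct device *dev, size_t bytes,
				     void **ring, dma_addr_t *ring_dma)
	{
		*ring = dma_alloc_coherent(dev, bytes, ring_dma, GFP_KERNEL);
		return *ring ? 0 : -ENOMEM;
	}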
diff --git a/lib/dma-noop.c b/lib/dma-noop.c
deleted file mode 100644
index a10185b0c2d4..000000000000
--- a/lib/dma-noop.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * lib/dma-noop.c
- *
- * DMA operations that map to physical addresses without flushing memory.
- */
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/scatterlist.h>
-#include <linux/pfn.h>
-
-static void *dma_noop_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- unsigned long attrs)
-{
- void *ret;
-
- ret = (void *)__get_free_pages(gfp, get_order(size));
- if (ret)
- *dma_handle = virt_to_phys(ret) - PFN_PHYS(dev->dma_pfn_offset);
-
- return ret;
-}
-
-static void dma_noop_free(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t dma_addr,
- unsigned long attrs)
-{
- free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- return page_to_phys(page) + offset - PFN_PHYS(dev->dma_pfn_offset);
-}
-
-static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- int i;
- struct scatterlist *sg;
-
- for_each_sg(sgl, sg, nents, i) {
- dma_addr_t offset = PFN_PHYS(dev->dma_pfn_offset);
- void *va;
-
- BUG_ON(!sg_page(sg));
- va = sg_virt(sg);
- sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va) - offset;
- sg_dma_len(sg) = sg->length;
- }
-
- return nents;
-}
-
-const struct dma_map_ops dma_noop_ops = {
- .alloc = dma_noop_alloc,
- .free = dma_noop_free,
- .map_page = dma_noop_map_page,
- .map_sg = dma_noop_map_sg,
-};
-
-EXPORT_SYMBOL(dma_noop_ops);
diff --git a/lib/error-inject.c b/lib/error-inject.c
new file mode 100644
index 000000000000..c0d4600f4896
--- /dev/null
+++ b/lib/error-inject.c
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-2.0
+// error-inject.c: Function-level error injection table
+#include <linux/error-injection.h>
+#include <linux/debugfs.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+/* Whitelist of symbols that can be overridden for error injection. */
+static LIST_HEAD(error_injection_list);
+static DEFINE_MUTEX(ei_mutex);
+struct ei_entry {
+ struct list_head list;
+ unsigned long start_addr;
+ unsigned long end_addr;
+ int etype;
+ void *priv;
+};
+
+bool within_error_injection_list(unsigned long addr)
+{
+ struct ei_entry *ent;
+ bool ret = false;
+
+ mutex_lock(&ei_mutex);
+ list_for_each_entry(ent, &error_injection_list, list) {
+ if (addr >= ent->start_addr && addr < ent->end_addr) {
+ ret = true;
+ break;
+ }
+ }
+ mutex_unlock(&ei_mutex);
+ return ret;
+}
+
+int get_injectable_error_type(unsigned long addr)
+{
+ struct ei_entry *ent;
+
+ list_for_each_entry(ent, &error_injection_list, list) {
+ if (addr >= ent->start_addr && addr < ent->end_addr)
+ return ent->etype;
+ }
+ return EI_ETYPE_NONE;
+}
+
+/*
+ * Lookup and populate the error_injection_list.
+ *
+ * For safety reasons we only allow certain functions to be overridden with
+ * BPF error injection, so we need to populate the list of symbols that
+ * have been marked as safe for overriding.
+ */
+static void populate_error_injection_list(struct error_injection_entry *start,
+ struct error_injection_entry *end,
+ void *priv)
+{
+ struct error_injection_entry *iter;
+ struct ei_entry *ent;
+ unsigned long entry, offset = 0, size = 0;
+
+ mutex_lock(&ei_mutex);
+ for (iter = start; iter < end; iter++) {
+ entry = arch_deref_entry_point((void *)iter->addr);
+
+ if (!kernel_text_address(entry) ||
+ !kallsyms_lookup_size_offset(entry, &size, &offset)) {
+ pr_err("Failed to find error inject entry at %p\n",
+ (void *)entry);
+ continue;
+ }
+
+ ent = kmalloc(sizeof(*ent), GFP_KERNEL);
+ if (!ent)
+ break;
+ ent->start_addr = entry;
+ ent->end_addr = entry + size;
+ ent->etype = iter->etype;
+ ent->priv = priv;
+ INIT_LIST_HEAD(&ent->list);
+ list_add_tail(&ent->list, &error_injection_list);
+ }
+ mutex_unlock(&ei_mutex);
+}
+
+/* Markers of the _error_inject_whitelist section */
+extern struct error_injection_entry __start_error_injection_whitelist[];
+extern struct error_injection_entry __stop_error_injection_whitelist[];
+
+static void __init populate_kernel_ei_list(void)
+{
+ populate_error_injection_list(__start_error_injection_whitelist,
+ __stop_error_injection_whitelist,
+ NULL);
+}
+
+#ifdef CONFIG_MODULES
+static void module_load_ei_list(struct module *mod)
+{
+ if (!mod->num_ei_funcs)
+ return;
+
+ populate_error_injection_list(mod->ei_funcs,
+ mod->ei_funcs + mod->num_ei_funcs, mod);
+}
+
+static void module_unload_ei_list(struct module *mod)
+{
+ struct ei_entry *ent, *n;
+
+ if (!mod->num_ei_funcs)
+ return;
+
+ mutex_lock(&ei_mutex);
+ list_for_each_entry_safe(ent, n, &error_injection_list, list) {
+ if (ent->priv == mod) {
+ list_del_init(&ent->list);
+ kfree(ent);
+ }
+ }
+ mutex_unlock(&ei_mutex);
+}
+
+/* Module notifier callback, checking the error injection table of the module */
+static int ei_module_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct module *mod = data;
+
+ if (val == MODULE_STATE_COMING)
+ module_load_ei_list(mod);
+ else if (val == MODULE_STATE_GOING)
+ module_unload_ei_list(mod);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ei_module_nb = {
+ .notifier_call = ei_module_callback,
+ .priority = 0
+};
+
+static __init int module_ei_init(void)
+{
+ return register_module_notifier(&ei_module_nb);
+}
+#else /* !CONFIG_MODULES */
+#define module_ei_init() (0)
+#endif
+
+/*
+ * error_injection/whitelist -- shows which functions can be overridden for
+ * error injection.
+ */
+static void *ei_seq_start(struct seq_file *m, loff_t *pos)
+{
+ mutex_lock(&ei_mutex);
+ return seq_list_start(&error_injection_list, *pos);
+}
+
+static void ei_seq_stop(struct seq_file *m, void *v)
+{
+ mutex_unlock(&ei_mutex);
+}
+
+static void *ei_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ return seq_list_next(v, &error_injection_list, pos);
+}
+
+static const char *error_type_string(int etype)
+{
+ switch (etype) {
+ case EI_ETYPE_NULL:
+ return "NULL";
+ case EI_ETYPE_ERRNO:
+ return "ERRNO";
+ case EI_ETYPE_ERRNO_NULL:
+ return "ERRNO_NULL";
+ default:
+ return "(unknown)";
+ }
+}
+
+static int ei_seq_show(struct seq_file *m, void *v)
+{
+ struct ei_entry *ent = list_entry(v, struct ei_entry, list);
+
+ seq_printf(m, "%pf\t%s\n", (void *)ent->start_addr,
+ error_type_string(ent->etype));
+ return 0;
+}
+
+static const struct seq_operations ei_seq_ops = {
+ .start = ei_seq_start,
+ .next = ei_seq_next,
+ .stop = ei_seq_stop,
+ .show = ei_seq_show,
+};
+
+static int ei_open(struct inode *inode, struct file *filp)
+{
+ return seq_open(filp, &ei_seq_ops);
+}
+
+static const struct file_operations debugfs_ei_ops = {
+ .open = ei_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __init ei_debugfs_init(void)
+{
+ struct dentry *dir, *file;
+
+ dir = debugfs_create_dir("error_injection", NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ file = debugfs_create_file("list", 0444, dir, NULL, &debugfs_ei_ops);
+ if (!file) {
+ debugfs_remove(dir);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int __init init_error_injection(void)
+{
+ populate_kernel_ei_list();
+
+ if (!module_ei_init())
+ ei_debugfs_init();
+
+ return 0;
+}
+late_initcall(init_error_injection);
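
The whitelist section walked by populate_kernel_ei_list() is filled via the ALLOW_ERROR_INJECTION() annotation from the companion header of this series (include/asm-generic/error-injection.h, not shown in this diff). A sketch of how a function opts in, with hypothetical names:

	#include <linux/error-injection.h>

	/* A function whose callers already handle an errno return. */
	static int my_subsys_prepare(void *req)
	{
		/* ... work that can legitimately fail ... */
		return 0;
	}
	/* Whitelist it; injected return values are constrained to errnos. */
	ALLOW_ERROR_INJECTION(my_subsys_prepare, ERRNO);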
diff --git a/lib/errseq.c b/lib/errseq.c
index 79cc66897db4..df782418b333 100644
--- a/lib/errseq.c
+++ b/lib/errseq.c
@@ -46,14 +46,14 @@
* @eseq: errseq_t field that should be set
* @err: error to set (must be between -1 and -MAX_ERRNO)
*
- * This function sets the error in *eseq, and increments the sequence counter
+ * This function sets the error in @eseq, and increments the sequence counter
* if the last sequence was sampled at some point in the past.
*
* Any error set will always overwrite an existing error.
*
- * We do return the latest value here, primarily for debugging purposes. The
- * return value should not be used as a previously sampled value in later calls
- * as it will not have the SEEN flag set.
+ * Return: The previous value, primarily for debugging purposes. The
+ * return value should not be used as a previously sampled value in later
+ * calls as it will not have the SEEN flag set.
*/
errseq_t errseq_set(errseq_t *eseq, int err)
{
@@ -108,11 +108,13 @@ errseq_t errseq_set(errseq_t *eseq, int err)
EXPORT_SYMBOL(errseq_set);
/**
- * errseq_sample - grab current errseq_t value
- * @eseq: pointer to errseq_t to be sampled
+ * errseq_sample() - Grab current errseq_t value.
+ * @eseq: Pointer to errseq_t to be sampled.
*
* This function allows callers to sample an errseq_t value, marking it as
* "seen" if required.
+ *
+ * Return: The current errseq value.
*/
errseq_t errseq_sample(errseq_t *eseq)
{
@@ -134,15 +136,15 @@ errseq_t errseq_sample(errseq_t *eseq)
EXPORT_SYMBOL(errseq_sample);
/**
- * errseq_check - has an error occurred since a particular sample point?
- * @eseq: pointer to errseq_t value to be checked
- * @since: previously-sampled errseq_t from which to check
+ * errseq_check() - Has an error occurred since a particular sample point?
+ * @eseq: Pointer to errseq_t value to be checked.
+ * @since: Previously-sampled errseq_t from which to check.
*
- * Grab the value that eseq points to, and see if it has changed "since"
- * the given value was sampled. The "since" value is not advanced, so there
+ * Grab the value that eseq points to, and see if it has changed @since
+ * the given value was sampled. The @since value is not advanced, so there
* is no need to mark the value as seen.
*
- * Returns the latest error set in the errseq_t or 0 if it hasn't changed.
+ * Return: The latest error set in the errseq_t or 0 if it hasn't changed.
*/
int errseq_check(errseq_t *eseq, errseq_t since)
{
@@ -155,11 +157,11 @@ int errseq_check(errseq_t *eseq, errseq_t since)
EXPORT_SYMBOL(errseq_check);
/**
- * errseq_check_and_advance - check an errseq_t and advance to current value
- * @eseq: pointer to value being checked and reported
- * @since: pointer to previously-sampled errseq_t to check against and advance
+ * errseq_check_and_advance() - Check an errseq_t and advance to current value.
+ * @eseq: Pointer to value being checked and reported.
+ * @since: Pointer to previously-sampled errseq_t to check against and advance.
*
- * Grab the eseq value, and see whether it matches the value that "since"
+ * Grab the eseq value, and see whether it matches the value that @since
* points to. If it does, then just return 0.
*
* If it doesn't, then the value has changed. Set the "seen" flag, and try to
@@ -170,6 +172,9 @@ EXPORT_SYMBOL(errseq_check);
* value. The caller must provide that if necessary. Because of this, callers
* may want to do a lockless errseq_check before taking the lock and calling
* this.
+ *
+ * Return: Negative errno if one has been stored, or 0 if no new error has
+ * occurred.
*/
int errseq_check_and_advance(errseq_t *eseq, errseq_t *since)
{
diff --git a/lib/kobject.c b/lib/kobject.c
index 763d70a18941..afd5a3fc6123 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* kobject.c - library routines for handling generic kernel objects
*
@@ -5,9 +6,6 @@
* Copyright (c) 2006-2007 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (c) 2006-2007 Novell Inc.
*
- * This file is released under the GPLv2.
- *
- *
* Please see the file Documentation/kobject.txt for critical information
* about using the kobject interface.
*/
@@ -1039,6 +1037,7 @@ void *kobj_ns_grab_current(enum kobj_ns_type type)
return ns;
}
+EXPORT_SYMBOL_GPL(kobj_ns_grab_current);
const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk)
{
@@ -1074,3 +1073,4 @@ void kobj_ns_drop(enum kobj_ns_type type, void *ns)
kobj_ns_ops_tbl[type]->drop_ns(ns);
spin_unlock(&kobj_ns_type_lock);
}
+EXPORT_SYMBOL_GPL(kobj_ns_drop);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2615074d3de5..9fe6ec8fda28 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* kernel userspace event delivery
*
@@ -5,8 +6,6 @@
* Copyright (C) 2004 Novell, Inc. All rights reserved.
* Copyright (C) 2004 IBM, Inc. All rights reserved.
*
- * Licensed under the GNU GPL v2.
- *
* Authors:
* Robert Love <rml@novell.com>
* Kay Sievers <kay.sievers@vrfy.org>
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index ddd7dde87c3c..3825c30aaa36 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -65,42 +65,6 @@ EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#define ASSERT_RHT_MUTEX(HT)
#endif
-
-static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
- gfp_t gfp)
-{
- unsigned int i, size;
-#if defined(CONFIG_PROVE_LOCKING)
- unsigned int nr_pcpus = 2;
-#else
- unsigned int nr_pcpus = num_possible_cpus();
-#endif
-
- nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
- size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);
-
- /* Never allocate more than 0.5 locks per bucket */
- size = min_t(unsigned int, size, tbl->size >> 1);
-
- if (tbl->nest)
- size = min(size, 1U << tbl->nest);
-
- if (sizeof(spinlock_t) != 0) {
- if (gfpflags_allow_blocking(gfp))
- tbl->locks = kvmalloc(size * sizeof(spinlock_t), gfp);
- else
- tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
- gfp);
- if (!tbl->locks)
- return -ENOMEM;
- for (i = 0; i < size; i++)
- spin_lock_init(&tbl->locks[i]);
- }
- tbl->locks_mask = size - 1;
-
- return 0;
-}
-
static void nested_table_free(union nested_table *ntbl, unsigned int size)
{
const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *));
@@ -140,7 +104,7 @@ static void bucket_table_free(const struct bucket_table *tbl)
if (tbl->nest)
nested_bucket_table_free(tbl);
- kvfree(tbl->locks);
+ free_bucket_spinlocks(tbl->locks);
kvfree(tbl);
}
@@ -207,7 +171,7 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
gfp_t gfp)
{
struct bucket_table *tbl = NULL;
- size_t size;
+ size_t size, max_locks;
int i;
size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
@@ -227,7 +191,12 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
tbl->size = size;
- if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
+ max_locks = size >> 1;
+ if (tbl->nest)
+ max_locks = min_t(size_t, max_locks, 1U << tbl->nest);
+
+ if (alloc_bucket_spinlocks(&tbl->locks, &tbl->locks_mask, max_locks,
+ ht->p.locks_mul, gfp) < 0) {
bucket_table_free(tbl);
return NULL;
}
@@ -707,6 +676,7 @@ void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter)
iter->p = NULL;
iter->slot = 0;
iter->skip = 0;
+ iter->end_of_table = 0;
spin_lock(&ht->lock);
iter->walker.tbl =
@@ -732,7 +702,7 @@ void rhashtable_walk_exit(struct rhashtable_iter *iter)
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
/**
- * rhashtable_walk_start - Start a hash table walk
+ * rhashtable_walk_start_check - Start a hash table walk
* @iter: Hash table iterator
*
* Start a hash table walk at the current iterator position. Note that we take
@@ -744,8 +714,12 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_exit);
 * Returns -EAGAIN if resize event occurred. Note that the iterator
* will rewind back to the beginning and you may use it immediately
* by calling rhashtable_walk_next.
+ *
+ * rhashtable_walk_start is defined as an inline variant that returns
+ * void. This is preferred in cases where the caller would ignore
+ * resize events and always continue.
*/
-int rhashtable_walk_start(struct rhashtable_iter *iter)
+int rhashtable_walk_start_check(struct rhashtable_iter *iter)
__acquires(RCU)
{
struct rhashtable *ht = iter->ht;
@@ -757,28 +731,26 @@ int rhashtable_walk_start(struct rhashtable_iter *iter)
list_del(&iter->walker.list);
spin_unlock(&ht->lock);
- if (!iter->walker.tbl) {
+ if (!iter->walker.tbl && !iter->end_of_table) {
iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht);
return -EAGAIN;
}
return 0;
}
-EXPORT_SYMBOL_GPL(rhashtable_walk_start);
+EXPORT_SYMBOL_GPL(rhashtable_walk_start_check);
/**
- * rhashtable_walk_next - Return the next object and advance the iterator
- * @iter: Hash table iterator
+ * __rhashtable_walk_find_next - Find the next element in a table (or the first
+ * one in case of a new walk).
*
- * Note that you must call rhashtable_walk_stop when you are finished
- * with the walk.
+ * @iter: Hash table iterator
*
- * Returns the next object or NULL when the end of the table is reached.
+ * Returns the found object or NULL when the end of the table is reached.
*
- * Returns -EAGAIN if resize event occured. Note that the iterator
- * will rewind back to the beginning and you may continue to use it.
+ * Returns -EAGAIN if resize event occurred.
*/
-void *rhashtable_walk_next(struct rhashtable_iter *iter)
+static void *__rhashtable_walk_find_next(struct rhashtable_iter *iter)
{
struct bucket_table *tbl = iter->walker.tbl;
struct rhlist_head *list = iter->list;
@@ -786,13 +758,8 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter)
struct rhash_head *p = iter->p;
bool rhlist = ht->rhlist;
- if (p) {
- if (!rhlist || !(list = rcu_dereference(list->next))) {
- p = rcu_dereference(p->next);
- list = container_of(p, struct rhlist_head, rhead);
- }
- goto next;
- }
+ if (!tbl)
+ return NULL;
for (; iter->slot < tbl->size; iter->slot++) {
int skip = iter->skip;
@@ -836,13 +803,90 @@ next:
iter->slot = 0;
iter->skip = 0;
return ERR_PTR(-EAGAIN);
+ } else {
+ iter->end_of_table = true;
}
return NULL;
}
+
+/**
+ * rhashtable_walk_next - Return the next object and advance the iterator
+ * @iter: Hash table iterator
+ *
+ * Note that you must call rhashtable_walk_stop when you are finished
+ * with the walk.
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_next(struct rhashtable_iter *iter)
+{
+ struct rhlist_head *list = iter->list;
+ struct rhashtable *ht = iter->ht;
+ struct rhash_head *p = iter->p;
+ bool rhlist = ht->rhlist;
+
+ if (p) {
+ if (!rhlist || !(list = rcu_dereference(list->next))) {
+ p = rcu_dereference(p->next);
+ list = container_of(p, struct rhlist_head, rhead);
+ }
+ if (!rht_is_a_nulls(p)) {
+ iter->skip++;
+ iter->p = p;
+ iter->list = list;
+ return rht_obj(ht, rhlist ? &list->rhead : p);
+ }
+
+ /* At the end of this slot, switch to the next one and then
+ * find the next entry from that point.
+ */
+ iter->skip = 0;
+ iter->slot++;
+ }
+
+ return __rhashtable_walk_find_next(iter);
+}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);
/**
+ * rhashtable_walk_peek - Return the next object but don't advance the iterator
+ * @iter: Hash table iterator
+ *
+ * Returns the next object or NULL when the end of the table is reached.
+ *
+ * Returns -EAGAIN if resize event occurred. Note that the iterator
+ * will rewind back to the beginning and you may continue to use it.
+ */
+void *rhashtable_walk_peek(struct rhashtable_iter *iter)
+{
+ struct rhlist_head *list = iter->list;
+ struct rhashtable *ht = iter->ht;
+ struct rhash_head *p = iter->p;
+
+ if (p)
+ return rht_obj(ht, ht->rhlist ? &list->rhead : p);
+
+ /* No object found in current iter, find next one in the table. */
+
+ if (iter->skip) {
+ /* A nonzero skip value points to the next entry in the table
+ * beyond that last one that was found. Decrement skip so
+ * we find the current value. __rhashtable_walk_find_next
+ * will restore the original value of skip assuming that
+ * the table hasn't changed.
+ */
+ iter->skip--;
+ }
+
+ return __rhashtable_walk_find_next(iter);
+}
+EXPORT_SYMBOL_GPL(rhashtable_walk_peek);
+
+/**
* rhashtable_walk_stop - Finish a hash table walk
* @iter: Hash table iterator
*
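
With the start/peek split, the canonical walk loop looks like the following (a sketch modeled on the test_rhashtable.c caller further below; ht and the object type are illustrative):

	struct rhashtable_iter hti;
	struct my_obj *obj;

	rhashtable_walk_enter(&ht, &hti);
	rhashtable_walk_start(&hti);	/* void variant: resize events ignored */

	while ((obj = rhashtable_walk_next(&hti))) {
		if (PTR_ERR(obj) == -EAGAIN)
			continue;	/* table resized; iterator rewound */
		if (IS_ERR(obj))
			break;
		/* ... use obj ... */
	}

	rhashtable_walk_stop(&hti);
	rhashtable_walk_exit(&hti);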
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 835cc6df2776..85925aaa4fff 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -5,7 +5,6 @@
* DEBUG_PREEMPT variant of smp_processor_id().
*/
#include <linux/export.h>
-#include <linux/kallsyms.h>
#include <linux/sched.h>
notrace static unsigned int check_preemption_disabled(const char *what1,
@@ -43,7 +42,7 @@ notrace static unsigned int check_preemption_disabled(const char *what1,
printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n",
what1, what2, preempt_count() - 1, current->comm, current->pid);
- print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+ printk("caller is %pS\n", __builtin_return_address(0));
dump_stack();
out_enable:
diff --git a/lib/string.c b/lib/string.c
index 64a9e33f1daa..2c0900a5d51a 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -203,7 +203,7 @@ ssize_t strscpy(char *dest, const char *src, size_t count)
while (max >= sizeof(unsigned long)) {
unsigned long c, data;
- c = *(unsigned long *)(src+res);
+ c = read_word_at_a_time(src+res);
if (has_zero(c, &data, &constants)) {
data = prep_zero_mask(c, data, &constants);
data = create_zero_mask(data);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index cea19aaf303c..c43ec2271469 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -18,7 +18,7 @@
*/
#include <linux/cache.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
@@ -417,7 +417,7 @@ cleanup2:
return -ENOMEM;
}
-void __init swiotlb_free(void)
+void __init swiotlb_exit(void)
{
if (!io_tlb_orig_addr)
return;
@@ -586,7 +586,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
not_found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
- if (printk_ratelimit())
+ if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
return SWIOTLB_MAP_ERROR;
found:
@@ -605,7 +605,6 @@ found:
return tlb_addr;
}
-EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
/*
* Allocates bounce buffer and returns its kernel virtual address.
@@ -675,7 +674,6 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
}
spin_unlock_irqrestore(&io_tlb_lock, flags);
}
-EXPORT_SYMBOL_GPL(swiotlb_tbl_unmap_single);
void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
@@ -707,92 +705,107 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
BUG();
}
}
-EXPORT_SYMBOL_GPL(swiotlb_tbl_sync_single);
+
+static inline bool dma_coherent_ok(struct device *dev, dma_addr_t addr,
+ size_t size)
+{
+ u64 mask = DMA_BIT_MASK(32);
+
+ if (dev && dev->coherent_dma_mask)
+ mask = dev->coherent_dma_mask;
+ return addr + size - 1 <= mask;
+}
+
+static void *
+swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ unsigned long attrs)
+{
+ phys_addr_t phys_addr;
+
+ if (swiotlb_force == SWIOTLB_NO_FORCE)
+ goto out_warn;
+
+ phys_addr = swiotlb_tbl_map_single(dev,
+ swiotlb_phys_to_dma(dev, io_tlb_start),
+ 0, size, DMA_FROM_DEVICE, 0);
+ if (phys_addr == SWIOTLB_MAP_ERROR)
+ goto out_warn;
+
+ *dma_handle = swiotlb_phys_to_dma(dev, phys_addr);
+ if (!dma_coherent_ok(dev, *dma_handle, size))
+ goto out_unmap;
+
+ memset(phys_to_virt(phys_addr), 0, size);
+ return phys_to_virt(phys_addr);
+
+out_unmap:
+ dev_warn(dev, "hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+ (unsigned long long)(dev ? dev->coherent_dma_mask : 0),
+ (unsigned long long)*dma_handle);
+
+ /*
+ * DMA_TO_DEVICE to avoid memcpy in unmap_single.
+ * DMA_ATTR_SKIP_CPU_SYNC is optional.
+ */
+ swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+out_warn:
+ if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit()) {
+ dev_warn(dev,
+ "swiotlb: coherent allocation failed, size=%zu\n",
+ size);
+ dump_stack();
+ }
+ return NULL;
+}
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
- dma_addr_t dev_addr;
- void *ret;
int order = get_order(size);
- u64 dma_mask = DMA_BIT_MASK(32);
-
- if (hwdev && hwdev->coherent_dma_mask)
- dma_mask = hwdev->coherent_dma_mask;
+ unsigned long attrs = (flags & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0;
+ void *ret;
ret = (void *)__get_free_pages(flags, order);
if (ret) {
- dev_addr = swiotlb_virt_to_bus(hwdev, ret);
- if (dev_addr + size - 1 > dma_mask) {
- /*
- * The allocated memory isn't reachable by the device.
- */
- free_pages((unsigned long) ret, order);
- ret = NULL;
+ *dma_handle = swiotlb_virt_to_bus(hwdev, ret);
+ if (dma_coherent_ok(hwdev, *dma_handle, size)) {
+ memset(ret, 0, size);
+ return ret;
}
+ free_pages((unsigned long)ret, order);
}
- if (!ret) {
- /*
- * We are either out of memory or the device can't DMA to
- * GFP_DMA memory; fall back on map_single(), which
- * will grab memory from the lowest available address range.
- */
- phys_addr_t paddr = map_single(hwdev, 0, size,
- DMA_FROM_DEVICE, 0);
- if (paddr == SWIOTLB_MAP_ERROR)
- goto err_warn;
- ret = phys_to_virt(paddr);
- dev_addr = swiotlb_phys_to_dma(hwdev, paddr);
-
- /* Confirm address can be DMA'd by device */
- if (dev_addr + size - 1 > dma_mask) {
- printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
- (unsigned long long)dma_mask,
- (unsigned long long)dev_addr);
-
- /*
- * DMA_TO_DEVICE to avoid memcpy in unmap_single.
- * The DMA_ATTR_SKIP_CPU_SYNC is optional.
- */
- swiotlb_tbl_unmap_single(hwdev, paddr,
- size, DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- goto err_warn;
- }
- }
+ return swiotlb_alloc_buffer(hwdev, size, dma_handle, attrs);
+}
+EXPORT_SYMBOL(swiotlb_alloc_coherent);
- *dma_handle = dev_addr;
- memset(ret, 0, size);
+static bool swiotlb_free_buffer(struct device *dev, size_t size,
+ dma_addr_t dma_addr)
+{
+ phys_addr_t phys_addr = dma_to_phys(dev, dma_addr);
- return ret;
+ WARN_ON_ONCE(irqs_disabled());
-err_warn:
- pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
- dev_name(hwdev), size);
- dump_stack();
+ if (!is_swiotlb_buffer(phys_addr))
+ return false;
- return NULL;
+ /*
+ * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
+ * DMA_ATTR_SKIP_CPU_SYNC is optional.
+ */
+ swiotlb_tbl_unmap_single(dev, phys_addr, size, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ return true;
}
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
dma_addr_t dev_addr)
{
- phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
-
- WARN_ON(irqs_disabled());
- if (!is_swiotlb_buffer(paddr))
+ if (!swiotlb_free_buffer(hwdev, size, dev_addr))
free_pages((unsigned long)vaddr, get_order(size));
- else
- /*
- * DMA_TO_DEVICE to avoid memcpy in swiotlb_tbl_unmap_single.
- * DMA_ATTR_SKIP_CPU_SYNC is optional.
- */
- swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
}
EXPORT_SYMBOL(swiotlb_free_coherent);
@@ -868,7 +881,6 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
return swiotlb_phys_to_dma(dev, io_tlb_overflow_buffer);
}
-EXPORT_SYMBOL_GPL(swiotlb_map_page);
/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
@@ -909,7 +921,6 @@ void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
{
unmap_single(hwdev, dev_addr, size, dir, attrs);
}
-EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
/*
* Make physical memory consistent for a single streaming mode DMA translation
@@ -947,7 +958,6 @@ swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
{
swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
-EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
@@ -955,7 +965,6 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
{
swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
-EXPORT_SYMBOL(swiotlb_sync_single_for_device);
/*
* Map a set of buffers described by scatterlist in streaming mode for DMA.
@@ -1007,7 +1016,6 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
}
return nelems;
}
-EXPORT_SYMBOL(swiotlb_map_sg_attrs);
/*
* Unmap a set of streaming mode DMA translations. Again, cpu read rules
@@ -1027,7 +1035,6 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir,
attrs);
}
-EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
/*
* Make physical memory consistent for a set of streaming mode DMA translations
@@ -1055,7 +1062,6 @@ swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
{
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
-EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
@@ -1063,14 +1069,12 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
{
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
-EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
return (dma_addr == swiotlb_phys_to_dma(hwdev, io_tlb_overflow_buffer));
}
-EXPORT_SYMBOL(swiotlb_dma_mapping_error);
/*
* Return whether the given device DMA address mask can be supported
@@ -1083,4 +1087,49 @@ swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
return swiotlb_phys_to_dma(hwdev, io_tlb_end - 1) <= mask;
}
-EXPORT_SYMBOL(swiotlb_dma_supported);
+
+#ifdef CONFIG_DMA_DIRECT_OPS
+void *swiotlb_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
+{
+ void *vaddr;
+
+ /* temporary workaround: */
+ if (gfp & __GFP_NOWARN)
+ attrs |= DMA_ATTR_NO_WARN;
+
+ /*
+ * Don't print a warning when the first allocation attempt fails.
+ * swiotlb_alloc_coherent() will print a warning when the DMA memory
+ * allocation ultimately failed.
+ */
+ gfp |= __GFP_NOWARN;
+
+ vaddr = dma_direct_alloc(dev, size, dma_handle, gfp, attrs);
+ if (!vaddr)
+ vaddr = swiotlb_alloc_buffer(dev, size, dma_handle, attrs);
+ return vaddr;
+}
+
+void swiotlb_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_addr, unsigned long attrs)
+{
+ if (!swiotlb_free_buffer(dev, size, dma_addr))
+ dma_direct_free(dev, size, vaddr, dma_addr, attrs);
+}
+
+const struct dma_map_ops swiotlb_dma_ops = {
+ .mapping_error = swiotlb_dma_mapping_error,
+ .alloc = swiotlb_alloc,
+ .free = swiotlb_free,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .dma_supported = swiotlb_dma_supported,
+};
+#endif /* CONFIG_DMA_DIRECT_OPS */
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index f369889e521d..4cd9ea9b3449 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -2003,10 +2003,14 @@ static struct bpf_test tests[] = {
{ { 4, 0 }, { 5, 10 } }
},
{
- "INT: DIV by zero",
+ /* This one doesn't go through the verifier, but is raw insns
+ * as opposed to the cBPF tests from here on. Thus the div-by-0
+ * tests are done in test_verifier in the BPF kselftests.
+ */
+ "INT: DIV by -1",
.u.insns_int = {
BPF_ALU64_REG(BPF_MOV, R6, R1),
- BPF_ALU64_IMM(BPF_MOV, R7, 0),
+ BPF_ALU64_IMM(BPF_MOV, R7, -1),
BPF_LD_ABS(BPF_B, 3),
BPF_ALU32_REG(BPF_DIV, R0, R7),
BPF_EXIT_INSN(),
@@ -6109,6 +6113,110 @@ static struct bpf_test tests[] = {
{ { ETH_HLEN, 42 } },
.fill_helper = bpf_fill_ld_abs_vlan_push_pop2,
},
+ /* Checking interpreter vs JIT wrt signed extended imms. */
+ {
+ "JNE signed compare, test 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+ BPF_MOV64_REG(R2, R1),
+ BPF_ALU64_REG(BPF_AND, R2, R3),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R2, -17104896, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JNE signed compare, test 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+ BPF_MOV64_REG(R2, R1),
+ BPF_ALU64_REG(BPF_AND, R2, R3),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R2, 0xfefb0000, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JNE signed compare, test 3",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R1, 0xfefbbc12),
+ BPF_ALU32_IMM(BPF_MOV, R3, 0xffff0000),
+ BPF_ALU32_IMM(BPF_MOV, R4, 0xfefb0000),
+ BPF_MOV64_REG(R2, R1),
+ BPF_ALU64_REG(BPF_AND, R2, R3),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_REG(BPF_JNE, R2, R4, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "JNE signed compare, test 4",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, -17104896),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R1, -17104896, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "JNE signed compare, test 5",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0xfefb0000),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R1, 0xfefb0000, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
+ {
+ "JNE signed compare, test 6",
+ .u.insns_int = {
+ BPF_LD_IMM64(R1, 0x7efb0000),
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_JMP_IMM(BPF_JNE, R1, 0x7efb0000, 1),
+ BPF_ALU32_IMM(BPF_MOV, R0, 2),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 2 } },
+ },
+ {
+ "JNE signed compare, test 7",
+ .u.insns = {
+ BPF_STMT(BPF_LD | BPF_IMM, 0xffff0000),
+ BPF_STMT(BPF_MISC | BPF_TAX, 0),
+ BPF_STMT(BPF_LD | BPF_IMM, 0xfefbbc12),
+ BPF_STMT(BPF_ALU | BPF_AND | BPF_X, 0),
+ BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0xfefb0000, 1, 0),
+ BPF_STMT(BPF_RET | BPF_K, 1),
+ BPF_STMT(BPF_RET | BPF_K, 2),
+ },
+ CLASSIC | FLAG_NO_DATA,
+ {},
+ { { 0, 2 } },
+ },
};
static struct net_device dev;
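
What the "JNE signed compare" additions pin down: BPF_JMP compares are 64-bit and sign-extend their 32-bit immediate, while BPF_ALU32 moves zero-extend the destination register. Working through "test 2" above:

	/*
	 * R2  = 0xfefbbc12 & 0xffff0000  -> 0x00000000fefb0000 (zero-extended)
	 * imm = 0xfefb0000 sign-extends  -> 0xfffffffffefb0000 for the 64-bit JNE
	 * R2 != imm, so the JNE is taken and the test returns 1.
	 */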
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index 64a4c76cba2b..078a61480573 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -96,7 +96,7 @@ struct test_config {
struct device *device);
};
-struct test_config *test_fw_config;
+static struct test_config *test_fw_config;
static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
size_t size, loff_t *offset)
@@ -359,7 +359,7 @@ static ssize_t config_name_show(struct device *dev,
{
return config_test_show_str(buf, test_fw_config->name);
}
-static DEVICE_ATTR(config_name, 0644, config_name_show, config_name_store);
+static DEVICE_ATTR_RW(config_name);
static ssize_t config_num_requests_store(struct device *dev,
struct device_attribute *attr,
@@ -371,6 +371,7 @@ static ssize_t config_num_requests_store(struct device *dev,
if (test_fw_config->reqs) {
pr_err("Must call release_all_firmware prior to changing config\n");
rc = -EINVAL;
+ mutex_unlock(&test_fw_mutex);
goto out;
}
mutex_unlock(&test_fw_mutex);
@@ -388,8 +389,7 @@ static ssize_t config_num_requests_show(struct device *dev,
{
return test_dev_config_show_u8(buf, test_fw_config->num_requests);
}
-static DEVICE_ATTR(config_num_requests, 0644, config_num_requests_show,
- config_num_requests_store);
+static DEVICE_ATTR_RW(config_num_requests);
static ssize_t config_sync_direct_store(struct device *dev,
struct device_attribute *attr,
@@ -411,8 +411,7 @@ static ssize_t config_sync_direct_show(struct device *dev,
{
return test_dev_config_show_bool(buf, test_fw_config->sync_direct);
}
-static DEVICE_ATTR(config_sync_direct, 0644, config_sync_direct_show,
- config_sync_direct_store);
+static DEVICE_ATTR_RW(config_sync_direct);
static ssize_t config_send_uevent_store(struct device *dev,
struct device_attribute *attr,
@@ -428,8 +427,7 @@ static ssize_t config_send_uevent_show(struct device *dev,
{
return test_dev_config_show_bool(buf, test_fw_config->send_uevent);
}
-static DEVICE_ATTR(config_send_uevent, 0644, config_send_uevent_show,
- config_send_uevent_store);
+static DEVICE_ATTR_RW(config_send_uevent);
static ssize_t config_read_fw_idx_store(struct device *dev,
struct device_attribute *attr,
@@ -445,8 +443,7 @@ static ssize_t config_read_fw_idx_show(struct device *dev,
{
return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx);
}
-static DEVICE_ATTR(config_read_fw_idx, 0644, config_read_fw_idx_show,
- config_read_fw_idx_store);
+static DEVICE_ATTR_RW(config_read_fw_idx);
static ssize_t trigger_request_store(struct device *dev,
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index 337f408b4de6..e372b97eee13 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -694,8 +694,7 @@ static ssize_t config_test_driver_show(struct device *dev,
return config_test_show_str(&test_dev->config_mutex, buf,
config->test_driver);
}
-static DEVICE_ATTR(config_test_driver, 0644, config_test_driver_show,
- config_test_driver_store);
+static DEVICE_ATTR_RW(config_test_driver);
static ssize_t config_test_fs_store(struct device *dev,
struct device_attribute *attr,
@@ -726,8 +725,7 @@ static ssize_t config_test_fs_show(struct device *dev,
return config_test_show_str(&test_dev->config_mutex, buf,
config->test_fs);
}
-static DEVICE_ATTR(config_test_fs, 0644, config_test_fs_show,
- config_test_fs_store);
+static DEVICE_ATTR_RW(config_test_fs);
static int trigger_config_run_type(struct kmod_test_device *test_dev,
enum kmod_test_case test_case,
@@ -1012,8 +1010,7 @@ static ssize_t config_num_threads_show(struct device *dev,
return test_dev_config_show_int(test_dev, buf, config->num_threads);
}
-static DEVICE_ATTR(config_num_threads, 0644, config_num_threads_show,
- config_num_threads_store);
+static DEVICE_ATTR_RW(config_num_threads);
static ssize_t config_test_case_store(struct device *dev,
struct device_attribute *attr,
@@ -1037,8 +1034,7 @@ static ssize_t config_test_case_show(struct device *dev,
return test_dev_config_show_uint(test_dev, buf, config->test_case);
}
-static DEVICE_ATTR(config_test_case, 0644, config_test_case_show,
- config_test_case_store);
+static DEVICE_ATTR_RW(config_test_case);
static ssize_t test_result_show(struct device *dev,
struct device_attribute *attr,
@@ -1049,7 +1045,7 @@ static ssize_t test_result_show(struct device *dev,
return test_dev_config_show_int(test_dev, buf, config->test_result);
}
-static DEVICE_ATTR(test_result, 0644, test_result_show, test_result_store);
+static DEVICE_ATTR_RW(test_result);
#define TEST_KMOD_DEV_ATTR(name) &dev_attr_##name.attr
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 8e83cbdc049c..76d3667fdea2 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -162,11 +162,7 @@ static void test_bucket_stats(struct rhashtable *ht, unsigned int entries)
return;
}
- err = rhashtable_walk_start(&hti);
- if (err && err != -EAGAIN) {
- pr_warn("Test failed: iterator failed: %d\n", err);
- return;
- }
+ rhashtable_walk_start(&hti);
while ((pos = rhashtable_walk_next(&hti))) {
if (PTR_ERR(pos) == -EAGAIN) {
diff --git a/lib/usercopy.c b/lib/usercopy.c
index 15e2e6fb060e..3744b2a8e591 100644
--- a/lib/usercopy.c
+++ b/lib/usercopy.c
@@ -20,7 +20,7 @@ EXPORT_SYMBOL(_copy_from_user);
#endif
#ifndef INLINE_COPY_TO_USER
-unsigned long _copy_to_user(void *to, const void __user *from, unsigned long n)
+unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
{
might_fault();
if (likely(access_ok(VERIFY_WRITE, to, n))) {
diff --git a/lib/uuid.c b/lib/uuid.c
index 680b9fb9ba09..2290b9f001a9 100644
--- a/lib/uuid.c
+++ b/lib/uuid.c
@@ -29,15 +29,14 @@ EXPORT_SYMBOL(uuid_null);
const u8 guid_index[16] = {3,2,1,0,5,4,7,6,8,9,10,11,12,13,14,15};
const u8 uuid_index[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
-/***************************************************************
+/**
+ * generate_random_uuid - generate a random UUID
+ * @uuid: where to put the generated UUID
+ *
* Random UUID interface
*
- * Used here for a Boot ID, but can be useful for other kernel
- * drivers.
- ***************************************************************/
-
-/*
- * Generate random UUID
+ * Used to create a Boot ID or a filesystem UUID/GUID, but can be
+ * useful for other kernel drivers.
*/
void generate_random_uuid(unsigned char uuid[16])
{
@@ -73,16 +72,17 @@ void uuid_gen(uuid_t *bu)
EXPORT_SYMBOL_GPL(uuid_gen);
/**
- * uuid_is_valid - checks if UUID string valid
- * @uuid: UUID string to check
- *
- * Description:
- * It checks if the UUID string is following the format:
- * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- * where x is a hex digit.
- *
- * Return: true if input is valid UUID string.
- */
+ * uuid_is_valid - checks if a UUID string is valid
+ * @uuid: UUID string to check
+ *
+ * Description:
+ * It checks if the UUID string is following the format:
+ * xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+ *
+ * where x is a hex digit.
+ *
+ * Return: true if input is valid UUID string.
+ */
bool uuid_is_valid(const char *uuid)
{
unsigned int i;
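
For reference, the canonical 8-4-4-4-12 shape uuid_is_valid() accepts (illustrative values):

	uuid_is_valid("c1b2a3d4-e5f6-7a8b-9c0d-e1f2a3b4c5d6");	/* true */
	uuid_is_valid("c1b2a3d4e5f67a8b9c0de1f2a3b4c5d6");	/* false: no dashes */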
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 01c3957b2de6..77ee6ced11b1 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -42,7 +42,6 @@
#include "../mm/internal.h" /* For the trace_print_flags arrays */
#include <asm/page.h> /* for PAGE_SIZE */
-#include <asm/sections.h> /* for dereference_function_descriptor() */
#include <asm/byteorder.h> /* cpu_to_le16 */
#include <linux/string_helpers.h>
@@ -1834,7 +1833,8 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
*
* - 'x' For printing the address. Equivalent to "%lx".
*
- * ** Please update also Documentation/printk-formats.txt when making changes **
+ * ** When making changes please also update:
+ * Documentation/core-api/printk-formats.rst
*
* Note: The difference between 'S' and 'F' is that on ia64 and ppc64
* function pointers are really function descriptors, which contain a
@@ -1862,10 +1862,10 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
switch (*fmt) {
case 'F':
case 'f':
- ptr = dereference_function_descriptor(ptr);
- /* Fallthrough */
case 'S':
case 's':
+ ptr = dereference_symbol_descriptor(ptr);
+ /* Fallthrough */
case 'B':
return symbol_string(buf, end, ptr, spec, fmt);
case 'R':
@@ -2194,7 +2194,7 @@ set_precision(struct printf_spec *spec, int prec)
* - ``%n`` is unsupported
* - ``%p*`` is handled by pointer()
*
- * See pointer() or Documentation/printk-formats.txt for more
+ * See pointer() or Documentation/core-api/printk-formats.rst for more
* extensive description.
*
* **Please update the documentation in both places when making changes**
@@ -2516,29 +2516,34 @@ int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args)
{
struct printf_spec spec = {0};
char *str, *end;
+ int width;
str = (char *)bin_buf;
end = (char *)(bin_buf + size);
#define save_arg(type) \
-do { \
+({ \
+ unsigned long long value; \
if (sizeof(type) == 8) { \
- unsigned long long value; \
+ unsigned long long val8; \
str = PTR_ALIGN(str, sizeof(u32)); \
- value = va_arg(args, unsigned long long); \
+ val8 = va_arg(args, unsigned long long); \
if (str + sizeof(type) <= end) { \
- *(u32 *)str = *(u32 *)&value; \
- *(u32 *)(str + 4) = *((u32 *)&value + 1); \
+ *(u32 *)str = *(u32 *)&val8; \
+ *(u32 *)(str + 4) = *((u32 *)&val8 + 1); \
} \
+ value = val8; \
} else { \
- unsigned long value; \
+ unsigned int val4; \
str = PTR_ALIGN(str, sizeof(type)); \
- value = va_arg(args, int); \
+ val4 = va_arg(args, int); \
if (str + sizeof(type) <= end) \
- *(typeof(type) *)str = (type)value; \
+ *(typeof(type) *)str = (type)(long)val4; \
+ value = (unsigned long long)val4; \
} \
str += sizeof(type); \
-} while (0)
+ value; \
+})
while (*fmt) {
int read = format_decode(fmt, &spec);
@@ -2554,7 +2559,10 @@ do { \
case FORMAT_TYPE_WIDTH:
case FORMAT_TYPE_PRECISION:
- save_arg(int);
+ width = (int)save_arg(int);
+ /* Pointers may require the width */
+ if (*fmt == 'p')
+ set_field_width(&spec, width);
break;
case FORMAT_TYPE_CHAR:
@@ -2576,7 +2584,27 @@ do { \
}
case FORMAT_TYPE_PTR:
- save_arg(void *);
+ /* Dereferenced pointers must be done now */
+ switch (*fmt) {
+ /* Dereference of functions is still OK */
+ case 'S':
+ case 's':
+ case 'F':
+ case 'f':
+ save_arg(void *);
+ break;
+ default:
+ if (!isalnum(*fmt)) {
+ save_arg(void *);
+ break;
+ }
+ str = pointer(fmt, str, end, va_arg(args, void *),
+ spec);
+ if (str + 1 < end)
+ *str++ = '\0';
+ else
+ end[-1] = '\0'; /* Must be nul terminated */
+ }
/* skip all alphanumeric pointer suffixes */
while (isalnum(*fmt))
fmt++;
@@ -2728,11 +2756,39 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
break;
}
- case FORMAT_TYPE_PTR:
- str = pointer(fmt, str, end, get_arg(void *), spec);
+ case FORMAT_TYPE_PTR: {
+ bool process = false;
+ int copy, len;
+ /* Non function dereferences were already done */
+ switch (*fmt) {
+ case 'S':
+ case 's':
+ case 'F':
+ case 'f':
+ process = true;
+ break;
+ default:
+ if (!isalnum(*fmt)) {
+ process = true;
+ break;
+ }
+ /* Pointer dereference was already processed */
+ if (str < end) {
+ len = copy = strlen(args);
+ if (copy > end - str)
+ copy = end - str;
+ memcpy(str, args, copy);
+ str += len;
+ args += len;
+ }
+ }
+ if (process)
+ str = pointer(fmt, str, end, get_arg(void *), spec);
+
while (isalnum(*fmt))
fmt++;
break;
+ }
case FORMAT_TYPE_PERCENT_CHAR:
if (str < end)