Diffstat (limited to 'include')
-rw-r--r--  include/acpi/acpi_bus.h | 2
-rw-r--r--  include/acpi/cppc_acpi.h | 3
-rw-r--r--  include/asm-generic/bug.h | 20
-rw-r--r--  include/asm-generic/extable.h | 26
-rw-r--r--  include/asm-generic/mm_hooks.h | 6
-rw-r--r--  include/asm-generic/pgtable.h | 25
-rw-r--r--  include/asm-generic/uaccess.h | 135
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 5
-rw-r--r--  include/clocksource/arm_arch_timer.h | 34
-rw-r--r--  include/crypto/gf128mul.h | 87
-rw-r--r--  include/crypto/internal/acompress.h | 3
-rw-r--r--  include/crypto/internal/scompress.h | 3
-rw-r--r--  include/crypto/kpp.h | 6
-rw-r--r--  include/crypto/xts.h | 2
-rw-r--r--  include/dt-bindings/clock/gxbb-clkc.h | 3
-rw-r--r--  include/linux/acpi.h | 54
-rw-r--r--  include/linux/ata.h | 5
-rw-r--r--  include/linux/atomic.h | 46
-rw-r--r--  include/linux/backing-dev-defs.h | 8
-rw-r--r--  include/linux/backing-dev.h | 16
-rw-r--r--  include/linux/bio.h | 2
-rw-r--r--  include/linux/blk-mq.h | 21
-rw-r--r--  include/linux/blk_types.h | 47
-rw-r--r--  include/linux/blkdev.h | 76
-rw-r--r--  include/linux/bug.h | 2
-rw-r--r--  include/linux/ccp.h | 68
-rw-r--r--  include/linux/cgroup-defs.h | 12
-rw-r--r--  include/linux/cgroup.h | 8
-rw-r--r--  include/linux/clk.h | 4
-rw-r--r--  include/linux/clockchips.h | 1
-rw-r--r--  include/linux/clocksource.h | 2
-rw-r--r--  include/linux/coda_psdev.h | 1
-rw-r--r--  include/linux/compat.h | 7
-rw-r--r--  include/linux/coresight.h | 2
-rw-r--r--  include/linux/cpufreq.h | 7
-rw-r--r--  include/linux/cpumask.h | 10
-rw-r--r--  include/linux/cpuset.h | 4
-rw-r--r--  include/linux/crypto.h | 2
-rw-r--r--  include/linux/cryptohash.h | 5
-rw-r--r--  include/linux/dell-led.h | 6
-rw-r--r--  include/linux/devfreq.h | 30
-rw-r--r--  include/linux/device-mapper.h | 11
-rw-r--r--  include/linux/edac.h | 30
-rw-r--r--  include/linux/efi-bgrt.h | 5
-rw-r--r--  include/linux/efi.h | 5
-rw-r--r--  include/linux/elevator.h | 4
-rw-r--r--  include/linux/extcon.h | 21
-rw-r--r--  include/linux/flex_array.h | 67
-rw-r--r--  include/linux/fs.h | 3
-rw-r--r--  include/linux/fwnode.h | 12
-rw-r--r--  include/linux/genhd.h | 12
-rw-r--r--  include/linux/hrtimer.h | 6
-rw-r--r--  include/linux/hwmon.h | 2
-rw-r--r--  include/linux/ide.h | 2
-rw-r--r--  include/linux/inet.h | 6
-rw-r--r--  include/linux/init_task.h | 1
-rw-r--r--  include/linux/irqchip/mips-gic.h | 1
-rw-r--r--  include/linux/kernel.h | 2
-rw-r--r--  include/linux/kobject.h | 2
-rw-r--r--  include/linux/kprobes.h | 2
-rw-r--r--  include/linux/leds-pca9532.h | 4
-rw-r--r--  include/linux/leds.h | 14
-rw-r--r--  include/linux/lightnvm.h | 13
-rw-r--r--  include/linux/lockdep.h | 3
-rw-r--r--  include/linux/mailbox/brcm-message.h | 14
-rw-r--r--  include/linux/mfd/motorola-cpcap.h | 3
-rw-r--r--  include/linux/mg_disk.h | 45
-rw-r--r--  include/linux/mm.h | 18
-rw-r--r--  include/linux/mm_types.h | 5
-rw-r--r--  include/linux/module.h | 6
-rw-r--r--  include/linux/mtd/mtd.h | 5
-rw-r--r--  include/linux/nfs_fs_sb.h | 1
-rw-r--r--  include/linux/nvme-fc-driver.h | 104
-rw-r--r--  include/linux/nvme-fc.h | 68
-rw-r--r--  include/linux/nvme.h | 13
-rw-r--r--  include/linux/of.h | 4
-rw-r--r--  include/linux/pagemap.h | 4
-rw-r--r--  include/linux/percpu.h | 1
-rw-r--r--  include/linux/perf_event.h | 17
-rw-r--r--  include/linux/pm_domain.h | 1
-rw-r--r--  include/linux/poll.h | 56
-rw-r--r--  include/linux/posix-clock.h | 10
-rw-r--r--  include/linux/posix-timers.h | 20
-rw-r--r--  include/linux/power/bq24190_charger.h | 16
-rw-r--r--  include/linux/property.h | 26
-rw-r--r--  include/linux/pstore.h | 156
-rw-r--r--  include/linux/ras.h | 13
-rw-r--r--  include/linux/refcount.h | 19
-rw-r--r--  include/linux/sbitmap.h | 55
-rw-r--r--  include/linux/sched.h | 8
-rw-r--r--  include/linux/sched/rt.h | 23
-rw-r--r--  include/linux/skbuff.h | 2
-rw-r--r--  include/linux/smp.h | 12
-rw-r--r--  include/linux/splice.h | 4
-rw-r--r--  include/linux/t10-pi.h | 8
-rw-r--r--  include/linux/thread_info.h | 16
-rw-r--r--  include/linux/tick.h | 1
-rw-r--r--  include/linux/timekeeping.h | 20
-rw-r--r--  include/linux/uaccess.h | 197
-rw-r--r--  include/linux/usb/composite.h | 6
-rw-r--r--  include/linux/usb/gadget.h | 2
-rw-r--r--  include/linux/usb/xhci-dbgp.h | 29
-rw-r--r--  include/linux/workqueue.h | 5
-rw-r--r--  include/linux/writeback.h | 1
-rw-r--r--  include/rdma/ib.h | 2
-rw-r--r--  include/scsi/scsi_request.h | 2
-rw-r--r--  include/soc/fsl/qman.h | 109
-rw-r--r--  include/trace/events/block.h | 61
-rw-r--r--  include/trace/events/sched.h | 16
-rw-r--r--  include/trace/events/xen.h | 28
-rw-r--r--  include/uapi/asm-generic/unistd.h | 3
-rw-r--r--  include/uapi/linux/cryptouser.h | 10
-rw-r--r--  include/uapi/linux/elf-em.h | 1
-rw-r--r--  include/uapi/linux/elf.h | 2
-rw-r--r--  include/uapi/linux/lightnvm.h | 4
-rw-r--r--  include/uapi/linux/nbd-netlink.h | 98
-rw-r--r--  include/uapi/linux/nbd.h | 6
-rw-r--r--  include/uapi/linux/nubus.h | 4
-rw-r--r--  include/uapi/linux/perf_event.h | 33
-rw-r--r--  include/uapi/linux/stat.h | 8
-rw-r--r--  include/uapi/linux/vfio.h | 18
-rw-r--r--  include/uapi/linux/vfio_ccw.h | 24
-rw-r--r--  include/video/udlfb.h | 2
-rw-r--r--  include/xen/page.h | 2
124 files changed, 1727 insertions(+), 754 deletions(-)
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index ef0ae8aaa567..2fc678e08d8d 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -88,6 +88,7 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
}
bool acpi_dev_found(const char *hid);
+bool acpi_dev_present(const char *hid, const char *uid, s64 hrv);
#ifdef CONFIG_ACPI
@@ -386,6 +387,7 @@ struct acpi_data_node {
const char *name;
acpi_handle handle;
struct fwnode_handle fwnode;
+ struct fwnode_handle *parent;
struct acpi_device_data data;
struct list_head sibling;
struct kobject kobj;
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 427a7c3e6c75..2010c0516f27 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -103,6 +103,7 @@ struct cppc_perf_caps {
u32 highest_perf;
u32 nominal_perf;
u32 lowest_perf;
+ u32 lowest_nonlinear_perf;
};
struct cppc_perf_ctrls {
@@ -115,7 +116,7 @@ struct cppc_perf_fb_ctrs {
u64 reference;
u64 delivered;
u64 reference_perf;
- u64 ctr_wrap_time;
+ u64 wraparound_time;
};
/* Per CPU container for runtime CPPC management. */
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 6f96247226a4..d6f4aed479a1 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -5,7 +5,9 @@
#ifdef CONFIG_GENERIC_BUG
#define BUGFLAG_WARNING (1 << 0)
-#define BUGFLAG_TAINT(taint) (BUGFLAG_WARNING | ((taint) << 8))
+#define BUGFLAG_ONCE (1 << 1)
+#define BUGFLAG_DONE (1 << 2)
+#define BUGFLAG_TAINT(taint) ((taint) << 8)
#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
#endif
@@ -55,6 +57,18 @@ struct bug_entry {
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
#endif
+#ifdef __WARN_FLAGS
+#define __WARN_TAINT(taint) __WARN_FLAGS(BUGFLAG_TAINT(taint))
+#define __WARN_ONCE_TAINT(taint) __WARN_FLAGS(BUGFLAG_ONCE|BUGFLAG_TAINT(taint))
+
+#define WARN_ON_ONCE(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_ONCE_TAINT(TAINT_WARN); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
/*
* WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
* significant issues that need prompt attention if they should ever
@@ -97,7 +111,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
#endif
#ifndef WARN
-#define WARN(condition, format...) ({ \
+#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
__WARN_printf(format); \
@@ -112,6 +126,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
unlikely(__ret_warn_on); \
})
+#ifndef WARN_ON_ONCE
#define WARN_ON_ONCE(condition) ({ \
static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
@@ -122,6 +137,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
} \
unlikely(__ret_warn_once); \
})
+#endif
#define WARN_ONCE(condition, format...) ({ \
static bool __section(.data.unlikely) __warned; \
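Call sites are unaffected by the new __WARN_FLAGS path: WARN_ON_ONCE() still evaluates to the tested condition, so the usual pattern works whether the once-handling lives in the trap flags or in the generic static-flag fallback. A minimal illustration, not part of the patch:

	if (WARN_ON_ONCE(!dev))		/* warns and taints once, then yields the condition */
		return -EINVAL;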
diff --git a/include/asm-generic/extable.h b/include/asm-generic/extable.h
new file mode 100644
index 000000000000..ca14c6664027
--- /dev/null
+++ b/include/asm-generic/extable.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_GENERIC_EXTABLE_H
+#define __ASM_GENERIC_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+ unsigned long insn, fixup;
+};
+
+
+struct pt_regs;
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif
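For context, a rough sketch of how an architecture's fault path consumes these entries; search_exception_tables() and the instruction_pointer helpers are existing kernel interfaces, but the body below is illustrative rather than any particular port's code:

	int fixup_exception(struct pt_regs *regs)
	{
		const struct exception_table_entry *e;

		e = search_exception_tables(instruction_pointer(regs));
		if (!e)
			return 0;			/* no fixup: genuine fault */
		instruction_pointer_set(regs, e->fixup);	/* resume at the fixup address */
		return 1;
	}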
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index cc5d9a1405df..41e5b6784b97 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -32,10 +32,4 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
/* by default, allow everything */
return true;
}
-
-static inline bool arch_pte_access_permitted(pte_t pte, bool write)
-{
- /* by default, allow everything */
- return true;
-}
#endif /* _ASM_GENERIC_MM_HOOKS_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 1fad160f35de..7dfa767dc680 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -341,6 +341,31 @@ static inline int pte_unused(pte_t pte)
}
#endif
+#ifndef pte_access_permitted
+#define pte_access_permitted(pte, write) \
+ (pte_present(pte) && (!(write) || pte_write(pte)))
+#endif
+
+#ifndef pmd_access_permitted
+#define pmd_access_permitted(pmd, write) \
+ (pmd_present(pmd) && (!(write) || pmd_write(pmd)))
+#endif
+
+#ifndef pud_access_permitted
+#define pud_access_permitted(pud, write) \
+ (pud_present(pud) && (!(write) || pud_write(pud)))
+#endif
+
+#ifndef p4d_access_permitted
+#define p4d_access_permitted(p4d, write) \
+ (p4d_present(p4d) && (!(write) || p4d_write(p4d)))
+#endif
+
+#ifndef pgd_access_permitted
+#define pgd_access_permitted(pgd, write) \
+ (pgd_present(pgd) && (!(write) || pgd_write(pgd)))
+#endif
+
#ifndef __HAVE_ARCH_PMD_SAME
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
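The new helpers are intended for walkers that validate access on the entry value itself (fast-GUP style) rather than consulting the VMA; a hedged sketch, with the surrounding variables assumed:

	pte_t pte = READ_ONCE(*ptep);		/* ptep assumed to point at a mapped PTE */

	if (!pte_access_permitted(pte, write))
		return 0;			/* fall back to the slow path */
	page = pte_page(pte);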
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index cc6bb319e464..bbe4bb438e39 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -6,7 +6,6 @@
* on any machine that has kernel and user data in the same
* address space, e.g. all NOMMU machines.
*/
-#include <linux/sched.h>
#include <linux/string.h>
#include <asm/segment.h>
@@ -35,9 +34,6 @@ static inline void set_fs(mm_segment_t fs)
#define segment_eq(a, b) ((a).seg == (b).seg)
#endif
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))
/*
@@ -52,87 +48,6 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
#endif
/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
-/*
- * architectures with an MMU should override these two
- */
-#ifndef __copy_from_user
-static inline __must_check long __copy_from_user(void *to,
- const void __user * from, unsigned long n)
-{
- if (__builtin_constant_p(n)) {
- switch(n) {
- case 1:
- *(u8 *)to = *(u8 __force *)from;
- return 0;
- case 2:
- *(u16 *)to = *(u16 __force *)from;
- return 0;
- case 4:
- *(u32 *)to = *(u32 __force *)from;
- return 0;
-#ifdef CONFIG_64BIT
- case 8:
- *(u64 *)to = *(u64 __force *)from;
- return 0;
-#endif
- default:
- break;
- }
- }
-
- memcpy(to, (const void __force *)from, n);
- return 0;
-}
-#endif
-
-#ifndef __copy_to_user
-static inline __must_check long __copy_to_user(void __user *to,
- const void *from, unsigned long n)
-{
- if (__builtin_constant_p(n)) {
- switch(n) {
- case 1:
- *(u8 __force *)to = *(u8 *)from;
- return 0;
- case 2:
- *(u16 __force *)to = *(u16 *)from;
- return 0;
- case 4:
- *(u32 __force *)to = *(u32 *)from;
- return 0;
-#ifdef CONFIG_64BIT
- case 8:
- *(u64 __force *)to = *(u64 *)from;
- return 0;
-#endif
- default:
- break;
- }
- }
-
- memcpy((void __force *)to, from, n);
- return 0;
-}
-#endif
-
-/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
* This version just falls back to copy_{from,to}_user, which should
@@ -171,8 +86,7 @@ static inline __must_check long __copy_to_user(void __user *to,
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
- size = __copy_to_user(ptr, x, size);
- return size ? -EFAULT : size;
+ return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
}
#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
@@ -187,28 +101,28 @@ extern int __put_user_bad(void) __attribute__((noreturn));
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: { \
- unsigned char __x; \
+ unsigned char __x = 0; \
__gu_err = __get_user_fn(sizeof (*(ptr)), \
ptr, &__x); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 2: { \
- unsigned short __x; \
+ unsigned short __x = 0; \
__gu_err = __get_user_fn(sizeof (*(ptr)), \
ptr, &__x); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 4: { \
- unsigned int __x; \
+ unsigned int __x = 0; \
__gu_err = __get_user_fn(sizeof (*(ptr)), \
ptr, &__x); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 8: { \
- unsigned long long __x; \
+ unsigned long long __x = 0; \
__gu_err = __get_user_fn(sizeof (*(ptr)), \
ptr, &__x); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
@@ -233,12 +147,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
- size_t n = __copy_from_user(x, ptr, size);
- if (unlikely(n)) {
- memset(x + (size - n), 0, n);
- return -EFAULT;
- }
- return 0;
+ return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
@@ -247,36 +156,6 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
extern int __get_user_bad(void) __attribute__((noreturn));
-#ifndef __copy_from_user_inatomic
-#define __copy_from_user_inatomic __copy_from_user
-#endif
-
-#ifndef __copy_to_user_inatomic
-#define __copy_to_user_inatomic __copy_to_user
-#endif
-
-static inline long copy_from_user(void *to,
- const void __user * from, unsigned long n)
-{
- unsigned long res = n;
- might_fault();
- if (likely(access_ok(VERIFY_READ, from, n)))
- res = __copy_from_user(to, from, n);
- if (unlikely(res))
- memset(to + (n - res), 0, res);
- return res;
-}
-
-static inline long copy_to_user(void __user *to,
- const void *from, unsigned long n)
-{
- might_fault();
- if (access_ok(VERIFY_WRITE, to, n))
- return __copy_to_user(to, from, n);
- else
- return n;
-}
-
/*
* Copy a null terminated string from userspace.
*/
@@ -348,4 +227,6 @@ clear_user(void __user *to, unsigned long n)
return __clear_user(to, n);
}
+#include <asm/extable.h>
+
#endif /* __ASM_GENERIC_UACCESS_H */
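With the open-coded __copy_{from,to}_user fallbacks and the generic copy_{from,to}_user gone, a port built on this header only supplies the raw copy primitives and picks up get_user()/put_user() from here. A minimal sketch for a NOMMU machine where kernel and user share one address space (the assumption this header already makes):

	static inline unsigned long
	raw_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		memcpy(to, (const void __force *)from, n);
		return 0;	/* number of bytes NOT copied */
	}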
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 143db9c523e2..3558f4eb1a86 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -287,8 +287,6 @@
*(.rodata1) \
} \
\
- BUG_TABLE \
- \
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
@@ -857,7 +855,8 @@
READ_MOSTLY_DATA(cacheline) \
DATA_DATA \
CONSTRUCTORS \
- }
+ } \
+ BUG_TABLE
#define INIT_TEXT_SECTION(inittext_align) \
. = ALIGN(inittext_align); \
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index caedb74c9210..cc805b72994a 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -16,9 +16,13 @@
#ifndef __CLKSOURCE_ARM_ARCH_TIMER_H
#define __CLKSOURCE_ARM_ARCH_TIMER_H
+#include <linux/bitops.h>
#include <linux/timecounter.h>
#include <linux/types.h>
+#define ARCH_TIMER_TYPE_CP15 BIT(0)
+#define ARCH_TIMER_TYPE_MEM BIT(1)
+
#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK (1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT (1 << 2)
@@ -34,11 +38,27 @@ enum arch_timer_reg {
ARCH_TIMER_REG_TVAL,
};
+enum arch_timer_ppi_nr {
+ ARCH_TIMER_PHYS_SECURE_PPI,
+ ARCH_TIMER_PHYS_NONSECURE_PPI,
+ ARCH_TIMER_VIRT_PPI,
+ ARCH_TIMER_HYP_PPI,
+ ARCH_TIMER_MAX_TIMER_PPI
+};
+
+enum arch_timer_spi_nr {
+ ARCH_TIMER_PHYS_SPI,
+ ARCH_TIMER_VIRT_SPI,
+ ARCH_TIMER_MAX_TIMER_SPI
+};
+
#define ARCH_TIMER_PHYS_ACCESS 0
#define ARCH_TIMER_VIRT_ACCESS 1
#define ARCH_TIMER_MEM_PHYS_ACCESS 2
#define ARCH_TIMER_MEM_VIRT_ACCESS 3
+#define ARCH_TIMER_MEM_MAX_FRAMES 8
+
#define ARCH_TIMER_USR_PCT_ACCESS_EN (1 << 0) /* physical counter */
#define ARCH_TIMER_USR_VCT_ACCESS_EN (1 << 1) /* virtual counter */
#define ARCH_TIMER_VIRT_EVT_EN (1 << 2)
@@ -54,6 +74,20 @@ struct arch_timer_kvm_info {
int virtual_irq;
};
+struct arch_timer_mem_frame {
+ bool valid;
+ phys_addr_t cntbase;
+ size_t size;
+ int phys_irq;
+ int virt_irq;
+};
+
+struct arch_timer_mem {
+ phys_addr_t cntctlbase;
+ size_t size;
+ struct arch_timer_mem_frame frame[ARCH_TIMER_MEM_MAX_FRAMES];
+};
+
#ifdef CONFIG_ARM_ARCH_TIMER
extern u32 arch_timer_get_rate(void);
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index 592d47e565a8..0977fb18ff68 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -43,12 +43,13 @@
---------------------------------------------------------------------------
Issue Date: 31/01/2006
- An implementation of field multiplication in Galois Field GF(128)
+ An implementation of field multiplication in Galois Field GF(2^128)
*/
#ifndef _CRYPTO_GF128MUL_H
#define _CRYPTO_GF128MUL_H
+#include <asm/byteorder.h>
#include <crypto/b128ops.h>
#include <linux/slab.h>
@@ -65,7 +66,7 @@
* are left and the lsb's are right. char b[16] is an array and b[0] is
* the first octet.
*
- * 80000000 00000000 00000000 00000000 .... 00000000 00000000 00000000
+ * 10000000 00000000 00000000 00000000 .... 00000000 00000000 00000000
* b[0] b[1] b[2] b[3] b[13] b[14] b[15]
*
* Every bit is a coefficient of some power of X. We can store the bits
@@ -85,15 +86,17 @@
* Both of the above formats are easy to implement on big-endian
* machines.
*
- * EME (which is patent encumbered) uses the ble format (bits are stored
- * in big endian order and the bytes in little endian). The above buffer
- * represents X^7 in this case and the primitive polynomial is b[0] = 0x87.
+ * XTS and EME (the latter of which is patent encumbered) use the ble
+ * format (bits are stored in big endian order and the bytes in little
+ * endian). The above buffer represents X^7 in this case and the
+ * primitive polynomial is b[0] = 0x87.
*
* The common machine word-size is smaller than 128 bits, so to make
* an efficient implementation we must split into machine word sizes.
- * This file uses one 32bit for the moment. Machine endianness comes into
- * play. The lle format in relation to machine endianness is discussed
- * below by the original author of gf128mul Dr Brian Gladman.
+ * This implementation uses 64-bit words for the moment. Machine
+ * endianness comes into play. The lle format in relation to machine
+ * endianness is discussed below by the original author of gf128mul Dr
+ * Brian Gladman.
*
* Let's look at the bbe and ble format on a little endian machine.
*
@@ -127,10 +130,10 @@
* machines this will automatically aligned to wordsize and on a 64-bit
* machine also.
*/
-/* Multiply a GF128 field element by x. Field elements are held in arrays
- of bytes in which field bits 8n..8n + 7 are held in byte[n], with lower
- indexed bits placed in the more numerically significant bit positions
- within bytes.
+/* Multiply a GF(2^128) field element by x. Field elements are
+ held in arrays of bytes in which field bits 8n..8n + 7 are held in
+ byte[n], with lower indexed bits placed in the more numerically
+ significant bit positions within bytes.
On little endian machines the bit indexes translate into the bit
positions within four 32-bit words in the following way
@@ -161,8 +164,58 @@ void gf128mul_lle(be128 *a, const be128 *b);
void gf128mul_bbe(be128 *a, const be128 *b);
-/* multiply by x in ble format, needed by XTS */
-void gf128mul_x_ble(be128 *a, const be128 *b);
+/*
+ * The following functions multiply a field element by x in
+ * the polynomial field representation. They use 64-bit word operations
+ * to gain speed but compensate for machine endianness and hence work
+ * correctly on both styles of machine.
+ *
+ * They are defined here for performance.
+ */
+
+static inline u64 gf128mul_mask_from_bit(u64 x, int which)
+{
+ /* a constant-time version of 'x & ((u64)1 << which) ? (u64)-1 : 0' */
+ return ((s64)(x << (63 - which)) >> 63);
+}
+
+static inline void gf128mul_x_lle(be128 *r, const be128 *x)
+{
+ u64 a = be64_to_cpu(x->a);
+ u64 b = be64_to_cpu(x->b);
+
+ /* equivalent to gf128mul_table_le[(b << 7) & 0xff] << 48
+ * (see crypto/gf128mul.c): */
+ u64 _tt = gf128mul_mask_from_bit(b, 0) & ((u64)0xe1 << 56);
+
+ r->b = cpu_to_be64((b >> 1) | (a << 63));
+ r->a = cpu_to_be64((a >> 1) ^ _tt);
+}
+
+static inline void gf128mul_x_bbe(be128 *r, const be128 *x)
+{
+ u64 a = be64_to_cpu(x->a);
+ u64 b = be64_to_cpu(x->b);
+
+ /* equivalent to gf128mul_table_be[a >> 63] (see crypto/gf128mul.c): */
+ u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87;
+
+ r->a = cpu_to_be64((a << 1) | (b >> 63));
+ r->b = cpu_to_be64((b << 1) ^ _tt);
+}
+
+/* needed by XTS */
+static inline void gf128mul_x_ble(le128 *r, const le128 *x)
+{
+ u64 a = le64_to_cpu(x->a);
+ u64 b = le64_to_cpu(x->b);
+
+ /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */
+ u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87;
+
+ r->a = cpu_to_le64((a << 1) | (b >> 63));
+ r->b = cpu_to_le64((b << 1) ^ _tt);
+}
/* 4k table optimization */
@@ -172,8 +225,8 @@ struct gf128mul_4k {
struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
-void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t);
-void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t);
+void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t);
+void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t);
static inline void gf128mul_free_4k(struct gf128mul_4k *t)
{
@@ -194,6 +247,6 @@ struct gf128mul_64k {
*/
struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g);
void gf128mul_free_64k(struct gf128mul_64k *t);
-void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t);
+void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t);
#endif /* _CRYPTO_GF128MUL_H */
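For reference, XTS-style users advance the tweak once per 16-byte block with the new inline; a sketch only, with the surrounding variables assumed:

	le128 t;		/* tweak for block 0, already encrypted */
	unsigned int i;

	for (i = 0; i < nblocks; i++) {
		/* XOR block i with t, encrypt it, XOR with t again, then: */
		gf128mul_x_ble(&t, &t);		/* t <- t * x, in place */
	}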
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 1de2b5af12d7..51052f65cefc 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -78,4 +78,7 @@ int crypto_register_acomp(struct acomp_alg *alg);
*/
int crypto_unregister_acomp(struct acomp_alg *alg);
+int crypto_register_acomps(struct acomp_alg *algs, int count);
+void crypto_unregister_acomps(struct acomp_alg *algs, int count);
+
#endif
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index 3fda3c5655a0..ccad9b2c9bd6 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -133,4 +133,7 @@ int crypto_register_scomp(struct scomp_alg *alg);
*/
int crypto_unregister_scomp(struct scomp_alg *alg);
+int crypto_register_scomps(struct scomp_alg *algs, int count);
+void crypto_unregister_scomps(struct scomp_alg *algs, int count);
+
#endif
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 4307a2f2365f..ce8e1f79374b 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -74,7 +74,7 @@ struct crypto_kpp {
* @base: Common crypto API algorithm data structure
*/
struct kpp_alg {
- int (*set_secret)(struct crypto_kpp *tfm, void *buffer,
+ int (*set_secret)(struct crypto_kpp *tfm, const void *buffer,
unsigned int len);
int (*generate_public_key)(struct kpp_request *req);
int (*compute_shared_secret)(struct kpp_request *req);
@@ -273,8 +273,8 @@ struct kpp_secret {
*
* Return: zero on success; error code in case of error
*/
-static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, void *buffer,
- unsigned int len)
+static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
+ const void *buffer, unsigned int len)
{
struct kpp_alg *alg = crypto_kpp_alg(tfm);
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index 77b630672b2c..c0bde308b28a 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -11,7 +11,7 @@ struct blkcipher_desc;
#define XTS_BLOCK_SIZE 16
struct xts_crypt_req {
- be128 *tbuf;
+ le128 *tbuf;
unsigned int tbuflen;
void *tweak_ctx;
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 692846c7941b..63f4c2c44a1f 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -12,9 +12,10 @@
#define CLKID_FCLK_DIV4 6
#define CLKID_CLK81 12
#define CLKID_MPLL2 15
-#define CLKID_SPI 34
#define CLKID_I2C 22
#define CLKID_SAR_ADC 23
+#define CLKID_RNG0 25
+#define CLKID_SPI 34
#define CLKID_ETH 36
#define CLKID_USB0 50
#define CLKID_USB1 51
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 9b05886f9773..f729adaac18b 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -233,10 +233,6 @@ int acpi_numa_init (void);
int acpi_table_init (void);
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
-int __init acpi_parse_entries(char *id, unsigned long table_size,
- acpi_tbl_entry_handler handler,
- struct acpi_table_header *table_header,
- int entry_id, unsigned int max_entries);
int __init acpi_table_parse_entries(char *id, unsigned long table_size,
int entry_id,
acpi_tbl_entry_handler handler,
@@ -595,6 +591,13 @@ enum acpi_reconfig_event {
int acpi_reconfig_notifier_register(struct notifier_block *nb);
int acpi_reconfig_notifier_unregister(struct notifier_block *nb);
+#ifdef CONFIG_ACPI_GTDT
+int acpi_gtdt_init(struct acpi_table_header *table, int *platform_timer_count);
+int acpi_gtdt_map_ppi(int type);
+bool acpi_gtdt_c3stop(int type);
+int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count);
+#endif
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
@@ -611,6 +614,11 @@ static inline bool acpi_dev_found(const char *hid)
return false;
}
+static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
+{
+ return false;
+}
+
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return false;
@@ -997,8 +1005,16 @@ int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname,
int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
enum dev_prop_type proptype, void *val, size_t nval);
-struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
- struct fwnode_handle *subnode);
+struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode,
+ struct fwnode_handle *child);
+struct fwnode_handle *acpi_node_get_parent(struct fwnode_handle *fwnode);
+
+struct fwnode_handle *acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
+ struct fwnode_handle *prev);
+int acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
+ struct fwnode_handle **remote,
+ struct fwnode_handle **port,
+ struct fwnode_handle **endpoint);
struct acpi_probe_entry;
typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
@@ -1115,12 +1131,34 @@ static inline int acpi_dev_prop_read(struct acpi_device *adev,
return -ENXIO;
}
-static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
- struct fwnode_handle *subnode)
+static inline struct fwnode_handle *
+acpi_get_next_subnode(struct fwnode_handle *fwnode, struct fwnode_handle *child)
+{
+ return NULL;
+}
+
+static inline struct fwnode_handle *
+acpi_node_get_parent(struct fwnode_handle *fwnode)
{
return NULL;
}
+static inline struct fwnode_handle *
+acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
+ struct fwnode_handle *prev)
+{
+ return ERR_PTR(-ENXIO);
+}
+
+static inline int
+acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
+ struct fwnode_handle **remote,
+ struct fwnode_handle **port,
+ struct fwnode_handle **endpoint)
+{
+ return -ENXIO;
+}
+
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
static const void * __acpi_table_##name[] \
__attribute__((unused)) \
diff --git a/include/linux/ata.h b/include/linux/ata.h
index af6859b3a93d..ad7d9ee89ff0 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -817,11 +817,6 @@ static inline bool ata_id_sct_error_recovery_ctrl(const u16 *id)
return id[ATA_ID_SCT_CMD_XPORT] & (1 << 3) ? true : false;
}
-static inline bool ata_id_sct_write_same(const u16 *id)
-{
- return id[ATA_ID_SCT_CMD_XPORT] & (1 << 2) ? true : false;
-}
-
static inline bool ata_id_sct_long_sector_access(const u16 *id)
{
return id[ATA_ID_SCT_CMD_XPORT] & (1 << 1) ? true : false;
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index e71835bf60a9..c56be7410130 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -423,6 +423,29 @@
#endif
#endif /* atomic_cmpxchg_relaxed */
+#ifndef atomic_try_cmpxchg
+
+#define __atomic_try_cmpxchg(type, _p, _po, _n) \
+({ \
+ typeof(_po) __po = (_po); \
+ typeof(*(_po)) __r, __o = *__po; \
+ __r = atomic_cmpxchg##type((_p), __o, (_n)); \
+ if (unlikely(__r != __o)) \
+ *__po = __r; \
+ likely(__r == __o); \
+})
+
+#define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n)
+#define atomic_try_cmpxchg_relaxed(_p, _po, _n) __atomic_try_cmpxchg(_relaxed, _p, _po, _n)
+#define atomic_try_cmpxchg_acquire(_p, _po, _n) __atomic_try_cmpxchg(_acquire, _p, _po, _n)
+#define atomic_try_cmpxchg_release(_p, _po, _n) __atomic_try_cmpxchg(_release, _p, _po, _n)
+
+#else /* atomic_try_cmpxchg */
+#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg
+#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
+#define atomic_try_cmpxchg_release atomic_try_cmpxchg
+#endif /* atomic_try_cmpxchg */
+
/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed cmpxchg
@@ -996,6 +1019,29 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_cmpxchg_relaxed */
+#ifndef atomic64_try_cmpxchg
+
+#define __atomic64_try_cmpxchg(type, _p, _po, _n) \
+({ \
+ typeof(_po) __po = (_po); \
+ typeof(*(_po)) __r, __o = *__po; \
+ __r = atomic64_cmpxchg##type((_p), __o, (_n)); \
+ if (unlikely(__r != __o)) \
+ *__po = __r; \
+ likely(__r == __o); \
+})
+
+#define atomic64_try_cmpxchg(_p, _po, _n) __atomic64_try_cmpxchg(, _p, _po, _n)
+#define atomic64_try_cmpxchg_relaxed(_p, _po, _n) __atomic64_try_cmpxchg(_relaxed, _p, _po, _n)
+#define atomic64_try_cmpxchg_acquire(_p, _po, _n) __atomic64_try_cmpxchg(_acquire, _p, _po, _n)
+#define atomic64_try_cmpxchg_release(_p, _po, _n) __atomic64_try_cmpxchg(_release, _p, _po, _n)
+
+#else /* atomic64_try_cmpxchg */
+#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
+#endif /* atomic64_try_cmpxchg */
+
#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
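The calling convention for the new try_cmpxchg helpers: the expected value is passed by reference and refreshed on failure, so retry loops need no explicit reload. A small sketch, not taken from this patch:

	static bool inc_unless_negative(atomic_t *v)
	{
		int old = atomic_read(v);

		do {
			if (old < 0)
				return false;
		} while (!atomic_try_cmpxchg(v, &old, old + 1));

		return true;
	}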
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index ad955817916d..866c433e7d32 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -21,6 +21,7 @@ struct dentry;
*/
enum wb_state {
WB_registered, /* bdi_register() was done */
+ WB_shutting_down, /* wb_shutdown() in progress */
WB_writeback_running, /* Writeback is in progress */
WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
};
@@ -54,7 +55,9 @@ struct bdi_writeback_congested {
atomic_t refcnt; /* nr of attached wb's and blkg */
#ifdef CONFIG_CGROUP_WRITEBACK
- struct backing_dev_info *bdi; /* the associated bdi */
+ struct backing_dev_info *__bdi; /* the associated bdi, set to NULL
+ * on bdi unregistration. For memcg-wb
+ * internal use only! */
int blkcg_id; /* ID of the associated blkcg */
struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */
#endif
@@ -143,7 +146,7 @@ struct backing_dev_info {
congested_fn *congested_fn; /* Function pointer if device is md/dm */
void *congested_data; /* Pointer to aux data for congested func */
- char *name;
+ const char *name;
struct kref refcnt; /* Reference counter for the structure */
unsigned int capabilities; /* Device capabilities */
@@ -161,7 +164,6 @@ struct backing_dev_info {
#ifdef CONFIG_CGROUP_WRITEBACK
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
struct rb_root cgwb_congested_tree; /* their congested states */
- atomic_t usage_cnt; /* counts both cgwbs and cgwb_contested's */
#else
struct bdi_writeback_congested *wb_congested;
#endif
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c52a48cb9a66..557d84063934 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -17,8 +17,6 @@
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>
-int __must_check bdi_init(struct backing_dev_info *bdi);
-
static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
kref_get(&bdi->refcnt);
@@ -27,16 +25,18 @@ static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
void bdi_put(struct backing_dev_info *bdi);
-__printf(3, 4)
-int bdi_register(struct backing_dev_info *bdi, struct device *parent,
- const char *fmt, ...);
-int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
+__printf(2, 3)
+int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
+int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
+ va_list args);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);
-int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
-void bdi_destroy(struct backing_dev_info *bdi);
struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
+static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
+{
+ return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
+}
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
bool range_cyclic, enum wb_reason reason);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 8e521194f6fc..4931756d86d9 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -383,7 +383,7 @@ extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(int pool_entries);
-extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
+extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);
extern void __bio_clone_fast(struct bio *, struct bio *);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 9382c5da7a2e..f3e5e1de1bdb 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -15,7 +15,7 @@ struct blk_mq_hw_ctx {
unsigned long state; /* BLK_MQ_S_* flags */
} ____cacheline_aligned_in_smp;
- struct work_struct run_work;
+ struct delayed_work run_work;
cpumask_var_t cpumask;
int next_cpu;
int next_cpu_batch;
@@ -51,9 +51,6 @@ struct blk_mq_hw_ctx {
atomic_t nr_active;
- struct delayed_work delayed_run_work;
- struct delayed_work delay_work;
-
struct hlist_node cpuhp_dead;
struct kobject kobj;
@@ -82,7 +79,6 @@ struct blk_mq_tag_set {
struct blk_mq_queue_data {
struct request *rq;
- struct list_head *list;
bool last;
};
@@ -143,6 +139,14 @@ struct blk_mq_ops {
reinit_request_fn *reinit_request;
map_queues_fn *map_queues;
+
+#ifdef CONFIG_BLK_DEBUG_FS
+ /*
+ * Used by the debugfs implementation to show driver-specific
+ * information about a request.
+ */
+ void (*show_rq)(struct seq_file *m, struct request *rq);
+#endif
};
enum {
@@ -153,7 +157,6 @@ enum {
BLK_MQ_F_SHOULD_MERGE = 1 << 0,
BLK_MQ_F_TAG_SHARED = 1 << 1,
BLK_MQ_F_SG_MERGE = 1 << 2,
- BLK_MQ_F_DEFER_ISSUE = 1 << 4,
BLK_MQ_F_BLOCKING = 1 << 5,
BLK_MQ_F_NO_SCHED = 1 << 6,
BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
@@ -163,6 +166,7 @@ enum {
BLK_MQ_S_TAG_ACTIVE = 1,
BLK_MQ_S_SCHED_RESTART = 2,
BLK_MQ_S_TAG_WAITING = 3,
+ BLK_MQ_S_START_ON_RUN = 4,
BLK_MQ_MAX_DEPTH = 10240,
@@ -230,7 +234,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_abort_requeue_list(struct request_queue *q);
-void blk_mq_complete_request(struct request *rq, int error);
+void blk_mq_complete_request(struct request *rq);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -240,13 +244,14 @@ void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
-void blk_mq_freeze_queue_start(struct request_queue *q);
+void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
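A hypothetical driver hook for the new ->show_rq debugfs callback; the pdu type and its field are invented for illustration:

	static void mydrv_show_rq(struct seq_file *m, struct request *rq)
	{
		struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* hypothetical pdu */

		seq_printf(m, "tag=%d state=%d\n", rq->tag, cmd->state);
	}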
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d703acb55d0f..61339bc44400 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -17,6 +17,10 @@ struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
+struct blk_issue_stat {
+ u64 stat;
+};
+
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
* stacking drivers)
@@ -29,7 +33,7 @@ struct bio {
* top bits REQ_OP. Use
* accessors.
*/
- unsigned short bi_flags; /* status, command, etc */
+ unsigned short bi_flags; /* status, etc and bvec pool number */
unsigned short bi_ioprio;
struct bvec_iter bi_iter;
@@ -58,6 +62,10 @@ struct bio {
*/
struct io_context *bi_ioc;
struct cgroup_subsys_state *bi_css;
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+ void *bi_cg_private;
+ struct blk_issue_stat bi_issue_stat;
+#endif
#endif
union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -102,12 +110,9 @@ struct bio {
#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
#define BIO_THROTTLED 9 /* This bio has already been subjected to
* throttling rules. Don't do it again. */
-
-/*
- * Flags starting here get preserved by bio_reset() - this includes
- * BVEC_POOL_IDX()
- */
-#define BIO_RESET_BITS 10
+#define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
+ * of this bio. */
+/* See BVEC_POOL_OFFSET below before adding new flags */
/*
* We support 6 different bvec pools, the last one is magic in that it
@@ -117,13 +122,22 @@ struct bio {
#define BVEC_POOL_MAX (BVEC_POOL_NR - 1)
/*
- * Top 4 bits of bio flags indicate the pool the bvecs came from. We add
+ * Top 3 bits of bio flags indicate the pool the bvecs came from. We add
* 1 to the actual index so that 0 indicates that there are no bvecs to be
* freed.
*/
-#define BVEC_POOL_BITS (4)
+#define BVEC_POOL_BITS (3)
#define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET)
+#if (1<< BVEC_POOL_BITS) < (BVEC_POOL_NR+1)
+# error "BVEC_POOL_BITS is too small"
+#endif
+
+/*
+ * Flags starting here get preserved by bio_reset() - this includes
+ * only BVEC_POOL_IDX()
+ */
+#define BIO_RESET_BITS BVEC_POOL_OFFSET
/*
* Operations and flags common to the bio and request structures.
@@ -160,7 +174,7 @@ enum req_opf {
/* write the same sector many times */
REQ_OP_WRITE_SAME = 7,
/* write the zero filled sector many times */
- REQ_OP_WRITE_ZEROES = 8,
+ REQ_OP_WRITE_ZEROES = 9,
/* SCSI passthrough using struct scsi_request */
REQ_OP_SCSI_IN = 32,
@@ -187,6 +201,10 @@ enum req_flag_bits {
__REQ_PREFLUSH, /* request for cache flush */
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_BACKGROUND, /* background IO */
+
+ /* command specific flags for REQ_OP_WRITE_ZEROES: */
+ __REQ_NOUNMAP, /* do not free blocks when zeroing */
+
__REQ_NR_BITS, /* stops here */
};
@@ -204,6 +222,8 @@ enum req_flag_bits {
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
+#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
+
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -283,12 +303,6 @@ static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
return (cookie & BLK_QC_T_INTERNAL) != 0;
}
-struct blk_issue_stat {
- u64 time;
-};
-
-#define BLK_RQ_STAT_BATCH 64
-
struct blk_rq_stat {
s64 mean;
u64 min;
@@ -296,7 +310,6 @@ struct blk_rq_stat {
s32 nr_samples;
s32 nr_batch;
u64 batch;
- s64 time;
};
#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 01a696b0a4d3..83d28623645f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -40,15 +40,20 @@ struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_wb;
+struct blk_queue_stats;
+struct blk_stat_callback;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
+/* Must be consistent with blk_mq_poll_stats_bkt() */
+#define BLK_MQ_POLL_STATS_BKTS 16
+
/*
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
-#define BLKCG_MAX_POLS 2
+#define BLKCG_MAX_POLS 3
typedef void (rq_end_io_fn)(struct request *, int);
@@ -173,6 +178,7 @@ struct request {
struct rb_node rb_node; /* sort/lookup */
struct bio_vec special_vec;
void *completion_data;
+ int error_count; /* for legacy drivers, don't use */
};
/*
@@ -213,16 +219,14 @@ struct request {
unsigned short ioprio;
- void *special; /* opaque pointer available for LLD use */
+ unsigned int timeout;
- int errors;
+ void *special; /* opaque pointer available for LLD use */
unsigned int extra_len; /* length of alignment and padding */
unsigned long deadline;
struct list_head timeout_list;
- unsigned int timeout;
- int retries;
/*
* completion callback.
@@ -337,7 +341,6 @@ struct queue_limits {
unsigned char misaligned;
unsigned char discard_misaligned;
unsigned char cluster;
- unsigned char discard_zeroes_data;
unsigned char raid_partial_stripes_expensive;
enum blk_zoned_model zoned;
};
@@ -388,6 +391,7 @@ struct request_queue {
int nr_rqs[2]; /* # allocated [a]sync rqs */
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
+ struct blk_queue_stats *stats;
struct rq_wb *rq_wb;
/*
@@ -505,8 +509,6 @@ struct request_queue {
unsigned int nr_sorted;
unsigned int in_flight[2];
- struct blk_rq_stat rq_stats[2];
-
/*
* Number of active block driver functions for which blk_drain_queue()
* must wait. Must be incremented around functions that unlock the
@@ -516,6 +518,10 @@ struct request_queue {
unsigned int rq_timeout;
int poll_nsec;
+
+ struct blk_stat_callback *poll_cb;
+ struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];
+
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head timeout_list;
@@ -610,6 +616,8 @@ struct request_queue {
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
#define QUEUE_FLAG_DAX 26 /* device supports DAX */
#define QUEUE_FLAG_STATS 27 /* track rq completion times */
+#define QUEUE_FLAG_POLL_STATS 28 /* collecting stats for hybrid polling */
+#define QUEUE_FLAG_REGISTERED 29 /* queue has been registered to a disk */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -918,6 +926,7 @@ extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
+extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
@@ -963,7 +972,7 @@ extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, uns
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
struct rq_map_data *, const struct iov_iter *,
gfp_t);
-extern int blk_execute_rq(struct request_queue *, struct gendisk *,
+extern void blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
@@ -1081,20 +1090,6 @@ static inline unsigned int blk_rq_count_bios(struct request *rq)
}
/*
- * blk_rq_set_prio - associate a request with prio from ioc
- * @rq: request of interest
- * @ioc: target iocontext
- *
- * Assocate request prio with ioc prio so request based drivers
- * can leverage priority information.
- */
-static inline void blk_rq_set_prio(struct request *rq, struct io_context *ioc)
-{
- if (ioc)
- rq->ioprio = ioc->ioprio;
-}
-
-/*
* Request issue related functions.
*/
extern struct request *blk_peek_request(struct request_queue *q);
@@ -1120,13 +1115,10 @@ extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
-extern bool blk_end_request_cur(struct request *rq, int error);
-extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
-extern bool __blk_end_request_err(struct request *rq, int error);
extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
@@ -1329,23 +1321,27 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
return bqt->tag_index[tag];
}
+extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
+extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct page *page);
#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
-#define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */
-extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, int flags,
struct bio **biop);
-extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, struct page *page);
+
+#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
+#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
+
extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
- bool discard);
+ unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, bool discard);
+ sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
+
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
@@ -1359,7 +1355,7 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
return blkdev_issue_zeroout(sb->s_bdev,
block << (sb->s_blocksize_bits - 9),
nr_blocks << (sb->s_blocksize_bits - 9),
- gfp_mask, true);
+ gfp_mask, 0);
}
extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
@@ -1529,19 +1525,6 @@ static inline int bdev_discard_alignment(struct block_device *bdev)
return q->limits.discard_alignment;
}
-static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
-{
- if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
- return 1;
-
- return 0;
-}
-
-static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
-{
- return queue_discard_zeroes_data(bdev_get_queue(bdev));
-}
-
static inline unsigned int bdev_write_same(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
@@ -1726,6 +1709,7 @@ int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
+int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP
/*
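With discard_zeroes_data and BLKDEV_DISCARD_ZERO gone, callers request explicit zeroing behaviour through the new flags; a sketch using the signature above, with bdev/sector/nr_sects assumed:

	int err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				       BLKDEV_ZERO_NOUNMAP);	/* keep the blocks allocated */
	if (err)
		return err;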
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 5828489309bb..687b557fc5eb 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -105,7 +105,7 @@ static inline int is_warning_bug(const struct bug_entry *bug)
return bug->flags & BUGFLAG_WARNING;
}
-const struct bug_entry *find_bug(unsigned long bugaddr);
+struct bug_entry *find_bug(unsigned long bugaddr);
enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index c41b8d99dd0e..3285c944194a 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -123,6 +123,10 @@ enum ccp_aes_mode {
CCP_AES_MODE_CFB,
CCP_AES_MODE_CTR,
CCP_AES_MODE_CMAC,
+ CCP_AES_MODE_GHASH,
+ CCP_AES_MODE_GCTR,
+ CCP_AES_MODE_GCM,
+ CCP_AES_MODE_GMAC,
CCP_AES_MODE__LAST,
};
@@ -137,6 +141,9 @@ enum ccp_aes_action {
CCP_AES_ACTION_ENCRYPT,
CCP_AES_ACTION__LAST,
};
+/* Overloaded field */
+#define CCP_AES_GHASHAAD CCP_AES_ACTION_DECRYPT
+#define CCP_AES_GHASHFINAL CCP_AES_ACTION_ENCRYPT
/**
* struct ccp_aes_engine - CCP AES operation
@@ -181,6 +188,8 @@ struct ccp_aes_engine {
struct scatterlist *cmac_key; /* K1/K2 cmac key required for
* final cmac cmd */
u32 cmac_key_len; /* In bytes */
+
+ u32 aad_len; /* In bytes */
};
/***** XTS-AES engine *****/
@@ -249,6 +258,8 @@ enum ccp_sha_type {
CCP_SHA_TYPE_1 = 1,
CCP_SHA_TYPE_224,
CCP_SHA_TYPE_256,
+ CCP_SHA_TYPE_384,
+ CCP_SHA_TYPE_512,
CCP_SHA_TYPE__LAST,
};
@@ -290,6 +301,60 @@ struct ccp_sha_engine {
* final sha cmd */
};
+/***** 3DES engine *****/
+enum ccp_des3_mode {
+ CCP_DES3_MODE_ECB = 0,
+ CCP_DES3_MODE_CBC,
+ CCP_DES3_MODE_CFB,
+ CCP_DES3_MODE__LAST,
+};
+
+enum ccp_des3_type {
+ CCP_DES3_TYPE_168 = 1,
+ CCP_DES3_TYPE__LAST,
+ };
+
+enum ccp_des3_action {
+ CCP_DES3_ACTION_DECRYPT = 0,
+ CCP_DES3_ACTION_ENCRYPT,
+ CCP_DES3_ACTION__LAST,
+};
+
+/**
+ * struct ccp_des3_engine - CCP 3DES operation
+ * @type: Type of 3DES operation
+ * @mode: cipher mode
+ * @action: 3DES operation (decrypt/encrypt)
+ * @key: key to be used for this 3DES operation
+ * @key_len: length of key (in bytes)
+ * @iv: IV to be used for this 3DES operation
+ * @iv_len: length in bytes of iv
+ * @src: input data to be used for this operation
+ * @src_len: length of input data used for this operation (in bytes)
+ * @dst: output data produced by this operation
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ * - type, mode, action, key, key_len, src, dst, src_len
+ * - iv, iv_len for any mode other than ECB
+ *
+ * The iv variable is used as both input and output. On completion of the
+ * 3DES operation the new IV overwrites the old IV.
+ */
+struct ccp_des3_engine {
+ enum ccp_des3_type type;
+ enum ccp_des3_mode mode;
+ enum ccp_des3_action action;
+
+ struct scatterlist *key;
+ u32 key_len; /* In bytes */
+
+ struct scatterlist *iv;
+ u32 iv_len; /* In bytes */
+
+ struct scatterlist *src, *dst;
+ u64 src_len; /* In bytes */
+};
+
/***** RSA engine *****/
/**
* struct ccp_rsa_engine - CCP RSA operation
@@ -539,7 +604,7 @@ struct ccp_ecc_engine {
enum ccp_engine {
CCP_ENGINE_AES = 0,
CCP_ENGINE_XTS_AES_128,
- CCP_ENGINE_RSVD1,
+ CCP_ENGINE_DES3,
CCP_ENGINE_SHA,
CCP_ENGINE_RSA,
CCP_ENGINE_PASSTHRU,
@@ -587,6 +652,7 @@ struct ccp_cmd {
union {
struct ccp_aes_engine aes;
struct ccp_xts_aes_engine xts;
+ struct ccp_des3_engine des3;
struct ccp_sha_engine sha;
struct ccp_rsa_engine rsa;
struct ccp_passthru_engine passthru;
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 6a3f850cabab..21745946cae1 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -13,6 +13,7 @@
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
@@ -106,9 +107,6 @@ struct cgroup_subsys_state {
/* reference count - access via css_[try]get() and css_put() */
struct percpu_ref refcnt;
- /* PI: the parent css */
- struct cgroup_subsys_state *parent;
-
/* siblings list anchored at the parent's ->children */
struct list_head sibling;
struct list_head children;
@@ -138,6 +136,12 @@ struct cgroup_subsys_state {
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
+
+ /*
+ * PI: the parent css. Placed here for cache proximity to following
+ * fields of the containing structure.
+ */
+ struct cgroup_subsys_state *parent;
};
/*
@@ -156,7 +160,7 @@ struct css_set {
struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
/* reference count */
- atomic_t refcount;
+ refcount_t refcount;
/* the default cgroup associated with this css_set */
struct cgroup *dfl_cgrp;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index af9c86e958bd..ed2573e149fa 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -17,11 +17,11 @@
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
-#include <linux/nsproxy.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
+#include <linux/refcount.h>
#include <linux/cgroup-defs.h>
@@ -661,7 +661,7 @@ static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
#endif /* CONFIG_CGROUP_DATA */
struct cgroup_namespace {
- atomic_t count;
+ refcount_t count;
struct ns_common ns;
struct user_namespace *user_ns;
struct ucounts *ucounts;
@@ -696,12 +696,12 @@ copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
if (ns)
- atomic_inc(&ns->count);
+ refcount_inc(&ns->count);
}
static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
- if (ns && atomic_dec_and_test(&ns->count))
+ if (ns && refcount_dec_and_test(&ns->count))
free_cgroup_ns(ns);
}
diff --git a/include/linux/clk.h b/include/linux/clk.h
index e9d36b3e49de..024cd07870d0 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -132,8 +132,8 @@ int clk_get_phase(struct clk *clk);
* @q: clk compared against p
*
* Returns true if the two struct clk pointers both point to the same hardware
- * clock node. Put differently, returns true if struct clk *p and struct clk *q
- * share the same struct clk_core object.
+ * clock node. Put differently, returns true if @p and @q
+ * share the same &struct clk_core object.
*
* Returns false otherwise. Note that two NULL clks are treated as matching.
*/
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 6d7edc3082f9..acc9ce05e5f0 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -182,7 +182,6 @@ extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *e
extern void clockevents_register_device(struct clock_event_device *dev);
extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu);
-extern void clockevents_config(struct clock_event_device *dev, u32 freq);
extern void clockevents_config_and_register(struct clock_event_device *dev,
u32 freq, unsigned long min_delta,
unsigned long max_delta);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index cfc75848a35d..f2b10d9ebd04 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -120,7 +120,7 @@ struct clocksource {
#define CLOCK_SOURCE_RESELECT 0x100
/* simplify initialization of mask field */
-#define CLOCKSOURCE_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
{
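
GENMASK_ULL(h, l) sets bits h..l inclusive, so GENMASK_ULL(bits - 1, 0) yields the low `bits` bits and covers the full 64-bit case without the old conditional. An illustrative compile-time check (not part of the patch):

    static inline void clocksource_mask_check(void)
    {
            BUILD_BUG_ON(CLOCKSOURCE_MASK(32) != 0x00000000ffffffffULL);    /* low 32 bits set */
            BUILD_BUG_ON(CLOCKSOURCE_MASK(64) != ~0ULL);                    /* no ?: special case needed */
    }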
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index 5b8721efa948..31e4e1f1547c 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -15,7 +15,6 @@ struct venus_comm {
struct list_head vc_processing;
int vc_inuse;
struct super_block *vc_sb;
- struct backing_dev_info bdi;
struct mutex vc_mutex;
};
diff --git a/include/linux/compat.h b/include/linux/compat.h
index aef47be2a5c1..8172b03685f9 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -528,11 +528,6 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
asmlinkage long compat_sys_getdents(unsigned int fd,
struct compat_linux_dirent __user *dirent,
unsigned int count);
-#ifdef __ARCH_WANT_COMPAT_SYS_GETDENTS64
-asmlinkage long compat_sys_getdents64(unsigned int fd,
- struct linux_dirent64 __user *dirent,
- unsigned int count);
-#endif
asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *,
unsigned int nr_segs, unsigned int flags);
asmlinkage long compat_sys_open(const char __user *filename, int flags,
@@ -723,6 +718,8 @@ asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
int, const char __user *);
+asmlinkage long compat_sys_arch_prctl(int option, unsigned long arg2);
+
/*
* For most but not all architectures, "am I in a compat syscall?" and
* "am I a compat task?" are the same question. For architectures on which
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 2a5982c37dfb..035c16c9a505 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -201,7 +201,7 @@ struct coresight_ops_sink {
void *sink_config);
unsigned long (*reset_buffer)(struct coresight_device *csdev,
struct perf_output_handle *handle,
- void *sink_config, bool *lost);
+ void *sink_config);
void (*update_buffer)(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 87165f06a307..a5ce0bbeadb5 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -120,6 +120,13 @@ struct cpufreq_policy {
bool fast_switch_possible;
bool fast_switch_enabled;
+ /*
+ * Preferred average time interval between consecutive invocations of
+ * the driver to set the frequency for this policy. To be set by the
+ * scaling driver (0, which is the default, means no preference).
+ */
+ unsigned int transition_delay_us;
+
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
int cached_resolved_idx;
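
A scaling driver that wants governors to space out frequency updates would set the new field from its ->init() callback; a hedged sketch (my_cpufreq_init is hypothetical):

    static int my_cpufreq_init(struct cpufreq_policy *policy)
    {
            /* ask governors to wait at least 500 us between frequency changes */
            policy->transition_delay_us = 500;

            /* ... remaining policy setup ... */
            return 0;
    }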
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 96f1e88b767c..1a675604b17d 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -667,6 +667,11 @@ void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
+static inline bool cpumask_available(cpumask_var_t mask)
+{
+ return mask != NULL;
+}
+
#else
typedef struct cpumask cpumask_var_t[1];
@@ -708,6 +713,11 @@ static inline void free_cpumask_var(cpumask_var_t mask)
static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
+
+static inline bool cpumask_available(cpumask_var_t mask)
+{
+ return true;
+}
#endif /* CONFIG_CPUMASK_OFFSTACK */
/* It's common to want to use cpu_all_mask in struct member initializers,
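
With CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t is a pointer that may still be NULL before allocation, while with it disabled it is an embedded array; cpumask_available() gives callers a single test that is correct in both configurations. A small illustrative sketch (my_mask and my_mark_cpu are hypothetical):

    static cpumask_var_t my_mask;

    static void my_mark_cpu(int cpu)
    {
            /* allocate lazily; with OFFSTACK=n this branch compiles away */
            if (!cpumask_available(my_mask) &&
                !zalloc_cpumask_var(&my_mask, GFP_KERNEL))
                    return;

            cpumask_set_cpu(cpu, my_mask);
    }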
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 611fce58d670..119a3f9604b0 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -42,7 +42,7 @@ static inline void cpuset_dec(void)
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
-extern void cpuset_update_active_cpus(bool cpu_online);
+extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -155,7 +155,7 @@ static inline bool cpusets_enabled(void) { return false; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
-static inline void cpuset_update_active_cpus(bool cpu_online)
+static inline void cpuset_update_active_cpus(void)
{
partition_sched_domains(1, NULL, NULL);
}
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index c0b0cf3d2d2f..84da9978e951 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -123,7 +123,7 @@
/*
* Miscellaneous stuff.
*/
-#define CRYPTO_MAX_ALG_NAME 64
+#define CRYPTO_MAX_ALG_NAME 128
/*
* The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
index 3252799832cf..df4d3e943d28 100644
--- a/include/linux/cryptohash.h
+++ b/include/linux/cryptohash.h
@@ -10,9 +10,4 @@
void sha_init(__u32 *buf);
void sha_transform(__u32 *digest, const char *data, __u32 *W);
-#define MD5_DIGEST_WORDS 4
-#define MD5_MESSAGE_BYTES 64
-
-void md5_transform(__u32 *hash, __u32 const *in);
-
#endif
diff --git a/include/linux/dell-led.h b/include/linux/dell-led.h
index 7009b8bec77b..3f033c48071e 100644
--- a/include/linux/dell-led.h
+++ b/include/linux/dell-led.h
@@ -1,10 +1,6 @@
#ifndef __DELL_LED_H__
#define __DELL_LED_H__
-enum {
- DELL_LED_MICMUTE,
-};
-
-int dell_app_wmi_led_set(int whichled, int on);
+int dell_micmute_led_set(int on);
#endif
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index e0acb0e5243b..6c220e4ebb6b 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -27,6 +27,7 @@
#define DEVFREQ_POSTCHANGE (1)
struct devfreq;
+struct devfreq_governor;
/**
* struct devfreq_dev_status - Data given from devfreq user device to
@@ -101,35 +102,6 @@ struct devfreq_dev_profile {
};
/**
- * struct devfreq_governor - Devfreq policy governor
- * @node: list node - contains registered devfreq governors
- * @name: Governor's name
- * @immutable: Immutable flag for governor. If the value is 1,
- * this govenror is never changeable to other governor.
- * @get_target_freq: Returns desired operating frequency for the device.
- * Basically, get_target_freq will run
- * devfreq_dev_profile.get_dev_status() to get the
- * status of the device (load = busy_time / total_time).
- * If no_central_polling is set, this callback is called
- * only with update_devfreq() notified by OPP.
- * @event_handler: Callback for devfreq core framework to notify events
- * to governors. Events include per device governor
- * init and exit, opp changes out of devfreq, suspend
- * and resume of per device devfreq during device idle.
- *
- * Note that the callbacks are called with devfreq->lock locked by devfreq.
- */
-struct devfreq_governor {
- struct list_head node;
-
- const char name[DEVFREQ_NAME_LEN];
- const unsigned int immutable;
- int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
- int (*event_handler)(struct devfreq *devfreq,
- unsigned int event, void *data);
-};
-
-/**
* struct devfreq - Device devfreq structure
* @node: list node - contains the devices with devfreq that have been
* registered.
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a7e6903866fd..c7ea33e38fb9 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -255,6 +255,12 @@ struct dm_target {
unsigned num_write_same_bios;
/*
+ * The number of WRITE ZEROES bios that will be submitted to the target.
+ * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ */
+ unsigned num_write_zeroes_bios;
+
+ /*
* The minimum number of extra bytes allocated in each io for the
* target to use.
*/
@@ -290,11 +296,6 @@ struct dm_target {
* on max_io_len boundary.
*/
bool split_discard_bios:1;
-
- /*
- * Set if this target does not return zeroes on discarded blocks.
- */
- bool discard_zeroes_data_unsupported:1;
};
/* Each target can link one of these into the table */
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 5b6adf964248..8ae0f45fafd6 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -28,12 +28,10 @@ struct device;
#define EDAC_OPSTATE_INT 2
extern int edac_op_state;
-extern int edac_err_assert;
-extern atomic_t edac_handlers;
-extern int edac_handler_set(void);
-extern void edac_atomic_assert_error(void);
-extern struct bus_type *edac_get_sysfs_subsys(void);
+struct bus_type *edac_get_sysfs_subsys(void);
+int edac_get_report_status(void);
+void edac_set_report_status(int new);
enum {
EDAC_REPORTING_ENABLED,
@@ -41,28 +39,6 @@ enum {
EDAC_REPORTING_FORCE
};
-extern int edac_report_status;
-#ifdef CONFIG_EDAC
-static inline int get_edac_report_status(void)
-{
- return edac_report_status;
-}
-
-static inline void set_edac_report_status(int new)
-{
- edac_report_status = new;
-}
-#else
-static inline int get_edac_report_status(void)
-{
- return EDAC_REPORTING_DISABLED;
-}
-
-static inline void set_edac_report_status(int new)
-{
-}
-#endif
-
static inline void opstate_init(void)
{
switch (edac_op_state) {
diff --git a/include/linux/efi-bgrt.h b/include/linux/efi-bgrt.h
index 2fd3993c370b..e6f624b53c3d 100644
--- a/include/linux/efi-bgrt.h
+++ b/include/linux/efi-bgrt.h
@@ -6,6 +6,7 @@
#ifdef CONFIG_ACPI_BGRT
void efi_bgrt_init(struct acpi_table_header *table);
+int __init acpi_parse_bgrt(struct acpi_table_header *table);
/* The BGRT data itself; only valid if bgrt_image != NULL. */
extern size_t bgrt_image_size;
@@ -14,6 +15,10 @@ extern struct acpi_table_bgrt bgrt_tab;
#else /* !CONFIG_ACPI_BGRT */
static inline void efi_bgrt_init(struct acpi_table_header *table) {}
+static inline int __init acpi_parse_bgrt(struct acpi_table_header *table)
+{
+ return 0;
+}
#endif /* !CONFIG_ACPI_BGRT */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 94d34e0be24f..ec36f42a2add 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1435,9 +1435,6 @@ static inline int efi_runtime_map_copy(void *buf, size_t bufsz)
/* prototypes shared between arch specific and generic stub code */
-#define pr_efi(sys_table, msg) efi_printk(sys_table, "EFI stub: "msg)
-#define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg)
-
void efi_printk(efi_system_table_t *sys_table_arg, char *str);
void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
@@ -1471,7 +1468,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
unsigned long *load_addr,
unsigned long *load_size);
-efi_status_t efi_parse_options(char *cmdline);
+efi_status_t efi_parse_options(char const *cmdline);
efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
struct screen_info *si, efi_guid_t *proto,
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 22d39e8d4de1..3a216318ae73 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -93,6 +93,8 @@ struct blk_mq_hw_ctx;
struct elevator_mq_ops {
int (*init_sched)(struct request_queue *, struct elevator_type *);
void (*exit_sched)(struct elevator_queue *);
+ int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+ void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
@@ -104,7 +106,7 @@ struct elevator_mq_ops {
void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
bool (*has_work)(struct blk_mq_hw_ctx *);
- void (*completed_request)(struct blk_mq_hw_ctx *, struct request *);
+ void (*completed_request)(struct request *);
void (*started_request)(struct request *);
void (*requeue_request)(struct request *);
struct request *(*former_request)(struct request_queue *, struct request *);
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 7010fb01a81a..7e206a9f88db 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -236,11 +236,11 @@ extern int extcon_set_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop);
/*
- * Following APIs are to monitor every action of a notifier.
- * Registrar gets notified for every external port of a connection device.
- * Probably this could be used to debug an action of notifier; however,
- * we do not recommend to use this for normal 'notifiee' device drivers who
- * want to be notified by a specific external port of the notifier.
+ * Following APIs are to monitor the status change of the external connectors.
+ * extcon_register_notifier(*edev, id, *nb) : Register a notifier block
+ * for specific external connector of the extcon.
+ * extcon_register_notifier_all(*edev, *nb) : Register a notifier block
+ * for all supported external connectors of the extcon.
*/
extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
@@ -253,6 +253,17 @@ extern void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
+extern int extcon_register_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb);
+extern int extcon_unregister_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb);
+extern int devm_extcon_register_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb);
+extern void devm_extcon_unregister_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb);
+
/*
* Following API get the extcon device from devicetree.
* This function use phandle of devicetree to get extcon device directly.
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h
index b6efb0c64408..11366b3ff0b4 100644
--- a/include/linux/flex_array.h
+++ b/include/linux/flex_array.h
@@ -61,16 +61,83 @@ struct flex_array {
FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \
}
+/**
+ * flex_array_alloc() - Creates a flexible array.
+ * @element_size: individual object size.
+ * @total: maximum number of objects which can be stored.
+ * @flags: GFP flags
+ *
+ * Return: Returns a pointer to the new &struct flex_array, or NULL on failure.
+ */
struct flex_array *flex_array_alloc(int element_size, unsigned int total,
gfp_t flags);
+
+/**
+ * flex_array_prealloc() - Ensures that memory for the elements indexed in the
+ * range defined by start and nr_elements has been allocated.
+ * @fa: array to allocate memory to.
+ * @start: index of the first element for which to preallocate memory.
+ * @nr_elements: number of elements to be allocated.
+ * @flags: GFP flags
+ *
+ */
int flex_array_prealloc(struct flex_array *fa, unsigned int start,
unsigned int nr_elements, gfp_t flags);
+
+/**
+ * flex_array_free() - Removes all elements of a flexible array.
+ * @fa: array to be freed.
+ */
void flex_array_free(struct flex_array *fa);
+
+/**
+ * flex_array_free_parts() - Removes all elements of a flexible array, but
+ * leaves the array itself in place.
+ * @fa: array to be emptied.
+ */
void flex_array_free_parts(struct flex_array *fa);
+
+/**
+ * flex_array_put() - Stores data into a flexible array.
+ * @fa: array where element is to be stored.
+ * @element_nr: position to copy, must be less than the maximum specified when
+ * the array was created.
+ * @src: data source to be copied into the array.
+ * @flags: GFP flags
+ *
+ * Return: Returns zero on success, a negative error code otherwise.
+ */
int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
gfp_t flags);
+
+/**
+ * flex_array_clear() - Clears an individual element in the array, sets the
+ * given element to FLEX_ARRAY_FREE.
+ * @element_nr: element position to clear.
+ * @fa: array to which element to be cleared belongs.
+ *
+ * Return: Returns zero on success, -EINVAL otherwise.
+ */
int flex_array_clear(struct flex_array *fa, unsigned int element_nr);
+
+/**
+ * flex_array_get() - Retrieves data from a flexible array.
+ *
+ * @element_nr: Element position to retrieve data from.
+ * @fa: array from which data is to be retrieved.
+ *
+ * Return: Returns a pointer to the data element, or NULL if that
+ * particular element has never been allocated.
+ */
void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
+
+/**
+ * flex_array_shrink() - Reduces the allocated size of an array.
+ * @fa: array to shrink.
+ *
+ * Return: Returns number of pages of memory actually freed.
+ *
+ */
int flex_array_shrink(struct flex_array *fa);
#define flex_array_put_ptr(fa, nr, src, gfp) \
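
The kernel-doc added above documents the existing API; a short usage sketch tying the calls together (illustrative only, not part of the patch):

    #include <linux/flex_array.h>

    static int flex_array_example(void)
    {
            struct flex_array *fa;
            u32 val = 42, *p;
            int err;

            fa = flex_array_alloc(sizeof(u32), 128, GFP_KERNEL);    /* room for up to 128 elements */
            if (!fa)
                    return -ENOMEM;

            err = flex_array_prealloc(fa, 0, 128, GFP_KERNEL);      /* back every slot with pages */
            if (!err)
                    err = flex_array_put(fa, 5, &val, GFP_KERNEL);  /* copy val into slot 5 */
            if (!err) {
                    p = flex_array_get(fa, 5);      /* NULL if the slot was never backed */
                    WARN_ON(!p || *p != val);
            }

            flex_array_free(fa);
            return err;
    }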
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7251f7bb45e8..30e5c14bd743 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2121,6 +2121,9 @@ extern int vfs_ustat(dev_t, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
extern bool our_mnt(struct vfsmount *mnt);
+extern __printf(2, 3)
+int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
+extern int super_setup_bdi(struct super_block *sb);
extern int current_umask(void);
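
These helpers let a filesystem allocate and register its own backing_dev_info at mount time instead of embedding one in its private structures (see the coda/nfs/mtd removals elsewhere in this series). A hedged sketch of a fill_super using it (myfs is hypothetical):

    static int myfs_fill_super(struct super_block *sb, void *data, int silent)
    {
            int err;

            /* allocates a per-sb bdi and points sb->s_bdi at it */
            err = super_setup_bdi_name(sb, "myfs-%u", sb->s_dev);
            if (err)
                    return err;

            /* ... rest of superblock setup ... */
            return 0;
    }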
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 8bd28ce6d76e..3dff2398a5f0 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -27,4 +27,16 @@ struct fwnode_handle {
struct fwnode_handle *secondary;
};
+/**
+ * struct fwnode_endpoint - Fwnode graph endpoint
+ * @port: Port number
+ * @id: Endpoint id
+ * @local_fwnode: reference to the related fwnode
+ */
+struct fwnode_endpoint {
+ unsigned int port;
+ unsigned int id;
+ const struct fwnode_handle *local_fwnode;
+};
+
#endif
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 76f39754e7b0..acff9437e5c3 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -159,11 +159,11 @@ struct badblocks;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct blk_integrity {
- struct blk_integrity_profile *profile;
- unsigned char flags;
- unsigned char tuple_size;
- unsigned char interval_exp;
- unsigned char tag_size;
+ const struct blk_integrity_profile *profile;
+ unsigned char flags;
+ unsigned char tuple_size;
+ unsigned char interval_exp;
+ unsigned char tag_size;
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */
@@ -722,11 +722,9 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
#if defined(CONFIG_BLK_DEV_INTEGRITY)
extern void blk_integrity_add(struct gendisk *);
extern void blk_integrity_del(struct gendisk *);
-extern void blk_integrity_revalidate(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline void blk_integrity_add(struct gendisk *disk) { }
static inline void blk_integrity_del(struct gendisk *disk) { }
-static inline void blk_integrity_revalidate(struct gendisk *disk) { }
#endif /* CONFIG_BLK_DEV_INTEGRITY */
#else /* CONFIG_BLOCK */
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 249e579ecd4c..8c5b10eb7265 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -276,8 +276,6 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
return timer->base->cpu_base->hres_active;
}
-extern void hrtimer_peek_ahead_timers(void);
-
/*
* The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an
@@ -300,8 +298,6 @@ extern unsigned int hrtimer_resolution;
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-static inline void hrtimer_peek_ahead_timers(void) { }
-
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
return 0;
@@ -456,7 +452,7 @@ static inline u64 hrtimer_forward_now(struct hrtimer *timer,
}
/* Precise sleep: */
-extern long hrtimer_nanosleep(struct timespec *rqtp,
+extern long hrtimer_nanosleep(struct timespec64 *rqtp,
struct timespec __user *rmtp,
const enum hrtimer_mode mode,
const clockid_t clockid);
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 88b673749121..ceb751987c40 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -337,7 +337,7 @@ struct hwmon_ops {
int (*read)(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val);
int (*read_string)(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, char **str);
+ u32 attr, int channel, const char **str);
int (*write)(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val);
};
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 2f51c1724b5a..6980ca322074 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -88,7 +88,7 @@ static inline bool ata_pm_request(struct request *rq)
ide_req(rq)->type == ATA_PRIV_PM_RESUME);
}
-/* Error codes returned in rq->errors to the higher part of the driver. */
+/* Error codes returned in result to the higher part of the driver. */
enum {
IDE_DRV_ERROR_GENERAL = 101,
IDE_DRV_ERROR_FILEMARK = 102,
diff --git a/include/linux/inet.h b/include/linux/inet.h
index 4cca05c9678e..636ebe87e6f8 100644
--- a/include/linux/inet.h
+++ b/include/linux/inet.h
@@ -43,6 +43,8 @@
#define _LINUX_INET_H
#include <linux/types.h>
+#include <net/net_namespace.h>
+#include <linux/socket.h>
/*
* These mimic similar macros defined in user-space for inet_ntop(3).
@@ -54,4 +56,8 @@
extern __be32 in_aton(const char *str);
extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
+
+extern int inet_pton_with_scope(struct net *net, unsigned short af,
+ const char *src, const char *port, struct sockaddr_storage *addr);
+
#endif /* _LINUX_INET_H */
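
inet_pton_with_scope() parses an address/port string pair into a struct sockaddr_storage, resolving IPv6 scope ids ("%eth0"-style suffixes) against the given namespace. An illustrative wrapper (my_parse_addr is hypothetical):

    static int my_parse_addr(const char *ip, const char *port,
                             struct sockaddr_storage *addr)
    {
            /* AF_UNSPEC: accept either IPv4 or IPv6 input */
            return inet_pton_with_scope(&init_net, AF_UNSPEC, ip, port, addr);
    }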
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 91d9049f0039..2c487e0879d5 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -181,6 +181,7 @@ extern struct cred init_cred;
#ifdef CONFIG_RT_MUTEXES
# define INIT_RT_MUTEXES(tsk) \
.pi_waiters = RB_ROOT, \
+ .pi_top_task = NULL, \
.pi_waiters_leftmost = NULL,
#else
# define INIT_RT_MUTEXES(tsk)
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 7b49c71c968b..2b0e56619e53 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -258,7 +258,6 @@ extern unsigned int gic_present;
extern void gic_init(unsigned long gic_base_addr,
unsigned long gic_addrspace_size, unsigned int cpu_vec,
unsigned int irqbase);
-extern void gic_clocksource_init(unsigned int);
extern u64 gic_read_count(void);
extern unsigned int gic_get_count_width(void);
extern u64 gic_read_compare(void);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 4c26dc3a8295..13bc08aba704 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -47,6 +47,7 @@
/* @a is a power of 2 value */
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
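
ALIGN_DOWN() rounds toward zero where ALIGN() rounds up; both require @a to be a power of two. A compile-time illustration (not part of the patch):

    static inline void align_down_examples(void)
    {
            BUILD_BUG_ON(ALIGN(0x1234, 0x1000)      != 0x2000);     /* round up */
            BUILD_BUG_ON(ALIGN_DOWN(0x1234, 0x1000) != 0x1000);     /* round down */
            BUILD_BUG_ON(ALIGN_DOWN(0x1000, 0x1000) != 0x1000);     /* already aligned: unchanged */
    }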
@@ -438,6 +439,7 @@ extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
extern bool parse_option_str(const char *str, const char *option);
+extern char *next_arg(char *args, char **param, char **val);
extern int core_kernel_text(unsigned long addr);
extern int core_kernel_data(unsigned long addr);
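
next_arg() splits one "param=value" token off a writable, command-line style string and returns a pointer to the rest, so callers can loop over options. A hedged sketch (parse_my_options is hypothetical):

    static void parse_my_options(char *cmdline)     /* parsed in place, so it must be writable */
    {
            char *param, *val;

            cmdline = skip_spaces(cmdline);
            while (*cmdline) {
                    cmdline = next_arg(cmdline, &param, &val);
                    /* param is the option name; val is its value, or NULL for flag-style options */
            }
    }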
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index e6284591599e..ca85cb80e99a 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -108,6 +108,8 @@ extern int __must_check kobject_rename(struct kobject *, const char *new_name);
extern int __must_check kobject_move(struct kobject *, struct kobject *);
extern struct kobject *kobject_get(struct kobject *kobj);
+extern struct kobject * __must_check kobject_get_unless_zero(
+ struct kobject *kobj);
extern void kobject_put(struct kobject *kobj);
extern const void *kobject_namespace(struct kobject *kobj);
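
kobject_get_unless_zero() only takes a reference if the count has not already dropped to zero, which is the safe way to grab an object found under a lock that does not pin its lifetime. An illustrative lookup (struct my_obj and my_obj_find are hypothetical):

    struct my_obj {
            struct kobject kobj;
            struct list_head node;
            int id;
    };

    static struct my_obj *my_obj_find(struct list_head *list, spinlock_t *lock, int id)
    {
            struct my_obj *obj, *found = NULL;

            spin_lock(lock);
            list_for_each_entry(obj, list, node) {
                    /* returns NULL if a concurrent final kobject_put() got there first */
                    if (obj->id == id && kobject_get_unless_zero(&obj->kobj)) {
                            found = obj;
                            break;
                    }
            }
            spin_unlock(lock);

            return found;
    }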
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index c328e4f7dcad..47e4da5b4fa2 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -267,6 +267,8 @@ extern int arch_init_kprobes(void);
extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
+extern bool arch_function_offset_within_entry(unsigned long offset);
+extern bool function_offset_within_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
extern bool within_kprobe_blacklist(unsigned long addr);
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h
index d215b4561180..5e240b2b4d58 100644
--- a/include/linux/leds-pca9532.h
+++ b/include/linux/leds-pca9532.h
@@ -22,7 +22,8 @@ enum pca9532_state {
PCA9532_OFF = 0x0,
PCA9532_ON = 0x1,
PCA9532_PWM0 = 0x2,
- PCA9532_PWM1 = 0x3
+ PCA9532_PWM1 = 0x3,
+ PCA9532_KEEP = 0xff,
};
struct pca9532_led {
@@ -44,4 +45,3 @@ struct pca9532_platform_data {
};
#endif /* __LINUX_PCA9532_H */
-
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 38c0bd7ca107..64c56d454f7d 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -122,10 +122,16 @@ struct led_classdev {
struct mutex led_access;
};
-extern int led_classdev_register(struct device *parent,
- struct led_classdev *led_cdev);
-extern int devm_led_classdev_register(struct device *parent,
- struct led_classdev *led_cdev);
+extern int of_led_classdev_register(struct device *parent,
+ struct device_node *np,
+ struct led_classdev *led_cdev);
+#define led_classdev_register(parent, led_cdev) \
+ of_led_classdev_register(parent, NULL, led_cdev)
+extern int devm_of_led_classdev_register(struct device *parent,
+ struct device_node *np,
+ struct led_classdev *led_cdev);
+#define devm_led_classdev_register(parent, led_cdev) \
+ devm_of_led_classdev_register(parent, NULL, led_cdev)
extern void led_classdev_unregister(struct led_classdev *led_cdev);
extern void devm_led_classdev_unregister(struct device *parent,
struct led_classdev *led_cdev);
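
Existing callers are unchanged: the two macros expand to the new of_ variants with a NULL device_node. A DT-aware driver can pass its own node so the LED core can associate the class device with it; a hedged sketch (my_led_cdev and my_led_probe are hypothetical):

    static struct led_classdev my_led_cdev = {
            .name = "myboard:green:status",
    };

    static int my_led_probe(struct platform_device *pdev)
    {
            /* equivalent to devm_led_classdev_register() apart from the explicit node */
            return devm_of_led_classdev_register(&pdev->dev, pdev->dev.of_node,
                                                 &my_led_cdev);
    }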
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index ca45e4a088a9..7dfa56ebbc6d 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -56,7 +56,6 @@ typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
@@ -70,7 +69,6 @@ struct nvm_dev_ops {
nvm_op_set_bb_fn *set_bb_tbl;
nvm_submit_io_fn *submit_io;
- nvm_erase_blk_fn *erase_block;
nvm_create_dma_pool_fn *create_dma_pool;
nvm_destroy_dma_pool_fn *destroy_dma_pool;
@@ -125,7 +123,7 @@ enum {
/* NAND Access Modes */
NVM_IO_SUSPEND = 0x80,
NVM_IO_SLC_MODE = 0x100,
- NVM_IO_SCRAMBLE_DISABLE = 0x200,
+ NVM_IO_SCRAMBLE_ENABLE = 0x200,
/* Block Types */
NVM_BLK_T_FREE = 0x0,
@@ -438,7 +436,8 @@ static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
-typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *);
+typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
+ int flags);
typedef void (nvm_tgt_exit_fn)(void *);
typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
@@ -479,10 +478,10 @@ extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
int, int);
extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
-extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
+extern int nvm_erase_sync(struct nvm_tgt_dev *, struct ppa_addr *, int);
+extern int nvm_set_rqd_ppalist(struct nvm_tgt_dev *, struct nvm_rq *,
const struct ppa_addr *, int, int);
-extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
-extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int);
+extern void nvm_free_rqd_ppalist(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
void *);
extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 1e327bb80838..fffe49f188e6 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -361,6 +361,8 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
+extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
+
extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
@@ -411,6 +413,7 @@ static inline void lockdep_on(void)
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lock_release(l, n, i) do { } while (0)
+# define lock_downgrade(l, i) do { } while (0)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_set_current_reclaim_state(g) do { } while (0)
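
lock_downgrade() is the lockdep annotation for primitives that convert an exclusive hold into a shared one without dropping the lock (the pattern downgrade_write() implements for rw_semaphores). A hedged sketch of how a custom lock might annotate its downgrade path (struct my_rwlock and __my_downgrade are hypothetical):

    static void my_downgrade(struct my_rwlock *lock)
    {
            /* tell lockdep the exclusive hold is now a shared one */
            lock_downgrade(&lock->dep_map, _RET_IP_);
            __my_downgrade(lock);
    }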
diff --git a/include/linux/mailbox/brcm-message.h b/include/linux/mailbox/brcm-message.h
index 6b55c938b401..c20b4843fc2d 100644
--- a/include/linux/mailbox/brcm-message.h
+++ b/include/linux/mailbox/brcm-message.h
@@ -16,6 +16,7 @@
enum brcm_message_type {
BRCM_MESSAGE_UNKNOWN = 0,
+ BRCM_MESSAGE_BATCH,
BRCM_MESSAGE_SPU,
BRCM_MESSAGE_SBA,
BRCM_MESSAGE_MAX,
@@ -23,24 +24,29 @@ enum brcm_message_type {
struct brcm_sba_command {
u64 cmd;
+ u64 *cmd_dma;
+ dma_addr_t cmd_dma_addr;
#define BRCM_SBA_CMD_TYPE_A BIT(0)
#define BRCM_SBA_CMD_TYPE_B BIT(1)
#define BRCM_SBA_CMD_TYPE_C BIT(2)
#define BRCM_SBA_CMD_HAS_RESP BIT(3)
#define BRCM_SBA_CMD_HAS_OUTPUT BIT(4)
u64 flags;
- dma_addr_t input;
- size_t input_len;
dma_addr_t resp;
size_t resp_len;
- dma_addr_t output;
- size_t output_len;
+ dma_addr_t data;
+ size_t data_len;
};
struct brcm_message {
enum brcm_message_type type;
union {
struct {
+ struct brcm_message *msgs;
+ unsigned int msgs_queued;
+ unsigned int msgs_count;
+ } batch;
+ struct {
struct scatterlist *src;
struct scatterlist *dst;
} spu;
diff --git a/include/linux/mfd/motorola-cpcap.h b/include/linux/mfd/motorola-cpcap.h
index b4031c2b2214..53758a7d7c32 100644
--- a/include/linux/mfd/motorola-cpcap.h
+++ b/include/linux/mfd/motorola-cpcap.h
@@ -14,6 +14,9 @@
* published by the Free Software Foundation.
*/
+#include <linux/device.h>
+#include <linux/regmap.h>
+
#define CPCAP_VENDOR_ST 0
#define CPCAP_VENDOR_TI 1
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h
deleted file mode 100644
index e11f4d9f1c2e..000000000000
--- a/include/linux/mg_disk.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * include/linux/mg_disk.c
- *
- * Private data for mflash platform driver
- *
- * (c) 2008 mGine Co.,LTD
- * (c) 2008 unsik Kim <donari75@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MG_DISK_H__
-#define __MG_DISK_H__
-
-/* name for platform device */
-#define MG_DEV_NAME "mg_disk"
-
-/* names of GPIO resource */
-#define MG_RST_PIN "mg_rst"
-/* except MG_BOOT_DEV, reset-out pin should be assigned */
-#define MG_RSTOUT_PIN "mg_rstout"
-
-/* device attribution */
-/* use mflash as boot device */
-#define MG_BOOT_DEV (1 << 0)
-/* use mflash as storage device */
-#define MG_STORAGE_DEV (1 << 1)
-/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
-#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
-
-/* private driver data */
-struct mg_drv_data {
- /* disk resource */
- u32 use_polling;
-
- /* device attribution */
- u32 dev_attr;
-
- /* internally used */
- void *host;
-};
-
-#endif
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 00a8fa7e366a..695da2a19b4c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -432,6 +432,10 @@ static inline int pud_devmap(pud_t pud)
{
return 0;
}
+static inline int pgd_devmap(pgd_t pgd)
+{
+ return 0;
+}
#endif
/*
@@ -758,19 +762,11 @@ static inline enum zone_type page_zonenum(const struct page *page)
}
#ifdef CONFIG_ZONE_DEVICE
-void get_zone_device_page(struct page *page);
-void put_zone_device_page(struct page *page);
static inline bool is_zone_device_page(const struct page *page)
{
return page_zonenum(page) == ZONE_DEVICE;
}
#else
-static inline void get_zone_device_page(struct page *page)
-{
-}
-static inline void put_zone_device_page(struct page *page)
-{
-}
static inline bool is_zone_device_page(const struct page *page)
{
return false;
@@ -786,9 +782,6 @@ static inline void get_page(struct page *page)
*/
VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
page_ref_inc(page);
-
- if (unlikely(is_zone_device_page(page)))
- get_zone_device_page(page);
}
static inline void put_page(struct page *page)
@@ -797,9 +790,6 @@ static inline void put_page(struct page *page)
if (put_page_testzero(page))
__put_page(page);
-
- if (unlikely(is_zone_device_page(page)))
- put_zone_device_page(page);
}
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index f60f45fe226f..45cdb27791a3 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -367,6 +367,11 @@ struct mm_struct {
#endif
unsigned long mmap_base; /* base of mmap area */
unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
+#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
+ /* Base addresses for compatible mmap() */
+ unsigned long mmap_compat_base;
+ unsigned long mmap_compat_legacy_base;
+#endif
unsigned long task_size; /* size of task vm space */
unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
diff --git a/include/linux/module.h b/include/linux/module.h
index 0297c5cd7cdf..9ad68561d8c2 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -493,6 +493,7 @@ static inline int module_is_live(struct module *mod)
struct module *__module_text_address(unsigned long addr);
struct module *__module_address(unsigned long addr);
bool is_module_address(unsigned long addr);
+bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
bool is_module_percpu_address(unsigned long addr);
bool is_module_text_address(unsigned long addr);
@@ -660,6 +661,11 @@ static inline bool is_module_percpu_address(unsigned long addr)
return false;
}
+static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
+{
+ return false;
+}
+
static inline bool is_module_text_address(unsigned long addr)
{
return false;
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index eebdc63cf6af..79b176eca04a 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -334,11 +334,6 @@ struct mtd_info {
int (*_get_device) (struct mtd_info *mtd);
void (*_put_device) (struct mtd_info *mtd);
- /* Backing device capabilities for this device
- * - provides mmap capabilities
- */
- struct backing_dev_info *backing_dev_info;
-
struct notifier_block reboot_notifier; /* default mode before reboot */
/* ECC status information */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index b34097c67848..e1502c55741e 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -133,7 +133,6 @@ struct nfs_server {
struct rpc_clnt * client_acl; /* ACL RPC client handle */
struct nlm_host *nlm_host; /* NLM client handle */
struct nfs_iostats __percpu *io_stats; /* I/O statistics */
- struct backing_dev_info backing_dev_info;
atomic_long_t writeback; /* number of writeback pages */
int flags; /* various flags */
unsigned int caps; /* server capabilities */
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index f21471f7ee40..0db37158a61d 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -137,9 +137,9 @@ enum nvmefc_fcp_datadir {
* transferred. Should equal payload_length on success.
* @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
* @status: Completion status of the FCP operation. must be 0 upon success,
- * NVME_SC_FC_xxx value upon failure. Note: this is NOT a
- * reflection of the NVME CQE completion status. Only the status
- * of the FCP operation at the NVME-FC level.
+ * negative errno value upon failure (ex: -EIO). Note: this is
+ * NOT a reflection of the NVME CQE completion status. Only the
+ * status of the FCP operation at the NVME-FC level.
*/
struct nvmefc_fcp_req {
void *cmdaddr;
@@ -533,9 +533,6 @@ enum {
* rsp as well
*/
NVMET_FCOP_RSP = 4, /* send rsp frame */
- NVMET_FCOP_ABORT = 5, /* abort exchange via ABTS */
- NVMET_FCOP_BA_ACC = 6, /* send BA_ACC */
- NVMET_FCOP_BA_RJT = 7, /* send BA_RJT */
};
/**
@@ -572,8 +569,6 @@ enum {
* upon compeletion of the operation. The nvmet-fc layer will also set a
* private pointer for its own use in the done routine.
*
- * Note: the LLDD must never fail a NVMET_FCOP_ABORT request !!
- *
* Values set by the NVMET-FC layer prior to calling the LLDD fcp_op
* entrypoint.
* @op: Indicates the FCP IU operation to perform (see NVMET_FCOP_xxx)
@@ -655,6 +650,22 @@ enum {
* on. The transport should pick a cpu to schedule the work
* on.
*/
+ NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 2),
+ /* Bit 2: When 0, the LLDD is calling the cmd rcv handler
+ * in a non-isr context, allowing the transport to finish
+ * op completion in the calling context. When 1, the LLDD
+ * is calling the cmd rcv handler in an ISR context,
+ * requiring the transport to transition to a workqueue
+ * for op completion.
+ */
+ NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 3),
+ /* Bit 3: When 0, the LLDD is calling the op done handler
+ * in a non-isr context, allowing the transport to finish
+ * op completion in the calling context. When 1, the LLDD
+ * is calling the op done handler in an ISR context,
+ * requiring the transport to transition to a workqueue
+ * for op completion.
+ */
};
@@ -725,12 +736,12 @@ struct nvmet_fc_target_port {
* be freed/released.
* Entrypoint is Mandatory.
*
- * @fcp_op: Called to perform a data transfer, transmit a response, or
- * abort an FCP opertion. The nvmefc_tgt_fcp_req structure is the same
- * LLDD-supplied exchange structure specified in the
- * nvmet_fc_rcv_fcp_req() call made when the FCP CMD IU was received.
- * The op field in the structure shall indicate the operation for
- * the LLDD to perform relative to the io.
+ * @fcp_op: Called to perform a data transfer or transmit a response.
+ * The nvmefc_tgt_fcp_req structure is the same LLDD-supplied
+ * exchange structure specified in the nvmet_fc_rcv_fcp_req() call
+ * made when the FCP CMD IU was received. The op field in the
+ * structure shall indicate the operation for the LLDD to perform
+ * relative to the io.
* NVMET_FCOP_READDATA operation: the LLDD is to send the
* payload data (described by sglist) to the host in 1 or
* more FC sequences (preferrably 1). Note: the fc-nvme layer
@@ -752,29 +763,31 @@ struct nvmet_fc_target_port {
* successfully, the LLDD is to update the nvmefc_tgt_fcp_req
* transferred_length field and may subsequently transmit the
* FCP_RSP iu payload (described by rspbuf, rspdma, rsplen).
- * The LLDD is to await FCP_CONF reception to confirm the RSP
- * reception by the host. The LLDD may retramsit the FCP_RSP iu
- * if necessary per FC-NVME. Upon reception of FCP_CONF, or upon
- * FCP_CONF failure, the LLDD is to set the nvmefc_tgt_fcp_req
- * fcp_error field and consider the operation complete..
+ * If FCP_CONF is supported, the LLDD is to await FCP_CONF
+ * reception to confirm the RSP reception by the host. The LLDD
+ * may retransmit the FCP_RSP iu if necessary per FC-NVME. Upon
+ * transmission of the FCP_RSP iu if FCP_CONF is not supported,
+ * or upon success/failure of FCP_CONF if it is supported, the
+ * LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
+ * consider the operation complete.
* NVMET_FCOP_RSP: the LLDD is to transmit the FCP_RSP iu payload
- * (described by rspbuf, rspdma, rsplen). The LLDD is to await
- * FCP_CONF reception to confirm the RSP reception by the host.
- * The LLDD may retramsit the FCP_RSP iu if necessary per FC-NVME.
- * Upon reception of FCP_CONF, or upon FCP_CONF failure, the
+ * (described by rspbuf, rspdma, rsplen). If FCP_CONF is
+ * supported, the LLDD is to await FCP_CONF reception to confirm
+ * the RSP reception by the host. The LLDD may retransmit the
+ * FCP_RSP iu if FCP_CONF is not received per FC-NVME. Upon
+ * transmission of the FCP_RSP iu if FCP_CONF is not supported,
+ * or upon success/failure of FCP_CONF if it is supported, the
* LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
- * consider the operation complete..
- * NVMET_FCOP_ABORT: the LLDD is to terminate the exchange
- * corresponding to the fcp operation. The LLDD shall send
- * ABTS and follow FC exchange abort-multi rules, including
- * ABTS retries and possible logout.
+ * consider the operation complete.
* Upon completing the indicated operation, the LLDD is to set the
* status fields for the operation (tranferred_length and fcp_error
- * status) in the request, then all the "done" routine
- * indicated in the fcp request. Upon return from the "done"
- * routine for either a NVMET_FCOP_RSP or NVMET_FCOP_ABORT operation
- * the fc-nvme layer will not longer reference the fcp request,
- * allowing the LLDD to free/release the fcp request.
+ * status) in the request, then call the "done" routine
+ * indicated in the fcp request. After the operation completes,
+ * regardless of whether the FCP_RSP iu was successfully transmitted,
+ * the LLDD-supplied exchange structure must remain valid until the
+ * transport calls the fcp_req_release() callback to return ownership
+ * of the exchange structure back to the LLDD so that it may be used
+ * for another fcp command.
* Note: when calling the done routine for READDATA or WRITEDATA
* operations, the fc-nvme layer may immediate convert, in the same
* thread and before returning to the LLDD, the fcp operation to
@@ -786,6 +799,22 @@ struct nvmet_fc_target_port {
* Returns 0 on success, -<errno> on failure (Ex: -EIO)
* Entrypoint is Mandatory.
*
+ * @fcp_abort: Called by the transport to abort an active command.
+ * The command may be in-between operations (nothing active in LLDD)
+ * or may have an active WRITEDATA operation pending. The LLDD is to
+ * initiate the ABTS process for the command and return from the
+ * callback. The ABTS does not need to have completed when the callback returns.
+ * The fcp_abort callback inherently cannot fail. After the
+ * fcp_abort() callback completes, the transport will wait for any
+ * outstanding operation (if there was one) to complete, then will
+ * call the fcp_req_release() callback to return the command's
+ * exchange context back to the LLDD.
+ *
+ * @fcp_req_release: Called by the transport to return a nvmefc_tgt_fcp_req
+ * to the LLDD after all operations on the fcp operation are complete.
+ * This may be due to the command completing or upon completion of
+ * abort cleanup.
+ *
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
@@ -820,7 +849,11 @@ struct nvmet_fc_target_template {
int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_ls_req *tls_req);
int (*fcp_op)(struct nvmet_fc_target_port *tgtport,
- struct nvmefc_tgt_fcp_req *);
+ struct nvmefc_tgt_fcp_req *fcpreq);
+ void (*fcp_abort)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
+ void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
u32 max_hw_queues;
u16 max_sgl_segments;
@@ -848,4 +881,7 @@ int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq,
void *cmdiubuf, u32 cmdiubuf_len);
+void nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
+
#endif /* _NVME_FC_DRIVER_H */
diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h
index 4b45226bd604..e997c4a49a88 100644
--- a/include/linux/nvme-fc.h
+++ b/include/linux/nvme-fc.h
@@ -16,8 +16,7 @@
*/
/*
- * This file contains definitions relative to FC-NVME r1.11 and a few
- * newer items
+ * This file contains definitions relative to FC-NVME r1.14 (16-020vB).
*/
#ifndef _NVME_FC_H
@@ -47,8 +46,15 @@ struct nvme_fc_cmd_iu {
#define NVME_FC_SIZEOF_ZEROS_RSP 12
+enum {
+ FCNVME_SC_SUCCESS = 0,
+ FCNVME_SC_INVALID_FIELD = 1,
+ FCNVME_SC_INVALID_CONNID = 2,
+};
+
struct nvme_fc_ersp_iu {
- __u8 rsvd0[2];
+ __u8 status_code;
+ __u8 rsvd1;
__be16 iu_len;
__be32 rsn;
__be32 xfrd_len;
@@ -58,7 +64,7 @@ struct nvme_fc_ersp_iu {
};
-/* FC-NVME r1.03/16-119v0 NVME Link Services */
+/* FC-NVME Link Services */
enum {
FCNVME_LS_RSVD = 0,
FCNVME_LS_RJT = 1,
@@ -68,7 +74,7 @@ enum {
FCNVME_LS_DISCONNECT = 5,
};
-/* FC-NVME r1.03/16-119v0 NVME Link Service Descriptors */
+/* FC-NVME Link Service Descriptors */
enum {
FCNVME_LSDESC_RSVD = 0x0,
FCNVME_LSDESC_RQST = 0x1,
@@ -92,7 +98,6 @@ static inline __be32 fcnvme_lsdesc_len(size_t sz)
return cpu_to_be32(sz - (2 * sizeof(u32)));
}
-
struct fcnvme_ls_rqst_w0 {
u8 ls_cmd; /* FCNVME_LS_xxx */
u8 zeros[3];
@@ -106,8 +111,53 @@ struct fcnvme_lsdesc_rqst {
__be32 rsvd12;
};
+/* FC-NVME LS RJT reason_code values */
+enum fcnvme_ls_rjt_reason {
+ FCNVME_RJT_RC_NONE = 0,
+ /* no reason - not to be sent */
+
+ FCNVME_RJT_RC_INVAL = 0x01,
+ /* invalid NVMe_LS command code */
+
+ FCNVME_RJT_RC_LOGIC = 0x03,
+ /* logical error */
+
+ FCNVME_RJT_RC_UNAB = 0x09,
+ /* unable to perform command request */
+
+ FCNVME_RJT_RC_UNSUP = 0x0b,
+ /* command not supported */
+
+ FCNVME_RJT_RC_INPROG = 0x0e,
+ /* command already in progress */
+ FCNVME_RJT_RC_INV_ASSOC = 0x40,
+ /* Invalid Association ID*/
+ FCNVME_RJT_RC_INV_CONN = 0x41,
+ /* Invalid Connection ID*/
+
+ FCNVME_RJT_RC_VENDOR = 0xff,
+ /* vendor specific error */
+};
+
+/* FC-NVME LS RJT reason_explanation values */
+enum fcnvme_ls_rjt_explan {
+ FCNVME_RJT_EXP_NONE = 0x00,
+ /* No additional explanation */
+
+ FCNVME_RJT_EXP_OXID_RXID = 0x17,
+ /* invalid OX_ID-RX_ID combination */
+
+ FCNVME_RJT_EXP_INSUF_RES = 0x29,
+ /* insufficient resources */
+
+ FCNVME_RJT_EXP_UNAB_DATA = 0x2a,
+ /* unable to supply requested data */
+
+ FCNVME_RJT_EXP_INV_LEN = 0x2d,
+ /* Invalid payload length */
+};
/* FCNVME_LSDESC_RJT */
struct fcnvme_lsdesc_rjt {
@@ -119,15 +169,15 @@ struct fcnvme_lsdesc_rjt {
* Reject reason and explanaction codes are generic
* to ELs's from LS-3.
*/
- u8 reason_code;
- u8 reason_explanation;
+ u8 reason_code; /* fcnvme_ls_rjt_reason */
+ u8 reason_explanation; /* fcnvme_ls_rjt_explan */
u8 vendor;
__be32 rsvd12;
};
-#define FCNVME_ASSOC_HOSTID_LEN 64
+#define FCNVME_ASSOC_HOSTID_LEN 16
#define FCNVME_ASSOC_HOSTNQN_LEN 256
#define FCNVME_ASSOC_SUBNQN_LEN 256
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 9061780b141f..b625bacf37ef 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -245,6 +245,7 @@ enum {
NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
NVME_CTRL_VWC_PRESENT = 1 << 0,
NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
+ NVME_CTRL_OACS_DBBUF_SUPP = 1 << 7,
};
struct nvme_lbaf {
@@ -603,6 +604,7 @@ enum nvme_admin_opcode {
nvme_admin_download_fw = 0x11,
nvme_admin_ns_attach = 0x15,
nvme_admin_keep_alive = 0x18,
+ nvme_admin_dbbuf = 0x7C,
nvme_admin_format_nvm = 0x80,
nvme_admin_security_send = 0x81,
nvme_admin_security_recv = 0x82,
@@ -874,6 +876,16 @@ struct nvmf_property_get_command {
__u8 resv4[16];
};
+struct nvme_dbbuf {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __le64 prp2;
+ __u32 rsvd12[6];
+};
+
struct nvme_command {
union {
struct nvme_common_command common;
@@ -893,6 +905,7 @@ struct nvme_command {
struct nvmf_connect_command connect;
struct nvmf_property_set_command prop_set;
struct nvmf_property_get_command prop_get;
+ struct nvme_dbbuf dbbuf;
};
};
diff --git a/include/linux/of.h b/include/linux/of.h
index 21e6323de0f3..e5d4225fda35 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -159,6 +159,8 @@ static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
container_of(fwnode, struct device_node, fwnode) : NULL;
}
+#define of_fwnode_handle(node) (&(node)->fwnode)
+
static inline bool of_have_populated_dt(void)
{
return of_root != NULL;
@@ -602,6 +604,8 @@ static inline struct device_node *of_find_node_with_property(
return NULL;
}
+#define of_fwnode_handle(node) NULL
+
static inline bool of_have_populated_dt(void)
{
return false;
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 84943e8057ef..316a19f6b635 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -148,7 +148,7 @@ static inline int page_cache_get_speculative(struct page *page)
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic());
+ VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
/*
* Preempt must be disabled here - we rely on rcu_read_lock doing
@@ -186,7 +186,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic());
+ VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
VM_BUG_ON_PAGE(page_count(page) == 0, page);
page_ref_add(page, count);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 56939d3f6e53..491b3f5a5f8a 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -110,6 +110,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
#endif
extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);
#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 000fdb211c7d..24a635887f28 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -165,6 +165,13 @@ struct hw_perf_event {
struct list_head bp_list;
};
#endif
+ struct { /* amd_iommu */
+ u8 iommu_bank;
+ u8 iommu_cntr;
+ u16 padding;
+ u64 conf;
+ u64 conf1;
+ };
};
/*
* If the event is a per task event, this will point to the task in
@@ -801,6 +808,7 @@ struct perf_output_handle {
struct ring_buffer *rb;
unsigned long wakeup;
unsigned long size;
+ u64 aux_flags;
union {
void *addr;
unsigned long head;
@@ -849,10 +857,11 @@ perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
extern void *perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
- unsigned long size, bool truncated);
+ unsigned long size);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
+extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);
@@ -1112,6 +1121,7 @@ extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks
extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
+extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
/* Callchains */
@@ -1267,8 +1277,8 @@ static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event) { return NULL; }
static inline void
-perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
- bool truncated) { }
+perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
+ { }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
unsigned long size) { return -EINVAL; }
@@ -1315,6 +1325,7 @@ static inline int perf_unregister_guest_info_callbacks
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_exec(void) { }
static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
+static inline void perf_event_namespaces(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 5339ed5bd6f9..9b6abe632587 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -20,6 +20,7 @@
/* Defines used for the flags field in the struct generic_pm_domain */
#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
#define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic */
+#define GENPD_FLAG_ALWAYS_ON (1U << 2) /* PM domain is always powered on */
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
diff --git a/include/linux/poll.h b/include/linux/poll.h
index a46d6755035e..75ffc5729e4c 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -98,64 +98,8 @@ extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
ktime_t *expires, unsigned long slack);
extern u64 select_estimate_accuracy(struct timespec64 *tv);
-
-static inline int poll_schedule(struct poll_wqueues *pwq, int state)
-{
- return poll_schedule_timeout(pwq, state, NULL, 0);
-}
-
-/*
- * Scalable version of the fd_set.
- */
-
-typedef struct {
- unsigned long *in, *out, *ex;
- unsigned long *res_in, *res_out, *res_ex;
-} fd_set_bits;
-
-/*
- * How many longwords for "nr" bits?
- */
-#define FDS_BITPERLONG (8*sizeof(long))
-#define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
-#define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long))
-
-/*
- * We do a VERIFY_WRITE here even though we are only reading this time:
- * we'll write to it eventually..
- *
- * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
- */
-static inline
-int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
-{
- nr = FDS_BYTES(nr);
- if (ufdset)
- return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;
-
- memset(fdset, 0, nr);
- return 0;
-}
-
-static inline unsigned long __must_check
-set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
-{
- if (ufdset)
- return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
- return 0;
-}
-
-static inline
-void zero_fd_set(unsigned long nr, unsigned long *fdset)
-{
- memset(fdset, 0, FDS_BYTES(nr));
-}
-
#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
-extern int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time);
-extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
- struct timespec64 *end_time);
extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
fd_set __user *exp, struct timespec64 *end_time);
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 34c4498b800f..83b22ae9ae12 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -59,23 +59,23 @@ struct posix_clock_operations {
int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx);
- int (*clock_gettime)(struct posix_clock *pc, struct timespec *ts);
+ int (*clock_gettime)(struct posix_clock *pc, struct timespec64 *ts);
- int (*clock_getres) (struct posix_clock *pc, struct timespec *ts);
+ int (*clock_getres) (struct posix_clock *pc, struct timespec64 *ts);
int (*clock_settime)(struct posix_clock *pc,
- const struct timespec *ts);
+ const struct timespec64 *ts);
int (*timer_create) (struct posix_clock *pc, struct k_itimer *kit);
int (*timer_delete) (struct posix_clock *pc, struct k_itimer *kit);
void (*timer_gettime)(struct posix_clock *pc,
- struct k_itimer *kit, struct itimerspec *tsp);
+ struct k_itimer *kit, struct itimerspec64 *tsp);
int (*timer_settime)(struct posix_clock *pc,
struct k_itimer *kit, int flags,
- struct itimerspec *tsp, struct itimerspec *old);
+ struct itimerspec64 *tsp, struct itimerspec64 *old);
/*
* Optional character device methods:
*/
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 64aa189efe21..8c1e43ab14a9 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -87,22 +87,22 @@ struct k_itimer {
};
struct k_clock {
- int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
+ int (*clock_getres) (const clockid_t which_clock, struct timespec64 *tp);
int (*clock_set) (const clockid_t which_clock,
- const struct timespec *tp);
- int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
+ const struct timespec64 *tp);
+ int (*clock_get) (const clockid_t which_clock, struct timespec64 *tp);
int (*clock_adj) (const clockid_t which_clock, struct timex *tx);
int (*timer_create) (struct k_itimer *timer);
int (*nsleep) (const clockid_t which_clock, int flags,
- struct timespec *, struct timespec __user *);
+ struct timespec64 *, struct timespec __user *);
long (*nsleep_restart) (struct restart_block *restart_block);
- int (*timer_set) (struct k_itimer * timr, int flags,
- struct itimerspec * new_setting,
- struct itimerspec * old_setting);
- int (*timer_del) (struct k_itimer * timr);
+ int (*timer_set) (struct k_itimer *timr, int flags,
+ struct itimerspec64 *new_setting,
+ struct itimerspec64 *old_setting);
+ int (*timer_del) (struct k_itimer *timr);
#define TIMER_RETRY 1
- void (*timer_get) (struct k_itimer * timr,
- struct itimerspec * cur_setting);
+ void (*timer_get) (struct k_itimer *timr,
+ struct itimerspec64 *cur_setting);
};
extern struct k_clock clock_posix_cpu;
diff --git a/include/linux/power/bq24190_charger.h b/include/linux/power/bq24190_charger.h
deleted file mode 100644
index 9f0283721cbc..000000000000
--- a/include/linux/power/bq24190_charger.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Platform data for the TI bq24190 battery charger driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _BQ24190_CHARGER_H_
-#define _BQ24190_CHARGER_H_
-
-struct bq24190_platform_data {
- unsigned int gpio_int; /* GPIO pin that's connected to INT# */
-};
-
-#endif
diff --git a/include/linux/property.h b/include/linux/property.h
index 64e3a9c6d95f..2f482616a2f2 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -33,6 +33,8 @@ enum dev_dma_attr {
DEV_DMA_COHERENT,
};
+struct fwnode_handle *dev_fwnode(struct device *dev);
+
bool device_property_present(struct device *dev, const char *propname);
int device_property_read_u8_array(struct device *dev, const char *propname,
u8 *val, size_t nval);
@@ -70,6 +72,15 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
int fwnode_property_match_string(struct fwnode_handle *fwnode,
const char *propname, const char *string);
+struct fwnode_handle *fwnode_get_parent(struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_get_next_child_node(struct fwnode_handle *fwnode,
+ struct fwnode_handle *child);
+
+#define fwnode_for_each_child_node(fwnode, child) \
+ for (child = fwnode_get_next_child_node(fwnode, NULL); child; \
+ child = fwnode_get_next_child_node(fwnode, child))
+
struct fwnode_handle *device_get_next_child_node(struct device *dev,
struct fwnode_handle *child);
@@ -77,9 +88,12 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
for (child = device_get_next_child_node(dev, NULL); child; \
child = device_get_next_child_node(dev, child))
+struct fwnode_handle *fwnode_get_named_child_node(struct fwnode_handle *fwnode,
+ const char *childname);
struct fwnode_handle *device_get_named_child_node(struct device *dev,
const char *childname);
+void fwnode_handle_get(struct fwnode_handle *fwnode);
void fwnode_handle_put(struct fwnode_handle *fwnode);
unsigned int device_get_child_node_count(struct device *dev);
@@ -258,4 +272,16 @@ int device_get_phy_mode(struct device *dev);
void *device_get_mac_address(struct device *dev, char *addr, int alen);
+struct fwnode_handle *fwnode_graph_get_next_endpoint(
+ struct fwnode_handle *fwnode, struct fwnode_handle *prev);
+struct fwnode_handle *fwnode_graph_get_remote_port_parent(
+ struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_graph_get_remote_port(
+ struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_graph_get_remote_endpoint(
+ struct fwnode_handle *fwnode);
+
+int fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode,
+ struct fwnode_endpoint *endpoint);
+
#endif /* _LINUX_PROPERTY_H_ */
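
For reference, a minimal sketch of walking a device's firmware child nodes with the helpers declared above; the "some-flag" property name is purely illustrative, not part of this patch:

#include <linux/property.h>

/* Count children that carry a given (hypothetical) boolean property. */
static int count_flagged_children(struct device *dev)
{
	struct fwnode_handle *parent = dev_fwnode(dev);
	struct fwnode_handle *child;
	int n = 0;

	fwnode_for_each_child_node(parent, child)
		if (fwnode_property_present(child, "some-flag"))
			n++;

	return n;
}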
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 0da29cae009b..e2233f50f428 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -30,7 +30,9 @@
#include <linux/time.h>
#include <linux/types.h>
-/* types */
+struct module;
+
+/* pstore record types (see fs/pstore/inode.c for filename templates) */
enum pstore_type_id {
PSTORE_TYPE_DMESG = 0,
PSTORE_TYPE_MCE = 1,
@@ -45,42 +47,146 @@ enum pstore_type_id {
PSTORE_TYPE_UNKNOWN = 255
};
-struct module;
+struct pstore_info;
+/**
+ * struct pstore_record - details of a pstore record entry
+ * @psi: pstore backend driver information
+ * @type: pstore record type
+ * @id: per-type unique identifier for record
+ * @time: timestamp of the record
+ * @buf: pointer to record contents
+ * @size: size of @buf
+ * @ecc_notice_size:
+ * ECC information for @buf
+ *
+ * Valid for PSTORE_TYPE_DMESG @type:
+ *
+ * @count: Oops count since boot
+ * @reason: kdump reason for notification
+ * @part: position in a multipart record
+ * @compressed: whether the buffer is compressed
+ *
+ */
+struct pstore_record {
+ struct pstore_info *psi;
+ enum pstore_type_id type;
+ u64 id;
+ struct timespec time;
+ char *buf;
+ ssize_t size;
+ ssize_t ecc_notice_size;
+ int count;
+ enum kmsg_dump_reason reason;
+ unsigned int part;
+ bool compressed;
+};
+
+/**
+ * struct pstore_info - backend pstore driver structure
+ *
+ * @owner: module which is responsible for this backend driver
+ * @name: name of the backend driver
+ *
+ * @buf_lock: spinlock to serialize access to @buf
+ * @buf: preallocated crash dump buffer
+ * @bufsize: size of @buf available for crash dump writes
+ *
+ * @read_mutex: serializes @open, @read, @close, and @erase callbacks
+ * @flags: bitfield of frontends the backend can accept writes for
+ * @data: backend-private pointer passed back during callbacks
+ *
+ * Callbacks:
+ *
+ * @open:
+ * Notify backend that pstore is starting a full read of backend
+ * records. Followed by one or more @read calls, and a final @close.
+ *
+ * @psi: in: pointer to the struct pstore_info for the backend
+ *
+ * Returns 0 on success, and non-zero on error.
+ *
+ * @close:
+ * Notify backend that pstore has finished a full read of backend
+ * records. Always preceded by an @open call and one or more @read
+ * calls.
+ *
+ * @psi: in: pointer to the struct pstore_info for the backend
+ *
+ * Returns 0 on success, and non-zero on error. (Though pstore will
+ * ignore the error.)
+ *
+ * @read:
+ * Read next available backend record. Called after a successful
+ * @open.
+ *
+ * @record:
+ * pointer to record to populate. @buf should be allocated
+ * by the backend and filled. At least @type and @id should
+ * be populated, since these are used when creating pstorefs
+ * file names.
+ *
+ * Returns record size on success, zero when no more records are
+ * available, or negative on error.
+ *
+ * @write:
+ * A newly generated record needs to be written to backend storage.
+ *
+ * @record:
+ * pointer to record metadata. When @type is PSTORE_TYPE_DMESG,
+ * @buf will be pointing to the preallocated @psi.buf, since
+ * memory allocation may be broken during an Oops. Regardless,
+ * @buf must be processed or copied before returning. The
+ * backend is also expected to write @id with something that
+ * can help identify this record to a future @erase callback.
+ *
+ * Returns 0 on success, and non-zero on error.
+ *
+ * @write_user:
+ * Perform a frontend write to a backend record, using a specified
+ * buffer that is coming directly from userspace, instead of the
+ * @record @buf.
+ *
+ * @record: pointer to record metadata.
+ * @buf: pointer to userspace contents to write to backend
+ *
+ * Returns 0 on success, and non-zero on error.
+ *
+ * @erase:
+ * Delete a record from backend storage. Different backends
+ * identify records differently, so the entire original record is
+ * passed back to assist in identification of what the backend
+ * should remove from storage.
+ *
+ * @record: pointer to record metadata.
+ *
+ * Returns 0 on success, and non-zero on error.
+ *
+ */
struct pstore_info {
struct module *owner;
char *name;
- spinlock_t buf_lock; /* serialize access to 'buf' */
+
+ spinlock_t buf_lock;
char *buf;
size_t bufsize;
- struct mutex read_mutex; /* serialize open/read/close */
+
+ struct mutex read_mutex;
+
int flags;
+ void *data;
+
int (*open)(struct pstore_info *psi);
int (*close)(struct pstore_info *psi);
- ssize_t (*read)(u64 *id, enum pstore_type_id *type,
- int *count, struct timespec *time, char **buf,
- bool *compressed, ssize_t *ecc_notice_size,
- struct pstore_info *psi);
- int (*write)(enum pstore_type_id type,
- enum kmsg_dump_reason reason, u64 *id,
- unsigned int part, int count, bool compressed,
- size_t size, struct pstore_info *psi);
- int (*write_buf)(enum pstore_type_id type,
- enum kmsg_dump_reason reason, u64 *id,
- unsigned int part, const char *buf, bool compressed,
- size_t size, struct pstore_info *psi);
- int (*write_buf_user)(enum pstore_type_id type,
- enum kmsg_dump_reason reason, u64 *id,
- unsigned int part, const char __user *buf,
- bool compressed, size_t size, struct pstore_info *psi);
- int (*erase)(enum pstore_type_id type, u64 id,
- int count, struct timespec time,
- struct pstore_info *psi);
- void *data;
+ ssize_t (*read)(struct pstore_record *record);
+ int (*write)(struct pstore_record *record);
+ int (*write_user)(struct pstore_record *record,
+ const char __user *buf);
+ int (*erase)(struct pstore_record *record);
};
+/* Supported frontends */
#define PSTORE_FLAGS_DMESG (1 << 0)
-#define PSTORE_FLAGS_FRAGILE PSTORE_FLAGS_DMESG
#define PSTORE_FLAGS_CONSOLE (1 << 1)
#define PSTORE_FLAGS_FTRACE (1 << 2)
#define PSTORE_FLAGS_PMSG (1 << 3)
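
For orientation, a minimal (non-functional) backend sketch showing the shape of the new record-based callbacks; a real backend must also set up @buf/@bufsize and actual storage before registering. Everything here is an assumption for illustration, not code from this patch:

#include <linux/module.h>
#include <linux/pstore.h>

static ssize_t demo_read(struct pstore_record *record)
{
	return 0;			/* no stored records in this sketch */
}

static int demo_write(struct pstore_record *record)
{
	/* record->buf/size describe the data; pick an @id erase can match */
	record->id = 1;
	return 0;
}

static int demo_erase(struct pstore_record *record)
{
	return 0;			/* nothing persisted in this sketch */
}

static struct pstore_info demo_psinfo = {
	.owner	= THIS_MODULE,
	.name	= "demo",
	.flags	= PSTORE_FLAGS_DMESG,
	.read	= demo_read,
	.write	= demo_write,
	.erase	= demo_erase,
};

Registration itself is unchanged: pstore_register(&demo_psinfo) once the buffer fields are filled in.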
diff --git a/include/linux/ras.h b/include/linux/ras.h
index 2aceeafd6fe5..ffb147185e8d 100644
--- a/include/linux/ras.h
+++ b/include/linux/ras.h
@@ -1,14 +1,25 @@
#ifndef __RAS_H__
#define __RAS_H__
+#include <asm/errno.h>
+
#ifdef CONFIG_DEBUG_FS
int ras_userspace_consumers(void);
void ras_debugfs_init(void);
int ras_add_daemon_trace(void);
#else
static inline int ras_userspace_consumers(void) { return 0; }
-static inline void ras_debugfs_init(void) { return; }
+static inline void ras_debugfs_init(void) { }
static inline int ras_add_daemon_trace(void) { return 0; }
#endif
+#ifdef CONFIG_RAS_CEC
+void __init cec_init(void);
+int __init parse_cec_param(char *str);
+int cec_add_elem(u64 pfn);
+#else
+static inline void __init cec_init(void) { }
+static inline int cec_add_elem(u64 pfn) { return -ENODEV; }
#endif
+
+#endif /* __RAS_H__ */
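
As a rough illustration of the new CEC hook (the caller and PFN derivation are assumptions, not part of this patch), a corrected-error handler would feed page frame numbers into the collector:

#include <linux/ras.h>
#include <linux/mm.h>

/* Sketch: record a corrected memory error at a physical address. */
static void note_corrected_error(u64 phys_addr)
{
	cec_add_elem(phys_addr >> PAGE_SHIFT);
}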
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 0023fee4bbbc..b34aa649d204 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -6,17 +6,36 @@
#include <linux/spinlock.h>
#include <linux/kernel.h>
+/**
+ * refcount_t - variant of atomic_t specialized for reference counts
+ * @refs: atomic_t counter field
+ *
+ * The counter saturates at UINT_MAX and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free bugs.
+ */
typedef struct refcount_struct {
atomic_t refs;
} refcount_t;
#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
+/**
+ * refcount_set - set a refcount's value
+ * @r: the refcount
+ * @n: value to which the refcount will be set
+ */
static inline void refcount_set(refcount_t *r, unsigned int n)
{
atomic_set(&r->refs, n);
}
+/**
+ * refcount_read - get a refcount's value
+ * @r: the refcount
+ *
+ * Return: the refcount's value
+ */
static inline unsigned int refcount_read(const refcount_t *r)
{
return atomic_read(&r->refs);
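
A small usage sketch for the documented helpers; the surrounding object is hypothetical and the remaining refcount_* operations are untouched by this hunk:

#include <linux/refcount.h>
#include <linux/slab.h>

struct session {
	refcount_t	users;
	/* ... payload ... */
};

static struct session *session_create(void)
{
	struct session *s = kzalloc(sizeof(*s), GFP_KERNEL);

	if (s)
		refcount_set(&s->users, 1);	/* creator holds one reference */
	return s;
}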
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index d4e0a204c118..a1904aadbc45 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -176,6 +176,25 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
/**
+ * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
+ * limiting the depth used from each word.
+ * @sb: Bitmap to allocate from.
+ * @alloc_hint: Hint for where to start searching for a free bit.
+ * @shallow_depth: The maximum number of bits to allocate from a single word.
+ *
+ * This rather specific operation allows for having multiple users with
+ * different allocation limits. E.g., there can be a high-priority class that
+ * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
+ * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
+ * class can only allocate half of the total bits in the bitmap, preventing it
+ * from starving out the high-priority class.
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
+ unsigned long shallow_depth);
+
+/**
* sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
* @sb: Bitmap to check.
*
@@ -326,6 +345,19 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
int __sbitmap_queue_get(struct sbitmap_queue *sbq);
/**
+ * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
+ * sbitmap_queue, limiting the depth used from each word, with preemption
+ * already disabled.
+ * @sbq: Bitmap queue to allocate from.
+ * @shallow_depth: The maximum number of bits to allocate from a single word.
+ * See sbitmap_get_shallow().
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+ unsigned int shallow_depth);
+
+/**
* sbitmap_queue_get() - Try to allocate a free bit from a &struct
* sbitmap_queue.
* @sbq: Bitmap queue to allocate from.
@@ -346,6 +378,29 @@ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
}
/**
+ * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
+ * sbitmap_queue, limiting the depth used from each word.
+ * @sbq: Bitmap queue to allocate from.
+ * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
+ * sbitmap_queue_clear()).
+ * @shallow_depth: The maximum number of bits to allocate from a single word.
+ * See sbitmap_get_shallow().
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+ unsigned int *cpu,
+ unsigned int shallow_depth)
+{
+ int nr;
+
+ *cpu = get_cpu();
+ nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
+ put_cpu();
+ return nr;
+}
+
+/**
* sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
* &struct sbitmap_queue.
* @sbq: Bitmap to free from.
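
A sketch of the shallow-allocation pattern described in the kernel-doc above, capping a low-priority class at half of each word; the helper name is made up:

#include <linux/sbitmap.h>

static int low_prio_get(struct sbitmap_queue *sbq, unsigned int *cpu)
{
	/* allow this class to use only half the bits of each word */
	unsigned long shallow = 1UL << (sbq->sb.shift - 1);

	return sbitmap_queue_get_shallow(sbq, cpu, shallow);
}

A bit obtained this way is released with the usual sbitmap_queue_clear(sbq, nr, cpu).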
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4cf9a59a4d08..ba080e586dae 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -779,6 +779,8 @@ struct task_struct {
/* PI waiters blocked on a rt_mutex held by this task: */
struct rb_root pi_waiters;
struct rb_node *pi_waiters_leftmost;
+ /* Updated under owner's pi_lock and rq lock */
+ struct task_struct *pi_top_task;
/* Deadlock detection and priority inheritance handling: */
struct rt_mutex_waiter *pi_blocked_on;
#endif
@@ -1290,10 +1292,10 @@ TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
TASK_PFA_SET(LMK_WAITING, lmk_waiting)
static inline void
-tsk_restore_flags(struct task_struct *task, unsigned long orig_flags, unsigned long flags)
+current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
- task->flags &= ~flags;
- task->flags |= orig_flags & flags;
+ current->flags &= ~flags;
+ current->flags |= orig_flags & flags;
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
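
The renamed helper keeps the familiar save/restore pattern, just pinned to current; a sketch (do_reclaim_work() is a hypothetical caller-side function):

#include <linux/sched.h>

static void do_reclaim_work(void);		/* hypothetical */

static void run_with_memalloc(void)
{
	unsigned int pflags = current->flags;

	current->flags |= PF_MEMALLOC;
	do_reclaim_work();
	current_restore_flags(pflags, PF_MEMALLOC);	/* restore only this bit */
}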
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 3bd668414f61..f93329aba31a 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -18,27 +18,20 @@ static inline int rt_task(struct task_struct *p)
}
#ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(struct task_struct *p);
-extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
-extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
+/*
+ * Must hold either p->pi_lock or task_rq(p)->lock.
+ */
+static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
+{
+ return p->pi_top_task;
+}
+extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
return tsk->pi_blocked_on != NULL;
}
#else
-static inline int rt_mutex_getprio(struct task_struct *p)
-{
- return p->normal_prio;
-}
-
-static inline int rt_mutex_get_effective_prio(struct task_struct *task,
- int newprio)
-{
- return newprio;
-}
-
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
return NULL;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 81ef53f06534..a098d95b3d84 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3116,7 +3116,7 @@ struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
- return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
+ return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
}
static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 8e0cb7a0f836..68123c1fe549 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -120,6 +120,13 @@ extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);
+extern int __boot_cpu_id;
+
+static inline int get_boot_cpu_id(void)
+{
+ return __boot_cpu_id;
+}
+
#else /* !SMP */
static inline void smp_send_stop(void) { }
@@ -158,6 +165,11 @@ static inline void smp_init(void) { up_late_init(); }
static inline void smp_init(void) { }
#endif
+static inline int get_boot_cpu_id(void)
+{
+ return 0;
+}
+
#endif /* !SMP */
/*
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 00a21166e268..db42746bdfea 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -20,6 +20,8 @@
#define SPLICE_F_MORE (0x04) /* expect more data */
#define SPLICE_F_GIFT (0x08) /* pages passed in are a gift */
+#define SPLICE_F_ALL (SPLICE_F_MOVE|SPLICE_F_NONBLOCK|SPLICE_F_MORE|SPLICE_F_GIFT)
+
/*
* Passed to the actors
*/
@@ -55,7 +57,6 @@ struct splice_pipe_desc {
struct partial_page *partial; /* pages[] may not be contig */
int nr_pages; /* number of populated pages in map */
unsigned int nr_pages_max; /* pages[] & partial[] arrays size */
- unsigned int flags; /* splice flags */
const struct pipe_buf_operations *ops;/* ops associated with output pipe */
void (*spd_release)(struct splice_pipe_desc *, unsigned int);
};
@@ -82,7 +83,6 @@ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
*/
extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
extern void splice_shrink_spd(struct splice_pipe_desc *);
-extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
extern const struct pipe_buf_operations default_pipe_buf_ops;
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index 9fba9dd33544..9375d23a24e7 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -34,9 +34,9 @@ struct t10_pi_tuple {
};
-extern struct blk_integrity_profile t10_pi_type1_crc;
-extern struct blk_integrity_profile t10_pi_type1_ip;
-extern struct blk_integrity_profile t10_pi_type3_crc;
-extern struct blk_integrity_profile t10_pi_type3_ip;
+extern const struct blk_integrity_profile t10_pi_type1_crc;
+extern const struct blk_integrity_profile t10_pi_type1_ip;
+extern const struct blk_integrity_profile t10_pi_type3_crc;
+extern const struct blk_integrity_profile t10_pi_type3_ip;
#endif
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 58373875e8ee..d7d3ea637dd0 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -22,6 +22,18 @@
#endif
#include <linux/bitops.h>
+
+/*
+ * For per-arch arch_within_stack_frames() implementations, defined in
+ * asm/thread_info.h.
+ */
+enum {
+ BAD_STACK = -1,
+ NOT_STACK = 0,
+ GOOD_FRAME,
+ GOOD_STACK,
+};
+
#include <asm/thread_info.h>
#ifdef __KERNEL__
@@ -101,6 +113,10 @@ static inline void check_object_size(const void *ptr, unsigned long n,
{ }
#endif /* CONFIG_HARDENED_USERCOPY */
+#ifndef arch_setup_new_exec
+static inline void arch_setup_new_exec(void) { }
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_THREAD_INFO_H */
diff --git a/include/linux/tick.h b/include/linux/tick.h
index a04fea19676f..fe01e68bf520 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -117,6 +117,7 @@ extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
+extern unsigned long tick_nohz_get_idle_calls(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index b598cbc7b576..ddc229ff6d1e 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -19,21 +19,6 @@ extern void do_gettimeofday(struct timeval *tv);
extern int do_settimeofday64(const struct timespec64 *ts);
extern int do_sys_settimeofday64(const struct timespec64 *tv,
const struct timezone *tz);
-static inline int do_sys_settimeofday(const struct timespec *tv,
- const struct timezone *tz)
-{
- struct timespec64 ts64;
-
- if (!tv)
- return do_sys_settimeofday64(NULL, tz);
-
- if (!timespec_valid(tv))
- return -EINVAL;
-
- ts64 = timespec_to_timespec64(*tv);
- return do_sys_settimeofday64(&ts64, tz);
-}
-
/*
* Kernel time accessors
*/
@@ -273,6 +258,11 @@ static inline void timekeeping_clocktai(struct timespec *ts)
*ts = ktime_to_timespec(ktime_get_clocktai());
}
+static inline void timekeeping_clocktai64(struct timespec64 *ts)
+{
+ *ts = ktime_to_timespec64(ktime_get_clocktai());
+}
+
/*
* RTC specific
*/
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index f30c187ed785..e0cbfb09e60f 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,8 +2,199 @@
#define __LINUX_UACCESS_H__
#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <linux/kasan-checks.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
+
#include <asm/uaccess.h>
+/*
+ * Architectures should provide two primitives (raw_copy_{to,from}_user())
+ * and get rid of their private instances of copy_{to,from}_user() and
+ * __copy_{to,from}_user{,_inatomic}().
+ *
+ * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
+ * return the amount left to copy. They should assume that access_ok() has
+ * already been checked (and succeeded); they should *not* zero-pad anything.
+ * No KASAN or object size checks either - those belong here.
+ *
+ * Both of these functions should attempt to copy size bytes starting at from
+ * into the area starting at to. They must not fetch or store anything
+ * outside of those areas. Return value must be between 0 (everything
+ * copied successfully) and size (nothing copied).
+ *
+ * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
+ * at to must become equal to the bytes fetched from the corresponding area
+ * starting at from. All data past to + size - N must be left unmodified.
+ *
+ * If copying succeeds, the return value must be 0. If some data cannot be
+ * fetched, it is permitted to copy less than had been fetched; the only
+ * hard requirement is that not storing anything at all (i.e. returning size)
+ * should happen only when nothing could be copied. In other words, you don't
+ * have to squeeze as much as possible - it is allowed, but not necessary.
+ *
+ * For raw_copy_from_user(), to always points to kernel memory and no faults
+ * on store should happen. Interpretation of from is affected by set_fs().
+ * For raw_copy_to_user() it's the other way round.
+ *
+ * Both can be inlined - it's up to the architecture whether it wants to bother
+ * with that. They should not be used directly; they are used to implement
+ * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
+ * that are used instead. Out of those, __... ones are inlined. Plain
+ * copy_{to,from}_user() might or might not be inlined. If you want them
+ * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
+ *
+ * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
+ * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
+ * at all; their callers absolutely must check the return value.
+ *
+ * Biarch ones should also provide raw_copy_in_user() - similar to the above,
+ * but both source and destination are __user pointers (affected by set_fs()
+ * as usual) and both source and destination can trigger faults.
+ */
+
+static __always_inline unsigned long
+__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+ kasan_check_write(to, n);
+ check_object_size(to, n, false);
+ return raw_copy_from_user(to, from, n);
+}
+
+static __always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ might_fault();
+ kasan_check_write(to, n);
+ check_object_size(to, n, false);
+ return raw_copy_from_user(to, from, n);
+}
+
+/**
+ * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
+ * @to: Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only.
+ *
+ * Copy data from kernel space to user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ * The caller should also make sure he pins the user space address
+ * so that we don't result in page fault and sleep.
+ */
+static __always_inline unsigned long
+__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+{
+ kasan_check_read(from, n);
+ check_object_size(from, n, true);
+ return raw_copy_to_user(to, from, n);
+}
+
+static __always_inline unsigned long
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ might_fault();
+ kasan_check_read(from, n);
+ check_object_size(from, n, true);
+ return raw_copy_to_user(to, from, n);
+}
+
+#ifdef INLINE_COPY_FROM_USER
+static inline unsigned long
+_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long res = n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = raw_copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
+}
+#else
+extern unsigned long
+_copy_from_user(void *, const void __user *, unsigned long);
+#endif
+
+#ifdef INLINE_COPY_TO_USER
+static inline unsigned long
+_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = raw_copy_to_user(to, from, n);
+ return n;
+}
+#else
+extern unsigned long
+_copy_to_user(void __user *, const void *, unsigned long);
+#endif
+
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+ WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
+static __always_inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ int sz = __compiletime_object_size(to);
+
+ might_fault();
+ kasan_check_write(to, n);
+
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(to, n, false);
+ n = _copy_from_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+ else
+ __bad_copy_user();
+
+ return n;
+}
+
+static __always_inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ int sz = __compiletime_object_size(from);
+
+ kasan_check_read(from, n);
+ might_fault();
+
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(from, n, true);
+ n = _copy_to_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+ else
+ __bad_copy_user();
+
+ return n;
+}
+#ifdef CONFIG_COMPAT
+static __always_inline unsigned long __must_check
+__copy_in_user(void __user *to, const void *from, unsigned long n)
+{
+ might_fault();
+ return raw_copy_in_user(to, from, n);
+}
+static __always_inline unsigned long __must_check
+copy_in_user(void __user *to, const void *from, unsigned long n)
+{
+ might_fault();
+ if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
+ n = raw_copy_in_user(to, from, n);
+ return n;
+}
+#endif
+
static __always_inline void pagefault_disabled_inc(void)
{
current->pagefault_disabled++;
@@ -67,12 +258,6 @@ static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
return __copy_from_user_inatomic(to, from, n);
}
-static inline unsigned long __copy_from_user_nocache(void *to,
- const void __user *from, unsigned long n)
-{
- return __copy_from_user(to, from, n);
-}
-
#endif /* ARCH_HAS_NOCACHE_UACCESS */
/*
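
Caller-side behaviour is unchanged by the consolidation: copy_from_user() returns the number of bytes it could not copy and zero-pads the tail on a short copy, so a typical copy-in still only needs the non-zero check. A minimal sketch (the struct and function names are illustrative):

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/errno.h>

struct demo_args {
	__u32	flags;
	__u64	addr;
};

static int demo_copy_in(struct demo_args *args, const void __user *uptr)
{
	if (copy_from_user(args, uptr, sizeof(*args)))
		return -EFAULT;		/* partial copies count as failure */
	return 0;
}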
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 4616a49a1c2e..f665d2ceac20 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -451,6 +451,7 @@ static inline struct usb_composite_driver *to_cdriver(
* sure doing that won't hurt too much.
*
* One notion for how to handle Wireless USB devices involves:
+ *
* (a) a second gadget here, discovery mechanism TBD, but likely
* needing separate "register/unregister WUSB gadget" calls;
* (b) updates to usb_gadget to include flags "is it wireless",
@@ -503,8 +504,9 @@ struct usb_composite_dev {
/* protects deactivations and delayed_status counts*/
spinlock_t lock;
- unsigned setup_pending:1;
- unsigned os_desc_pending:1;
+ /* public: */
+ unsigned int setup_pending:1;
+ unsigned int os_desc_pending:1;
};
extern int usb_string_id(struct usb_composite_dev *c);
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index e4516e9ded0f..fbc22a39e7bc 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -188,7 +188,7 @@ struct usb_ep_caps {
 * @caps:The structure describing types and directions supported by endpoint.
* @maxpacket:The maximum packet size used on this endpoint. The initial
* value can sometimes be reduced (hardware allowing), according to
- * the endpoint descriptor used to configure the endpoint.
+ * the endpoint descriptor used to configure the endpoint.
* @maxpacket_limit:The maximum packet size value which can be handled by this
* endpoint. It's set once by UDC driver when endpoint is initialized, and
* should not be changed. Should not be confused with maxpacket.
diff --git a/include/linux/usb/xhci-dbgp.h b/include/linux/usb/xhci-dbgp.h
new file mode 100644
index 000000000000..80c1cca1f529
--- /dev/null
+++ b/include/linux/usb/xhci-dbgp.h
@@ -0,0 +1,29 @@
+/*
+ * Standalone xHCI debug capability driver
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_XHCI_DBGP_H
+#define __LINUX_XHCI_DBGP_H
+
+#ifdef CONFIG_EARLY_PRINTK_USB_XDBC
+int __init early_xdbc_parse_parameter(char *s);
+int __init early_xdbc_setup_hardware(void);
+void __init early_xdbc_register_console(void);
+#else
+static inline int __init early_xdbc_setup_hardware(void)
+{
+ return -ENODEV;
+}
+static inline void __init early_xdbc_register_console(void)
+{
+}
+#endif /* CONFIG_EARLY_PRINTK_USB_XDBC */
+#endif /* __LINUX_XHCI_DBGP_H */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index bde063cefd04..c102ef65cb64 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -608,8 +608,13 @@ static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
return fn(arg);
}
+static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
+{
+ return fn(arg);
+}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
+long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER
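
work_on_cpu_safe() mirrors work_on_cpu() but is intended to also guard against the target CPU going away while the function runs; a sketch with a hypothetical callback:

#include <linux/workqueue.h>

static long read_cpu_state(void *arg)
{
	return 0;			/* runs on the chosen CPU */
}

static long query_cpu(int cpu)
{
	return work_on_cpu_safe(cpu, read_cpu_state, NULL);
}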
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a3c0cbd7c888..d5815794416c 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -237,6 +237,7 @@ static inline void inode_attach_wb(struct inode *inode, struct page *page)
static inline void inode_detach_wb(struct inode *inode)
{
if (inode->i_wb) {
+ WARN_ON_ONCE(!(inode->i_state & I_CLEAR));
wb_put(inode->i_wb);
inode->i_wb = NULL;
}
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index 9b4c22a36931..66dbed0c146d 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -100,7 +100,7 @@ struct sockaddr_ib {
*/
static inline bool ib_safe_file_access(struct file *filp)
{
- return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
+ return filp->f_cred == current_cred() && !uaccess_kernel();
}
#endif /* _RDMA_IB_H */
diff --git a/include/scsi/scsi_request.h b/include/scsi/scsi_request.h
index ba0aeb980f7e..f0c76f9dc285 100644
--- a/include/scsi/scsi_request.h
+++ b/include/scsi/scsi_request.h
@@ -9,8 +9,10 @@ struct scsi_request {
unsigned char __cmd[BLK_MAX_CDB];
unsigned char *cmd;
unsigned short cmd_len;
+ int result;
unsigned int sense_len;
unsigned int resid_len; /* residual count */
+ int retries;
void *sense;
};
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index 3d4df74a96de..d4dfefdee6c1 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -36,8 +36,11 @@
/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
#define QMAN_CHANNEL_POOL1 0x21
+#define QMAN_CHANNEL_CAAM 0x80
#define QMAN_CHANNEL_POOL1_REV3 0x401
+#define QMAN_CHANNEL_CAAM_REV3 0x840
extern u16 qm_channel_pool1;
+extern u16 qm_channel_caam;
/* Portal processing (interrupt) sources */
#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
@@ -165,6 +168,7 @@ static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
#define qm_fd_set_contig_big(fd, len) \
qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)
+#define qm_fd_set_compound(fd, len) qm_fd_set_param(fd, qm_fd_compound, 0, len)
static inline void qm_fd_clear_fd(struct qm_fd *fd)
{
@@ -639,6 +643,7 @@ struct qm_mcc_initcgr {
#define QM_CGR_WE_MODE 0x0001
#define QMAN_CGR_FLAG_USE_INIT 0x00000001
+#define QMAN_CGR_MODE_FRAME 0x00000001
/* Portal and Frame Queues */
/* Represents a managed portal */
@@ -791,6 +796,84 @@ struct qman_cgr {
#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
+/*
+ * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID.
+ */
+#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
+#define QM_VDQCR_EXACT 0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
+
+#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
+#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
+#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
+
+/* "Query FQ Non-Programmable Fields" */
+struct qm_mcr_queryfq_np {
+ u8 verb;
+ u8 result;
+ u8 __reserved1;
+ u8 state; /* QM_MCR_NP_STATE_*** */
+ u32 fqd_link; /* 24-bit, _res2[24-31] */
+ u16 odp_seq; /* 14-bit, _res3[14-15] */
+ u16 orp_nesn; /* 14-bit, _res4[14-15] */
+ u16 orp_ea_hseq; /* 15-bit, _res5[15] */
+ u16 orp_ea_tseq; /* 15-bit, _res6[15] */
+ u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */
+ u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */
+ u32 pfdr_hptr; /* 24-bit, _res9[24-31] */
+ u32 pfdr_tptr; /* 24-bit, _res10[24-31] */
+ u8 __reserved2[5];
+ u8 is; /* 1-bit, _res12[1-7] */
+ u16 ics_surp;
+ u32 byte_cnt;
+ u32 frm_cnt; /* 24-bit, _res13[24-31] */
+ u32 __reserved3;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved4;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+} __packed;
+
+#define QM_MCR_NP_STATE_FE 0x10
+#define QM_MCR_NP_STATE_R 0x08
+#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
+#define QM_MCR_NP_STATE_OOS 0x00
+#define QM_MCR_NP_STATE_RETIRED 0x01
+#define QM_MCR_NP_STATE_TEN_SCHED 0x02
+#define QM_MCR_NP_STATE_TRU_SCHED 0x03
+#define QM_MCR_NP_STATE_PARKED 0x04
+#define QM_MCR_NP_STATE_ACTIVE 0x05
+#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
+
+enum qm_mcr_queryfq_np_masks {
+ qm_mcr_fqd_link_mask = BIT(24) - 1,
+ qm_mcr_odp_seq_mask = BIT(14) - 1,
+ qm_mcr_orp_nesn_mask = BIT(14) - 1,
+ qm_mcr_orp_ea_hseq_mask = BIT(15) - 1,
+ qm_mcr_orp_ea_tseq_mask = BIT(15) - 1,
+ qm_mcr_orp_ea_hptr_mask = BIT(24) - 1,
+ qm_mcr_orp_ea_tptr_mask = BIT(24) - 1,
+ qm_mcr_pfdr_hptr_mask = BIT(24) - 1,
+ qm_mcr_pfdr_tptr_mask = BIT(24) - 1,
+ qm_mcr_is_mask = BIT(1) - 1,
+ qm_mcr_frm_cnt_mask = BIT(24) - 1,
+};
+
+#define qm_mcr_np_get(np, field) \
+ ((np)->field & (qm_mcr_##field##_mask))
+
/* Portal Management */
/**
* qman_p_irqsource_add - add processing sources to be interrupt-driven
@@ -963,6 +1046,25 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags);
*/
int qman_oos_fq(struct qman_fq *fq);
+/*
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (i.e. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
+ * for the QMAN_FQ_STATE_VDQCR bit to disappear.
+ */
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
+
/**
* qman_enqueue - Enqueue a frame to a frame queue
* @fq: the frame queue object to enqueue to
@@ -994,6 +1096,13 @@ int qman_alloc_fqid_range(u32 *result, u32 count);
*/
int qman_release_fqid(u32 fqid);
+/**
+ * qman_query_fq_np - Queries non-programmable FQD fields
+ * @fq: the frame queue object to be queried
+ * @np: storage for the queried FQD fields
+ */
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
+
/* Pool-channel management */
/**
* qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
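
The new query interface pairs with the qm_mcr_np_get() accessor above; a sketch of reading a frame queue's current frame count:

#include <soc/fsl/qman.h>

static int fq_frame_count(struct qman_fq *fq, u32 *count)
{
	struct qm_mcr_queryfq_np np;
	int ret;

	ret = qman_query_fq_np(fq, &np);
	if (ret)
		return ret;

	*count = qm_mcr_np_get(&np, frm_cnt);	/* masks off reserved bits */
	return 0;
}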
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index a88ed13446ff..d0dbe60d8a6d 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -61,7 +61,16 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,
TP_ARGS(bh)
);
-DECLARE_EVENT_CLASS(block_rq_with_error,
+/**
+ * block_rq_requeue - place block IO request back on a queue
+ * @q: queue holding operation
+ * @rq: block IO operation request
+ *
+ * The block operation request @rq is being placed back into queue
+ * @q. For some reason the request was not completed and needs to be
+ * put back in the queue.
+ */
+TRACE_EVENT(block_rq_requeue,
TP_PROTO(struct request_queue *q, struct request *rq),
@@ -71,7 +80,6 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
- __field( int, errors )
__array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, 1 )
),
@@ -80,7 +88,6 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
- __entry->errors = rq->errors;
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
__get_str(cmd)[0] = '\0';
@@ -90,46 +97,13 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __get_str(cmd),
(unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->errors)
-);
-
-/**
- * block_rq_abort - abort block operation request
- * @q: queue containing the block operation request
- * @rq: block IO operation request
- *
- * Called immediately after pending block IO operation request @rq in
- * queue @q is aborted. The fields in the operation request @rq
- * can be examined to determine which device and sectors the pending
- * operation would access.
- */
-DEFINE_EVENT(block_rq_with_error, block_rq_abort,
-
- TP_PROTO(struct request_queue *q, struct request *rq),
-
- TP_ARGS(q, rq)
-);
-
-/**
- * block_rq_requeue - place block IO request back on a queue
- * @q: queue holding operation
- * @rq: block IO operation request
- *
- * The block operation request @rq is being placed back into queue
- * @q. For some reason the request was not completed and needs to be
- * put back in the queue.
- */
-DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
-
- TP_PROTO(struct request_queue *q, struct request *rq),
-
- TP_ARGS(q, rq)
+ __entry->nr_sector, 0)
);
/**
* block_rq_complete - block IO operation completed by device driver
- * @q: queue containing the block operation request
* @rq: block operations request
+ * @error: status code
* @nr_bytes: number of completed bytes
*
* The block_rq_complete tracepoint event indicates that some portion
@@ -140,16 +114,15 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
*/
TRACE_EVENT(block_rq_complete,
- TP_PROTO(struct request_queue *q, struct request *rq,
- unsigned int nr_bytes),
+ TP_PROTO(struct request *rq, int error, unsigned int nr_bytes),
- TP_ARGS(q, rq, nr_bytes),
+ TP_ARGS(rq, error, nr_bytes),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
- __field( int, errors )
+ __field( int, error )
__array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, 1 )
),
@@ -158,7 +131,7 @@ TRACE_EVENT(block_rq_complete,
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = nr_bytes >> 9;
- __entry->errors = rq->errors;
+ __entry->error = error;
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
__get_str(cmd)[0] = '\0';
@@ -168,7 +141,7 @@ TRACE_EVENT(block_rq_complete,
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __get_str(cmd),
(unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->errors)
+ __entry->nr_sector, __entry->error)
);
DECLARE_EVENT_CLASS(block_rq,
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9e3ef6c99e4b..ae1409ffe99a 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -70,7 +70,7 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
- __entry->prio = p->prio;
+ __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
__entry->success = 1; /* rudiment, kill when possible */
__entry->target_cpu = task_cpu(p);
),
@@ -147,6 +147,7 @@ TRACE_EVENT(sched_switch,
memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
__entry->next_pid = next->pid;
__entry->next_prio = next->prio;
+ /* XXX SCHED_DEADLINE */
),
TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
@@ -181,7 +182,7 @@ TRACE_EVENT(sched_migrate_task,
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
- __entry->prio = p->prio;
+ __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
__entry->orig_cpu = task_cpu(p);
__entry->dest_cpu = dest_cpu;
),
@@ -206,7 +207,7 @@ DECLARE_EVENT_CLASS(sched_process_template,
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
- __entry->prio = p->prio;
+ __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
),
TP_printk("comm=%s pid=%d prio=%d",
@@ -253,7 +254,7 @@ TRACE_EVENT(sched_process_wait,
TP_fast_assign(
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
__entry->pid = pid_nr(pid);
- __entry->prio = current->prio;
+ __entry->prio = current->prio; /* XXX SCHED_DEADLINE */
),
TP_printk("comm=%s pid=%d prio=%d",
@@ -413,9 +414,9 @@ DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
*/
TRACE_EVENT(sched_pi_setprio,
- TP_PROTO(struct task_struct *tsk, int newprio),
+ TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),
- TP_ARGS(tsk, newprio),
+ TP_ARGS(tsk, pi_task),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -428,7 +429,8 @@ TRACE_EVENT(sched_pi_setprio,
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->oldprio = tsk->prio;
- __entry->newprio = newprio;
+ __entry->newprio = pi_task ? pi_task->prio : tsk->prio;
+ /* XXX SCHED_DEADLINE bits missing */
),
TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index bce990f5a35d..31acce9019a6 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -241,21 +241,21 @@ TRACE_EVENT(xen_mmu_set_pud,
(int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
);
-TRACE_EVENT(xen_mmu_set_pgd,
- TP_PROTO(pgd_t *pgdp, pgd_t *user_pgdp, pgd_t pgdval),
- TP_ARGS(pgdp, user_pgdp, pgdval),
+TRACE_EVENT(xen_mmu_set_p4d,
+ TP_PROTO(p4d_t *p4dp, p4d_t *user_p4dp, p4d_t p4dval),
+ TP_ARGS(p4dp, user_p4dp, p4dval),
TP_STRUCT__entry(
- __field(pgd_t *, pgdp)
- __field(pgd_t *, user_pgdp)
- __field(pgdval_t, pgdval)
- ),
- TP_fast_assign(__entry->pgdp = pgdp;
- __entry->user_pgdp = user_pgdp;
- __entry->pgdval = pgdval.pgd),
- TP_printk("pgdp %p user_pgdp %p pgdval %0*llx (raw %0*llx)",
- __entry->pgdp, __entry->user_pgdp,
- (int)sizeof(pgdval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pgdval)),
- (int)sizeof(pgdval_t) * 2, (unsigned long long)__entry->pgdval)
+ __field(p4d_t *, p4dp)
+ __field(p4d_t *, user_p4dp)
+ __field(p4dval_t, p4dval)
+ ),
+ TP_fast_assign(__entry->p4dp = p4dp;
+ __entry->user_p4dp = user_p4dp;
+ __entry->p4dval = p4d_val(p4dval)),
+ TP_printk("p4dp %p user_p4dp %p p4dval %0*llx (raw %0*llx)",
+ __entry->p4dp, __entry->user_p4dp,
+ (int)sizeof(p4dval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->p4dval)),
+ (int)sizeof(p4dval_t) * 2, (unsigned long long)__entry->p4dval)
);
TRACE_EVENT(xen_mmu_pud_clear,
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index a076cf1a3a23..061185a5eb51 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -194,8 +194,7 @@ __SYSCALL(__NR_quotactl, sys_quotactl)
/* fs/readdir.c */
#define __NR_getdents64 61
-#define __ARCH_WANT_COMPAT_SYS_GETDENTS64
-__SC_COMP(__NR_getdents64, sys_getdents64, compat_sys_getdents64)
+__SYSCALL(__NR_getdents64, sys_getdents64)
/* fs/read_write.c */
#define __NR3264_lseek 62
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 11d21fce14d6..b4def5c630e7 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -31,7 +31,7 @@ enum {
#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
#define CRYPTO_NR_MSGTYPES (CRYPTO_MSG_MAX + 1 - CRYPTO_MSG_BASE)
-#define CRYPTO_MAX_NAME CRYPTO_MAX_ALG_NAME
+#define CRYPTO_MAX_NAME 64
/* Netlink message attributes. */
enum crypto_attr_type_t {
@@ -53,9 +53,9 @@ enum crypto_attr_type_t {
};
struct crypto_user_alg {
- char cru_name[CRYPTO_MAX_ALG_NAME];
- char cru_driver_name[CRYPTO_MAX_ALG_NAME];
- char cru_module_name[CRYPTO_MAX_ALG_NAME];
+ char cru_name[CRYPTO_MAX_NAME];
+ char cru_driver_name[CRYPTO_MAX_NAME];
+ char cru_module_name[CRYPTO_MAX_NAME];
__u32 cru_type;
__u32 cru_mask;
__u32 cru_refcnt;
@@ -73,7 +73,7 @@ struct crypto_report_hash {
};
struct crypto_report_cipher {
- char type[CRYPTO_MAX_ALG_NAME];
+ char type[CRYPTO_MAX_NAME];
unsigned int blocksize;
unsigned int min_keysize;
unsigned int max_keysize;
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index cb5d1a519202..9cd1de954c0a 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -42,7 +42,6 @@
#define EM_TILEGX 191 /* Tilera TILE-Gx */
#define EM_BPF 247 /* Linux BPF - in-kernel virtual machine */
#define EM_FRV 0x5441 /* Fujitsu FR-V */
-#define EM_AVR32 0x18ad /* Atmel AVR32 */
/*
* This is an interim value that we will use until the committee comes
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index b59ee077a596..176b6cb1008d 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -409,6 +409,8 @@ typedef struct elf64_shdr {
#define NT_S390_TDB 0x308 /* s390 transaction diagnostic block */
#define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 upper half */
#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */
+#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */
+#define NT_S390_GS_BC 0x30c /* s390 guarded storage broadcast control block */
#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
#define NT_ARM_TLS 0x401 /* ARM TLS register */
#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
index fd19f36b3129..c8aec4b9e73b 100644
--- a/include/uapi/linux/lightnvm.h
+++ b/include/uapi/linux/lightnvm.h
@@ -85,6 +85,10 @@ struct nvm_ioctl_create_conf {
};
};
+enum {
+ NVM_TARGET_FACTORY = 1 << 0, /* Init target in factory mode */
+};
+
struct nvm_ioctl_create {
char dev[DISK_NAME_LEN]; /* open-channel SSD device */
char tgttype[NVM_TTYPE_NAME_MAX]; /* target type name */
diff --git a/include/uapi/linux/nbd-netlink.h b/include/uapi/linux/nbd-netlink.h
new file mode 100644
index 000000000000..6f7ca3d63a65
--- /dev/null
+++ b/include/uapi/linux/nbd-netlink.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2017 Facebook. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+#ifndef _UAPILINUX_NBD_NETLINK_H
+#define _UAPILINUX_NBD_NETLINK_H
+
+#define NBD_GENL_FAMILY_NAME "nbd"
+#define NBD_GENL_VERSION 0x1
+#define NBD_GENL_MCAST_GROUP_NAME "nbd_mc_group"
+
+/* Configuration policy attributes, used for CONNECT */
+enum {
+ NBD_ATTR_UNSPEC,
+ NBD_ATTR_INDEX,
+ NBD_ATTR_SIZE_BYTES,
+ NBD_ATTR_BLOCK_SIZE_BYTES,
+ NBD_ATTR_TIMEOUT,
+ NBD_ATTR_SERVER_FLAGS,
+ NBD_ATTR_CLIENT_FLAGS,
+ NBD_ATTR_SOCKETS,
+ NBD_ATTR_DEAD_CONN_TIMEOUT,
+ NBD_ATTR_DEVICE_LIST,
+ __NBD_ATTR_MAX,
+};
+#define NBD_ATTR_MAX (__NBD_ATTR_MAX - 1)
+
+/*
+ * This is the format for multiple devices with NBD_ATTR_DEVICE_LIST
+ *
+ * [NBD_ATTR_DEVICE_LIST]
+ * [NBD_DEVICE_ITEM]
+ * [NBD_DEVICE_INDEX]
+ * [NBD_DEVICE_CONNECTED]
+ */
+enum {
+ NBD_DEVICE_ITEM_UNSPEC,
+ NBD_DEVICE_ITEM,
+ __NBD_DEVICE_ITEM_MAX,
+};
+#define NBD_DEVICE_ITEM_MAX (__NBD_DEVICE_ITEM_MAX - 1)
+
+enum {
+ NBD_DEVICE_UNSPEC,
+ NBD_DEVICE_INDEX,
+ NBD_DEVICE_CONNECTED,
+ __NBD_DEVICE_MAX,
+};
+#define NBD_DEVICE_ATTR_MAX (__NBD_DEVICE_MAX - 1)
+
+/*
+ * This is the format for multiple sockets with NBD_ATTR_SOCKETS
+ *
+ * [NBD_ATTR_SOCKETS]
+ * [NBD_SOCK_ITEM]
+ * [NBD_SOCK_FD]
+ * [NBD_SOCK_ITEM]
+ * [NBD_SOCK_FD]
+ */
+enum {
+ NBD_SOCK_ITEM_UNSPEC,
+ NBD_SOCK_ITEM,
+ __NBD_SOCK_ITEM_MAX,
+};
+#define NBD_SOCK_ITEM_MAX (__NBD_SOCK_ITEM_MAX - 1)
+
+enum {
+ NBD_SOCK_UNSPEC,
+ NBD_SOCK_FD,
+ __NBD_SOCK_MAX,
+};
+#define NBD_SOCK_MAX (__NBD_SOCK_MAX - 1)
+
+enum {
+ NBD_CMD_UNSPEC,
+ NBD_CMD_CONNECT,
+ NBD_CMD_DISCONNECT,
+ NBD_CMD_RECONFIGURE,
+ NBD_CMD_LINK_DEAD,
+ NBD_CMD_STATUS,
+ __NBD_CMD_MAX,
+};
+#define NBD_CMD_MAX (__NBD_CMD_MAX - 1)
+
+#endif /* _UAPILINUX_NBD_NETLINK_H */
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index c91c642ea900..155e33f81913 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -37,7 +37,7 @@ enum {
NBD_CMD_TRIM = 4
};
-/* values for flags field */
+/* values for flags field, these are server interaction specific. */
#define NBD_FLAG_HAS_FLAGS (1 << 0) /* nbd-server supports flags */
#define NBD_FLAG_READ_ONLY (1 << 1) /* device is read-only */
#define NBD_FLAG_SEND_FLUSH (1 << 2) /* can flush writeback cache */
@@ -45,6 +45,10 @@ enum {
#define NBD_FLAG_SEND_TRIM (1 << 5) /* send trim/discard */
#define NBD_FLAG_CAN_MULTI_CONN (1 << 8) /* Server supports multiple connections per export. */
+/* These are client behavior specific flags. */
+#define NBD_CFLAG_DESTROY_ON_DISCONNECT (1 << 0) /* delete the nbd device on
+ disconnect. */
+
/* userspace doesn't need the nbd_device structure */
/* These are sent over the network in the request/reply magic fields */
diff --git a/include/uapi/linux/nubus.h b/include/uapi/linux/nubus.h
index 77513d2b5638..ac516064f0ee 100644
--- a/include/uapi/linux/nubus.h
+++ b/include/uapi/linux/nubus.h
@@ -113,13 +113,15 @@ enum nubus_drhw {
NUBUS_DRHW_SIGMA_CLRMAX = 0x0007, /* Sigma Design ColorMax */
NUBUS_DRHW_APPLE_SE30 = 0x0009, /* Apple SE/30 video */
NUBUS_DRHW_APPLE_HRVC = 0x0013, /* Mac II High-Res Video Card */
+ NUBUS_DRHW_APPLE_MVC = 0x0014, /* Mac II Monochrome Video Card */
NUBUS_DRHW_APPLE_PVC = 0x0017, /* Mac II Portrait Video Card */
NUBUS_DRHW_APPLE_RBV1 = 0x0018, /* IIci RBV video */
NUBUS_DRHW_APPLE_MDC = 0x0019, /* Macintosh Display Card */
+ NUBUS_DRHW_APPLE_VSC = 0x0020, /* Duo MiniDock ViSC framebuffer */
NUBUS_DRHW_APPLE_SONORA = 0x0022, /* Sonora built-in video */
+ NUBUS_DRHW_APPLE_JET = 0x0029, /* Jet framebuffer (DuoDock) */
NUBUS_DRHW_APPLE_24AC = 0x002b, /* Mac 24AC Video Card */
NUBUS_DRHW_APPLE_VALKYRIE = 0x002e,
- NUBUS_DRHW_APPLE_JET = 0x0029, /* Jet framebuffer (DuoDock) */
NUBUS_DRHW_SMAC_GFX = 0x0105, /* SuperMac GFX */
NUBUS_DRHW_RASTER_CB264 = 0x013B, /* RasterOps ColorBoard 264 */
NUBUS_DRHW_MICRON_XCEED = 0x0146, /* Micron Exceed color */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index c66a485a24ac..d09a9cd021b1 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -344,7 +344,8 @@ struct perf_event_attr {
use_clockid : 1, /* use @clockid for time fields */
context_switch : 1, /* context switch data */
write_backward : 1, /* Write ring buffer from end to beginning */
- __reserved_1 : 36;
+ namespaces : 1, /* include namespaces data */
+ __reserved_1 : 35;
union {
__u32 wakeup_events; /* wakeup every n events */
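A minimal sketch of opting in to the new attribute bit, assuming a uapi perf_event.h that already carries this change; perf_event_open() has no glibc wrapper, so it is reached through syscall(). The event choice (a dummy software event) is only for illustration.

#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Illustrative: open an event that requests PERF_RECORD_NAMESPACES data. */
static int open_namespaces_event(pid_t pid)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_DUMMY;
        attr.sample_period = 1;
        attr.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
        attr.namespaces = 1;            /* the bit added in this hunk */

        return syscall(__NR_perf_event_open, &attr, pid,
                       -1 /* any cpu */, -1 /* no group */, 0);
}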
@@ -610,6 +611,23 @@ struct perf_event_header {
__u16 size;
};
+struct perf_ns_link_info {
+ __u64 dev;
+ __u64 ino;
+};
+
+enum {
+ NET_NS_INDEX = 0,
+ UTS_NS_INDEX = 1,
+ IPC_NS_INDEX = 2,
+ PID_NS_INDEX = 3,
+ USER_NS_INDEX = 4,
+ MNT_NS_INDEX = 5,
+ CGROUP_NS_INDEX = 6,
+
+ NR_NAMESPACES, /* number of available namespaces */
+};
+
enum perf_event_type {
/*
@@ -862,6 +880,18 @@ enum perf_event_type {
*/
PERF_RECORD_SWITCH_CPU_WIDE = 15,
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid;
+ * u32 tid;
+ * u64 nr_namespaces;
+ * { u64 dev, inode; } [nr_namespaces];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_NAMESPACES = 16,
+
PERF_RECORD_MAX, /* non-ABI */
};
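The record layout documented in the comment above can be viewed through a small reader-side structure; this is a sketch for illustration only (the trailing struct sample_id, whose contents depend on attr.sample_type, follows the variable-length array and is not shown).

#include <linux/perf_event.h>

/* Illustrative view of a PERF_RECORD_NAMESPACES body in the ring buffer. */
struct namespaces_record {
        struct perf_event_header header;      /* header.type == PERF_RECORD_NAMESPACES */
        __u32 pid;
        __u32 tid;
        __u64 nr_namespaces;                  /* typically NR_NAMESPACES */
        struct perf_ns_link_info link_info[]; /* e.g. link_info[PID_NS_INDEX].ino */
        /* struct sample_id follows the array */
};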
@@ -885,6 +915,7 @@ enum perf_callchain_context {
*/
#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
+#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
#define PERF_FLAG_FD_OUTPUT (1UL << 1)
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index d538897b8e08..17b10304c393 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -48,17 +48,13 @@
* tv_sec holds the number of seconds before (negative) or after (positive)
* 00:00:00 1st January 1970 UTC.
*
- * tv_nsec holds a number of nanoseconds before (0..-999,999,999 if tv_sec is
- * negative) or after (0..999,999,999 if tv_sec is positive) the tv_sec time.
- *
- * Note that if both tv_sec and tv_nsec are non-zero, then the two values must
- * either be both positive or both negative.
+ * tv_nsec holds a number of nanoseconds (0..999,999,999) after the tv_sec time.
*
* __reserved is held in case we need a yet finer resolution.
*/
struct statx_timestamp {
__s64 tv_sec;
- __s32 tv_nsec;
+ __u32 tv_nsec;
__s32 __reserved;
};
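With tv_nsec now unsigned and always in 0..999,999,999, converting a statx timestamp to a struct timespec is a plain field copy; a minimal sketch, assuming a uapi linux/stat.h new enough to define struct statx_timestamp:

#include <time.h>
#include <linux/stat.h>

/* Illustrative: no sign handling is needed for tv_nsec any more. */
static struct timespec statx_ts_to_timespec(const struct statx_timestamp *ts)
{
        struct timespec out;

        out.tv_sec  = ts->tv_sec;
        out.tv_nsec = ts->tv_nsec;
        return out;
}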
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 519eff362c1c..ae461050661a 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -198,6 +198,7 @@ struct vfio_device_info {
#define VFIO_DEVICE_FLAGS_PCI (1 << 1) /* vfio-pci device */
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
+#define VFIO_DEVICE_FLAGS_CCW (1 << 4) /* vfio-ccw device */
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
};
@@ -212,6 +213,7 @@ struct vfio_device_info {
#define VFIO_DEVICE_API_PCI_STRING "vfio-pci"
#define VFIO_DEVICE_API_PLATFORM_STRING "vfio-platform"
#define VFIO_DEVICE_API_AMBA_STRING "vfio-amba"
+#define VFIO_DEVICE_API_CCW_STRING "vfio-ccw"
/**
* VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
@@ -446,6 +448,22 @@ enum {
VFIO_PCI_NUM_IRQS
};
+/*
+ * The vfio-ccw bus driver makes use of the following fixed region and
+ * IRQ index mapping. Unimplemented regions return a size of zero.
+ * Unimplemented IRQ types return a count of zero.
+ */
+
+enum {
+ VFIO_CCW_CONFIG_REGION_INDEX,
+ VFIO_CCW_NUM_REGIONS
+};
+
+enum {
+ VFIO_CCW_IO_IRQ_INDEX,
+ VFIO_CCW_NUM_IRQS
+};
+
/**
* VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IORW(VFIO_TYPE, VFIO_BASE + 12,
* struct vfio_pci_hot_reset_info)
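Because the ccw region indices are fixed, userspace can look up the config region directly with the existing VFIO_DEVICE_GET_REGION_INFO ioctl; the following sketch assumes device_fd is an already-open vfio device file descriptor and is illustrative only.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Illustrative: find offset/size of the vfio-ccw config region. */
static int ccw_config_region(int device_fd, __u64 *offset, __u64 *size)
{
        struct vfio_region_info info;

        memset(&info, 0, sizeof(info));
        info.argsz = sizeof(info);
        info.index = VFIO_CCW_CONFIG_REGION_INDEX;

        if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info) < 0)
                return -1;

        *offset = info.offset;
        *size = info.size;      /* zero means the region is unimplemented */
        return 0;
}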
diff --git a/include/uapi/linux/vfio_ccw.h b/include/uapi/linux/vfio_ccw.h
new file mode 100644
index 000000000000..34a7f6f9e065
--- /dev/null
+++ b/include/uapi/linux/vfio_ccw.h
@@ -0,0 +1,24 @@
+/*
+ * Interfaces for vfio-ccw
+ *
+ * Copyright IBM Corp. 2017
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ */
+
+#ifndef _VFIO_CCW_H_
+#define _VFIO_CCW_H_
+
+#include <linux/types.h>
+
+struct ccw_io_region {
+#define ORB_AREA_SIZE 12
+ __u8 orb_area[ORB_AREA_SIZE];
+#define SCSW_AREA_SIZE 12
+ __u8 scsw_area[SCSW_AREA_SIZE];
+#define IRB_AREA_SIZE 96
+ __u8 irb_area[IRB_AREA_SIZE];
+ __u32 ret_code;
+} __packed;
+
+#endif
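A rough sketch of how a userspace driver might drive this region. It assumes the exported header is usable from userspace as-is, that the region is accessed with pwrite()/pread() at the offset returned by the region-info lookup above, and that a ret_code of zero means success; none of this is spelled out by the header itself.

#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <linux/vfio_ccw.h>

/* Illustrative: copy an ORB/SCSW pair into the I/O region, kick off the
 * request with a write, then read the region back for ret_code and the IRB. */
static int ccw_submit_io(int device_fd, off_t region_off,
                         const void *orb, const void *scsw,
                         struct ccw_io_region *out)
{
        struct ccw_io_region region;

        memset(&region, 0, sizeof(region));
        memcpy(region.orb_area, orb, ORB_AREA_SIZE);
        memcpy(region.scsw_area, scsw, SCSW_AREA_SIZE);

        if (pwrite(device_fd, &region, sizeof(region), region_off) != sizeof(region))
                return -1;
        if (pread(device_fd, &region, sizeof(region), region_off) != sizeof(region))
                return -1;

        *out = region;
        return region.ret_code;
}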
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
index f9466fa54ba4..3ea90aea5617 100644
--- a/include/video/udlfb.h
+++ b/include/video/udlfb.h
@@ -92,6 +92,6 @@ struct dlfb_data {
/* remove these once align.h patch is taken into kernel */
#define DL_ALIGN_UP(x, a) ALIGN(x, a)
-#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a)
+#define DL_ALIGN_DOWN(x, a) ALIGN_DOWN(x, a)
#endif
diff --git a/include/xen/page.h b/include/xen/page.h
index 9dc46cb8a0fd..064194f6453e 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -38,7 +38,7 @@ struct xen_memory_region {
unsigned long n_pfns;
};
-#define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820MAX */
+#define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820_MAX_ENTRIES_ZEROPAGE */
extern __initdata
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS];