From 69b90f1aadd68fb61d7ff182886f11761086c04a Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Wed, 3 Jul 2013 12:51:36 +0200 Subject: ARM: at91/DT: at91sam9x5ek: fix USB host property to enable port C Device Tree "num-ports" property of USB host node has to be set to maximum number of ports available. The possibility to activate a particular port is done by specifying the proper gpio configuration for its vbus. This patch fixes the USB host node by configuring the 3 ports available on the product and letting "port A" available for USB gadget usage. Reported-by: Rodolfo Giometti Signed-off-by: Nicolas Ferre Acked-by: Jean-Christophe PLAGNIOL-VILLARD Cc: Bo Shen --- arch/arm/boot/dts/at91sam9x5ek.dtsi | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi index b753855b2058..49e3c45818c2 100644 --- a/arch/arm/boot/dts/at91sam9x5ek.dtsi +++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi @@ -94,8 +94,9 @@ usb0: ohci@00600000 { status = "okay"; - num-ports = <2>; - atmel,vbus-gpio = <&pioD 19 GPIO_ACTIVE_LOW + num-ports = <3>; + atmel,vbus-gpio = <0 /* &pioD 18 GPIO_ACTIVE_LOW *//* Activate to have access to port A */ + &pioD 19 GPIO_ACTIVE_LOW &pioD 20 GPIO_ACTIVE_LOW >; }; -- cgit From 35f8550c8f27968af94ba5235cc611e3b6d86981 Mon Sep 17 00:00:00 2001 From: Kukjin Kim Date: Tue, 30 Jul 2013 11:32:40 +0900 Subject: ARM: SAMSUNG: fix to support for missing cpu specific map_io Since commit 7ed76e08 (ARM: EXYNOS: Fix low level debug support) map_io() is not needed for exynos5440 so need to fix to lookup cpu which using map_io(). Without this, kernel boot log complains 'CPU EXYNOS5440 support not enabled' on exynos5440 and panic(). Signed-off-by: Kukjin Kim --- arch/arm/plat-samsung/init.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/plat-samsung/init.c b/arch/arm/plat-samsung/init.c index 3e5c4619caa5..50a3ea0037db 100644 --- a/arch/arm/plat-samsung/init.c +++ b/arch/arm/plat-samsung/init.c @@ -55,12 +55,13 @@ void __init s3c_init_cpu(unsigned long idcode, printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode); - if (cpu->map_io == NULL || cpu->init == NULL) { + if (cpu->init == NULL) { printk(KERN_ERR "CPU %s support not enabled\n", cpu->name); panic("Unsupported Samsung CPU"); } - cpu->map_io(); + if (cpu->map_io) + cpu->map_io(); } /* s3c24xx_init_clocks -- cgit From 0d7febe58413884f6428143221971618fbf3a47d Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Mon, 29 Jul 2013 17:06:05 +0100 Subject: xen/arm: missing put_cpu in xen_percpu_init When CONFIG_PREEMPT is enabled, Linux will not be able to boot and warn: [ 4.127825] ------------[ cut here ]------------ [ 4.133376] WARNING: at init/main.c:699 do_one_initcall+0x150/0x158() [ 4.140738] initcall xen_init_events+0x0/0x10c returned with preemption imbalance This is because xen_percpu_init uses get_cpu but doesn't have the corresponding put_cpu. 
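For reference, the pairing that the one-line fix below restores looks like this (illustrative fragment only, not part of the patch; get_cpu()/put_cpu() are the standard helpers):

    int cpu = get_cpu();    /* preempt_disable() + smp_processor_id() */

    /* ... per-CPU setup that must not migrate between CPUs ... */

    put_cpu();              /* balances get_cpu(): preempt_enable() */

Without the final put_cpu() the preempt count stays elevated when the initcall returns, which is exactly the "preemption imbalance" reported above.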
Signed-off-by: Julien Grall Signed-off-by: Stefano Stabellini --- arch/arm/xen/enlighten.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 13609e01f4b7..05db95d010ae 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -170,6 +170,7 @@ static void __init xen_percpu_init(void *unused) per_cpu(xen_vcpu, cpu) = vcpup; enable_percpu_irq(xen_events_irq, 0); + put_cpu(); } static void xen_restart(char str, const char *cmd) -- cgit From 240e99cbd00aa541b572480e3ea7ecb0d480bc79 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Mon, 5 Aug 2013 18:08:41 -0700 Subject: ARM: KVM: Fix 64-bit coprocessor handling The PAR was exported as CRn == 7 and CRm == 0, but in fact the primary coprocessor register number was determined by CRm for 64-bit coprocessor registers as the user space API was modeled after the coprocessor access instructions (see the ARM ARM rev. C - B3-1445). However, just changing the CRn to CRm breaks the sorting check when booting the kernel, because the internal kernel logic always treats CRn as the primary register number, and it makes the table sorting impossible to understand for humans. Alternatively we could change the logic to always have CRn == CRm, but that becomes unclear in the number of ways we do look up of a coprocessor register. We could also have a separate 64-bit table but that feels somewhat over-engineered. Instead, keep CRn the primary representation of the primary coproc. register number in-kernel and always export the primary number as CRm as per the existing user space ABI. Note: The TTBR registers just magically worked because they happened to follow the CRn(0) regs and were considered CRn(0) in the in-kernel representation. Signed-off-by: Christoffer Dall --- arch/arm/kvm/coproc.c | 26 +++++++++++++++++++------- arch/arm/kvm/coproc.h | 3 +++ arch/arm/kvm/coproc_a15.c | 6 +++++- 3 files changed, 27 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 4a5199070430..db9cf692d4dd 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -146,7 +146,11 @@ static bool pm_fake(struct kvm_vcpu *vcpu, #define access_pmintenclr pm_fake /* Architected CP15 registers. - * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 + * CRn denotes the primary register number, but is copied to the CRm in the + * user space API for 64-bit register access in line with the terminology used + * in the ARM ARM. + * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit + * registers preceding 32-bit ones. */ static const struct coproc_reg cp15_regs[] = { /* CSSELR: swapped by interrupt.S. */ @@ -154,8 +158,8 @@ static const struct coproc_reg cp15_regs[] = { NULL, reset_unknown, c0_CSSELR }, /* TTBR0/TTBR1: swapped by interrupt.S. */ - { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, - { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, + { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, + { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, /* TTBCR: swapped by interrupt.S. 
*/ { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, @@ -182,7 +186,7 @@ static const struct coproc_reg cp15_regs[] = { NULL, reset_unknown, c6_IFAR }, /* PAR swapped by interrupt.S */ - { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, + { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, /* * DC{C,I,CI}SW operations: @@ -399,12 +403,13 @@ static bool index_to_params(u64 id, struct coproc_params *params) | KVM_REG_ARM_OPC1_MASK)) return false; params->is_64bit = true; - params->CRm = ((id & KVM_REG_ARM_CRM_MASK) + /* CRm to CRn: see cp15_to_index for details */ + params->CRn = ((id & KVM_REG_ARM_CRM_MASK) >> KVM_REG_ARM_CRM_SHIFT); params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) >> KVM_REG_ARM_OPC1_SHIFT); params->Op2 = 0; - params->CRn = 0; + params->CRm = 0; return true; default: return false; @@ -898,7 +903,14 @@ static u64 cp15_to_index(const struct coproc_reg *reg) if (reg->is_64) { val |= KVM_REG_SIZE_U64; val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); - val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); + /* + * CRn always denotes the primary coproc. reg. nr. for the + * in-kernel representation, but the user space API uses the + * CRm for the encoding, because it is modelled after the + * MRRC/MCRR instructions: see the ARM ARM rev. c page + * B3-1445 + */ + val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT); } else { val |= KVM_REG_SIZE_U32; val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h index b7301d3e4799..0461d5c8d3de 100644 --- a/arch/arm/kvm/coproc.h +++ b/arch/arm/kvm/coproc.h @@ -135,6 +135,8 @@ static inline int cmp_reg(const struct coproc_reg *i1, return -1; if (i1->CRn != i2->CRn) return i1->CRn - i2->CRn; + if (i1->is_64 != i2->is_64) + return i2->is_64 - i1->is_64; if (i1->CRm != i2->CRm) return i1->CRm - i2->CRm; if (i1->Op1 != i2->Op1) @@ -145,6 +147,7 @@ static inline int cmp_reg(const struct coproc_reg *i1, #define CRn(_x) .CRn = _x #define CRm(_x) .CRm = _x +#define CRm64(_x) .CRn = _x, .CRm = 0 #define Op1(_x) .Op1 = _x #define Op2(_x) .Op2 = _x #define is64 .is_64 = true diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c index 685063a6d0cf..cf93472b9dd6 100644 --- a/arch/arm/kvm/coproc_a15.c +++ b/arch/arm/kvm/coproc_a15.c @@ -114,7 +114,11 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu, /* * A15-specific CP15 registers. - * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 + * CRn denotes the primary register number, but is copied to the CRm in the + * user space API for 64-bit register access in line with the terminology used + * in the ARM ARM. + * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit + * registers preceding 32-bit ones. */ static const struct coproc_reg a15_regs[] = { /* MPIDR: we use VMPIDR for guest access. */ -- cgit From d3840b26614d8ce3db53c98061d9fcb1b9ccb0dd Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Tue, 6 Aug 2013 13:50:54 -0700 Subject: ARM: KVM: Fix unaligned unmap_range leak The unmap_range function did not properly cover the case when the start address was not aligned to PMD_SIZE or PUD_SIZE and an entire pte table or pmd table was cleared, causing us to leak memory when incrementing the addr. The fix is to always move onto the next page table entry boundary instead of adding the full size of the VA range covered by the corresponding table level entry. 
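A small stand-alone model of the arithmetic may help; the 2 MB PMD span and the addresses are assumed values for illustration only, and pmd_addr_end_model() merely mimics the kernel's pmd_addr_end()/pud_addr_end() helpers (which additionally guard against address wrap-around):

    #include <stdio.h>

    #define PMD_SIZE 0x200000UL            /* assumed 2 MB span */
    #define PMD_MASK (~(PMD_SIZE - 1))

    /* step to the next table-entry boundary, clamped to 'end' */
    static unsigned long pmd_addr_end_model(unsigned long addr, unsigned long end)
    {
            unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;
            return boundary < end ? boundary : end;
    }

    int main(void)
    {
            unsigned long addr = 0x100000UL, end = 0x400000UL;

            /* old code: addr += PMD_SIZE -> 0x300000, silently skipping
             * the 0x200000-0x300000 range; the boundary helper stops at
             * 0x200000 instead */
            printf("next = %#lx\n", pmd_addr_end_model(addr, end));
            return 0;
    }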
Acked-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/kvm/mmu.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index ca6bea4859b4..80a83ec4a9ae 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -132,37 +132,37 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, pmd_t *pmd; pte_t *pte; unsigned long long addr = start, end = start + size; - u64 range; + u64 next; while (addr < end) { pgd = pgdp + pgd_index(addr); pud = pud_offset(pgd, addr); if (pud_none(*pud)) { - addr += PUD_SIZE; + addr = pud_addr_end(addr, end); continue; } pmd = pmd_offset(pud, addr); if (pmd_none(*pmd)) { - addr += PMD_SIZE; + addr = pmd_addr_end(addr, end); continue; } pte = pte_offset_kernel(pmd, addr); clear_pte_entry(kvm, pte, addr); - range = PAGE_SIZE; + next = addr + PAGE_SIZE; /* If we emptied the pte, walk back up the ladder */ if (pte_empty(pte)) { clear_pmd_entry(kvm, pmd, addr); - range = PMD_SIZE; + next = pmd_addr_end(addr, end); if (pmd_empty(pmd)) { clear_pud_entry(kvm, pud, addr); - range = PUD_SIZE; + next = pud_addr_end(addr, end); } } - addr += range; + addr = next; } } -- cgit From 979acd5e18c3e5cb7e3308c699d79553af5af8c6 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 6 Aug 2013 13:05:48 +0100 Subject: arm64: KVM: fix 2-level page tables unmapping When using 64kB pages, we only have two levels of page tables, meaning that PGD, PUD and PMD are fused. In this case, trying to refcount PUDs and PMDs independently is a a complete disaster, as they are the same. We manage to get it right for the allocation (stage2_set_pte uses {pmd,pud}_none), but the unmapping path clears both pud and pmd refcounts, which fails spectacularly with 2-level page tables. The fix is to avoid calling clear_pud_entry when both the pmd and pud pages are empty. For this, and instead of introducing another pud_empty function, consolidate both pte_empty and pmd_empty into page_empty (the code is actually identical) and use that to also test the validity of the pud. 
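The "fused" levels come from the generic page-table folding headers; the idea, roughly (a sketch in the spirit of include/asm-generic/pgtable-nopud.h, not code from this patch):

    /* with a folded PUD level, the "pud" is simply the pgd entry itself */
    static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
    {
            return (pud_t *)pgd;
    }

so with 64kB pages the pud and pmd pointers end up naming the same page-table page, and refcounting them independently double-counts that single page, which is the failure mode described above.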
Signed-off-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/kvm/mmu.c | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 80a83ec4a9ae..0988d9e04dd4 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -85,6 +85,12 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) return p; } +static bool page_empty(void *ptr) +{ + struct page *ptr_page = virt_to_page(ptr); + return page_count(ptr_page) == 1; +} + static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) { pmd_t *pmd_table = pmd_offset(pud, 0); @@ -103,12 +109,6 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) put_page(virt_to_page(pmd)); } -static bool pmd_empty(pmd_t *pmd) -{ - struct page *pmd_page = virt_to_page(pmd); - return page_count(pmd_page) == 1; -} - static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) { if (pte_present(*pte)) { @@ -118,12 +118,6 @@ static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) } } -static bool pte_empty(pte_t *pte) -{ - struct page *pte_page = virt_to_page(pte); - return page_count(pte_page) == 1; -} - static void unmap_range(struct kvm *kvm, pgd_t *pgdp, unsigned long long start, u64 size) { @@ -153,10 +147,10 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, next = addr + PAGE_SIZE; /* If we emptied the pte, walk back up the ladder */ - if (pte_empty(pte)) { + if (page_empty(pte)) { clear_pmd_entry(kvm, pmd, addr); next = pmd_addr_end(addr, end); - if (pmd_empty(pmd)) { + if (page_empty(pmd) && !page_empty(pud)) { clear_pud_entry(kvm, pud, addr); next = pud_addr_end(addr, end); } -- cgit From 1bbd80549810637b7381ab0649ba7c7d62f1342a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 7 Jun 2013 11:02:34 +0100 Subject: arm64: KVM: perform save/restore of PAR_EL1 Not saving PAR_EL1 is an unfortunate oversight. If the guest performs an AT* operation and gets scheduled out before reading the result of the translation from PAREL1, it could become corrupted by another guest or the host. Saving this register is made slightly more complicated as KVM also uses it on the permission fault handling path, leading to an ugly "stash and restore" sequence. Fortunately, this is already a slow path so we don't really care. Also, Linux doesn't do any AT* operation, so Linux guests are not impacted by this bug. Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_asm.h | 17 ++++++++++------- arch/arm64/kvm/hyp.S | 10 ++++++++++ arch/arm64/kvm/sys_regs.c | 3 +++ 3 files changed, 23 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index c92de4163eba..b25763bc0ec4 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -42,14 +42,15 @@ #define TPIDR_EL1 18 /* Thread ID, Privileged */ #define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */ #define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ +#define PAR_EL1 21 /* Physical Address Register */ /* 32bit specific registers. 
Keep them at the end of the range */ -#define DACR32_EL2 21 /* Domain Access Control Register */ -#define IFSR32_EL2 22 /* Instruction Fault Status Register */ -#define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */ -#define DBGVCR32_EL2 24 /* Debug Vector Catch Register */ -#define TEECR32_EL1 25 /* ThumbEE Configuration Register */ -#define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */ -#define NR_SYS_REGS 27 +#define DACR32_EL2 22 /* Domain Access Control Register */ +#define IFSR32_EL2 23 /* Instruction Fault Status Register */ +#define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */ +#define DBGVCR32_EL2 25 /* Debug Vector Catch Register */ +#define TEECR32_EL1 26 /* ThumbEE Configuration Register */ +#define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */ +#define NR_SYS_REGS 28 /* 32bit mapping */ #define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ @@ -69,6 +70,8 @@ #define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ #define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ #define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ +#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */ +#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */ #define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ #define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ #define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */ diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index ff985e3d8b72..218802f68b20 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -214,6 +214,7 @@ __kvm_hyp_code_start: mrs x21, tpidr_el1 mrs x22, amair_el1 mrs x23, cntkctl_el1 + mrs x24, par_el1 stp x4, x5, [x3] stp x6, x7, [x3, #16] @@ -225,6 +226,7 @@ __kvm_hyp_code_start: stp x18, x19, [x3, #112] stp x20, x21, [x3, #128] stp x22, x23, [x3, #144] + str x24, [x3, #160] .endm .macro restore_sysregs @@ -243,6 +245,7 @@ __kvm_hyp_code_start: ldp x18, x19, [x3, #112] ldp x20, x21, [x3, #128] ldp x22, x23, [x3, #144] + ldr x24, [x3, #160] msr vmpidr_el2, x4 msr csselr_el1, x5 @@ -264,6 +267,7 @@ __kvm_hyp_code_start: msr tpidr_el1, x21 msr amair_el1, x22 msr cntkctl_el1, x23 + msr par_el1, x24 .endm .macro skip_32bit_state tmp, target @@ -753,6 +757,10 @@ el1_trap: */ tbnz x1, #7, 1f // S1PTW is set + /* Preserve PAR_EL1 */ + mrs x3, par_el1 + push x3, xzr + /* * Permission fault, HPFAR_EL2 is invalid. * Resolve the IPA the hard way using the guest VA. 
@@ -766,6 +774,8 @@ el1_trap: /* Read result */ mrs x3, par_el1 + pop x0, xzr // Restore PAR_EL1 from the stack + msr par_el1, x0 tbnz x3, #0, 3f // Bail out if we failed the translation ubfx x3, x3, #12, #36 // Extract IPA lsl x3, x3, #4 // and present it like HPFAR diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 94923609753b..02e9d09e1d80 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -211,6 +211,9 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* FAR_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), NULL, reset_unknown, FAR_EL1 }, + /* PAR_EL1 */ + { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000), + NULL, reset_unknown, PAR_EL1 }, /* PMINTENSET_EL1 */ { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), -- cgit From f142e5eeb724cfbedd203b32b3b542d78dbe2545 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 11 Jun 2013 18:05:25 +0100 Subject: arm64: KVM: add missing dsb before invalidating Stage-2 TLBs When performing a Stage-2 TLB invalidation, it is necessary to make sure the write to the page tables is observable by all CPUs. For this purpose, add dsb instructions to __kvm_tlb_flush_vmid_ipa and __kvm_flush_vm_context before doing the TLB invalidation itself. Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp.S | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 218802f68b20..1ac0bbbdddb2 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S @@ -604,6 +604,8 @@ END(__kvm_vcpu_run) // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); ENTRY(__kvm_tlb_flush_vmid_ipa) + dsb ishst + kern_hyp_va x0 ldr x2, [x0, #KVM_VTTBR] msr vttbr_el2, x2 @@ -625,6 +627,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa) ENDPROC(__kvm_tlb_flush_vmid_ipa) ENTRY(__kvm_flush_vm_context) + dsb ishst tlbi alle1is ic ialluis dsb sy -- cgit From 6c8c0c4dc0e98ee2191211d66e9f876e95787073 Mon Sep 17 00:00:00 2001 From: Chen Gang Date: Mon, 22 Jul 2013 04:40:38 +0100 Subject: arm64: KVM: use 'int' instead of 'u32' for variable 'target' in kvm_host.h. 'target' will be set to '-1' in kvm_arch_vcpu_init(), and it need check 'target' whether less than zero or not in kvm_vcpu_initialized(). So need define target as 'int' instead of 'u32', just like ARM has done. The related warning: arch/arm64/kvm/../../../arch/arm/kvm/arm.c:497:2: warning: comparison of unsigned expression >= 0 is always true [-Wtype-limits] Signed-off-by: Chen Gang [Marc: reformated the Subject line to fit the series] Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 644d73956864..0859a4ddd1e7 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -129,7 +129,7 @@ struct kvm_vcpu_arch { struct kvm_mmu_memory_cache mmu_page_cache; /* Target CPU and feature flags */ - u32 target; + int target; DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); /* Detect first run of a vcpu */ -- cgit From d55e37bb0f51316e552376ddc0a3fff34ca7108b Mon Sep 17 00:00:00 2001 From: Daniel Drake Date: Fri, 9 Aug 2013 18:14:20 -0400 Subject: x86: Don't clear olpc_ofw_header when sentinel is detected OpenFirmware wasn't quite following the protocol described in boot.txt and the kernel has detected this through use of the sentinel value in boot_params. 
OFW does zero out almost all of the stuff that it should do, but not the sentinel. This causes the kernel to clear olpc_ofw_header, which breaks x86 OLPC support. OpenFirmware has now been fixed. However, it would be nice if we could maintain Linux compatibility with old firmware versions. To do that, we just have to avoid zeroing out olpc_ofw_header. OFW does not write to any other parts of the header that are being zapped by the sentinel-detection code, and all users of olpc_ofw_header are somewhat protected through checking for the OLPC_OFW_SIG magic value before using it. So this should not cause any problems for anyone. Signed-off-by: Daniel Drake Link: http://lkml.kernel.org/r/20130809221420.618E6FAB03@dev.laptop.org Acked-by: Yinghai Lu Signed-off-by: H. Peter Anvin Cc: # v3.9+ --- arch/x86/include/asm/bootparam_utils.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h index 653668d140f9..4a8cb8d7cbd5 100644 --- a/arch/x86/include/asm/bootparam_utils.h +++ b/arch/x86/include/asm/bootparam_utils.h @@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params) */ if (boot_params->sentinel) { /* fields in boot_params are left uninitialized, clear them */ - memset(&boot_params->olpc_ofw_header, 0, + memset(&boot_params->ext_ramdisk_image, 0, (char *)&boot_params->efi_info - - (char *)&boot_params->olpc_ofw_header); + (char *)&boot_params->ext_ramdisk_image); memset(&boot_params->kbd_status, 0, (char *)&boot_params->hdr - (char *)&boot_params->kbd_status); -- cgit From 2184a60de26b94bc5a88de3e5a960ef9ff54ba5a Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Mon, 29 Jul 2013 20:46:04 -0700 Subject: KVM: ARM: Squash len warning The 'len' variable was declared an unsigned and then checked for less than 0, which results in warnings on some compilers. Since len is assigned an int, make it an int. Signed-off-by: Christoffer Dall --- arch/arm/kvm/mmio.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c index b8e06b7a2833..0c25d9487d53 100644 --- a/arch/arm/kvm/mmio.c +++ b/arch/arm/kvm/mmio.c @@ -63,7 +63,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_exit_mmio *mmio) { - unsigned long rt, len; + unsigned long rt; + int len; bool is_write, sign_extend; if (kvm_vcpu_dabt_isextabt(vcpu)) { -- cgit From 8c6b79bb1211d91fb31bcbc2a1eea8d6963d3ad9 Mon Sep 17 00:00:00 2001 From: Torsten Kaiser Date: Tue, 23 Jul 2013 19:40:49 +0200 Subject: x86, microcode, AMD: Make cpu_has_amd_erratum() use the correct struct cpuinfo_x86 cpu_has_amd_erratum() is buggy, because it uses the per-cpu cpu_info before it is filled by smp_store_boot_cpu_info() / smp_store_cpu_info(). If early microcode loading is enabled its collect_cpu_info_amd_early() will fill ->x86 and so the fallback to boot_cpu_data is not used. But ->x86_vendor was not filled and is still X86_VENDOR_INTEL resulting in no errata fixes getting applied and my system hangs on boot. Using cpu_info in cpu_has_amd_erratum() is wrong anyway: its only caller init_amd() will have a struct cpuinfo_x86 as parameter and the set_cpu_bug() that is controlled by cpu_has_amd_erratum() also only uses that struct. So pass the struct cpuinfo_x86 from init_amd() to cpu_has_amd_erratum() and the broken fallback can be dropped. 
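The reason the stale per-cpu entry looked like an Intel CPU is simply that the vendor constants start at zero (values as found in arch/x86/include/asm/processor.h of this era, quoted for illustration):

    #define X86_VENDOR_INTEL 0    /* an all-zero cpuinfo_x86 reads as "Intel" */
    #define X86_VENDOR_AMD   2

so the vendor check in the old fallback path could never identify a not-yet-initialized cpu_info as AMD, and the erratum tests were skipped.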
[ Boris: Drop WARN_ON() since we're called only from init_amd() ] Signed-off-by: Torsten Kaiser Signed-off-by: Borislav Petkov --- arch/x86/kernel/cpu/amd.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index f654ecefea5b..08a089043ccf 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -512,7 +512,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) static const int amd_erratum_383[]; static const int amd_erratum_400[]; -static bool cpu_has_amd_erratum(const int *erratum); +static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); static void init_amd(struct cpuinfo_x86 *c) { @@ -729,11 +729,11 @@ static void init_amd(struct cpuinfo_x86 *c) value &= ~(1ULL << 24); wrmsrl_safe(MSR_AMD64_BU_CFG2, value); - if (cpu_has_amd_erratum(amd_erratum_383)) + if (cpu_has_amd_erratum(c, amd_erratum_383)) set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); } - if (cpu_has_amd_erratum(amd_erratum_400)) + if (cpu_has_amd_erratum(c, amd_erratum_400)) set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); @@ -878,23 +878,13 @@ static const int amd_erratum_400[] = static const int amd_erratum_383[] = AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); -static bool cpu_has_amd_erratum(const int *erratum) + +static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) { - struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info); int osvw_id = *erratum++; u32 range; u32 ms; - /* - * If called early enough that current_cpu_data hasn't been initialized - * yet, fall back to boot_cpu_data. - */ - if (cpu->x86 == 0) - cpu = &boot_cpu_data; - - if (cpu->x86_vendor != X86_VENDOR_AMD) - return false; - if (osvw_id >= 0 && osvw_id < 65536 && cpu_has(cpu, X86_FEATURE_OSVW)) { u64 osvw_len; -- cgit From 84516098b58e05821780dc0b89abcee434b4dca5 Mon Sep 17 00:00:00 2001 From: Torsten Kaiser Date: Thu, 8 Aug 2013 19:38:18 +0200 Subject: x86, microcode, AMD: Fix early microcode loading load_microcode_amd() (and the helper it is using) should not have an cpu parameter. The microcode loading does not depend on the CPU wrt the patches loaded since they will end up in a global list for all CPUs anyway. The change from cpu to x86family in load_microcode_amd() now allows to drop the code messing with cpu_data(cpu) from collect_cpu_info_amd_early(), which is wrong anyway because at that point the per-cpu cpu_info is not yet setup (These values would later be overwritten by smp_store_boot_cpu_info() / smp_store_cpu_info()). Fold the rest of collect_cpu_info_amd_early() into load_ucode_amd_ap(), because its only used at one place and without the cpuinfo_x86 accesses it was not much left. Signed-off-by: Torsten Kaiser [ Fengguang: build fix ] Signed-off-by: Fengguang Wu [ Boris: adapt it to current tree. 
] Signed-off-by: Borislav Petkov --- arch/x86/include/asm/microcode_amd.h | 2 +- arch/x86/kernel/microcode_amd.c | 27 +++++++++++++-------------- arch/x86/kernel/microcode_amd_early.c | 27 +++++++++++++-------------- 3 files changed, 27 insertions(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 50e5c58ced23..4c019179a57d 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h @@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table, extern int __apply_microcode_amd(struct microcode_amd *mc_amd); extern int apply_microcode_amd(int cpu); -extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size); +extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); #ifdef CONFIG_MICROCODE_AMD_EARLY #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 7a0adb7ee433..7123b5df479d 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -145,10 +145,9 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) return 0; } -static unsigned int verify_patch_size(int cpu, u32 patch_size, +static unsigned int verify_patch_size(u8 family, u32 patch_size, unsigned int size) { - struct cpuinfo_x86 *c = &cpu_data(cpu); u32 max_size; #define F1XH_MPB_MAX_SIZE 2048 @@ -156,7 +155,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size, #define F15H_MPB_MAX_SIZE 4096 #define F16H_MPB_MAX_SIZE 3458 - switch (c->x86) { + switch (family) { case 0x14: max_size = F14H_MPB_MAX_SIZE; break; @@ -277,9 +276,8 @@ static void cleanup(void) * driver cannot continue functioning normally. In such cases, we tear * down everything we've used up so far and exit. 
*/ -static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) +static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover) { - struct cpuinfo_x86 *c = &cpu_data(cpu); struct microcode_header_amd *mc_hdr; struct ucode_patch *patch; unsigned int patch_size, crnt_size, ret; @@ -299,7 +297,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) /* check if patch is for the current family */ proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff); - if (proc_fam != c->x86) + if (proc_fam != family) return crnt_size; if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { @@ -308,7 +306,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) return crnt_size; } - ret = verify_patch_size(cpu, patch_size, leftover); + ret = verify_patch_size(family, patch_size, leftover); if (!ret) { pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id); return crnt_size; @@ -339,7 +337,8 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) return crnt_size; } -static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size) +static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, + size_t size) { enum ucode_state ret = UCODE_ERROR; unsigned int leftover; @@ -362,7 +361,7 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz } while (leftover) { - crnt_size = verify_and_add_patch(cpu, fw, leftover); + crnt_size = verify_and_add_patch(family, fw, leftover); if (crnt_size < 0) return ret; @@ -373,22 +372,22 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz return UCODE_OK; } -enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size) +enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) { enum ucode_state ret; /* free old equiv table */ free_equiv_cpu_table(); - ret = __load_microcode_amd(cpu, data, size); + ret = __load_microcode_amd(family, data, size); if (ret != UCODE_OK) cleanup(); #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32) /* save BSP's matching patch for early load */ - if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { - struct ucode_patch *p = find_patch(cpu); + if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) { + struct ucode_patch *p = find_patch(smp_processor_id()); if (p) { memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), @@ -441,7 +440,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, goto fw_release; } - ret = load_microcode_amd(cpu, fw->data, fw->size); + ret = load_microcode_amd(c->x86, fw->data, fw->size); fw_release: release_firmware(fw); diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c index 1d14ffee5749..6073104ccaa3 100644 --- a/arch/x86/kernel/microcode_amd_early.c +++ b/arch/x86/kernel/microcode_amd_early.c @@ -238,25 +238,17 @@ static void __init collect_cpu_sig_on_bsp(void *arg) uci->cpu_sig.sig = cpuid_eax(0x00000001); } #else -static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c, - struct ucode_cpu_info *uci) +void load_ucode_amd_ap(void) { + unsigned int cpu = smp_processor_id(); + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; u32 rev, eax; rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); eax = cpuid_eax(0x00000001); - uci->cpu_sig.sig = eax; uci->cpu_sig.rev = rev; - c->microcode = rev; - c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); -} - -void 
load_ucode_amd_ap(void) -{ - unsigned int cpu = smp_processor_id(); - - collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu); + uci->cpu_sig.sig = eax; if (cpu && !ucode_loaded) { void *ucode; @@ -265,8 +257,10 @@ void load_ucode_amd_ap(void) return; ucode = (void *)(initrd_start + ucode_offset); - if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK) + eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); + if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK) return; + ucode_loaded = true; } @@ -278,6 +272,8 @@ int __init save_microcode_in_initrd_amd(void) { enum ucode_state ret; void *ucode; + u32 eax; + #ifdef CONFIG_X86_32 unsigned int bsp = boot_cpu_data.cpu_index; struct ucode_cpu_info *uci = ucode_cpu_info + bsp; @@ -293,7 +289,10 @@ int __init save_microcode_in_initrd_amd(void) return 0; ucode = (void *)(initrd_start + ucode_offset); - ret = load_microcode_amd(0, ucode, ucode_size); + eax = cpuid_eax(0x00000001); + eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); + + ret = load_microcode_amd(eax, ucode, ucode_size); if (ret != UCODE_OK) return -EINVAL; -- cgit From b524f389702a908aa0e045dff2b79465b1084b88 Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Wed, 7 Aug 2013 18:29:44 +0200 Subject: ARM: at91: add missing uart clocks DT entries Add clocks to clock lookup table for uart DT entries. Signed-off-by: Boris BREZILLON Tested-by: Douglas Gilbert Signed-off-by: Nicolas Ferre --- arch/arm/mach-at91/at91sam9x5.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c index 2abee6626aac..916e5a142917 100644 --- a/arch/arm/mach-at91/at91sam9x5.c +++ b/arch/arm/mach-at91/at91sam9x5.c @@ -227,6 +227,8 @@ static struct clk_lookup periph_clocks_lookups[] = { CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk), CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk), CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk), + CLKDEV_CON_DEV_ID("usart", "f8040000.serial", &uart0_clk), + CLKDEV_CON_DEV_ID("usart", "f8044000.serial", &uart1_clk), CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk), CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk), CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk), -- cgit From a57603ca2871ee0773b00839c1ea35c4a2d3eeb0 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Fri, 28 Jun 2013 10:39:15 +0200 Subject: ARM: at91/DT: fix at91sam9n12ek memory node Signed-off-by: Nicolas Ferre Cc: stable # 3.5+ --- arch/arm/boot/dts/at91sam9n12ek.dts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts index d59b70c6a6a0..3d77dbe406f4 100644 --- a/arch/arm/boot/dts/at91sam9n12ek.dts +++ b/arch/arm/boot/dts/at91sam9n12ek.dts @@ -14,11 +14,11 @@ compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9"; chosen { - bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2"; + bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2"; }; memory { - reg = <0x20000000 0x10000000>; + reg = <0x20000000 0x8000000>; }; clocks { -- cgit From 386d20ab9edb3751c96aaed842bef06716a92a92 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Tue, 6 Aug 2013 20:06:15 +0300 Subject: ARM: OMAP2: fix musb usage for n8x0 Commit b7e2e75a8c ("usb: gadget: drop unused USB_GADGET_MUSB_HDRC") dropped a config symbol that was unused by the musb core, but it turns out that board support code still had references 
to it. As the core now handles both dual role and host-only modes, we can just pass MUSB_OTG as mode from board files. Signed-off-by: Daniel Mack Tested-by: Aaro Koskinen Signed-off-by: Aaro Koskinen Signed-off-by: Tony Lindgren Signed-off-by: Olof Johansson --- arch/arm/mach-omap2/board-n8x0.c | 4 ---- arch/arm/mach-omap2/usb-musb.c | 5 +---- 2 files changed, 1 insertion(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c index f6eeb87e4e95..827d15009a86 100644 --- a/arch/arm/mach-omap2/board-n8x0.c +++ b/arch/arm/mach-omap2/board-n8x0.c @@ -122,11 +122,7 @@ static struct musb_hdrc_config musb_config = { }; static struct musb_hdrc_platform_data tusb_data = { -#ifdef CONFIG_USB_GADGET_MUSB_HDRC .mode = MUSB_OTG, -#else - .mode = MUSB_HOST, -#endif .set_power = tusb_set_power, .min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */ .power = 100, /* Max 100 mA VBUS for host mode */ diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c index 8c4de2708cf2..bc897231bd10 100644 --- a/arch/arm/mach-omap2/usb-musb.c +++ b/arch/arm/mach-omap2/usb-musb.c @@ -38,11 +38,8 @@ static struct musb_hdrc_config musb_config = { }; static struct musb_hdrc_platform_data musb_plat = { -#ifdef CONFIG_USB_GADGET_MUSB_HDRC .mode = MUSB_OTG, -#else - .mode = MUSB_HOST, -#endif + /* .clock is set dynamically */ .config = &musb_config, -- cgit From cc05fcc4b095a5f1da1d6b7cc2b73f90e38835d4 Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Tue, 6 Aug 2013 20:06:16 +0300 Subject: ARM: OMAP: rx51: change musb mode to OTG Peripheral-only mode got broken in v3.11-rc1 because of unknown reasons. Change the mode to OTG, in practice that should work equally well even when/if the regression gets fixed. Note that the peripheral-only regression is a separate patch, this change is still correct as the role is handled by hardware. Signed-off-by: Aaro Koskinen [tony@atomide.com: updated comments] Signed-off-by: Tony Lindgren Signed-off-by: Olof Johansson --- arch/arm/mach-omap2/board-rx51.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c index d2ea68ea678a..7735105561d8 100644 --- a/arch/arm/mach-omap2/board-rx51.c +++ b/arch/arm/mach-omap2/board-rx51.c @@ -85,7 +85,7 @@ static struct omap_board_mux board_mux[] __initdata = { static struct omap_musb_board_data musb_board_data = { .interface_type = MUSB_INTERFACE_ULPI, - .mode = MUSB_PERIPHERAL, + .mode = MUSB_OTG, .power = 0, }; -- cgit From acd36357edc08649e85ff15dc4ed62353c912eff Mon Sep 17 00:00:00 2001 From: Sekhar Nori Date: Fri, 16 Aug 2013 14:43:48 +0530 Subject: ARM: davinci: nand: specify ecc strength Starting with kernel v3.5, it is mandatory to specify ECC strength when using hardware ECC. Without this, kernel panics with a warning of the sort: Driver must set ecc.strength when using hardware ECC ------------[ cut here ]------------ kernel BUG at drivers/mtd/nand/nand_base.c:3519! Fix this by specifying ECC strength for the boards which were missing this. 
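For context, the quoted panic comes from a sanity check in the generic NAND core; in simplified, paraphrased form (not a verbatim quote of nand_scan_tail()):

    if (chip->ecc.mode == NAND_ECC_HW && !chip->ecc.strength) {
            pr_warn("Driver must set ecc.strength when using hardware ECC\n");
            BUG();
    }

The davinci driver derives chip->ecc.strength from the ecc_bits platform field, so any board file that leaves ecc_bits at zero trips this check.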
Reported-by: Holger Freyther Cc: #v3.5+ Signed-off-by: Sekhar Nori Signed-off-by: Kevin Hilman --- arch/arm/mach-davinci/board-dm355-leopard.c | 1 + arch/arm/mach-davinci/board-dm644x-evm.c | 1 + arch/arm/mach-davinci/board-dm646x-evm.c | 1 + arch/arm/mach-davinci/board-neuros-osd2.c | 1 + 4 files changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c index dff4ddc5ef81..139e42da25f0 100644 --- a/arch/arm/mach-davinci/board-dm355-leopard.c +++ b/arch/arm/mach-davinci/board-dm355-leopard.c @@ -75,6 +75,7 @@ static struct davinci_nand_pdata davinci_nand_data = { .parts = davinci_nand_partitions, .nr_parts = ARRAY_SIZE(davinci_nand_partitions), .ecc_mode = NAND_ECC_HW_SYNDROME, + .ecc_bits = 4, .bbt_options = NAND_BBT_USE_FLASH, }; diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index a33686a6fbb2..fa4bfaf952d8 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c @@ -153,6 +153,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = { .parts = davinci_evm_nandflash_partition, .nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition), .ecc_mode = NAND_ECC_HW, + .ecc_bits = 1, .bbt_options = NAND_BBT_USE_FLASH, .timing = &davinci_evm_nandflash_timing, }; diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index fbb8e5ab1dc1..0c005e876cac 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c @@ -90,6 +90,7 @@ static struct davinci_nand_pdata davinci_nand_data = { .parts = davinci_nand_partitions, .nr_parts = ARRAY_SIZE(davinci_nand_partitions), .ecc_mode = NAND_ECC_HW, + .ecc_bits = 1, .options = 0, }; diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c index 2bc112adf565..808233b60e3d 100644 --- a/arch/arm/mach-davinci/board-neuros-osd2.c +++ b/arch/arm/mach-davinci/board-neuros-osd2.c @@ -88,6 +88,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = { .parts = davinci_ntosd2_nandflash_partition, .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition), .ecc_mode = NAND_ECC_HW, + .ecc_bits = 1, .bbt_options = NAND_BBT_USE_FLASH, }; -- cgit From 7cb3be0a27805c625ff7cce20c53c926d9483243 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Fri, 16 Aug 2013 12:55:56 +0100 Subject: ARM: 7819/1: fiq: Cast the first argument of flush_icache_range() Commit 2ba85e7af4 (ARM: Fix FIQ code on VIVT CPUs) causes the following build warning: arch/arm/kernel/fiq.c:92:3: warning: passing argument 1 of 'cpu_cache.coherent_kern_range' makes integer from pointer without a cast [enabled by default] Cast it as '(unsigned long)base' to avoid the warning. 
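For context, on ARM flush_icache_range() resolves to the cpu_cache method named in the warning, whose parameters are plain unsigned long addresses; approximately (paraphrased from the ARM cacheflush definitions):

    #define flush_icache_range(s, e) __cpuc_coherent_kern_range(s, e)

    /* in struct cpu_cache_fns: */
    void (*coherent_kern_range)(unsigned long start, unsigned long end);

hence the void pointer 'base' has to be converted to unsigned long before the offset arithmetic.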
Signed-off-by: Fabio Estevam Signed-off-by: Russell King --- arch/arm/kernel/fiq.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c index fc7920288a3d..918875d96d5d 100644 --- a/arch/arm/kernel/fiq.c +++ b/arch/arm/kernel/fiq.c @@ -89,7 +89,8 @@ void set_fiq_handler(void *start, unsigned int length) memcpy(base + offset, start, length); if (!cache_is_vipt_nonaliasing()) - flush_icache_range(base + offset, offset + length); + flush_icache_range((unsigned long)base + offset, offset + + length); flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length); } -- cgit From 4f9b4fb7a2091eec339413a460b1665758401828 Mon Sep 17 00:00:00 2001 From: Vijaya Kumar K Date: Wed, 14 Aug 2013 13:28:28 +0100 Subject: ARM: 7815/1: kexec: offline non panic CPUs on Kdump panic In case of normal kexec kernel load, all cpu's are offlined before calling machine_kexec().But in case crash panic cpus are relaxed in machine_crash_nonpanic_core() SMP function but not offlined. When crash kernel is loaded with kexec and on panic trigger machine_kexec() checks for number of cpus online. If more than one cpu is online machine_kexec() fails to load with below error kexec: error: multiple CPUs still online In machine_crash_nonpanic_core() SMP function, offline CPU before cpu_relax Signed-off-by: Vijaya Kumar K Acked-by: Stephen Warren Acked-by: Will Deacon Signed-off-by: Russell King --- arch/arm/kernel/machine_kexec.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index d7c82df69243..57221e349a7c 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c @@ -82,6 +82,7 @@ void machine_crash_nonpanic_core(void *unused) crash_save_cpu(®s, smp_processor_id()); flush_cache_all(); + set_cpu_online(smp_processor_id(), false); atomic_dec(&waiting_for_crash_ipi); while (1) cpu_relax(); -- cgit From ac124504ecf6b20a2457d873d0728a8b991a5b0c Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Wed, 14 Aug 2013 22:36:32 +0100 Subject: ARM: 7816/1: CONFIG_KUSER_HELPERS: fix help text Commit f6f91b0d9fd9 ("ARM: allow kuser helpers to be removed from the vector page") introduced some help text for the CONFIG_KUSER_HELPERS option which is rather contradictory. Let's fix that, and improve it a little. Cc: Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/mm/Kconfig | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index db5c2cab8fda..cd2c88e7a8f7 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -809,15 +809,18 @@ config KUSER_HELPERS the CPU type fitted to the system. This permits binaries to be run on ARMv4 through to ARMv7 without modification. + See Documentation/arm/kernel_user_helpers.txt for details. + However, the fixed address nature of these helpers can be used by ROP (return orientated programming) authors when creating exploits. If all of the binaries and libraries which run on your platform are built specifically for your platform, and make no use of - these helpers, then you can turn this option off. However, - when such an binary or library is run, it will receive a SIGILL - signal, which will terminate the program. + these helpers, then you can turn this option off to hinder + such exploits. 
However, in that case, if a binary or library + relying on those helpers is run, it will receive a SIGILL signal, + which will terminate the program. Say N here only if you are absolutely certain that you do not need these helpers; otherwise, the safe option is to say Y. -- cgit From 868f6fea8fa63f09acbfa93256d0d2abdcabff79 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 20 Aug 2013 11:47:39 +0100 Subject: arm64: perf: fix array out of bounds access in armpmu_map_hw_event() This is a port of d9f966357b14 ("ARM: 7810/1: perf: Fix array out of bounds access in armpmu_map_hw_event()") to arm64, which fixes an oops in the arm64 perf backend found as a result of Vince's fuzzing tool. Cc: Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/perf_event.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 9ba33c40cdf8..2012646fb46f 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map) static int armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) { - int mapping = (*event_map)[config]; + int mapping; + + if (config >= PERF_COUNT_HW_MAX) + return -EINVAL; + + mapping = (*event_map)[config]; return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; } -- cgit From ee7538a008a45050c8f706d38b600f55953169f9 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 20 Aug 2013 11:47:40 +0100 Subject: arm64: perf: fix event validation for software group leaders This is a port of c95eb3184ea1 ("ARM: 7809/1: perf: fix event validation for software group leaders") to arm64, which fixes a panic in the arm64 perf backend found as a result of Vince's fuzzing tool. Cc: Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/kernel/perf_event.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 2012646fb46f..12e6ccb88691 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -322,6 +322,9 @@ validate_event(struct pmu_hw_events *hw_events, struct hw_perf_event fake_event = event->hw; struct pmu *leader_pmu = event->group_leader->pmu; + if (is_software_event(event)) + return 1; + if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) return 1; -- cgit From 3bc38cbceb85881a8eb789ee1aa56678038b1909 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Fri, 16 Aug 2013 15:42:55 +0100 Subject: x86/xen: do not identity map UNUSABLE regions in the machine E820 If there are UNUSABLE regions in the machine memory map, dom0 will attempt to map them 1:1 which is not permitted by Xen and the kernel will crash. There isn't anything interesting in the UNUSABLE region that the dom0 kernel needs access to so we can avoid making the 1:1 mapping and treat it as RAM. We only do this for dom0, as that is where tboot case shows up. A PV domU could have an UNUSABLE region in its pseudo-physical map and would need to be handled in another patch. This fixes a boot failure on hosts with tboot. tboot marks a region in the e820 map as unusable and the dom0 kernel would attempt to map this region and Xen does not permit unusable regions to be mapped by guests. 
(XEN) 0000000000000000 - 0000000000060000 (usable) (XEN) 0000000000060000 - 0000000000068000 (reserved) (XEN) 0000000000068000 - 000000000009e000 (usable) (XEN) 0000000000100000 - 0000000000800000 (usable) (XEN) 0000000000800000 - 0000000000972000 (unusable) tboot marked this region as unusable. (XEN) 0000000000972000 - 00000000cf200000 (usable) (XEN) 00000000cf200000 - 00000000cf38f000 (reserved) (XEN) 00000000cf38f000 - 00000000cf3ce000 (ACPI data) (XEN) 00000000cf3ce000 - 00000000d0000000 (reserved) (XEN) 00000000e0000000 - 00000000f0000000 (reserved) (XEN) 00000000fe000000 - 0000000100000000 (reserved) (XEN) 0000000100000000 - 0000000630000000 (usable) Signed-off-by: David Vrabel [v1: Altered the patch and description with domU's with UNUSABLE regions] Signed-off-by: Konrad Rzeszutek Wilk --- arch/x86/xen/setup.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'arch') diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 94eac5c85cdc..0a9fb7a0b452 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type) e820_add_region(start, end - start, type); } +void xen_ignore_unusable(struct e820entry *list, size_t map_size) +{ + struct e820entry *entry; + unsigned int i; + + for (i = 0, entry = list; i < map_size; i++, entry++) { + if (entry->type == E820_UNUSABLE) + entry->type = E820_RAM; + } +} + /** * machine_specific_memory_setup - Hook for machine specific memory setup. **/ @@ -353,6 +364,17 @@ char * __init xen_memory_setup(void) } BUG_ON(rc); + /* + * Xen won't allow a 1:1 mapping to be created to UNUSABLE + * regions, so if we're using the machine memory map leave the + * region as RAM as it is in the pseudo-physical map. + * + * UNUSABLE regions in domUs are not handled and will need + * a patch in the future. + */ + if (xen_initial_domain()) + xen_ignore_unusable(map, memmap.nr_entries); + /* Make sure the Xen-supplied memory map is well-ordered. */ sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries); -- cgit From fc78d343fa74514f6fd117b5ef4cd27e4ac30236 Mon Sep 17 00:00:00 2001 From: Chuck Anderson Date: Tue, 6 Aug 2013 15:12:19 -0700 Subject: xen/smp: initialize IPI vectors before marking CPU online An older PVHVM guest (v3.0 based) crashed during vCPU hot-plug with: kernel BUG at drivers/xen/events.c:1328! RCU has detected that a CPU has not entered a quiescent state within the grace period. It needs to send the CPU a reschedule IPI if it is not offline. rcu_implicit_offline_qs() does this check: /* * If the CPU is offline, it is in a quiescent state. We can * trust its state not to change because interrupts are disabled. */ if (cpu_is_offline(rdp->cpu)) { rdp->offline_fqs++; return 1; } Else the CPU is online. Send it a reschedule IPI. The CPU is in the middle of being hot-plugged and has been marked online (!cpu_is_offline()). See start_secondary(): set_cpu_online(smp_processor_id(), true); ... per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; start_secondary() then waits for the CPU bringing up the hot-plugged CPU to mark it as active: /* * Wait until the cpu which brought this one up marked it * online before enabling interrupts. If we don't do that then * we can end up waking up the softirq thread before this cpu * reached the active state, which makes the scheduler unhappy * and schedule the softirq thread on the wrong cpu. This is * only observable with forced threaded interrupts, but in * theory it could also happen w/o them. 
It's just way harder * to achieve. */ while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask)) cpu_relax(); /* enable local interrupts */ local_irq_enable(); The CPU being hot-plugged will be marked active after it has been fully initialized by the CPU managing the hot-plug. In the Xen PVHVM case xen_smp_intr_init() is called to set up the hot-plugged vCPU's XEN_RESCHEDULE_VECTOR. The hot-plugging CPU is marked online, not marked active and does not have its IPI vectors set up. rcu_implicit_offline_qs() sees the hot-plugging cpu is !cpu_is_offline() and tries to send it a reschedule IPI: This will lead to: kernel BUG at drivers/xen/events.c:1328! xen_send_IPI_one() xen_smp_send_reschedule() rcu_implicit_offline_qs() rcu_implicit_dynticks_qs() force_qs_rnp() force_quiescent_state() __rcu_process_callbacks() rcu_process_callbacks() __do_softirq() call_softirq() do_softirq() irq_exit() xen_evtchn_do_upcall() because xen_send_IPI_one() will attempt to use an uninitialized IRQ for the XEN_RESCHEDULE_VECTOR. There is at least one other place that has caused the same crash: xen_smp_send_reschedule() wake_up_idle_cpu() add_timer_on() clocksource_watchdog() call_timer_fn() run_timer_softirq() __do_softirq() call_softirq() do_softirq() irq_exit() xen_evtchn_do_upcall() xen_hvm_callback_vector() clocksource_watchdog() uses cpu_online_mask to pick the next CPU to handle a watchdog timer: /* * Cycle through CPUs to check if the CPUs stay synchronized * to each other. */ next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); if (next_cpu >= nr_cpu_ids) next_cpu = cpumask_first(cpu_online_mask); watchdog_timer.expires += WATCHDOG_INTERVAL; add_timer_on(&watchdog_timer, next_cpu); This resulted in an attempt to send an IPI to a hot-plugging CPU that had not initialized its reschedule vector. One option would be to make the RCU code check to not check for CPU offline but for CPU active. As becoming active is done after a CPU is online (in older kernels). But Srivatsa pointed out that "the cpu_active vs cpu_online ordering has been completely reworked - in the online path, cpu_active is set *before* cpu_online, and also, in the cpu offline path, the cpu_active bit is reset in the CPU_DYING notification instead of CPU_DOWN_PREPARE." Drilling in this the bring-up path: "[brought up CPU].. send out a CPU_STARTING notification, and in response to that, the scheduler sets the CPU in the cpu_active_mask. Again, this mask is better left to the scheduler alone, since it has the intelligence to use it judiciously." The conclusion was that: " 1. At the IPI sender side: It is incorrect to send an IPI to an offline CPU (cpu not present in the cpu_online_mask). There are numerous places where we check this and warn/complain. 2. At the IPI receiver side: It is incorrect to let the world know of our presence (by setting ourselves in global bitmasks) until our initialization steps are complete to such an extent that we can handle the consequences (such as receiving interrupts without crashing the sender etc.) " (from Srivatsa) As the native code enables the interrupts at some point we need to be able to service them. In other words a CPU must have valid IPI vectors if it has been marked online. It doesn't need to handle the IPI (interrupts may be disabled) but needs to have valid IPI vectors because another CPU may find it in cpu_online_mask and attempt to send it an IPI. 
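Stated as a code-level invariant, the reordering below has to make the following fragment safe at all times (illustrative fragment, not taken from the patch):

    if (cpu_online(cpu))                    /* CPU is visible to everyone... */
            xen_smp_send_reschedule(cpu);   /* ...so this must not hit an
                                               unbound IPI event channel */

which is why xen_smp_intr_init() must complete before the CPU is marked online, not after.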
This patch will change the order of the Xen vCPU bring-up functions so that Xen vectors have been set up before start_secondary() is called. It also will not continue to bring up a Xen vCPU if xen_smp_intr_init() fails to initialize it. Orabug 13823853 Signed-off-by Chuck Anderson Acked-by: Srivatsa S. Bhat Signed-off-by: Konrad Rzeszutek Wilk --- arch/x86/xen/smp.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 37fbe71795c1..34ed6edf85d0 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -686,8 +686,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) { int rc; - rc = native_cpu_up(cpu, tidle); - WARN_ON (xen_smp_intr_init(cpu)); + /* + * xen_smp_intr_init() needs to run before native_cpu_up() + * so that IPI vectors are set up on the booting CPU before + * it is marked online in native_cpu_up(). + */ + rc = xen_smp_intr_init(cpu); + WARN_ON(rc); + if (!rc) + rc = native_cpu_up(cpu, tidle); return rc; } -- cgit From c26d421987d5595ef1758d42dbce02308d10e17e Mon Sep 17 00:00:00 2001 From: David Daney Date: Mon, 19 Aug 2013 12:10:34 -0700 Subject: MIPS: Handle OCTEON BBIT instructions in FPU emulator. The branch emulation needs to handle the OCTEON BBIT instructions, otherwise we get SIGILL instead of emulation. Signed-off-by: David Daney Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/5726/ Signed-off-by: Ralf Baechle --- arch/mips/math-emu/cp1emu.c | 26 ++++++++++++++++++++++ 1 file changed, 26 insertions(+) (limited to 'arch') diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index e773659ccf9f..46048d24328c 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -803,6 +803,32 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.next_pc_inc; return 1; break; +#ifdef CONFIG_CPU_CAVIUM_OCTEON + case lwc2_op: /* This is bbit0 on Octeon */ + if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0) + *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + 8; + return 1; + case ldc2_op: /* This is bbit032 on Octeon */ + if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0) + *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + 8; + return 1; + case swc2_op: /* This is bbit1 on Octeon */ + if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) + *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + 8; + return 1; + case sdc2_op: /* This is bbit132 on Octeon */ + if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) + *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + 8; + return 1; +#endif case cop0_op: case cop1_op: case cop2_op: -- cgit From 30ca2226bea6f0db519dc53381b893cd66cb5b66 Mon Sep 17 00:00:00 2001 From: Stephen Warren Date: Tue, 20 Aug 2013 14:00:13 -0600 Subject: ARM: tegra: always enable USB VBUS regulators This fixes a regression exposed during the merge window by commit 9f310de "ARM: tegra: fix VBUS regulator GPIO polarity in DT"; namely that USB VBUS doesn't get turned on, so USB devices are not detected. This affects the internal USB port on TrimSlice (i.e. the USB->SATA bridge, to which the SSD is connected) and the external port(s) on Seaboard/ Springbank and Whistler.
The Tegra DT as written in v3.11 allows two paths to enable USB VBUS: 1) Via the legacy DT binding for the USB controller; it can directly acquire a VBUS GPIO and activate it. 2) Via a regulator for VBUS, which is referenced by the new DT binding for the USB controller. Those two methods both use the same GPIO, and hence whichever of the USB controller and regulator gets probed first ends up owning the GPIO. In practice, the USB driver only supports path (1) above, since the patches to support the new USB binding are not present until v3.12:-( In practice, the regulator ends up being probed first and owning the GPIO. Since nothing enables the regulator (the USB driver code is not yet present), the regulator ends up being turned off. This originally caused no problem, because the polarity in the regulator definition was incorrect, so attempting to turn off the regulator actually turned it on, and everything worked:-( However, when testing the new USB driver code in v3.12, I noticed the incorrect polarity and fixed it in commit 9f310de "ARM: tegra: fix VBUS regulator GPIO polarity in DT". In the context of v3.11, this patch then caused the USB VBUS to actually turn off, which broke USB ports with VBUS control. I got this patch included in v3.11-rc1 since it fixed a bug in device tree (incorrect polarity specification), and hence was suitable to be included early in the rc series. I evidently did not test the patch at all, or correctly, in the context of v3.11, and hence did not notice the issue that I have explained above:-( Fix this by making the USB VBUS regulators always enabled. This way, if the regulator owns the GPIO, it will always be turned on, even if there is no USB driver code to request the regulator be turned on. Even ignoring this bug, this is a reasonable way to configure the HW anyway. If this patch is applied to v3.11, it will cause a couple pretty trivial conflicts in tegra20-{trimslice,seaboard}.dts when creating v3.12, since the context right above the added lines changed in patches destined for v3.12. 
Reported-by: Kyle McMartin Signed-off-by: Stephen Warren Signed-off-by: Olof Johansson --- arch/arm/boot/dts/tegra20-seaboard.dts | 2 ++ arch/arm/boot/dts/tegra20-trimslice.dts | 2 ++ arch/arm/boot/dts/tegra20-whistler.dts | 4 ++++ 3 files changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts index 365760b33a26..40e6fb280333 100644 --- a/arch/arm/boot/dts/tegra20-seaboard.dts +++ b/arch/arm/boot/dts/tegra20-seaboard.dts @@ -830,6 +830,8 @@ regulator-max-microvolt = <5000000>; enable-active-high; gpio = <&gpio 24 0>; /* PD0 */ + regulator-always-on; + regulator-boot-on; }; }; diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts index ed4b901b0227..37c93d3c4812 100644 --- a/arch/arm/boot/dts/tegra20-trimslice.dts +++ b/arch/arm/boot/dts/tegra20-trimslice.dts @@ -412,6 +412,8 @@ regulator-max-microvolt = <5000000>; enable-active-high; gpio = <&gpio 170 0>; /* PV2 */ + regulator-always-on; + regulator-boot-on; }; }; diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts index ab67c94db280..a3d0ebad78a1 100644 --- a/arch/arm/boot/dts/tegra20-whistler.dts +++ b/arch/arm/boot/dts/tegra20-whistler.dts @@ -588,6 +588,8 @@ regulator-max-microvolt = <5000000>; enable-active-high; gpio = <&tca6416 0 0>; /* GPIO_PMU0 */ + regulator-always-on; + regulator-boot-on; }; vbus3_reg: regulator@3 { @@ -598,6 +600,8 @@ regulator-max-microvolt = <5000000>; enable-active-high; gpio = <&tca6416 1 0>; /* GPIO_PMU1 */ + regulator-always-on; + regulator-boot-on; }; }; -- cgit From 5ea80f76a56605a190a7ea16846c82aa63dbd0aa Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 22 Aug 2013 09:13:06 -0700 Subject: Revert "x86 get_unmapped_area(): use proper mmap base for bottom-up direction" This reverts commit df54d6fa54275ce59660453e29d1228c2b45a826. The commit isn't necessarily wrong, but because it recalculates the random mmap_base every time, it seems to confuse user memory allocators that expect contiguous mmap allocations even when the mmap address isn't specified. In particular, the MATLAB Java runtime seems to be unhappy. See https://bugzilla.kernel.org/show_bug.cgi?id=60774 So we'll want to apply the random offset only once, and Radu has a patch for that. Revert this older commit in order to apply the other one. 
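The contiguity assumption mentioned above can be probed from user space with a short illustrative C program (not part of the commit): under the legacy bottom-up layout, consecutive hint-less anonymous mmap() calls are normally placed next to each other, and that is what re-randomizing the base on every lookup disturbs.

------------->8----------------
/*
 * Illustrative probe only: check whether two anonymous mappings created
 * without an address hint end up adjacent.  Try it e.g. under
 * "setarch $(uname -m) -R ./a.out" or with vm.legacy_va_layout=1 to get
 * the bottom-up layout discussed above.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 1 << 20;   /* 1 MiB per mapping */
    char *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    char *b = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (a == MAP_FAILED || b == MAP_FAILED)
        return 1;

    printf("first  mapping: %p\n", (void *)a);
    printf("second mapping: %p\n", (void *)b);
    printf("adjacent: %s\n",
           (b == a + len || a == b + len) ? "yes" : "no");
    return 0;
}
------------->8----------------

Whether the two mappings touch depends on the layout and on ASLR settings, so treat the output as a probe of the behaviour the bug report describes, not as a guarantee.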
Reported-by: Jeff Shorey Cc: Radu Caragea Cc: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/sys_x86_64.c | 2 +- arch/x86/mm/mmap.c | 2 +- include/linux/sched.h | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index 48f8375e4c6b..dbded5aedb81 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, *begin = new_begin; } } else { - *begin = mmap_legacy_base(); + *begin = TASK_UNMAPPED_BASE; *end = TASK_SIZE; } } diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index f63778cb2363..62c29a5bfe26 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -98,7 +98,7 @@ static unsigned long mmap_base(void) * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 * does, but not when emulating X86_32 */ -unsigned long mmap_legacy_base(void) +static unsigned long mmap_legacy_base(void) { if (mmap_is_ia32()) return TASK_UNMAPPED_BASE; diff --git a/include/linux/sched.h b/include/linux/sched.h index e9995eb5985c..078066daffd4 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -314,7 +314,6 @@ struct nsproxy; struct user_namespace; #ifdef CONFIG_MMU -extern unsigned long mmap_legacy_base(void); extern void arch_pick_mmap_layout(struct mm_struct *mm); extern unsigned long arch_get_unmapped_area(struct file *, unsigned long, unsigned long, -- cgit From 41aacc1eea645c99edbe8fbcf78a97dc9b862adc Mon Sep 17 00:00:00 2001 From: Radu Caragea Date: Wed, 21 Aug 2013 20:55:59 +0300 Subject: x86 get_unmapped_area: Access mmap_legacy_base through mm_struct member This is the updated version of df54d6fa5427 ("x86 get_unmapped_area(): use proper mmap base for bottom-up direction") that only randomizes the mmap base address once. 
Signed-off-by: Radu Caragea Reported-and-tested-by: Jeff Shorey Cc: Andrew Morton Cc: Michel Lespinasse Cc: Oleg Nesterov Cc: Rik van Riel Cc: Ingo Molnar Cc: Adrian Sendroiu Cc: Greg KH Cc: Kamal Mostafa Signed-off-by: Linus Torvalds --- arch/x86/kernel/sys_x86_64.c | 2 +- arch/x86/mm/mmap.c | 6 ++++-- include/linux/mm_types.h | 1 + 3 files changed, 6 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index dbded5aedb81..30277e27431a 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c @@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, *begin = new_begin; } } else { - *begin = TASK_UNMAPPED_BASE; + *begin = current->mm->mmap_legacy_base; *end = TASK_SIZE; } } diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 62c29a5bfe26..25e7e1372bb2 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -112,11 +112,13 @@ static unsigned long mmap_legacy_base(void) */ void arch_pick_mmap_layout(struct mm_struct *mm) { + mm->mmap_legacy_base = mmap_legacy_base(); + mm->mmap_base = mmap_base(); + if (mmap_is_legacy()) { - mm->mmap_base = mmap_legacy_base(); + mm->mmap_base = mm->mmap_legacy_base; mm->get_unmapped_area = arch_get_unmapped_area; } else { - mm->mmap_base = mmap_base(); mm->get_unmapped_area = arch_get_unmapped_area_topdown; } } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index fb425aa16c01..faf4b7c1ad12 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -332,6 +332,7 @@ struct mm_struct { unsigned long pgoff, unsigned long flags); #endif unsigned long mmap_base; /* base of mmap area */ + unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */ unsigned long task_size; /* size of task vm space */ unsigned long highest_vm_end; /* highest vma end address */ pgd_t * pgd; -- cgit From b0f55f2a1a295c364be012e82dbab079a2454006 Mon Sep 17 00:00:00 2001 From: Joern Rennecke Date: Sat, 24 Aug 2013 12:03:06 +0530 Subject: ARC: [lib] strchr breakage in Big-endian configuration For a 2-byte-aligned search buffer, strchr() was returning a pointer outside the buffer (buf - 1):

------------->8----------------
// Input buffer (default 4 byte aligned)
char *buffer = "1AA_";
// actual search start (to mimic 2 byte alignment)
char *current_line = &(buffer[2]);
// Character to search for
char c = 'A';

char *c_pos = strchr(current_line, c);

printf("%s\n", c_pos);   --> 'AA_' as opposed to 'A_'
------------->8----------------

Reported-by: Anton Kolesov Debugged-by: Anton Kolesov Cc: # [3.9 and 3.10] Cc: Noam Camus Signed-off-by: Joern Rennecke Signed-off-by: Vineet Gupta Signed-off-by: Linus Torvalds --- arch/arc/lib/strchr-700.S | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S index 99c10475d477..9c548c7cf001 100644 --- a/arch/arc/lib/strchr-700.S +++ b/arch/arc/lib/strchr-700.S @@ -39,9 +39,18 @@ ARC_ENTRY strchr ld.a r2,[r0,4] sub r12,r6,r7 bic r12,r12,r6 +#ifdef __LITTLE_ENDIAN__ and r7,r12,r4 breq r7,0,.Loop ; For speed, we want this branch to be unaligned. b .Lfound_char ; Likewise this one. +#else + and r12,r12,r4 + breq r12,0,.Loop ; For speed, we want this branch to be unaligned. + lsr_s r12,r12,7 + bic r2,r7,r6 + b.d .Lfound_char_b + and_s r2,r2,r12 +#endif ; /* We require this code address to be unaligned for speed...
*/ .Laligned: ld_s r2,[r0] @@ -95,6 +104,7 @@ ARC_ENTRY strchr lsr r7,r7,7 bic r2,r7,r6 +.Lfound_char_b: norm r2,r2 sub_s r0,r0,4 asr_s r2,r2,3 -- cgit
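For readers who do not speak ARC assembly, here is a hedged C sketch of the same word-at-a-time strchr() idea (strchr_wordwise and everything in it are made up for illustration). It is not a line-for-line translation of strchr-700.S: the sketch uses the classic (x - 0x01010101) & ~x & 0x80808080 test to detect a NUL or matching byte in a 32-bit word and then simply re-checks the four bytes, whereas the assembly derives the byte position directly from the mask, which is the endian-sensitive step the #ifdef __LITTLE_ENDIAN__ / #else split above is handling.

------------->8----------------
/*
 * Hedged C sketch of a word-at-a-time strchr(), for illustration only.
 * Like the real routine it may read a few bytes past the terminator
 * within an aligned word; fine as a sketch, not strict ISO C.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static char *strchr_wordwise(const char *s, int c)
{
    const unsigned char ch = (unsigned char)c;
    const uint32_t ones = 0x01010101u, highs = 0x80808080u;
    const uint32_t pat = ch * ones;     /* c replicated into every byte */

    /* Unaligned head, one byte at a time (the prologue that the patch
     * above fixes for big-endian handles this part in assembly). */
    while ((uintptr_t)s % sizeof(uint32_t)) {
        if (*s == (char)ch)
            return (char *)s;
        if (*s == '\0')
            return NULL;
        s++;
    }

    for (;;) {
        uint32_t w;
        memcpy(&w, s, sizeof(w));                          /* aligned load */
        uint32_t zero  = (w - ones) & ~w & highs;                /* NUL?   */
        uint32_t match = ((w ^ pat) - ones) & ~(w ^ pat) & highs; /* c?    */

        if (zero | match) {
            /* A NUL or a match is somewhere in this word; find the first
             * one in memory order.  The assembly instead shifts/normalizes
             * the mask, which is where byte order matters. */
            for (size_t i = 0; i < sizeof(w); i++) {
                if (s[i] == (char)ch)
                    return (char *)&s[i];
                if (s[i] == '\0')
                    return NULL;
            }
        }
        s += sizeof(w);
    }
}

int main(void)
{
    const char *buffer = "1AA_";
    /* Mirrors the reproduction above: search starts 2 bytes in. */
    printf("%s\n", strchr_wordwise(buffer + 2, 'A'));   /* expect A_ */
    return 0;
}
------------->8----------------

The endian-dependent part, turning the detection mask into the offset of the first interesting byte, is what the new big-endian branch above adjusts; the sketch sidesteps it by re-checking bytes, at some cost in speed.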