38 files changed, 367 insertions, 184 deletions
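Most of the changes below convert the final argument of handle_mm_fault() from a write_access boolean into an unsigned int flags bitmask, so each architecture's fault handler now passes FAULT_FLAG_WRITE (or 0) instead of a bare int. The following is a minimal standalone sketch of that call-site pattern, not kernel code: mock_handle_fault() and the printed messages are invented for illustration, and only the FAULT_FLAG_* names come from the patch itself.

#include <stdio.h>

/* Flag bits as used by the reworked handle_mm_fault(); treat the values as illustrative. */
#define FAULT_FLAG_WRITE	0x01
#define FAULT_FLAG_NONLINEAR	0x02

/* Stand-in for handle_mm_fault(): only the flag handling is modelled here. */
static int mock_handle_fault(unsigned long address, unsigned int flags)
{
	if (flags & FAULT_FLAG_WRITE)
		printf("write fault at %#lx\n", address);
	else
		printf("read fault at %#lx\n", address);
	return 0;
}

int main(void)
{
	int write_access = 1;	/* what callers used to pass directly */

	/*
	 * Old style:  mock_handle_fault(addr, write_access);
	 * New style:  translate the boolean into a flag bit at the call site,
	 * the "write ? FAULT_FLAG_WRITE : 0" pattern repeated throughout the diff.
	 */
	mock_handle_fault(0x1000, write_access ? FAULT_FLAG_WRITE : 0);
	return 0;
}

The __get_user_pages() hunk in mm/memory.c relies on FOLL_WRITE having the same bit value as FAULT_FLAG_WRITE, which is why it can pass foll_flags & FOLL_WRITE straight through.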
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index de8e10a94103..0d8d23581c44 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -139,6 +139,7 @@ ALC883/888
   acer		Acer laptops (Travelmate 3012WTMi, Aspire 5600, etc)
   acer-aspire	Acer Aspire 9810
   acer-aspire-4930g	Acer Aspire 4930G
+  acer-aspire-6530g	Acer Aspire 6530G
   acer-aspire-8930g	Acer Aspire 8930G
   medion	Medion Laptops
   medion-md2	Medion MD2
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 4829f96585b1..00a31deaa96e 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -146,7 +146,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	/* If for any reason at all we couldn't handle the fault,
 	   make sure we exit gracefully rather than endlessly redo
 	   the fault.  */
-	fault = handle_mm_fault(mm, vma, address, cause > 0);
+	fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
 	up_read(&mm->mmap_sem);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 0455557a2899..6fdcbb709827 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -208,7 +208,7 @@ good_area:
 	 * than endlessly redo the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 62d4abbaa654..b61d86d3debf 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -133,7 +133,7 @@ good_area:
 	 * fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index c4c76db90f9c..f925115e3250 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -163,7 +163,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	 * the fault.
 	 */

-	fault = handle_mm_fault(mm, vma, address, writeaccess & 1);
+	fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 05093d41d98e..30f5d100a81c 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -163,7 +163,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, ear0, write);
+	fault = handle_mm_fault(mm, vma, ear0, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 23088bed111e..19261a99e623 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -154,7 +154,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0);
+	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ?
FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { /* * We ran out of memory, or some other thing happened diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c index 4a71df4c1b30..7274b47f4c22 100644 --- a/arch/m32r/mm/fault.c +++ b/arch/m32r/mm/fault.c @@ -196,7 +196,7 @@ survive: */ addr = (address & PAGE_MASK); set_thread_fault_code(error_code); - fault = handle_mm_fault(mm, vma, addr, write); + fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index f493f03231d5..d0e35cf99fc6 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -155,7 +155,7 @@ good_area: */ survive: - fault = handle_mm_fault(mm, vma, address, write); + fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); #ifdef DEBUG printk("handle_mm_fault returns %d\n",fault); #endif diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index 5e67cd1fab40..956607a63f4c 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c @@ -232,7 +232,7 @@ good_area: * the fault. */ survive: - fault = handle_mm_fault(mm, vma, address, is_write); + fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index 55767ad9f00e..6751ce9ede9e 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -102,7 +102,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, write); + fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c index 33cf25025dac..a62e1e138bc1 100644 --- a/arch/mn10300/mm/fault.c +++ b/arch/mn10300/mm/fault.c @@ -258,7 +258,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, write); + fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 92c7fa4ecc3f..bfb6dd6ab380 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -202,7 +202,7 @@ good_area: * fault. */ - fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0); + fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { /* * We hit a shared mapping outside of the file, or some diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 5beffc8f481e..830bef0a1131 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -302,7 +302,7 @@ good_area: * the fault. */ survive: - ret = handle_mm_fault(mm, vma, address, is_write); + ret = handle_mm_fault(mm, vma, address, is_write ? 
FAULT_FLAG_WRITE : 0); if (unlikely(ret & VM_FAULT_ERROR)) { if (ret & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/powerpc/platforms/cell/spu_fault.c b/arch/powerpc/platforms/cell/spu_fault.c index 95d8dadf2d87..d06ba87f1a19 100644 --- a/arch/powerpc/platforms/cell/spu_fault.c +++ b/arch/powerpc/platforms/cell/spu_fault.c @@ -70,7 +70,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea, } ret = 0; - *flt = handle_mm_fault(mm, vma, ea, is_write); + *flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0); if (unlikely(*flt & VM_FAULT_ERROR)) { if (*flt & VM_FAULT_OOM) { ret = -ENOMEM; diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index b0b84c35b0ad..cb5d59eab0ee 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c @@ -66,7 +66,7 @@ static int __handle_fault(struct mm_struct *mm, unsigned long address, } survive: - fault = handle_mm_fault(mm, vma, address, write_access); + fault = handle_mm_fault(mm, vma, address, write_access ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 220a152c836c..74eb26bf1970 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -352,7 +352,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, write); + fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) { up_read(&mm->mmap_sem); diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 2c50f80fc332..cc8ddbdf3d7a 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -133,7 +133,7 @@ good_area: * the fault. */ survive: - fault = handle_mm_fault(mm, vma, address, writeaccess); + fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c index 7876997ba19a..fcbb6e135cef 100644 --- a/arch/sh/mm/tlbflush_64.c +++ b/arch/sh/mm/tlbflush_64.c @@ -187,7 +187,7 @@ good_area: * the fault. */ survive: - fault = handle_mm_fault(mm, vma, address, writeaccess); + fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 12e447fc8542..a5e30c642ee3 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c @@ -241,7 +241,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, write); + fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; @@ -484,7 +484,7 @@ good_area: if(!(vma->vm_flags & (VM_READ | VM_EXEC))) goto bad_area; } - switch (handle_mm_fault(mm, vma, address, write)) { + switch (handle_mm_fault(mm, vma, address, write ? 
FAULT_FLAG_WRITE : 0)) { case VM_FAULT_SIGBUS: case VM_FAULT_OOM: goto do_sigbus; diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 4ab8993b0863..e5620b27c8bf 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c @@ -398,7 +398,7 @@ good_area: goto bad_area; } - fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE)); + fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 7384d8accfe7..637c6505dc00 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c @@ -65,7 +65,7 @@ good_area: do { int fault; - fault = handle_mm_fault(mm, vma, address, is_write); + fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) { goto out_of_memory; diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index caba99601703..eb0566e83319 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S @@ -845,7 +845,7 @@ ENTRY(aesni_cbc_enc) */ ENTRY(aesni_cbc_dec) cmp $16, LEN - jb .Lcbc_dec_ret + jb .Lcbc_dec_just_ret mov 480(KEYP), KLEN add $240, KEYP movups (IVP), IV @@ -891,6 +891,7 @@ ENTRY(aesni_cbc_dec) add $16, OUTP cmp $16, LEN jge .Lcbc_dec_loop1 - movups IV, (IVP) .Lcbc_dec_ret: + movups IV, (IVP) +.Lcbc_dec_just_ret: ret diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 4e663398f77f..c580c5ec1cad 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -198,6 +198,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc, blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { @@ -221,6 +222,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc, blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { @@ -266,6 +268,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { @@ -289,6 +292,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, blkcipher_walk_init(&walk, dst, src, nbytes); err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { diff --git a/arch/x86/crypto/fpu.c b/arch/x86/crypto/fpu.c index 5f9781a3815f..daef6cd2b45d 100644 --- a/arch/x86/crypto/fpu.c +++ b/arch/x86/crypto/fpu.c @@ -48,7 +48,7 @@ static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in, struct blkcipher_desc desc = { .tfm = child, .info = desc_in->info, - .flags = desc_in->flags, + .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, }; kernel_fpu_begin(); @@ -67,7 +67,7 @@ static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in, struct blkcipher_desc desc = { .tfm = child, .info = desc_in->info, - .flags = desc_in->flags, + .flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, }; kernel_fpu_begin(); diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index c403526d5d15..78a5fff857be 100644 --- a/arch/x86/mm/fault.c +++ 
b/arch/x86/mm/fault.c @@ -1113,7 +1113,7 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault: */ - fault = handle_mm_fault(mm, vma, address, write); + fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { mm_fault_error(regs, error_code, address, fault); diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index bdd860d93f72..bc0733359a88 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -106,7 +106,7 @@ good_area: * the fault. */ survive: - fault = handle_mm_fault(mm, vma, address, is_write); + fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 87f92c39b5f0..a9952b1236b0 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -18,9 +18,22 @@ #include <linux/percpu.h> #include <linux/smp.h> #include <asm/byteorder.h> +#include <asm/processor.h> #include <asm/i387.h> #include "padlock.h" +/* + * Number of data blocks actually fetched for each xcrypt insn. + * Processors with prefetch errata will fetch extra blocks. + */ +static unsigned int ecb_fetch_blocks = 2; +#define MAX_ECB_FETCH_BLOCKS (8) +#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE) + +static unsigned int cbc_fetch_blocks = 1; +#define MAX_CBC_FETCH_BLOCKS (4) +#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE) + /* Control word. */ struct cword { unsigned int __attribute__ ((__packed__)) @@ -172,73 +185,111 @@ static inline void padlock_store_cword(struct cword *cword) * should be used only inside the irq_ts_save/restore() context */ -static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key, - struct cword *control_word) +static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key, + struct cword *control_word, int count) { asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ : "+S"(input), "+D"(output) - : "d"(control_word), "b"(key), "c"(1)); + : "d"(control_word), "b"(key), "c"(count)); +} + +static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key, + u8 *iv, struct cword *control_word, int count) +{ + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (count)); + return iv; } -static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword) +static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key, + struct cword *cword, int count) { - u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1]; + /* + * Padlock prefetches extra data so we must provide mapped input buffers. + * Assume there are at least 16 bytes of stack already in use. + */ + u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + memcpy(tmp, in, count * AES_BLOCK_SIZE); + rep_xcrypt_ecb(tmp, out, key, cword, count); +} + +static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key, + u8 *iv, struct cword *cword, int count) +{ + /* + * Padlock prefetches extra data so we must provide mapped input buffers. + * Assume there are at least 16 bytes of stack already in use. 
+ */ + u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); - memcpy(tmp, in, AES_BLOCK_SIZE); - padlock_xcrypt(tmp, out, key, cword); + memcpy(tmp, in, count * AES_BLOCK_SIZE); + return rep_xcrypt_cbc(tmp, out, key, iv, cword, count); } -static inline void aes_crypt(const u8 *in, u8 *out, u32 *key, - struct cword *cword) +static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key, + struct cword *cword, int count) { - /* padlock_xcrypt requires at least two blocks of data. */ - if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) & - (PAGE_SIZE - 1)))) { - aes_crypt_copy(in, out, key, cword); + /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data. + * We could avoid some copying here but it's probably not worth it. + */ + if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes > PAGE_SIZE)) { + ecb_crypt_copy(in, out, key, cword, count); return; } - padlock_xcrypt(in, out, key, cword); + rep_xcrypt_ecb(in, out, key, cword, count); +} + +static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key, + u8 *iv, struct cword *cword, int count) +{ + /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */ + if (unlikely(((unsigned long)in & PAGE_SIZE) + cbc_fetch_bytes > PAGE_SIZE)) + return cbc_crypt_copy(in, out, key, iv, cword, count); + + return rep_xcrypt_cbc(in, out, key, iv, cword, count); } static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, void *control_word, u32 count) { - if (count == 1) { - aes_crypt(input, output, key, control_word); + u32 initial = count & (ecb_fetch_blocks - 1); + + if (count < ecb_fetch_blocks) { + ecb_crypt(input, output, key, control_word, count); return; } - asm volatile ("test $1, %%cl;" - "je 1f;" -#ifndef CONFIG_X86_64 - "lea -1(%%ecx), %%eax;" - "mov $1, %%ecx;" -#else - "lea -1(%%rcx), %%rax;" - "mov $1, %%rcx;" -#endif - ".byte 0xf3,0x0f,0xa7,0xc8;" /* rep xcryptecb */ -#ifndef CONFIG_X86_64 - "mov %%eax, %%ecx;" -#else - "mov %%rax, %%rcx;" -#endif - "1:" - ".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(initial)); + + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ : "+S"(input), "+D"(output) - : "d"(control_word), "b"(key), "c"(count) - : "ax"); + : "d"(control_word), "b"(key), "c"(count - initial)); } static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, u8 *iv, void *control_word, u32 count) { - /* rep xcryptcbc */ - asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" + u32 initial = count & (cbc_fetch_blocks - 1); + + if (count < cbc_fetch_blocks) + return cbc_crypt(input, output, key, iv, control_word, count); + + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (count)); + + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ : "+S" (input), "+D" (output), "+a" (iv) - : "d" (control_word), "b" (key), "c" (count)); + : "d" (control_word), "b" (key), "c" (count-initial)); return iv; } @@ -249,7 +300,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) padlock_reset_key(&ctx->cword.encrypt); ts_state = irq_ts_save(); - aes_crypt(in, out, ctx->E, &ctx->cword.encrypt); + ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1); irq_ts_restore(ts_state); padlock_store_cword(&ctx->cword.encrypt); } @@ -261,7 
+312,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) padlock_reset_key(&ctx->cword.encrypt); ts_state = irq_ts_save(); - aes_crypt(in, out, ctx->D, &ctx->cword.decrypt); + ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1); irq_ts_restore(ts_state); padlock_store_cword(&ctx->cword.encrypt); } @@ -454,6 +505,7 @@ static struct crypto_alg cbc_aes_alg = { static int __init padlock_init(void) { int ret; + struct cpuinfo_x86 *c = &cpu_data(0); if (!cpu_has_xcrypt) { printk(KERN_NOTICE PFX "VIA PadLock not detected.\n"); @@ -476,6 +528,12 @@ static int __init padlock_init(void) printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n"); + if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) { + ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS; + cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS; + printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n"); + } + out: return ret; diff --git a/include/linux/mm.h b/include/linux/mm.h index cf260d848eb9..d006e93d5c93 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -810,11 +810,11 @@ extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end); #ifdef CONFIG_MMU extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, int write_access); + unsigned long address, unsigned int flags); #else static inline int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, - int write_access) + unsigned int flags) { /* should never happen if there's no MMU */ BUG(); diff --git a/ipc/util.h b/ipc/util.h index ab3ebf2621b9..764b51a37a6a 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -10,6 +10,7 @@ #ifndef _IPC_UTIL_H #define _IPC_UTIL_H +#include <linux/unistd.h> #include <linux/err.h> #define SEQ_MULTIPLIER (IPCMNI) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 6b0c2d8a2129..23067ab1a73c 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -472,7 +472,7 @@ config LOCKDEP bool depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT select STACKTRACE - select FRAME_POINTER if !X86 && !MIPS && !PPC && !ARM_UNWIND && !S390 + select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 select KALLSYMS select KALLSYMS_ALL diff --git a/lib/dma-debug.c b/lib/dma-debug.c index ad65fc0317d9..3b93129a968c 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -262,11 +262,12 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket, */ matches += 1; match_lvl = 0; - entry->size == ref->size ? ++match_lvl : match_lvl; - entry->type == ref->type ? ++match_lvl : match_lvl; - entry->direction == ref->direction ? ++match_lvl : match_lvl; + entry->size == ref->size ? ++match_lvl : 0; + entry->type == ref->type ? ++match_lvl : 0; + entry->direction == ref->direction ? ++match_lvl : 0; + entry->sg_call_ents == ref->sg_call_ents ? 
++match_lvl : 0; - if (match_lvl == 3) { + if (match_lvl == 4) { /* perfect-fit - return the result */ return entry; } else if (match_lvl > last_lvl) { @@ -873,72 +874,68 @@ static void check_for_illegal_area(struct device *dev, void *addr, u64 size) "[addr=%p] [size=%llu]\n", addr, size); } -static void check_sync(struct device *dev, dma_addr_t addr, - u64 size, u64 offset, int direction, bool to_cpu) +static void check_sync(struct device *dev, + struct dma_debug_entry *ref, + bool to_cpu) { - struct dma_debug_entry ref = { - .dev = dev, - .dev_addr = addr, - .size = size, - .direction = direction, - }; struct dma_debug_entry *entry; struct hash_bucket *bucket; unsigned long flags; - bucket = get_hash_bucket(&ref, &flags); + bucket = get_hash_bucket(ref, &flags); - entry = hash_bucket_find(bucket, &ref); + entry = hash_bucket_find(bucket, ref); if (!entry) { err_printk(dev, NULL, "DMA-API: device driver tries " "to sync DMA memory it has not allocated " "[device address=0x%016llx] [size=%llu bytes]\n", - (unsigned long long)addr, size); + (unsigned long long)ref->dev_addr, ref->size); goto out; } - if ((offset + size) > entry->size) { + if (ref->size > entry->size) { err_printk(dev, entry, "DMA-API: device driver syncs" " DMA memory outside allocated range " "[device address=0x%016llx] " - "[allocation size=%llu bytes] [sync offset=%llu] " - "[sync size=%llu]\n", entry->dev_addr, entry->size, - offset, size); + "[allocation size=%llu bytes] " + "[sync offset+size=%llu]\n", + entry->dev_addr, entry->size, + ref->size); } - if (direction != entry->direction) { + if (ref->direction != entry->direction) { err_printk(dev, entry, "DMA-API: device driver syncs " "DMA memory with different direction " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [synced with %s]\n", - (unsigned long long)addr, entry->size, + (unsigned long long)ref->dev_addr, entry->size, dir2name[entry->direction], - dir2name[direction]); + dir2name[ref->direction]); } if (entry->direction == DMA_BIDIRECTIONAL) goto out; if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && - !(direction == DMA_TO_DEVICE)) + !(ref->direction == DMA_TO_DEVICE)) err_printk(dev, entry, "DMA-API: device driver syncs " "device read-only DMA memory for cpu " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [synced with %s]\n", - (unsigned long long)addr, entry->size, + (unsigned long long)ref->dev_addr, entry->size, dir2name[entry->direction], - dir2name[direction]); + dir2name[ref->direction]); if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && - !(direction == DMA_FROM_DEVICE)) + !(ref->direction == DMA_FROM_DEVICE)) err_printk(dev, entry, "DMA-API: device driver syncs " "device write-only DMA memory to device " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [synced with %s]\n", - (unsigned long long)addr, entry->size, + (unsigned long long)ref->dev_addr, entry->size, dir2name[entry->direction], - dir2name[direction]); + dir2name[ref->direction]); out: put_hash_bucket(bucket, &flags); @@ -1036,19 +1033,16 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, } EXPORT_SYMBOL(debug_dma_map_sg); -static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s) +static int get_nr_mapped_entries(struct device *dev, + struct dma_debug_entry *ref) { - struct dma_debug_entry *entry, ref; + struct dma_debug_entry *entry; struct hash_bucket *bucket; unsigned long flags; int mapped_ents; - ref.dev = dev; - ref.dev_addr = sg_dma_address(s); - ref.size = sg_dma_len(s), - - 
bucket = get_hash_bucket(&ref, &flags); - entry = hash_bucket_find(bucket, &ref); + bucket = get_hash_bucket(ref, &flags); + entry = hash_bucket_find(bucket, ref); mapped_ents = 0; if (entry) @@ -1076,16 +1070,14 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, .dev_addr = sg_dma_address(s), .size = sg_dma_len(s), .direction = dir, - .sg_call_ents = 0, + .sg_call_ents = nelems, }; if (mapped_ents && i >= mapped_ents) break; - if (!i) { - ref.sg_call_ents = nelems; - mapped_ents = get_nr_mapped_entries(dev, s); - } + if (!i) + mapped_ents = get_nr_mapped_entries(dev, &ref); check_unmap(&ref); } @@ -1140,10 +1132,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent); void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, int direction) { + struct dma_debug_entry ref; + if (unlikely(global_disable)) return; - check_sync(dev, dma_handle, size, 0, direction, true); + ref.type = dma_debug_single; + ref.dev = dev; + ref.dev_addr = dma_handle; + ref.size = size; + ref.direction = direction; + ref.sg_call_ents = 0; + + check_sync(dev, &ref, true); } EXPORT_SYMBOL(debug_dma_sync_single_for_cpu); @@ -1151,10 +1152,19 @@ void debug_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, int direction) { + struct dma_debug_entry ref; + if (unlikely(global_disable)) return; - check_sync(dev, dma_handle, size, 0, direction, false); + ref.type = dma_debug_single; + ref.dev = dev; + ref.dev_addr = dma_handle; + ref.size = size; + ref.direction = direction; + ref.sg_call_ents = 0; + + check_sync(dev, &ref, false); } EXPORT_SYMBOL(debug_dma_sync_single_for_device); @@ -1163,10 +1173,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev, unsigned long offset, size_t size, int direction) { + struct dma_debug_entry ref; + if (unlikely(global_disable)) return; - check_sync(dev, dma_handle, size, offset, direction, true); + ref.type = dma_debug_single; + ref.dev = dev; + ref.dev_addr = dma_handle; + ref.size = offset + size; + ref.direction = direction; + ref.sg_call_ents = 0; + + check_sync(dev, &ref, true); } EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu); @@ -1175,10 +1194,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev, unsigned long offset, size_t size, int direction) { + struct dma_debug_entry ref; + if (unlikely(global_disable)) return; - check_sync(dev, dma_handle, size, offset, direction, false); + ref.type = dma_debug_single; + ref.dev = dev; + ref.dev_addr = dma_handle; + ref.size = offset + size; + ref.direction = direction; + ref.sg_call_ents = 0; + + check_sync(dev, &ref, false); } EXPORT_SYMBOL(debug_dma_sync_single_range_for_device); @@ -1192,14 +1220,24 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, return; for_each_sg(sg, s, nelems, i) { + + struct dma_debug_entry ref = { + .type = dma_debug_sg, + .dev = dev, + .paddr = sg_phys(s), + .dev_addr = sg_dma_address(s), + .size = sg_dma_len(s), + .direction = direction, + .sg_call_ents = nelems, + }; + if (!i) - mapped_ents = get_nr_mapped_entries(dev, s); + mapped_ents = get_nr_mapped_entries(dev, &ref); if (i >= mapped_ents) break; - check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0, - direction, true); + check_sync(dev, &ref, true); } } EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu); @@ -1214,14 +1252,23 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, return; for_each_sg(sg, s, nelems, i) { + + struct dma_debug_entry ref = { + .type = dma_debug_sg, + .dev = dev, + .paddr = 
sg_phys(s), + .dev_addr = sg_dma_address(s), + .size = sg_dma_len(s), + .direction = direction, + .sg_call_ents = nelems, + }; if (!i) - mapped_ents = get_nr_mapped_entries(dev, s); + mapped_ents = get_nr_mapped_entries(dev, &ref); if (i >= mapped_ents) break; - check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0, - direction, false); + check_sync(dev, &ref, false); } } EXPORT_SYMBOL(debug_dma_sync_sg_for_device); diff --git a/mm/memory.c b/mm/memory.c index d5d1653d60a6..98bcb90d5957 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1310,8 +1310,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, cond_resched(); while (!(page = follow_page(vma, start, foll_flags))) { int ret; - ret = handle_mm_fault(mm, vma, start, - foll_flags & FOLL_WRITE); + + /* FOLL_WRITE matches FAULT_FLAG_WRITE! */ + ret = handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE); if (ret & VM_FAULT_ERROR) { if (ret & VM_FAULT_OOM) return i ? i : -ENOMEM; @@ -2496,7 +2497,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end) */ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, - int write_access, pte_t orig_pte) + unsigned int flags, pte_t orig_pte) { spinlock_t *ptl; struct page *page; @@ -2572,9 +2573,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, inc_mm_counter(mm, anon_rss); pte = mk_pte(page, vma->vm_page_prot); - if (write_access && reuse_swap_page(page)) { + if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) { pte = maybe_mkwrite(pte_mkdirty(pte), vma); - write_access = 0; + flags &= ~FAULT_FLAG_WRITE; } flush_icache_page(vma, page); set_pte_at(mm, address, page_table, pte); @@ -2587,7 +2588,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, try_to_free_swap(page); unlock_page(page); - if (write_access) { + if (flags & FAULT_FLAG_WRITE) { ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte); if (ret & VM_FAULT_ERROR) ret &= VM_FAULT_ERROR; @@ -2616,7 +2617,7 @@ out_page: */ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, - int write_access) + unsigned int flags) { struct page *page; spinlock_t *ptl; @@ -2776,7 +2777,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, * due to the bad i386 page protection. But it's valid * for other architectures too. * - * Note that if write_access is true, we either now have + * Note that if FAULT_FLAG_WRITE is set, we either now have * an exclusive copy of the page, or this is a shared mapping, * so we can make it writable and dirty to avoid having to * handle that later. @@ -2847,11 +2848,10 @@ unwritable_page: static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, - int write_access, pte_t orig_pte) + unsigned int flags, pte_t orig_pte) { pgoff_t pgoff = (((address & PAGE_MASK) - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff; - unsigned int flags = (write_access ? 
FAULT_FLAG_WRITE : 0); pte_unmap(page_table); return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte); @@ -2868,12 +2868,12 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma, */ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, - int write_access, pte_t orig_pte) + unsigned int flags, pte_t orig_pte) { - unsigned int flags = FAULT_FLAG_NONLINEAR | - (write_access ? FAULT_FLAG_WRITE : 0); pgoff_t pgoff; + flags |= FAULT_FLAG_NONLINEAR; + if (!pte_unmap_same(mm, pmd, page_table, orig_pte)) return 0; @@ -2904,7 +2904,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma, */ static inline int handle_pte_fault(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, - pte_t *pte, pmd_t *pmd, int write_access) + pte_t *pte, pmd_t *pmd, unsigned int flags) { pte_t entry; spinlock_t *ptl; @@ -2915,30 +2915,30 @@ static inline int handle_pte_fault(struct mm_struct *mm, if (vma->vm_ops) { if (likely(vma->vm_ops->fault)) return do_linear_fault(mm, vma, address, - pte, pmd, write_access, entry); + pte, pmd, flags, entry); } return do_anonymous_page(mm, vma, address, - pte, pmd, write_access); + pte, pmd, flags); } if (pte_file(entry)) return do_nonlinear_fault(mm, vma, address, - pte, pmd, write_access, entry); + pte, pmd, flags, entry); return do_swap_page(mm, vma, address, - pte, pmd, write_access, entry); + pte, pmd, flags, entry); } ptl = pte_lockptr(mm, pmd); spin_lock(ptl); if (unlikely(!pte_same(*pte, entry))) goto unlock; - if (write_access) { + if (flags & FAULT_FLAG_WRITE) { if (!pte_write(entry)) return do_wp_page(mm, vma, address, pte, pmd, ptl, entry); entry = pte_mkdirty(entry); } entry = pte_mkyoung(entry); - if (ptep_set_access_flags(vma, address, pte, entry, write_access)) { + if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) { update_mmu_cache(vma, address, entry); } else { /* @@ -2947,7 +2947,7 @@ static inline int handle_pte_fault(struct mm_struct *mm, * This still avoids useless tlb flushes for .text page faults * with threads. 
*/ - if (write_access) + if (flags & FAULT_FLAG_WRITE) flush_tlb_page(vma, address); } unlock: @@ -2959,7 +2959,7 @@ unlock: * By the time we get here, we already hold the mm semaphore */ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, - unsigned long address, int write_access) + unsigned long address, unsigned int flags) { pgd_t *pgd; pud_t *pud; @@ -2971,7 +2971,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, count_vm_event(PGFAULT); if (unlikely(is_vm_hugetlb_page(vma))) - return hugetlb_fault(mm, vma, address, write_access); + return hugetlb_fault(mm, vma, address, flags); pgd = pgd_offset(mm, address); pud = pud_alloc(mm, pgd, address); @@ -2984,7 +2984,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (!pte) return VM_FAULT_OOM; - return handle_pte_fault(mm, vma, address, pte, pmd, write_access); + return handle_pte_fault(mm, vma, address, pte, pmd, flags); } #ifndef __PAGETABLE_PUD_FOLDED diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 562403a23488..462e2cedaa6a 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -972,8 +972,6 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_SUBSYSTEM_ID, 0); } - if (bus->modelname) - codec->modelname = kstrdup(bus->modelname, GFP_KERNEL); /* power-up all before initialization */ hda_set_power_state(codec, diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index d22b26068014..bf4b78a74a8f 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -224,6 +224,7 @@ enum { ALC883_ACER, ALC883_ACER_ASPIRE, ALC888_ACER_ASPIRE_4930G, + ALC888_ACER_ASPIRE_6530G, ALC888_ACER_ASPIRE_8930G, ALC883_MEDION, ALC883_MEDION_MD2, @@ -970,7 +971,7 @@ static void alc_automute_pin(struct hda_codec *codec) } } -#if 0 /* it's broken in some acses -- temporarily disabled */ +#if 0 /* it's broken in some cases -- temporarily disabled */ static void alc_mic_automute(struct hda_codec *codec) { struct alc_spec *spec = codec->spec; @@ -1170,7 +1171,7 @@ static int alc_subsystem_id(struct hda_codec *codec, /* invalid SSID, check the special NID pin defcfg instead */ /* - * 31~30 : port conetcivity + * 31~30 : port connectivity * 29~21 : reserve * 20 : PCBEEP input * 19~16 : Check sum (15:1) @@ -1471,6 +1472,25 @@ static struct hda_verb alc888_acer_aspire_4930g_verbs[] = { }; /* + * ALC888 Acer Aspire 6530G model + */ + +static struct hda_verb alc888_acer_aspire_6530g_verbs[] = { +/* Bias voltage on for external mic port */ + {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN | PIN_VREF80}, +/* Enable unsolicited event for HP jack */ + {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN}, +/* Enable speaker output */ + {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, + {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, +/* Enable headphone output */ + {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | PIN_HP}, + {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE}, + {0x15, AC_VERB_SET_CONNECT_SEL, 0x00}, + { } +}; + +/* * ALC889 Acer Aspire 8930G model */ @@ -1544,6 +1564,25 @@ static struct hda_input_mux alc888_2_capture_sources[2] = { } }; +static struct hda_input_mux alc888_acer_aspire_6530_sources[2] = { + /* Interal mic only available on one ADC */ + { + .num_items = 3, + .items = { + { "Ext Mic", 0x0 }, + { "CD", 0x4 }, + { "Int Mic", 0xb }, + }, + }, + { + .num_items = 2, + .items = { + { "Ext Mic", 0x0 }, + { "CD", 
0x4 }, + }, + } +}; + static struct hda_input_mux alc889_capture_sources[3] = { /* Digital mic only available on first "ADC" */ { @@ -6347,7 +6386,7 @@ static struct hda_channel_mode alc882_sixstack_modes[2] = { }; /* - * macbook pro ALC885 can switch LineIn to LineOut without loosing Mic + * macbook pro ALC885 can switch LineIn to LineOut without losing Mic */ /* @@ -7047,7 +7086,7 @@ static struct hda_verb alc882_auto_init_verbs[] = { #define alc882_loopbacks alc880_loopbacks #endif -/* pcm configuration: identiacal with ALC880 */ +/* pcm configuration: identical with ALC880 */ #define alc882_pcm_analog_playback alc880_pcm_analog_playback #define alc882_pcm_analog_capture alc880_pcm_analog_capture #define alc882_pcm_digital_playback alc880_pcm_digital_playback @@ -8068,7 +8107,7 @@ static struct snd_kcontrol_new alc883_fivestack_mixer[] = { { } /* end */ }; -static struct snd_kcontrol_new alc883_tagra_mixer[] = { +static struct snd_kcontrol_new alc883_targa_mixer[] = { HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT), @@ -8088,7 +8127,7 @@ static struct snd_kcontrol_new alc883_tagra_mixer[] = { { } /* end */ }; -static struct snd_kcontrol_new alc883_tagra_2ch_mixer[] = { +static struct snd_kcontrol_new alc883_targa_2ch_mixer[] = { HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT), HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT), @@ -8153,6 +8192,19 @@ static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = { { } /* end */ }; +static struct snd_kcontrol_new alc888_acer_aspire_6530_mixer[] = { + HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), + HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), + HDA_CODEC_VOLUME("LFE Playback Volume", 0x0f, 0x0, HDA_OUTPUT), + HDA_BIND_MUTE("LFE Playback Switch", 0x0f, 2, HDA_INPUT), + HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT), + HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT), + HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT), + HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT), + { } /* end */ +}; + static struct snd_kcontrol_new alc888_lenovo_sky_mixer[] = { HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT), HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT), @@ -8417,7 +8469,7 @@ static struct hda_verb alc883_2ch_fujitsu_pi2515_verbs[] = { { } /* end */ }; -static struct hda_verb alc883_tagra_verbs[] = { +static struct hda_verb alc883_targa_verbs[] = { {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)}, {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)}, @@ -8626,8 +8678,8 @@ static void alc883_medion_md2_init_hook(struct hda_codec *codec) } /* toggle speaker-output according to the hp-jack state */ -#define alc883_tagra_init_hook alc882_targa_init_hook -#define alc883_tagra_unsol_event alc882_targa_unsol_event +#define alc883_targa_init_hook alc882_targa_init_hook +#define alc883_targa_unsol_event alc882_targa_unsol_event static void alc883_clevo_m720_mic_automute(struct hda_codec *codec) { @@ -8957,7 +9009,7 @@ static void alc889A_mb31_unsol_event(struct hda_codec *codec, unsigned int res) #define alc883_loopbacks alc880_loopbacks #endif -/* pcm configuration: identiacal with ALC880 */ +/* pcm configuration: identical with ALC880 */ #define 
alc883_pcm_analog_playback alc880_pcm_analog_playback #define alc883_pcm_analog_capture alc880_pcm_analog_capture #define alc883_pcm_analog_alt_capture alc880_pcm_analog_alt_capture @@ -8978,6 +9030,7 @@ static const char *alc883_models[ALC883_MODEL_LAST] = { [ALC883_ACER] = "acer", [ALC883_ACER_ASPIRE] = "acer-aspire", [ALC888_ACER_ASPIRE_4930G] = "acer-aspire-4930g", + [ALC888_ACER_ASPIRE_6530G] = "acer-aspire-6530g", [ALC888_ACER_ASPIRE_8930G] = "acer-aspire-8930g", [ALC883_MEDION] = "medion", [ALC883_MEDION_MD2] = "medion-md2", @@ -9021,7 +9074,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = { SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G", ALC888_ACER_ASPIRE_4930G), SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G", - ALC888_ACER_ASPIRE_4930G), + ALC888_ACER_ASPIRE_6530G), /* default Acer -- disabled as it causes more problems. * model=auto should work fine now */ @@ -9069,6 +9122,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = { SND_PCI_QUIRK(0x1462, 0x7267, "MSI", ALC883_3ST_6ch_DIG), SND_PCI_QUIRK(0x1462, 0x7280, "MSI", ALC883_6ST_DIG), SND_PCI_QUIRK(0x1462, 0x7327, "MSI", ALC883_6ST_DIG), + SND_PCI_QUIRK(0x1462, 0x7350, "MSI", ALC883_6ST_DIG), SND_PCI_QUIRK(0x1462, 0xa422, "MSI", ALC883_TARGA_2ch_DIG), SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG), SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720), @@ -9165,8 +9219,8 @@ static struct alc_config_preset alc883_presets[] = { .input_mux = &alc883_capture_source, }, [ALC883_TARGA_DIG] = { - .mixers = { alc883_tagra_mixer, alc883_chmode_mixer }, - .init_verbs = { alc883_init_verbs, alc883_tagra_verbs}, + .mixers = { alc883_targa_mixer, alc883_chmode_mixer }, + .init_verbs = { alc883_init_verbs, alc883_targa_verbs}, .num_dacs = ARRAY_SIZE(alc883_dac_nids), .dac_nids = alc883_dac_nids, .dig_out_nid = ALC883_DIGOUT_NID, @@ -9174,12 +9228,12 @@ static struct alc_config_preset alc883_presets[] = { .channel_mode = alc883_3ST_6ch_modes, .need_dac_fix = 1, .input_mux = &alc883_capture_source, - .unsol_event = alc883_tagra_unsol_event, - .init_hook = alc883_tagra_init_hook, + .unsol_event = alc883_targa_unsol_event, + .init_hook = alc883_targa_init_hook, }, [ALC883_TARGA_2ch_DIG] = { - .mixers = { alc883_tagra_2ch_mixer}, - .init_verbs = { alc883_init_verbs, alc883_tagra_verbs}, + .mixers = { alc883_targa_2ch_mixer}, + .init_verbs = { alc883_init_verbs, alc883_targa_verbs}, .num_dacs = ARRAY_SIZE(alc883_dac_nids), .dac_nids = alc883_dac_nids, .adc_nids = alc883_adc_nids_alt, @@ -9188,13 +9242,13 @@ static struct alc_config_preset alc883_presets[] = { .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes), .channel_mode = alc883_3ST_2ch_modes, .input_mux = &alc883_capture_source, - .unsol_event = alc883_tagra_unsol_event, - .init_hook = alc883_tagra_init_hook, + .unsol_event = alc883_targa_unsol_event, + .init_hook = alc883_targa_init_hook, }, [ALC883_TARGA_8ch_DIG] = { .mixers = { alc883_base_mixer, alc883_chmode_mixer }, .init_verbs = { alc883_init_verbs, alc880_gpio3_init_verbs, - alc883_tagra_verbs }, + alc883_targa_verbs }, .num_dacs = ARRAY_SIZE(alc883_dac_nids), .dac_nids = alc883_dac_nids, .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_rev), @@ -9206,8 +9260,8 @@ static struct alc_config_preset alc883_presets[] = { .channel_mode = alc883_4ST_8ch_modes, .need_dac_fix = 1, .input_mux = &alc883_capture_source, - .unsol_event = alc883_tagra_unsol_event, - .init_hook = alc883_tagra_init_hook, + .unsol_event = alc883_targa_unsol_event, + .init_hook = alc883_targa_init_hook, }, [ALC883_ACER] = { .mixers 
= { alc883_base_mixer }, @@ -9255,6 +9309,24 @@ static struct alc_config_preset alc883_presets[] = { .unsol_event = alc_automute_amp_unsol_event, .init_hook = alc888_acer_aspire_4930g_init_hook, }, + [ALC888_ACER_ASPIRE_6530G] = { + .mixers = { alc888_acer_aspire_6530_mixer }, + .init_verbs = { alc883_init_verbs, alc880_gpio1_init_verbs, + alc888_acer_aspire_6530g_verbs }, + .num_dacs = ARRAY_SIZE(alc883_dac_nids), + .dac_nids = alc883_dac_nids, + .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_rev), + .adc_nids = alc883_adc_nids_rev, + .capsrc_nids = alc883_capsrc_nids_rev, + .dig_out_nid = ALC883_DIGOUT_NID, + .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes), + .channel_mode = alc883_3ST_2ch_modes, + .num_mux_defs = + ARRAY_SIZE(alc888_2_capture_sources), + .input_mux = alc888_acer_aspire_6530_sources, + .unsol_event = alc_automute_amp_unsol_event, + .init_hook = alc888_acer_aspire_4930g_init_hook, + }, [ALC888_ACER_ASPIRE_8930G] = { .mixers = { alc888_base_mixer, alc883_chmode_mixer }, @@ -9361,7 +9433,7 @@ static struct alc_config_preset alc883_presets[] = { .init_hook = alc888_lenovo_ms7195_front_automute, }, [ALC883_HAIER_W66] = { - .mixers = { alc883_tagra_2ch_mixer}, + .mixers = { alc883_targa_2ch_mixer}, .init_verbs = { alc883_init_verbs, alc883_haier_w66_verbs}, .num_dacs = ARRAY_SIZE(alc883_dac_nids), .dac_nids = alc883_dac_nids, @@ -11131,7 +11203,7 @@ static struct hda_verb alc262_toshiba_rx1_unsol_verbs[] = { #define alc262_loopbacks alc880_loopbacks #endif -/* pcm configuration: identiacal with ALC880 */ +/* pcm configuration: identical with ALC880 */ #define alc262_pcm_analog_playback alc880_pcm_analog_playback #define alc262_pcm_analog_capture alc880_pcm_analog_capture #define alc262_pcm_digital_playback alc880_pcm_digital_playback @@ -12286,7 +12358,7 @@ static void alc268_auto_init_mono_speaker_out(struct hda_codec *codec) AC_VERB_SET_AMP_GAIN_MUTE, dac_vol2); } -/* pcm configuration: identiacal with ALC880 */ +/* pcm configuration: identical with ALC880 */ #define alc268_pcm_analog_playback alc880_pcm_analog_playback #define alc268_pcm_analog_capture alc880_pcm_analog_capture #define alc268_pcm_analog_alt_capture alc880_pcm_analog_alt_capture @@ -13197,7 +13269,7 @@ static int alc269_auto_create_analog_input_ctls(struct alc_spec *spec, #define alc269_loopbacks alc880_loopbacks #endif -/* pcm configuration: identiacal with ALC880 */ +/* pcm configuration: identical with ALC880 */ #define alc269_pcm_analog_playback alc880_pcm_analog_playback #define alc269_pcm_analog_capture alc880_pcm_analog_capture #define alc269_pcm_digital_playback alc880_pcm_digital_playback @@ -14059,7 +14131,7 @@ static void alc861_toshiba_unsol_event(struct hda_codec *codec, alc861_toshiba_automute(codec); } -/* pcm configuration: identiacal with ALC880 */ +/* pcm configuration: identical with ALC880 */ #define alc861_pcm_analog_playback alc880_pcm_analog_playback #define alc861_pcm_analog_capture alc880_pcm_analog_capture #define alc861_pcm_digital_playback alc880_pcm_digital_playback @@ -14582,7 +14654,7 @@ static hda_nid_t alc861vd_dac_nids[4] = { /* dac_nids for ALC660vd are in a different order - according to * Realtek's driver. - * This should probably tesult in a different mixer for 6stack models + * This should probably result in a different mixer for 6stack models * of ALC660vd codecs, but for now there is only 3stack mixer * - and it is the same as in 861vd. 
 * adc_nids in ALC660vd are (is) the same as in 861vd
@@ -15027,7 +15099,7 @@ static void alc861vd_dallas_init_hook(struct hda_codec *codec)
 #define alc861vd_loopbacks	alc880_loopbacks
 #endif

-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
 #define alc861vd_pcm_analog_playback	alc880_pcm_analog_playback
 #define alc861vd_pcm_analog_capture	alc880_pcm_analog_capture
 #define alc861vd_pcm_digital_playback	alc880_pcm_digital_playback
@@ -15206,7 +15278,7 @@ static void alc861vd_auto_init_hp_out(struct hda_codec *codec)
 	hda_nid_t pin;

 	pin = spec->autocfg.hp_pins[0];
-	if (pin) /* connect to front and use dac 0 */
+	if (pin) /* connect to front and use dac 0 */
 		alc861vd_auto_set_output_and_unmute(codec, pin, PIN_HP, 0);
 	pin = spec->autocfg.speaker_pins[0];
 	if (pin)
@@ -16669,7 +16741,7 @@ static struct snd_kcontrol_new alc272_nc10_mixer[] = {
 #endif


-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
 #define alc662_pcm_analog_playback	alc880_pcm_analog_playback
 #define alc662_pcm_analog_capture	alc880_pcm_analog_capture
 #define alc662_pcm_digital_playback	alc880_pcm_digital_playback
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index fa336616152e..938a58a5a244 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -297,9 +297,9 @@ static int txx9aclc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
 static bool filter(struct dma_chan *chan, void *param)
 {
 	struct txx9aclc_dmadata *dmadata = param;
-	char devname[BUS_ID_SIZE + 2];
+	char devname[20 + 2];	/* FIXME: old BUS_ID_SIZE + 2 */

-	sprintf(devname, "%s.%d", dmadata->dma_res->name,
+	snprintf(devname, sizeof(devname), "%s.%d", dmadata->dma_res->name,
 		(int)dmadata->dma_res->start);
 	if (strcmp(dev_name(chan->device->dev), devname) == 0) {
 		chan->private = &dmadata->dma_slave;
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index b14451342166..8f9b60c5d74c 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -199,8 +199,9 @@ static int snd_usb_caiaq_pcm_prepare(struct snd_pcm_substream *substream)
 		dev->period_out_count[index] = BYTES_PER_SAMPLE + 1;
 		dev->audio_out_buf_pos[index] = BYTES_PER_SAMPLE + 1;
 	} else {
-		dev->period_in_count[index] = BYTES_PER_SAMPLE;
-		dev->audio_in_buf_pos[index] = BYTES_PER_SAMPLE;
+		int in_pos = (dev->spec.data_alignment == 2) ? 0 : 2;
+		dev->period_in_count[index] = BYTES_PER_SAMPLE + in_pos;
+		dev->audio_in_buf_pos[index] = BYTES_PER_SAMPLE + in_pos;
 	}

 	if (dev->streaming)
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c
index 22406245a98b..0e5db719de24 100644
--- a/sound/usb/caiaq/device.c
+++ b/sound/usb/caiaq/device.c
@@ -35,7 +35,7 @@
 #include "input.h"

 MODULE_AUTHOR("Daniel Mack <[email protected]>");
-MODULE_DESCRIPTION("caiaq USB audio, version 1.3.16");
+MODULE_DESCRIPTION("caiaq USB audio, version 1.3.17");
 MODULE_LICENSE("GPL");
 MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2},"
 			 "{Native Instruments, RigKontrol3},"
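The padlock-aes.c rework above addresses a VIA Nano prefetch erratum: the xcrypt instructions may fetch a whole prefetch unit (up to 8 ECB or 4 CBC blocks on the affected stepping) beyond the blocks they actually process, so short requests near the end of a page are bounced through a stack buffer and larger requests are split with the odd remainder handled first. Below is a standalone sketch of that bookkeeping, assuming an 8-block fetch unit; the helper names and the simplified page-offset check are mine, not the driver's, and the rationale for the split (keeping the trailing hardware fetch inside the source buffer) is an inference from the code.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define AES_BLOCK_SIZE	16

/* Assumed fetch unit: the affected stepping over-fetches up to 8 ECB blocks. */
#define ECB_FETCH_BLOCKS	8
#define ECB_FETCH_BYTES		(ECB_FETCH_BLOCKS * AES_BLOCK_SIZE)

/*
 * Simplified form of the question the driver asks: if the engine reads
 * ECB_FETCH_BYTES starting at 'in', does it run past the end of in's page?
 */
static int would_overfetch_page(const void *in)
{
	unsigned long off = (uintptr_t)in & (PAGE_SIZE - 1);

	return off + ECB_FETCH_BYTES > PAGE_SIZE;
}

/*
 * Split a block count the way the reworked padlock_xcrypt_ecb() does:
 * process the odd remainder first (plenty of valid data still lies ahead
 * of it), then run the rest as a multiple of the fetch size.
 */
static void split_blocks(unsigned int count, unsigned int *initial, unsigned int *rest)
{
	*initial = count & (ECB_FETCH_BLOCKS - 1);
	*rest = count - *initial;
}

int main(void)
{
	unsigned int initial, rest;

	split_blocks(21, &initial, &rest);
	printf("21 blocks -> %u first, then %u\n", initial, rest);	/* 5, then 16 */

	printf("near page end, would over-fetch: %d\n",
	       would_overfetch_page((void *)(uintptr_t)(PAGE_SIZE - AES_BLOCK_SIZE)));
	return 0;
}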