65 files changed, 2205 insertions, 381 deletions
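[Note on the Documentation/unaligned-memory-access.txt hunk below: it fixes an inverted comparison in the documented ether_addr_equal() example. Equal MAC addresses XOR to all-zero 16-bit words, so the OR-accumulated result must compare == 0 (true on a match), not != 0. A minimal standalone sketch of the corrected logic, compilable outside the kernel; the typedefs and the main() harness are illustrative, not part of the patch:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

typedef uint8_t  u8;
typedef uint16_t u16;

/* Corrected example: per the doc, both addresses must be at least
 * 2-byte aligned for the u16 accesses to be legal everywhere. */
static bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
{
	const u16 *a = (const u16 *)addr1;
	const u16 *b = (const u16 *)addr2;

	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
}

int main(void)
{
	u8 x[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u8 y[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("%d\n", ether_addr_equal(x, y));	/* 1: addresses match */
	y[5] ^= 1;
	printf("%d\n", ether_addr_equal(x, y));	/* 0: addresses differ */
	return 0;
}
]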
diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile index c75e5d6b8fa8..a6eb7dcd4dd5 100644 --- a/Documentation/DocBook/Makefile +++ b/Documentation/DocBook/Makefile @@ -12,7 +12,7 @@ DOCBOOKS := z8530book.xml \ kernel-api.xml filesystems.xml lsm.xml kgdb.xml \ gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \ genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \ - 80211.xml sh.xml regulator.xml w1.xml \ + sh.xml regulator.xml w1.xml \ writing_musb_glue_layer.xml iio.xml ifeq ($(DOCBOOKS),) diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt index 87d3714b956a..a7235e9e1c97 100644 --- a/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt +++ b/Documentation/devicetree/bindings/clock/qcom,rpmcc.txt @@ -11,6 +11,7 @@ Required properties : compatible "qcom,rpmcc" should be also included. "qcom,rpmcc-msm8916", "qcom,rpmcc" + "qcom,rpmcc-msm8974", "qcom,rpmcc" "qcom,rpmcc-apq8064", "qcom,rpmcc" - #clock-cells : shall contain 1 diff --git a/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt b/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt index 0532d815dae3..8f19d87cbf24 100644 --- a/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt +++ b/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt @@ -17,6 +17,9 @@ Required properties: property, containing a phandle to the clock device node, an index selecting between gated clocks and other clocks and an index specifying the clock to use. +- clocks: External oscillator clock phandle + - high speed external clock signal (HSE) + - external I2S clock (I2S_CKIN) Example: @@ -25,6 +28,7 @@ Example: #clock-cells = <2> compatible = "st,stm32f42xx-rcc", "st,stm32-rcc"; reg = <0x40023800 0x400>; + clocks = <&clk_hse>, <&clk_i2s_ckin>; }; Specifying gated clocks @@ -66,6 +70,19 @@ The secondary index is bound with the following magic numbers: 0 SYSTICK 1 FCLK + 2 CLK_LSI (low-power clock source) + 3 CLK_LSE (generated from a 32.768 kHz low-speed external + crystal or ceramic resonator) + 4 CLK_HSE_RTC (HSE division factor for RTC clock) + 5 CLK_RTC (real-time clock) + 6 PLL_VCO_I2S (vco frequency of I2S pll) + 7 PLL_VCO_SAI (vco frequency of SAI pll) + 8 CLK_LCD (LCD-TFT) + 9 CLK_I2S (I2S clocks) + 10 CLK_SAI1 (audio clocks) + 11 CLK_SAI2 + 12 CLK_I2SQ_PDIV (post divisor of pll i2s q divisor) + 13 CLK_SAIQ_PDIV (post divisor of pll sai q divisor) Example: diff --git a/Documentation/devicetree/bindings/clock/zx296718-clk.txt b/Documentation/devicetree/bindings/clock/zx296718-clk.txt index 8c18b7b237bf..4ad703808407 100644 --- a/Documentation/devicetree/bindings/clock/zx296718-clk.txt +++ b/Documentation/devicetree/bindings/clock/zx296718-clk.txt @@ -13,6 +13,9 @@ Required properties: "zte,zx296718-lsp1crm": zx296718 device level clock selection and gating + "zte,zx296718-audiocrm": + zx296718 audio clock selection, divider and gating + - reg: Address and length of the register set The clock consumer should specify the desired clock by having the clock diff --git a/Documentation/unaligned-memory-access.txt b/Documentation/unaligned-memory-access.txt index a445da098bc6..3f76c0c37920 100644 --- a/Documentation/unaligned-memory-access.txt +++ b/Documentation/unaligned-memory-access.txt @@ -151,7 +151,7 @@ bool ether_addr_equal(const u8 *addr1, const u8 *addr2) #else const u16 *a = (const u16 *)addr1; const u16 *b = (const u16 *)addr2; - return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; + return ((a[0] ^ b[0]) | (a[1] ^ 
b[1]) | (a[2] ^ b[2])) == 0; #endif } @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 10 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME = Roaring Lionus # *DOCUMENTATION* diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h new file mode 100644 index 000000000000..df411f3e083c --- /dev/null +++ b/arch/arm64/include/asm/asm-uaccess.h @@ -0,0 +1,65 @@ +#ifndef __ASM_ASM_UACCESS_H +#define __ASM_ASM_UACCESS_H + +#include <asm/alternative.h> +#include <asm/kernel-pgtable.h> +#include <asm/sysreg.h> +#include <asm/assembler.h> + +/* + * User access enabling/disabling macros. + */ +#ifdef CONFIG_ARM64_SW_TTBR0_PAN + .macro __uaccess_ttbr0_disable, tmp1 + mrs \tmp1, ttbr1_el1 // swapper_pg_dir + add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir + msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 + isb + .endm + + .macro __uaccess_ttbr0_enable, tmp1 + get_thread_info \tmp1 + ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1 + msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1 + isb + .endm + + .macro uaccess_ttbr0_disable, tmp1 +alternative_if_not ARM64_HAS_PAN + __uaccess_ttbr0_disable \tmp1 +alternative_else_nop_endif + .endm + + .macro uaccess_ttbr0_enable, tmp1, tmp2 +alternative_if_not ARM64_HAS_PAN + save_and_disable_irq \tmp2 // avoid preemption + __uaccess_ttbr0_enable \tmp1 + restore_irq \tmp2 +alternative_else_nop_endif + .endm +#else + .macro uaccess_ttbr0_disable, tmp1 + .endm + + .macro uaccess_ttbr0_enable, tmp1, tmp2 + .endm +#endif + +/* + * These macros are no-ops when UAO is present. + */ + .macro uaccess_disable_not_uao, tmp1 + uaccess_ttbr0_disable \tmp1 +alternative_if ARM64_ALT_PAN_NOT_UAO + SET_PSTATE_PAN(1) +alternative_else_nop_endif + .endm + + .macro uaccess_enable_not_uao, tmp1, tmp2 + uaccess_ttbr0_enable \tmp1, \tmp2 +alternative_if ARM64_ALT_PAN_NOT_UAO + SET_PSTATE_PAN(0) +alternative_else_nop_endif + .endm + +#endif diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index d26750ca6e06..46da3ea638bb 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -22,8 +22,6 @@ #include <asm/kernel-pgtable.h> #include <asm/sysreg.h> -#ifndef __ASSEMBLY__ - /* * User space memory access functions */ @@ -424,66 +422,4 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count); extern __must_check long strlen_user(const char __user *str); extern __must_check long strnlen_user(const char __user *str, long n); -#else /* __ASSEMBLY__ */ - -#include <asm/assembler.h> - -/* - * User access enabling/disabling macros. 
- */ -#ifdef CONFIG_ARM64_SW_TTBR0_PAN - .macro __uaccess_ttbr0_disable, tmp1 - mrs \tmp1, ttbr1_el1 // swapper_pg_dir - add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir - msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1 - isb - .endm - - .macro __uaccess_ttbr0_enable, tmp1 - get_thread_info \tmp1 - ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1 - msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1 - isb - .endm - - .macro uaccess_ttbr0_disable, tmp1 -alternative_if_not ARM64_HAS_PAN - __uaccess_ttbr0_disable \tmp1 -alternative_else_nop_endif - .endm - - .macro uaccess_ttbr0_enable, tmp1, tmp2 -alternative_if_not ARM64_HAS_PAN - save_and_disable_irq \tmp2 // avoid preemption - __uaccess_ttbr0_enable \tmp1 - restore_irq \tmp2 -alternative_else_nop_endif - .endm -#else - .macro uaccess_ttbr0_disable, tmp1 - .endm - - .macro uaccess_ttbr0_enable, tmp1, tmp2 - .endm -#endif - -/* - * These macros are no-ops when UAO is present. - */ - .macro uaccess_disable_not_uao, tmp1 - uaccess_ttbr0_disable \tmp1 -alternative_if ARM64_ALT_PAN_NOT_UAO - SET_PSTATE_PAN(1) -alternative_else_nop_endif - .endm - - .macro uaccess_enable_not_uao, tmp1, tmp2 - uaccess_ttbr0_enable \tmp1, \tmp2 -alternative_if ARM64_ALT_PAN_NOT_UAO - SET_PSTATE_PAN(0) -alternative_else_nop_endif - .endm - -#endif /* __ASSEMBLY__ */ - #endif /* __ASM_UACCESS_H */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index a7504f40d7ee..923841ffe4a9 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -31,7 +31,7 @@ #include <asm/memory.h> #include <asm/ptrace.h> #include <asm/thread_info.h> -#include <linux/uaccess.h> +#include <asm/asm-uaccess.h> #include <asm/unistd.h> /* diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S index add4a1334085..e88fb99c1561 100644 --- a/arch/arm64/lib/clear_user.S +++ b/arch/arm64/lib/clear_user.S @@ -17,7 +17,7 @@ */ #include <linux/linkage.h> -#include <linux/uaccess.h> +#include <asm/asm-uaccess.h> .text diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index fd6cd05593f9..4b5d826895ff 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -17,7 +17,7 @@ #include <linux/linkage.h> #include <asm/cache.h> -#include <linux/uaccess.h> +#include <asm/asm-uaccess.h> /* * Copy from user space to a kernel buffer (alignment handled by the hardware) diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index d828540ded6f..47184c3a97da 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -19,7 +19,7 @@ #include <linux/linkage.h> #include <asm/cache.h> -#include <linux/uaccess.h> +#include <asm/asm-uaccess.h> /* * Copy from user space to user space (alignment handled by the hardware) diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 3e6ae2663b82..351f0766f7a6 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -17,7 +17,7 @@ #include <linux/linkage.h> #include <asm/cache.h> -#include <linux/uaccess.h> +#include <asm/asm-uaccess.h> /* * Copy to user space from a kernel buffer (alignment handled by the hardware) diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S index 17f422a4dc55..83c27b6e6dca 100644 --- a/arch/arm64/mm/cache.S +++ b/arch/arm64/mm/cache.S @@ -23,7 +23,7 @@ #include <asm/assembler.h> #include <asm/cpufeature.h> #include <asm/alternative.h> -#include <linux/uaccess.h> +#include <asm/asm-uaccess.h> /* * 
flush_icache_range(start,end) diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S index 47cf3f9d89ff..947830a459d2 100644 --- a/arch/arm64/xen/hypercall.S +++ b/arch/arm64/xen/hypercall.S @@ -49,7 +49,7 @@ #include <linux/linkage.h> #include <asm/assembler.h> -#include <linux/uaccess.h> +#include <asm/asm-uaccess.h> #include <xen/interface/xen.h> diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 68557f52b961..854022772c5b 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -139,6 +139,19 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); } +static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) +{ + bool negative; + asm volatile(LOCK_PREFIX "andb %2,%1\n\t" + CC_SET(s) + : CC_OUT(s) (negative), ADDR + : "ir" ((char) ~(1 << nr)) : "memory"); + return negative; +} + +// Let everybody know we have it +#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte + /* * __clear_bit_unlock - Clears a bit in memory * @nr: Bit to clear diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index ffacfdcacb85..a5fd137417a2 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -1182,6 +1182,9 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank) const char *name = get_name(bank, NULL); int err = 0; + if (!dev) + return -ENODEV; + if (is_shared_bank(bank)) { nb = node_to_amd_nb(amd_get_nb_id(cpu)); diff --git a/crypto/testmgr.c b/crypto/testmgr.c index f616ad74cce7..44e888b0b041 100644 --- a/crypto/testmgr.c +++ b/crypto/testmgr.c @@ -1461,16 +1461,25 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, for (i = 0; i < ctcount; i++) { unsigned int dlen = COMP_BUF_SIZE; int ilen = ctemplate[i].inlen; + void *input_vec; + input_vec = kmalloc(ilen, GFP_KERNEL); + if (!input_vec) { + ret = -ENOMEM; + goto out; + } + + memcpy(input_vec, ctemplate[i].input, ilen); memset(output, 0, dlen); init_completion(&result.completion); - sg_init_one(&src, ctemplate[i].input, ilen); + sg_init_one(&src, input_vec, ilen); sg_init_one(&dst, output, dlen); req = acomp_request_alloc(tfm); if (!req) { pr_err("alg: acomp: request alloc failed for %s\n", algo); + kfree(input_vec); ret = -ENOMEM; goto out; } @@ -1483,6 +1492,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, if (ret) { pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n", i + 1, algo, -ret); + kfree(input_vec); acomp_request_free(req); goto out; } @@ -1491,6 +1501,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n", i + 1, algo, req->dlen); ret = -EINVAL; + kfree(input_vec); acomp_request_free(req); goto out; } @@ -1500,26 +1511,37 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, i + 1, algo); hexdump(output, req->dlen); ret = -EINVAL; + kfree(input_vec); acomp_request_free(req); goto out; } + kfree(input_vec); acomp_request_free(req); } for (i = 0; i < dtcount; i++) { unsigned int dlen = COMP_BUF_SIZE; int ilen = dtemplate[i].inlen; + void *input_vec; + + input_vec = kmalloc(ilen, GFP_KERNEL); + if (!input_vec) { + ret = -ENOMEM; + goto out; + } + memcpy(input_vec, dtemplate[i].input, ilen); memset(output, 0, dlen); 
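/*
 * Note on the test_acomp() changes around this point: the compression
 * and decompression test vectors are const data built into the kernel
 * image, and sg_init_one() wants a buffer with a valid linear-map
 * address, which such const data is not guaranteed to have. That is
 * the likely motivation, though the hunks themselves do not say. The
 * patch therefore copies each vector into a kmalloc()ed input_vec and
 * pairs acomp_request_free(req) with kfree(input_vec) on every exit
 * path; the per-vector pattern, as it appears in the patch:
 *
 *	input_vec = kmalloc(ilen, GFP_KERNEL);
 *	if (!input_vec) {
 *		ret = -ENOMEM;
 *		goto out;
 *	}
 *	memcpy(input_vec, dtemplate[i].input, ilen);
 *	sg_init_one(&src, input_vec, ilen);
 *	...
 *	kfree(input_vec);
 *	acomp_request_free(req);
 */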
init_completion(&result.completion); - sg_init_one(&src, dtemplate[i].input, ilen); + sg_init_one(&src, input_vec, ilen); sg_init_one(&dst, output, dlen); req = acomp_request_alloc(tfm); if (!req) { pr_err("alg: acomp: request alloc failed for %s\n", algo); + kfree(input_vec); ret = -ENOMEM; goto out; } @@ -1532,6 +1554,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, if (ret) { pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n", i + 1, algo, -ret); + kfree(input_vec); acomp_request_free(req); goto out; } @@ -1540,6 +1563,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n", i + 1, algo, req->dlen); ret = -EINVAL; + kfree(input_vec); acomp_request_free(req); goto out; } @@ -1549,10 +1573,12 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate, i + 1, algo); hexdump(output, req->dlen); ret = -EINVAL; + kfree(input_vec); acomp_request_free(req); goto out; } + kfree(input_vec); acomp_request_free(req); } diff --git a/drivers/clk/axs10x/i2s_pll_clock.c b/drivers/clk/axs10x/i2s_pll_clock.c index 411310d29581..02d3bcd6216c 100644 --- a/drivers/clk/axs10x/i2s_pll_clock.c +++ b/drivers/clk/axs10x/i2s_pll_clock.c @@ -182,6 +182,7 @@ static int i2s_pll_clk_probe(struct platform_device *pdev) if (IS_ERR(pll_clk->base)) return PTR_ERR(pll_clk->base); + memset(&init, 0, sizeof(init)); clk_name = node->name; init.name = clk_name; init.ops = &i2s_pll_ops; diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c index 674785d968a3..e0e02a6e5900 100644 --- a/drivers/clk/clk-conf.c +++ b/drivers/clk/clk-conf.c @@ -40,8 +40,9 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier) return 0; pclk = of_clk_get_from_provider(&clkspec); if (IS_ERR(pclk)) { - pr_warn("clk: couldn't get parent clock %d for %s\n", - index, node->full_name); + if (PTR_ERR(pclk) != -EPROBE_DEFER) + pr_warn("clk: couldn't get parent clock %d for %s\n", + index, node->full_name); return PTR_ERR(pclk); } @@ -55,8 +56,9 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier) } clk = of_clk_get_from_provider(&clkspec); if (IS_ERR(clk)) { - pr_warn("clk: couldn't get assigned clock %d for %s\n", - index, node->full_name); + if (PTR_ERR(clk) != -EPROBE_DEFER) + pr_warn("clk: couldn't get assigned clock %d for %s\n", + index, node->full_name); rc = PTR_ERR(clk); goto err; } @@ -99,8 +101,9 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier) clk = of_clk_get_from_provider(&clkspec); if (IS_ERR(clk)) { - pr_warn("clk: couldn't get clock %d for %s\n", - index, node->full_name); + if (PTR_ERR(clk) != -EPROBE_DEFER) + pr_warn("clk: couldn't get clock %d for %s\n", + index, node->full_name); return PTR_ERR(clk); } diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c index 2a3e9d8e88b0..96d37175d0ad 100644 --- a/drivers/clk/clk-scpi.c +++ b/drivers/clk/clk-scpi.c @@ -290,13 +290,15 @@ static int scpi_clocks_probe(struct platform_device *pdev) of_node_put(child); return ret; } - } - /* Add the virtual cpufreq device */ - cpufreq_dev = platform_device_register_simple("scpi-cpufreq", - -1, NULL, 0); - if (IS_ERR(cpufreq_dev)) - pr_warn("unable to register cpufreq device"); + if (match->data != &scpi_dvfs_ops) + continue; + /* Add the virtual cpufreq device if it's DVFS clock provider */ + cpufreq_dev = platform_device_register_simple("scpi-cpufreq", + -1, NULL, 0); + if 
(IS_ERR(cpufreq_dev)) + pr_warn("unable to register cpufreq device"); + } return 0; } diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c index 5eb05dbf59b8..42f8534996de 100644 --- a/drivers/clk/clk-stm32f4.c +++ b/drivers/clk/clk-stm32f4.c @@ -28,6 +28,14 @@ #include <linux/regmap.h> #include <linux/mfd/syscon.h> +/* + * Include list of clocks wich are not derived from system clock (SYSCLOCK) + * The index of these clocks is the secondary index of DT bindings + * + */ +#include <dt-bindings/clock/stm32fx-clock.h> + +#define STM32F4_RCC_CR 0x00 #define STM32F4_RCC_PLLCFGR 0x04 #define STM32F4_RCC_CFGR 0x08 #define STM32F4_RCC_AHB1ENR 0x30 @@ -37,6 +45,14 @@ #define STM32F4_RCC_APB2ENR 0x44 #define STM32F4_RCC_BDCR 0x70 #define STM32F4_RCC_CSR 0x74 +#define STM32F4_RCC_PLLI2SCFGR 0x84 +#define STM32F4_RCC_PLLSAICFGR 0x88 +#define STM32F4_RCC_DCKCFGR 0x8c + +#define NONE -1 +#define NO_IDX NONE +#define NO_MUX NONE +#define NO_GATE NONE struct stm32f4_gate_data { u8 offset; @@ -195,7 +211,7 @@ static const struct stm32f4_gate_data stm32f469_gates[] __initconst = { { STM32F4_RCC_APB2ENR, 8, "adc1", "apb2_div" }, { STM32F4_RCC_APB2ENR, 9, "adc2", "apb2_div" }, { STM32F4_RCC_APB2ENR, 10, "adc3", "apb2_div" }, - { STM32F4_RCC_APB2ENR, 11, "sdio", "pll48" }, + { STM32F4_RCC_APB2ENR, 11, "sdio", "sdmux" }, { STM32F4_RCC_APB2ENR, 12, "spi1", "apb2_div" }, { STM32F4_RCC_APB2ENR, 13, "spi4", "apb2_div" }, { STM32F4_RCC_APB2ENR, 14, "syscfg", "apb2_div" }, @@ -208,8 +224,6 @@ static const struct stm32f4_gate_data stm32f469_gates[] __initconst = { { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" }, }; -enum { SYSTICK, FCLK, CLK_LSI, CLK_LSE, CLK_HSE_RTC, CLK_RTC, END_PRIMARY_CLK }; - /* * This bitmask tells us which bit offsets (0..192) on STM32F4[23]xxx * have gate bits associated with them. Its combined hweight is 71. @@ -324,23 +338,342 @@ static struct clk *clk_register_apb_mul(struct device *dev, const char *name, return clk; } -/* - * Decode current PLL state and (statically) model the state we inherit from - * the bootloader. 
- */ -static void stm32f4_rcc_register_pll(const char *hse_clk, const char *hsi_clk) +enum { + PLL, + PLL_I2S, + PLL_SAI, +}; + +static const struct clk_div_table pll_divp_table[] = { + { 0, 2 }, { 1, 4 }, { 2, 6 }, { 3, 8 }, { 0 } +}; + +static const struct clk_div_table pll_divr_table[] = { + { 2, 2 }, { 3, 3 }, { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 }, { 0 } +}; + +struct stm32f4_pll { + spinlock_t *lock; + struct clk_gate gate; + u8 offset; + u8 bit_rdy_idx; + u8 status; + u8 n_start; +}; + +#define to_stm32f4_pll(_gate) container_of(_gate, struct stm32f4_pll, gate) + +struct stm32f4_pll_post_div_data { + int idx; + u8 pll_num; + const char *name; + const char *parent; + u8 flag; + u8 offset; + u8 shift; + u8 width; + u8 flag_div; + const struct clk_div_table *div_table; +}; + +struct stm32f4_vco_data { + const char *vco_name; + u8 offset; + u8 bit_idx; + u8 bit_rdy_idx; +}; + +static const struct stm32f4_vco_data vco_data[] = { + { "vco", STM32F4_RCC_PLLCFGR, 24, 25 }, + { "vco-i2s", STM32F4_RCC_PLLI2SCFGR, 26, 27 }, + { "vco-sai", STM32F4_RCC_PLLSAICFGR, 28, 29 }, +}; + + +static const struct clk_div_table post_divr_table[] = { + { 0, 2 }, { 1, 4 }, { 2, 8 }, { 3, 16 }, { 0 } +}; + +#define MAX_POST_DIV 3 +static const struct stm32f4_pll_post_div_data post_div_data[MAX_POST_DIV] = { + { CLK_I2SQ_PDIV, PLL_I2S, "plli2s-q-div", "plli2s-q", + CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 0, 5, 0, NULL}, + + { CLK_SAIQ_PDIV, PLL_SAI, "pllsai-q-div", "pllsai-q", + CLK_SET_RATE_PARENT, STM32F4_RCC_DCKCFGR, 8, 5, 0, NULL }, + + { NO_IDX, PLL_SAI, "pllsai-r-div", "pllsai-r", CLK_SET_RATE_PARENT, + STM32F4_RCC_DCKCFGR, 16, 2, 0, post_divr_table }, +}; + +struct stm32f4_div_data { + u8 shift; + u8 width; + u8 flag_div; + const struct clk_div_table *div_table; +}; + +#define MAX_PLL_DIV 3 +static const struct stm32f4_div_data div_data[MAX_PLL_DIV] = { + { 16, 2, 0, pll_divp_table }, + { 24, 4, CLK_DIVIDER_ONE_BASED, NULL }, + { 28, 3, 0, pll_divr_table }, +}; + +struct stm32f4_pll_data { + u8 pll_num; + u8 n_start; + const char *div_name[MAX_PLL_DIV]; +}; + +static const struct stm32f4_pll_data stm32f429_pll[MAX_PLL_DIV] = { + { PLL, 192, { "pll", "pll48", NULL } }, + { PLL_I2S, 192, { NULL, "plli2s-q", "plli2s-r" } }, + { PLL_SAI, 49, { NULL, "pllsai-q", "pllsai-r" } }, +}; + +static const struct stm32f4_pll_data stm32f469_pll[MAX_PLL_DIV] = { + { PLL, 50, { "pll", "pll-q", NULL } }, + { PLL_I2S, 50, { "plli2s-p", "plli2s-q", "plli2s-r" } }, + { PLL_SAI, 50, { "pllsai-p", "pllsai-q", "pllsai-r" } }, +}; + +static int stm32f4_pll_is_enabled(struct clk_hw *hw) +{ + return clk_gate_ops.is_enabled(hw); +} + +static int stm32f4_pll_enable(struct clk_hw *hw) +{ + struct clk_gate *gate = to_clk_gate(hw); + struct stm32f4_pll *pll = to_stm32f4_pll(gate); + int ret = 0; + unsigned long reg; + + ret = clk_gate_ops.enable(hw); + + ret = readl_relaxed_poll_timeout_atomic(base + STM32F4_RCC_CR, reg, + reg & (1 << pll->bit_rdy_idx), 0, 10000); + + return ret; +} + +static void stm32f4_pll_disable(struct clk_hw *hw) +{ + clk_gate_ops.disable(hw); +} + +static unsigned long stm32f4_pll_recalc(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_gate *gate = to_clk_gate(hw); + struct stm32f4_pll *pll = to_stm32f4_pll(gate); + unsigned long n; + + n = (readl(base + pll->offset) >> 6) & 0x1ff; + + return parent_rate * n; +} + +static long stm32f4_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_gate *gate = to_clk_gate(hw); + struct stm32f4_pll *pll = 
to_stm32f4_pll(gate); + unsigned long n; + + n = rate / *prate; + + if (n < pll->n_start) + n = pll->n_start; + else if (n > 432) + n = 432; + + return *prate * n; +} + +static int stm32f4_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_gate *gate = to_clk_gate(hw); + struct stm32f4_pll *pll = to_stm32f4_pll(gate); + + unsigned long n; + unsigned long val; + int pll_state; + + pll_state = stm32f4_pll_is_enabled(hw); + + if (pll_state) + stm32f4_pll_disable(hw); + + n = rate / parent_rate; + + val = readl(base + pll->offset) & ~(0x1ff << 6); + + writel(val | ((n & 0x1ff) << 6), base + pll->offset); + + if (pll_state) + stm32f4_pll_enable(hw); + + return 0; +} + +static const struct clk_ops stm32f4_pll_gate_ops = { + .enable = stm32f4_pll_enable, + .disable = stm32f4_pll_disable, + .is_enabled = stm32f4_pll_is_enabled, + .recalc_rate = stm32f4_pll_recalc, + .round_rate = stm32f4_pll_round_rate, + .set_rate = stm32f4_pll_set_rate, +}; + +struct stm32f4_pll_div { + struct clk_divider div; + struct clk_hw *hw_pll; +}; + +#define to_pll_div_clk(_div) container_of(_div, struct stm32f4_pll_div, div) + +static unsigned long stm32f4_pll_div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return clk_divider_ops.recalc_rate(hw, parent_rate); +} + +static long stm32f4_pll_div_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + return clk_divider_ops.round_rate(hw, rate, prate); +} + +static int stm32f4_pll_div_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) { - unsigned long pllcfgr = readl(base + STM32F4_RCC_PLLCFGR); + int pll_state, ret; + + struct clk_divider *div = to_clk_divider(hw); + struct stm32f4_pll_div *pll_div = to_pll_div_clk(div); + + pll_state = stm32f4_pll_is_enabled(pll_div->hw_pll); + + if (pll_state) + stm32f4_pll_disable(pll_div->hw_pll); + + ret = clk_divider_ops.set_rate(hw, rate, parent_rate); - unsigned long pllm = pllcfgr & 0x3f; - unsigned long plln = (pllcfgr >> 6) & 0x1ff; - unsigned long pllp = BIT(((pllcfgr >> 16) & 3) + 1); - const char *pllsrc = pllcfgr & BIT(22) ? hse_clk : hsi_clk; - unsigned long pllq = (pllcfgr >> 24) & 0xf; + if (pll_state) + stm32f4_pll_enable(pll_div->hw_pll); - clk_register_fixed_factor(NULL, "vco", pllsrc, 0, plln, pllm); - clk_register_fixed_factor(NULL, "pll", "vco", 0, 1, pllp); - clk_register_fixed_factor(NULL, "pll48", "vco", 0, 1, pllq); + return ret; +} + +static const struct clk_ops stm32f4_pll_div_ops = { + .recalc_rate = stm32f4_pll_div_recalc_rate, + .round_rate = stm32f4_pll_div_round_rate, + .set_rate = stm32f4_pll_div_set_rate, +}; + +static struct clk_hw *clk_register_pll_div(const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags, const struct clk_div_table *table, + struct clk_hw *pll_hw, spinlock_t *lock) +{ + struct stm32f4_pll_div *pll_div; + struct clk_hw *hw; + struct clk_init_data init; + int ret; + + /* allocate the divider */ + pll_div = kzalloc(sizeof(*pll_div), GFP_KERNEL); + if (!pll_div) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &stm32f4_pll_div_ops; + init.flags = flags; + init.parent_names = (parent_name ? &parent_name : NULL); + init.num_parents = (parent_name ? 
1 : 0); + + /* struct clk_divider assignments */ + pll_div->div.reg = reg; + pll_div->div.shift = shift; + pll_div->div.width = width; + pll_div->div.flags = clk_divider_flags; + pll_div->div.lock = lock; + pll_div->div.table = table; + pll_div->div.hw.init = &init; + + pll_div->hw_pll = pll_hw; + + /* register the clock */ + hw = &pll_div->div.hw; + ret = clk_hw_register(NULL, hw); + if (ret) { + kfree(pll_div); + hw = ERR_PTR(ret); + } + + return hw; +} + +static struct clk_hw *stm32f4_rcc_register_pll(const char *pllsrc, + const struct stm32f4_pll_data *data, spinlock_t *lock) +{ + struct stm32f4_pll *pll; + struct clk_init_data init = { NULL }; + void __iomem *reg; + struct clk_hw *pll_hw; + int ret; + int i; + const struct stm32f4_vco_data *vco; + + + pll = kzalloc(sizeof(*pll), GFP_KERNEL); + if (!pll) + return ERR_PTR(-ENOMEM); + + vco = &vco_data[data->pll_num]; + + init.name = vco->vco_name; + init.ops = &stm32f4_pll_gate_ops; + init.flags = CLK_SET_RATE_GATE; + init.parent_names = &pllsrc; + init.num_parents = 1; + + pll->gate.lock = lock; + pll->gate.reg = base + STM32F4_RCC_CR; + pll->gate.bit_idx = vco->bit_idx; + pll->gate.hw.init = &init; + + pll->offset = vco->offset; + pll->n_start = data->n_start; + pll->bit_rdy_idx = vco->bit_rdy_idx; + pll->status = (readl(base + STM32F4_RCC_CR) >> vco->bit_idx) & 0x1; + + reg = base + pll->offset; + + pll_hw = &pll->gate.hw; + ret = clk_hw_register(NULL, pll_hw); + if (ret) { + kfree(pll); + return ERR_PTR(ret); + } + + for (i = 0; i < MAX_PLL_DIV; i++) + if (data->div_name[i]) + clk_register_pll_div(data->div_name[i], + vco->vco_name, + 0, + reg, + div_data[i].shift, + div_data[i].width, + div_data[i].flag_div, + div_data[i].div_table, + pll_hw, + lock); + return pll_hw; } /* @@ -611,22 +944,121 @@ static const char *rtc_parents[4] = { "no-clock", "lse", "lsi", "hse-rtc" }; +static const char *lcd_parent[1] = { "pllsai-r-div" }; + +static const char *i2s_parents[2] = { "plli2s-r", NULL }; + +static const char *sai_parents[4] = { "pllsai-q-div", "plli2s-q-div", NULL, + "no-clock" }; + +static const char *pll48_parents[2] = { "pll-q", "pllsai-p" }; + +static const char *sdmux_parents[2] = { "pll48", "sys" }; + +struct stm32_aux_clk { + int idx; + const char *name; + const char * const *parent_names; + int num_parents; + int offset_mux; + u8 shift; + u8 mask; + int offset_gate; + u8 bit_idx; + unsigned long flags; +}; + struct stm32f4_clk_data { const struct stm32f4_gate_data *gates_data; const u64 *gates_map; int gates_num; + const struct stm32f4_pll_data *pll_data; + const struct stm32_aux_clk *aux_clk; + int aux_clk_num; +}; + +static const struct stm32_aux_clk stm32f429_aux_clk[] = { + { + CLK_LCD, "lcd-tft", lcd_parent, ARRAY_SIZE(lcd_parent), + NO_MUX, 0, 0, + STM32F4_RCC_APB2ENR, 26, + CLK_SET_RATE_PARENT + }, + { + CLK_I2S, "i2s", i2s_parents, ARRAY_SIZE(i2s_parents), + STM32F4_RCC_CFGR, 23, 1, + NO_GATE, 0, + CLK_SET_RATE_PARENT + }, + { + CLK_SAI1, "sai1-a", sai_parents, ARRAY_SIZE(sai_parents), + STM32F4_RCC_DCKCFGR, 20, 3, + STM32F4_RCC_APB2ENR, 22, + CLK_SET_RATE_PARENT + }, + { + CLK_SAI2, "sai1-b", sai_parents, ARRAY_SIZE(sai_parents), + STM32F4_RCC_DCKCFGR, 22, 3, + STM32F4_RCC_APB2ENR, 22, + CLK_SET_RATE_PARENT + }, +}; + +static const struct stm32_aux_clk stm32f469_aux_clk[] = { + { + CLK_LCD, "lcd-tft", lcd_parent, ARRAY_SIZE(lcd_parent), + NO_MUX, 0, 0, + STM32F4_RCC_APB2ENR, 26, + CLK_SET_RATE_PARENT + }, + { + CLK_I2S, "i2s", i2s_parents, ARRAY_SIZE(i2s_parents), + STM32F4_RCC_CFGR, 23, 1, + NO_GATE, 0, + 
CLK_SET_RATE_PARENT + }, + { + CLK_SAI1, "sai1-a", sai_parents, ARRAY_SIZE(sai_parents), + STM32F4_RCC_DCKCFGR, 20, 3, + STM32F4_RCC_APB2ENR, 22, + CLK_SET_RATE_PARENT + }, + { + CLK_SAI2, "sai1-b", sai_parents, ARRAY_SIZE(sai_parents), + STM32F4_RCC_DCKCFGR, 22, 3, + STM32F4_RCC_APB2ENR, 22, + CLK_SET_RATE_PARENT + }, + { + NO_IDX, "pll48", pll48_parents, ARRAY_SIZE(pll48_parents), + STM32F4_RCC_DCKCFGR, 27, 1, + NO_GATE, 0, + 0 + }, + { + NO_IDX, "sdmux", sdmux_parents, ARRAY_SIZE(sdmux_parents), + STM32F4_RCC_DCKCFGR, 28, 1, + NO_GATE, 0, + 0 + }, }; static const struct stm32f4_clk_data stm32f429_clk_data = { .gates_data = stm32f429_gates, .gates_map = stm32f42xx_gate_map, .gates_num = ARRAY_SIZE(stm32f429_gates), + .pll_data = stm32f429_pll, + .aux_clk = stm32f429_aux_clk, + .aux_clk_num = ARRAY_SIZE(stm32f429_aux_clk), }; static const struct stm32f4_clk_data stm32f469_clk_data = { .gates_data = stm32f469_gates, .gates_map = stm32f46xx_gate_map, .gates_num = ARRAY_SIZE(stm32f469_gates), + .pll_data = stm32f469_pll, + .aux_clk = stm32f469_aux_clk, + .aux_clk_num = ARRAY_SIZE(stm32f469_aux_clk), }; static const struct of_device_id stm32f4_of_match[] = { @@ -641,12 +1073,75 @@ static const struct of_device_id stm32f4_of_match[] = { {} }; +static struct clk_hw *stm32_register_aux_clk(const char *name, + const char * const *parent_names, int num_parents, + int offset_mux, u8 shift, u8 mask, + int offset_gate, u8 bit_idx, + unsigned long flags, spinlock_t *lock) +{ + struct clk_hw *hw; + struct clk_gate *gate; + struct clk_mux *mux = NULL; + struct clk_hw *mux_hw = NULL, *gate_hw = NULL; + const struct clk_ops *mux_ops = NULL, *gate_ops = NULL; + + if (offset_gate != NO_GATE) { + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) { + hw = ERR_PTR(-EINVAL); + goto fail; + } + + gate->reg = base + offset_gate; + gate->bit_idx = bit_idx; + gate->flags = 0; + gate->lock = lock; + gate_hw = &gate->hw; + gate_ops = &clk_gate_ops; + } + + if (offset_mux != NO_MUX) { + mux = kzalloc(sizeof(*mux), GFP_KERNEL); + if (!mux) { + kfree(gate); + hw = ERR_PTR(-EINVAL); + goto fail; + } + + mux->reg = base + offset_mux; + mux->shift = shift; + mux->mask = mask; + mux->flags = 0; + mux_hw = &mux->hw; + mux_ops = &clk_mux_ops; + } + + if (mux_hw == NULL && gate_hw == NULL) + return ERR_PTR(-EINVAL); + + hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, + mux_hw, mux_ops, + NULL, NULL, + gate_hw, gate_ops, + flags); + + if (IS_ERR(hw)) { + kfree(gate); + kfree(mux); + } +fail: + return hw; +} + static void __init stm32f4_rcc_init(struct device_node *np) { - const char *hse_clk; + const char *hse_clk, *i2s_in_clk; int n; const struct of_device_id *match; const struct stm32f4_clk_data *data; + unsigned long pllcfgr; + const char *pllsrc; + unsigned long pllm; base = of_iomap(np, 0); if (!base) { @@ -675,9 +1170,49 @@ static void __init stm32f4_rcc_init(struct device_node *np) hse_clk = of_clk_get_parent_name(np, 0); + i2s_in_clk = of_clk_get_parent_name(np, 1); + + i2s_parents[1] = i2s_in_clk; + sai_parents[2] = i2s_in_clk; + clk_register_fixed_rate_with_accuracy(NULL, "hsi", NULL, 0, 16000000, 160000); - stm32f4_rcc_register_pll(hse_clk, "hsi"); + pllcfgr = readl(base + STM32F4_RCC_PLLCFGR); + pllsrc = pllcfgr & BIT(22) ? 
hse_clk : "hsi"; + pllm = pllcfgr & 0x3f; + + clk_hw_register_fixed_factor(NULL, "vco_in", pllsrc, + 0, 1, pllm); + + stm32f4_rcc_register_pll("vco_in", &data->pll_data[0], + &stm32f4_clk_lock); + + clks[PLL_VCO_I2S] = stm32f4_rcc_register_pll("vco_in", + &data->pll_data[1], &stm32f4_clk_lock); + + clks[PLL_VCO_SAI] = stm32f4_rcc_register_pll("vco_in", + &data->pll_data[2], &stm32f4_clk_lock); + + for (n = 0; n < MAX_POST_DIV; n++) { + const struct stm32f4_pll_post_div_data *post_div; + struct clk_hw *hw; + + post_div = &post_div_data[n]; + + hw = clk_register_pll_div(post_div->name, + post_div->parent, + post_div->flag, + base + post_div->offset, + post_div->shift, + post_div->width, + post_div->flag_div, + post_div->div_table, + clks[post_div->pll_num], + &stm32f4_clk_lock); + + if (post_div->idx != NO_IDX) + clks[post_div->idx] = hw; + } sys_parents[1] = hse_clk; clk_register_mux_table( @@ -762,11 +1297,33 @@ static void __init stm32f4_rcc_init(struct device_node *np) goto fail; } + for (n = 0; n < data->aux_clk_num; n++) { + const struct stm32_aux_clk *aux_clk; + struct clk_hw *hw; + + aux_clk = &data->aux_clk[n]; + + hw = stm32_register_aux_clk(aux_clk->name, + aux_clk->parent_names, aux_clk->num_parents, + aux_clk->offset_mux, aux_clk->shift, + aux_clk->mask, aux_clk->offset_gate, + aux_clk->bit_idx, aux_clk->flags, + &stm32f4_clk_lock); + + if (IS_ERR(hw)) { + pr_warn("Unable to register %s clk\n", aux_clk->name); + continue; + } + + if (aux_clk->idx != NO_IDX) + clks[aux_clk->idx] = hw; + } + of_clk_add_hw_provider(np, stm32f4_rcc_lookup_clk, NULL); return; fail: kfree(clks); iounmap(base); } -CLK_OF_DECLARE(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init); -CLK_OF_DECLARE(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init); +CLK_OF_DECLARE_DRIVER(stm32f42xx_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init); +CLK_OF_DECLARE_DRIVER(stm32f46xx_rcc, "st,stm32f469-rcc", stm32f4_rcc_init); diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c index 0621fbfb4beb..a47960aacfa5 100644 --- a/drivers/clk/clk-wm831x.c +++ b/drivers/clk/clk-wm831x.c @@ -97,7 +97,8 @@ static int wm831x_fll_prepare(struct clk_hw *hw) if (ret != 0) dev_crit(wm831x->dev, "Failed to enable FLL: %d\n", ret); - usleep_range(2000, 2000); + /* wait 2-3 ms for new frequency taking effect */ + usleep_range(2000, 3000); return ret; } diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c index ed3a2df536ea..f1099167ba31 100644 --- a/drivers/clk/imx/clk-pllv3.c +++ b/drivers/clk/imx/clk-pllv3.c @@ -21,6 +21,9 @@ #define PLL_NUM_OFFSET 0x10 #define PLL_DENOM_OFFSET 0x20 +#define PLL_VF610_NUM_OFFSET 0x20 +#define PLL_VF610_DENOM_OFFSET 0x30 + #define BM_PLL_POWER (0x1 << 12) #define BM_PLL_LOCK (0x1 << 31) #define IMX7_ENET_PLL_POWER (0x1 << 5) @@ -300,6 +303,99 @@ static const struct clk_ops clk_pllv3_av_ops = { .set_rate = clk_pllv3_av_set_rate, }; +struct clk_pllv3_vf610_mf { + u32 mfi; /* integer part, can be 20 or 22 */ + u32 mfn; /* numerator, 30-bit value */ + u32 mfd; /* denominator, 30-bit value, must be less than mfn */ +}; + +static unsigned long clk_pllv3_vf610_mf_to_rate(unsigned long parent_rate, + struct clk_pllv3_vf610_mf mf) +{ + u64 temp64; + + temp64 = parent_rate; + temp64 *= mf.mfn; + do_div(temp64, mf.mfd); + + return (parent_rate * mf.mfi) + temp64; +} + +static struct clk_pllv3_vf610_mf clk_pllv3_vf610_rate_to_mf( + unsigned long parent_rate, unsigned long rate) +{ + struct clk_pllv3_vf610_mf mf; + u64 temp64; + + mf.mfi = (rate >= 22 * parent_rate) ? 
22 : 20; + mf.mfd = 0x3fffffff; /* use max supported value for best accuracy */ + + if (rate <= parent_rate * mf.mfi) + mf.mfn = 0; + else if (rate >= parent_rate * (mf.mfi + 1)) + mf.mfn = mf.mfd - 1; + else { + /* rate = parent_rate * (mfi + mfn/mfd) */ + temp64 = rate - parent_rate * mf.mfi; + temp64 *= mf.mfd; + do_div(temp64, parent_rate); + mf.mfn = temp64; + } + + return mf; +} + +static unsigned long clk_pllv3_vf610_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_pllv3 *pll = to_clk_pllv3(hw); + struct clk_pllv3_vf610_mf mf; + + mf.mfn = readl_relaxed(pll->base + PLL_VF610_NUM_OFFSET); + mf.mfd = readl_relaxed(pll->base + PLL_VF610_DENOM_OFFSET); + mf.mfi = (readl_relaxed(pll->base) & pll->div_mask) ? 22 : 20; + + return clk_pllv3_vf610_mf_to_rate(parent_rate, mf); +} + +static long clk_pllv3_vf610_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_pllv3_vf610_mf mf = clk_pllv3_vf610_rate_to_mf(*prate, rate); + + return clk_pllv3_vf610_mf_to_rate(*prate, mf); +} + +static int clk_pllv3_vf610_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_pllv3 *pll = to_clk_pllv3(hw); + struct clk_pllv3_vf610_mf mf = + clk_pllv3_vf610_rate_to_mf(parent_rate, rate); + u32 val; + + val = readl_relaxed(pll->base); + if (mf.mfi == 20) + val &= ~pll->div_mask; /* clear bit for mfi=20 */ + else + val |= pll->div_mask; /* set bit for mfi=22 */ + writel_relaxed(val, pll->base); + + writel_relaxed(mf.mfn, pll->base + PLL_VF610_NUM_OFFSET); + writel_relaxed(mf.mfd, pll->base + PLL_VF610_DENOM_OFFSET); + + return clk_pllv3_wait_lock(pll); +} + +static const struct clk_ops clk_pllv3_vf610_ops = { + .prepare = clk_pllv3_prepare, + .unprepare = clk_pllv3_unprepare, + .is_prepared = clk_pllv3_is_prepared, + .recalc_rate = clk_pllv3_vf610_recalc_rate, + .round_rate = clk_pllv3_vf610_round_rate, + .set_rate = clk_pllv3_vf610_set_rate, +}; + static unsigned long clk_pllv3_enet_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { @@ -334,6 +430,9 @@ struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name, case IMX_PLLV3_SYS: ops = &clk_pllv3_sys_ops; break; + case IMX_PLLV3_SYS_VF610: + ops = &clk_pllv3_vf610_ops; + break; case IMX_PLLV3_USB_VF610: pll->div_shift = 1; case IMX_PLLV3_USB: diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c index 0476353ab423..59b1863deb88 100644 --- a/drivers/clk/imx/clk-vf610.c +++ b/drivers/clk/imx/clk-vf610.c @@ -219,8 +219,8 @@ static void __init vf610_clocks_init(struct device_node *ccm_node) clk[VF610_CLK_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", PLL6_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); clk[VF610_CLK_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", PLL7_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); - clk[VF610_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll1", "pll1_bypass_src", PLL1_CTRL, 0x1); - clk[VF610_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", PLL2_CTRL, 0x1); + clk[VF610_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_SYS_VF610, "pll1", "pll1_bypass_src", PLL1_CTRL, 0x1); + clk[VF610_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_SYS_VF610, "pll2", "pll2_bypass_src", PLL2_CTRL, 0x1); clk[VF610_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB_VF610, "pll3", "pll3_bypass_src", PLL3_CTRL, 0x2); clk[VF610_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", PLL4_CTRL, 0x7f); clk[VF610_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll5", "pll5_bypass_src", 
PLL5_CTRL, 0x3); diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index 4afad3b96a61..e1f5e425db73 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -34,6 +34,7 @@ enum imx_pllv3_type { IMX_PLLV3_AV, IMX_PLLV3_ENET, IMX_PLLV3_ENET_IMX7, + IMX_PLLV3_SYS_VF610, }; struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name, diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index 07e2cc6ed781..3487c267833e 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -462,8 +462,79 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8916 = { .num_clks = ARRAY_SIZE(msm8916_clks), }; +/* msm8974 */ +DEFINE_CLK_SMD_RPM(msm8974, pnoc_clk, pnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8974, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8974, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2); +DEFINE_CLK_SMD_RPM(msm8974, mmssnoc_ahb_clk, mmssnoc_ahb_a_clk, QCOM_SMD_RPM_BUS_CLK, 3); +DEFINE_CLK_SMD_RPM(msm8974, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8974, gfx3d_clk_src, gfx3d_a_clk_src, QCOM_SMD_RPM_MEM_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8974, ocmemgx_clk, ocmemgx_a_clk, QCOM_SMD_RPM_MEM_CLK, 2); +DEFINE_CLK_SMD_RPM_QDSS(msm8974, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_d0, cxo_d0_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_d1, cxo_d1_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a0, cxo_a0_a, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a1, cxo_a1_a, 5); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a2, cxo_a2_a, 6); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, diff_clk, diff_a_clk, 7); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, div_clk1, div_a_clk1, 11); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, div_clk2, div_a_clk2, 12); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_d0_pin, cxo_d0_a_pin, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_d1_pin, cxo_d1_a_pin, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a0_pin, cxo_a0_a_pin, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a1_pin, cxo_a1_a_pin, 5); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a2_pin, cxo_a2_a_pin, 6); + +static struct clk_smd_rpm *msm8974_clks[] = { + [RPM_SMD_PNOC_CLK] = &msm8974_pnoc_clk, + [RPM_SMD_PNOC_A_CLK] = &msm8974_pnoc_a_clk, + [RPM_SMD_SNOC_CLK] = &msm8974_snoc_clk, + [RPM_SMD_SNOC_A_CLK] = &msm8974_snoc_a_clk, + [RPM_SMD_CNOC_CLK] = &msm8974_cnoc_clk, + [RPM_SMD_CNOC_A_CLK] = &msm8974_cnoc_a_clk, + [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8974_mmssnoc_ahb_clk, + [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8974_mmssnoc_ahb_a_clk, + [RPM_SMD_BIMC_CLK] = &msm8974_bimc_clk, + [RPM_SMD_BIMC_A_CLK] = &msm8974_bimc_a_clk, + [RPM_SMD_OCMEMGX_CLK] = &msm8974_ocmemgx_clk, + [RPM_SMD_OCMEMGX_A_CLK] = &msm8974_ocmemgx_a_clk, + [RPM_SMD_QDSS_CLK] = &msm8974_qdss_clk, + [RPM_SMD_QDSS_A_CLK] = &msm8974_qdss_a_clk, + [RPM_SMD_CXO_D0] = &msm8974_cxo_d0, + [RPM_SMD_CXO_D0_A] = &msm8974_cxo_d0_a, + [RPM_SMD_CXO_D1] = &msm8974_cxo_d1, + [RPM_SMD_CXO_D1_A] = &msm8974_cxo_d1_a, + [RPM_SMD_CXO_A0] = &msm8974_cxo_a0, + [RPM_SMD_CXO_A0_A] = &msm8974_cxo_a0_a, + [RPM_SMD_CXO_A1] = &msm8974_cxo_a1, + [RPM_SMD_CXO_A1_A] = &msm8974_cxo_a1_a, + [RPM_SMD_CXO_A2] = &msm8974_cxo_a2, + [RPM_SMD_CXO_A2_A] = &msm8974_cxo_a2_a, + [RPM_SMD_DIFF_CLK] = &msm8974_diff_clk, + [RPM_SMD_DIFF_A_CLK] = &msm8974_diff_a_clk, + [RPM_SMD_DIV_CLK1] = &msm8974_div_clk1, + [RPM_SMD_DIV_A_CLK1] = &msm8974_div_a_clk1, + [RPM_SMD_DIV_CLK2] = &msm8974_div_clk2, + 
[RPM_SMD_DIV_A_CLK2] = &msm8974_div_a_clk2, + [RPM_SMD_CXO_D0_PIN] = &msm8974_cxo_d0_pin, + [RPM_SMD_CXO_D0_A_PIN] = &msm8974_cxo_d0_a_pin, + [RPM_SMD_CXO_D1_PIN] = &msm8974_cxo_d1_pin, + [RPM_SMD_CXO_D1_A_PIN] = &msm8974_cxo_d1_a_pin, + [RPM_SMD_CXO_A0_PIN] = &msm8974_cxo_a0_pin, + [RPM_SMD_CXO_A0_A_PIN] = &msm8974_cxo_a0_a_pin, + [RPM_SMD_CXO_A1_PIN] = &msm8974_cxo_a1_pin, + [RPM_SMD_CXO_A1_A_PIN] = &msm8974_cxo_a1_a_pin, + [RPM_SMD_CXO_A2_PIN] = &msm8974_cxo_a2_pin, + [RPM_SMD_CXO_A2_A_PIN] = &msm8974_cxo_a2_a_pin, +}; + +static const struct rpm_smd_clk_desc rpm_clk_msm8974 = { + .clks = msm8974_clks, + .num_clks = ARRAY_SIZE(msm8974_clks), +}; static const struct of_device_id rpm_smd_clk_match_table[] = { { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 }, + { .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 }, { } }; MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table); diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c index 33d09138f5e5..46cb256b4aa2 100644 --- a/drivers/clk/qcom/gcc-ipq4019.c +++ b/drivers/clk/qcom/gcc-ipq4019.c @@ -20,6 +20,9 @@ #include <linux/clk-provider.h> #include <linux/regmap.h> #include <linux/reset-controller.h> +#include <linux/math64.h> +#include <linux/delay.h> +#include <linux/clk.h> #include <dt-bindings/clock/qcom,gcc-ipq4019.h> @@ -28,6 +31,13 @@ #include "clk-rcg.h" #include "clk-branch.h" #include "reset.h" +#include "clk-regmap-divider.h" + +#define to_clk_regmap_div(_hw) container_of(to_clk_regmap(_hw),\ + struct clk_regmap_div, clkr) + +#define to_clk_fepll(_hw) container_of(to_clk_regmap_div(_hw),\ + struct clk_fepll, cdiv) enum { P_XO, @@ -40,6 +50,41 @@ enum { P_DDRPLLAPSS, }; +/* + * struct clk_fepll_vco - vco feedback divider corresponds for FEPLL clocks + * @fdbkdiv_shift: lowest bit for FDBKDIV + * @fdbkdiv_width: number of bits in FDBKDIV + * @refclkdiv_shift: lowest bit for REFCLKDIV + * @refclkdiv_width: number of bits in REFCLKDIV + * @reg: PLL_DIV register address + */ +struct clk_fepll_vco { + u32 fdbkdiv_shift; + u32 fdbkdiv_width; + u32 refclkdiv_shift; + u32 refclkdiv_width; + u32 reg; +}; + +/* + * struct clk_fepll - clk divider corresponds to FEPLL clocks + * @fixed_div: fixed divider value if divider is fixed + * @parent_map: map from software's parent index to hardware's src_sel field + * @cdiv: divider values for PLL_DIV + * @pll_vco: vco feedback divider + * @div_table: mapping for actual divider value to register divider value + * in case of non fixed divider + * @freq_tbl: frequency table + */ +struct clk_fepll { + u32 fixed_div; + const u8 *parent_map; + struct clk_regmap_div cdiv; + const struct clk_fepll_vco *pll_vco; + const struct clk_div_table *div_table; + const struct freq_tbl *freq_tbl; +}; + static struct parent_map gcc_xo_200_500_map[] = { { P_XO, 0 }, { P_FEPLL200, 1 }, @@ -80,7 +125,7 @@ static struct parent_map gcc_xo_sdcc1_500_map[] = { static const char * const gcc_xo_sdcc1_500[] = { "xo", - "ddrpll", + "ddrpllsdcc", "fepll500", }; @@ -121,6 +166,12 @@ static struct parent_map gcc_xo_ddr_500_200_map[] = { { P_DDRPLLAPSS, 1 }, }; +/* + * Contains index for safe clock during APSS freq change. + * fepll500 is being used as safe clock so initialize it + * with its index in parents list gcc_xo_ddr_500_200. 
+ */ +static const int gcc_ipq4019_cpu_safe_parent = 2; static const char * const gcc_xo_ddr_500_200[] = { "xo", "fepll200", @@ -505,7 +556,7 @@ static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk[] = { F(25000000, P_FEPLL500, 1, 1, 20), F(50000000, P_FEPLL500, 1, 1, 10), F(100000000, P_FEPLL500, 1, 1, 5), - F(193000000, P_DDRPLL, 1, 0, 0), + F(192000000, P_DDRPLL, 1, 0, 0), { } }; @@ -524,10 +575,20 @@ static struct clk_rcg2 sdcc1_apps_clk_src = { }; static const struct freq_tbl ftbl_gcc_apps_clk[] = { - F(48000000, P_XO, 1, 0, 0), + F(48000000, P_XO, 1, 0, 0), F(200000000, P_FEPLL200, 1, 0, 0), + F(384000000, P_DDRPLLAPSS, 1, 0, 0), + F(413000000, P_DDRPLLAPSS, 1, 0, 0), + F(448000000, P_DDRPLLAPSS, 1, 0, 0), + F(488000000, P_DDRPLLAPSS, 1, 0, 0), F(500000000, P_FEPLL500, 1, 0, 0), - F(626000000, P_DDRPLLAPSS, 1, 0, 0), + F(512000000, P_DDRPLLAPSS, 1, 0, 0), + F(537000000, P_DDRPLLAPSS, 1, 0, 0), + F(565000000, P_DDRPLLAPSS, 1, 0, 0), + F(597000000, P_DDRPLLAPSS, 1, 0, 0), + F(632000000, P_DDRPLLAPSS, 1, 0, 0), + F(672000000, P_DDRPLLAPSS, 1, 0, 0), + F(716000000, P_DDRPLLAPSS, 1, 0, 0), { } }; @@ -541,6 +602,7 @@ static struct clk_rcg2 apps_clk_src = { .parent_names = gcc_xo_ddr_500_200, .num_parents = 4, .ops = &clk_rcg2_ops, + .flags = CLK_SET_RATE_PARENT, }, }; @@ -1154,6 +1216,364 @@ static struct clk_branch gcc_wcss5g_rtc_clk = { }, }; +/* Calculates the VCO rate for FEPLL. */ +static u64 clk_fepll_vco_calc_rate(struct clk_fepll *pll_div, + unsigned long parent_rate) +{ + const struct clk_fepll_vco *pll_vco = pll_div->pll_vco; + u32 fdbkdiv, refclkdiv, cdiv; + u64 vco; + + regmap_read(pll_div->cdiv.clkr.regmap, pll_vco->reg, &cdiv); + refclkdiv = (cdiv >> pll_vco->refclkdiv_shift) & + (BIT(pll_vco->refclkdiv_width) - 1); + fdbkdiv = (cdiv >> pll_vco->fdbkdiv_shift) & + (BIT(pll_vco->fdbkdiv_width) - 1); + + vco = parent_rate / refclkdiv; + vco *= 2; + vco *= fdbkdiv; + + return vco; +} + +static const struct clk_fepll_vco gcc_apss_ddrpll_vco = { + .fdbkdiv_shift = 16, + .fdbkdiv_width = 8, + .refclkdiv_shift = 24, + .refclkdiv_width = 5, + .reg = 0x2e020, +}; + +static const struct clk_fepll_vco gcc_fepll_vco = { + .fdbkdiv_shift = 16, + .fdbkdiv_width = 8, + .refclkdiv_shift = 24, + .refclkdiv_width = 5, + .reg = 0x2f020, +}; + +/* + * Round rate function for APSS CPU PLL Clock divider. + * It looks up the frequency table and returns the next higher frequency + * supported in hardware. + */ +static long clk_cpu_div_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *p_rate) +{ + struct clk_fepll *pll = to_clk_fepll(hw); + struct clk_hw *p_hw; + const struct freq_tbl *f; + + f = qcom_find_freq(pll->freq_tbl, rate); + if (!f) + return -EINVAL; + + p_hw = clk_hw_get_parent_by_index(hw, f->src); + *p_rate = clk_hw_get_rate(p_hw); + + return f->freq; +}; + +/* + * Clock set rate function for APSS CPU PLL Clock divider. + * It looks up the frequency table and updates the PLL divider to corresponding + * divider value. 
+ */ +static int clk_cpu_div_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_fepll *pll = to_clk_fepll(hw); + const struct freq_tbl *f; + u32 mask; + int ret; + + f = qcom_find_freq(pll->freq_tbl, rate); + if (!f) + return -EINVAL; + + mask = (BIT(pll->cdiv.width) - 1) << pll->cdiv.shift; + ret = regmap_update_bits(pll->cdiv.clkr.regmap, + pll->cdiv.reg, mask, + f->pre_div << pll->cdiv.shift); + /* + * There is no status bit which can be checked for successful CPU + * divider update operation so using delay for the same. + */ + udelay(1); + + return 0; +}; + +/* + * Clock frequency calculation function for APSS CPU PLL Clock divider. + * This clock divider is nonlinear so this function calculates the actual + * divider and returns the output frequency by dividing VCO Frequency + * with this actual divider value. + */ +static unsigned long +clk_cpu_div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_fepll *pll = to_clk_fepll(hw); + u32 cdiv, pre_div; + u64 rate; + + regmap_read(pll->cdiv.clkr.regmap, pll->cdiv.reg, &cdiv); + cdiv = (cdiv >> pll->cdiv.shift) & (BIT(pll->cdiv.width) - 1); + + /* + * Some dividers have value in 0.5 fraction so multiply both VCO + * frequency(parent_rate) and pre_div with 2 to make integer + * calculation. + */ + if (cdiv > 10) + pre_div = (cdiv + 1) * 2; + else + pre_div = cdiv + 12; + + rate = clk_fepll_vco_calc_rate(pll, parent_rate) * 2; + do_div(rate, pre_div); + + return rate; +}; + +static const struct clk_ops clk_regmap_cpu_div_ops = { + .round_rate = clk_cpu_div_round_rate, + .set_rate = clk_cpu_div_set_rate, + .recalc_rate = clk_cpu_div_recalc_rate, +}; + +static const struct freq_tbl ftbl_apss_ddr_pll[] = { + { 384000000, P_XO, 0xd, 0, 0 }, + { 413000000, P_XO, 0xc, 0, 0 }, + { 448000000, P_XO, 0xb, 0, 0 }, + { 488000000, P_XO, 0xa, 0, 0 }, + { 512000000, P_XO, 0x9, 0, 0 }, + { 537000000, P_XO, 0x8, 0, 0 }, + { 565000000, P_XO, 0x7, 0, 0 }, + { 597000000, P_XO, 0x6, 0, 0 }, + { 632000000, P_XO, 0x5, 0, 0 }, + { 672000000, P_XO, 0x4, 0, 0 }, + { 716000000, P_XO, 0x3, 0, 0 }, + { 768000000, P_XO, 0x2, 0, 0 }, + { 823000000, P_XO, 0x1, 0, 0 }, + { 896000000, P_XO, 0x0, 0, 0 }, + { } +}; + +static struct clk_fepll gcc_apss_cpu_plldiv_clk = { + .cdiv.reg = 0x2e020, + .cdiv.shift = 4, + .cdiv.width = 4, + .cdiv.clkr = { + .enable_reg = 0x2e000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "ddrpllapss", + .parent_names = (const char *[]){ + "xo", + }, + .num_parents = 1, + .ops = &clk_regmap_cpu_div_ops, + }, + }, + .freq_tbl = ftbl_apss_ddr_pll, + .pll_vco = &gcc_apss_ddrpll_vco, +}; + +/* Calculates the rate for PLL divider. + * If the divider value is not fixed then it gets the actual divider value + * from divider table. Then, it calculate the clock rate by dividing the + * parent rate with actual divider value. 
+ */ +static unsigned long +clk_regmap_clk_div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_fepll *pll = to_clk_fepll(hw); + u32 cdiv, pre_div = 1; + u64 rate; + const struct clk_div_table *clkt; + + if (pll->fixed_div) { + pre_div = pll->fixed_div; + } else { + regmap_read(pll->cdiv.clkr.regmap, pll->cdiv.reg, &cdiv); + cdiv = (cdiv >> pll->cdiv.shift) & (BIT(pll->cdiv.width) - 1); + + for (clkt = pll->div_table; clkt->div; clkt++) { + if (clkt->val == cdiv) + pre_div = clkt->div; + } + } + + rate = clk_fepll_vco_calc_rate(pll, parent_rate); + do_div(rate, pre_div); + + return rate; +}; + +static const struct clk_ops clk_fepll_div_ops = { + .recalc_rate = clk_regmap_clk_div_recalc_rate, +}; + +static struct clk_fepll gcc_apss_sdcc_clk = { + .fixed_div = 28, + .cdiv.clkr = { + .hw.init = &(struct clk_init_data){ + .name = "ddrpllsdcc", + .parent_names = (const char *[]){ + "xo", + }, + .num_parents = 1, + .ops = &clk_fepll_div_ops, + }, + }, + .pll_vco = &gcc_apss_ddrpll_vco, +}; + +static struct clk_fepll gcc_fepll125_clk = { + .fixed_div = 32, + .cdiv.clkr = { + .hw.init = &(struct clk_init_data){ + .name = "fepll125", + .parent_names = (const char *[]){ + "xo", + }, + .num_parents = 1, + .ops = &clk_fepll_div_ops, + }, + }, + .pll_vco = &gcc_fepll_vco, +}; + +static struct clk_fepll gcc_fepll125dly_clk = { + .fixed_div = 32, + .cdiv.clkr = { + .hw.init = &(struct clk_init_data){ + .name = "fepll125dly", + .parent_names = (const char *[]){ + "xo", + }, + .num_parents = 1, + .ops = &clk_fepll_div_ops, + }, + }, + .pll_vco = &gcc_fepll_vco, +}; + +static struct clk_fepll gcc_fepll200_clk = { + .fixed_div = 20, + .cdiv.clkr = { + .hw.init = &(struct clk_init_data){ + .name = "fepll200", + .parent_names = (const char *[]){ + "xo", + }, + .num_parents = 1, + .ops = &clk_fepll_div_ops, + }, + }, + .pll_vco = &gcc_fepll_vco, +}; + +static struct clk_fepll gcc_fepll500_clk = { + .fixed_div = 8, + .cdiv.clkr = { + .hw.init = &(struct clk_init_data){ + .name = "fepll500", + .parent_names = (const char *[]){ + "xo", + }, + .num_parents = 1, + .ops = &clk_fepll_div_ops, + }, + }, + .pll_vco = &gcc_fepll_vco, +}; + +static const struct clk_div_table fepllwcss_clk_div_table[] = { + { 0, 15 }, + { 1, 16 }, + { 2, 18 }, + { 3, 20 }, + { }, +}; + +static struct clk_fepll gcc_fepllwcss2g_clk = { + .cdiv.reg = 0x2f020, + .cdiv.shift = 8, + .cdiv.width = 2, + .cdiv.clkr = { + .hw.init = &(struct clk_init_data){ + .name = "fepllwcss2g", + .parent_names = (const char *[]){ + "xo", + }, + .num_parents = 1, + .ops = &clk_fepll_div_ops, + }, + }, + .div_table = fepllwcss_clk_div_table, + .pll_vco = &gcc_fepll_vco, +}; + +static struct clk_fepll gcc_fepllwcss5g_clk = { + .cdiv.reg = 0x2f020, + .cdiv.shift = 12, + .cdiv.width = 2, + .cdiv.clkr = { + .hw.init = &(struct clk_init_data){ + .name = "fepllwcss5g", + .parent_names = (const char *[]){ + "xo", + }, + .num_parents = 1, + .ops = &clk_fepll_div_ops, + }, + }, + .div_table = fepllwcss_clk_div_table, + .pll_vco = &gcc_fepll_vco, +}; + +static const struct freq_tbl ftbl_gcc_pcnoc_ahb_clk[] = { + F(48000000, P_XO, 1, 0, 0), + F(100000000, P_FEPLL200, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcnoc_ahb_clk_src = { + .cmd_rcgr = 0x21024, + .hid_width = 5, + .parent_map = gcc_xo_200_500_map, + .freq_tbl = ftbl_gcc_pcnoc_ahb_clk, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcnoc_ahb_clk_src", + .parent_names = gcc_xo_200_500, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch 
pcnoc_clk_src = { + .halt_reg = 0x21030, + .clkr = { + .enable_reg = 0x21030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "pcnoc_clk_src", + .parent_names = (const char *[]){ + "gcc_pcnoc_ahb_clk_src", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + .flags = CLK_SET_RATE_PARENT | + CLK_IS_CRITICAL, + }, + }, +}; + static struct clk_regmap *gcc_ipq4019_clocks[] = { [AUDIO_CLK_SRC] = &audio_clk_src.clkr, [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr, @@ -1214,6 +1634,16 @@ static struct clk_regmap *gcc_ipq4019_clocks[] = { [GCC_WCSS5G_CLK] = &gcc_wcss5g_clk.clkr, [GCC_WCSS5G_REF_CLK] = &gcc_wcss5g_ref_clk.clkr, [GCC_WCSS5G_RTC_CLK] = &gcc_wcss5g_rtc_clk.clkr, + [GCC_SDCC_PLLDIV_CLK] = &gcc_apss_sdcc_clk.cdiv.clkr, + [GCC_FEPLL125_CLK] = &gcc_fepll125_clk.cdiv.clkr, + [GCC_FEPLL125DLY_CLK] = &gcc_fepll125dly_clk.cdiv.clkr, + [GCC_FEPLL200_CLK] = &gcc_fepll200_clk.cdiv.clkr, + [GCC_FEPLL500_CLK] = &gcc_fepll500_clk.cdiv.clkr, + [GCC_FEPLL_WCSS2G_CLK] = &gcc_fepllwcss2g_clk.cdiv.clkr, + [GCC_FEPLL_WCSS5G_CLK] = &gcc_fepllwcss5g_clk.cdiv.clkr, + [GCC_APSS_CPU_PLLDIV_CLK] = &gcc_apss_cpu_plldiv_clk.cdiv.clkr, + [GCC_PCNOC_AHB_CLK_SRC] = &gcc_pcnoc_ahb_clk_src.clkr, + [GCC_PCNOC_AHB_CLK] = &pcnoc_clk_src.clkr, }; static const struct qcom_reset_map gcc_ipq4019_resets[] = { @@ -1294,7 +1724,7 @@ static const struct regmap_config gcc_ipq4019_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, - .max_register = 0x2dfff, + .max_register = 0x2ffff, .fast_io = true, }; @@ -1312,23 +1742,44 @@ static const struct of_device_id gcc_ipq4019_match_table[] = { }; MODULE_DEVICE_TABLE(of, gcc_ipq4019_match_table); +static int +gcc_ipq4019_cpu_clk_notifier_fn(struct notifier_block *nb, + unsigned long action, void *data) +{ + int err = 0; + + if (action == PRE_RATE_CHANGE) + err = clk_rcg2_ops.set_parent(&apps_clk_src.clkr.hw, + gcc_ipq4019_cpu_safe_parent); + + return notifier_from_errno(err); +} + +static struct notifier_block gcc_ipq4019_cpu_clk_notifier = { + .notifier_call = gcc_ipq4019_cpu_clk_notifier_fn, +}; + static int gcc_ipq4019_probe(struct platform_device *pdev) { - struct device *dev = &pdev->dev; + int err; - clk_register_fixed_rate(dev, "fepll125", "xo", 0, 200000000); - clk_register_fixed_rate(dev, "fepll125dly", "xo", 0, 200000000); - clk_register_fixed_rate(dev, "fepllwcss2g", "xo", 0, 200000000); - clk_register_fixed_rate(dev, "fepllwcss5g", "xo", 0, 200000000); - clk_register_fixed_rate(dev, "fepll200", "xo", 0, 200000000); - clk_register_fixed_rate(dev, "fepll500", "xo", 0, 200000000); - clk_register_fixed_rate(dev, "ddrpllapss", "xo", 0, 666000000); + err = qcom_cc_probe(pdev, &gcc_ipq4019_desc); + if (err) + return err; - return qcom_cc_probe(pdev, &gcc_ipq4019_desc); + return clk_notifier_register(apps_clk_src.clkr.hw.clk, + &gcc_ipq4019_cpu_clk_notifier); +} + +static int gcc_ipq4019_remove(struct platform_device *pdev) +{ + return clk_notifier_unregister(apps_clk_src.clkr.hw.clk, + &gcc_ipq4019_cpu_clk_notifier); } static struct platform_driver gcc_ipq4019_driver = { .probe = gcc_ipq4019_probe, + .remove = gcc_ipq4019_remove, .driver = { .name = "qcom,gcc-ipq4019", .of_match_table = gcc_ipq4019_match_table, diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c index 4b1fc1730d29..8abc200d4fd3 100644 --- a/drivers/clk/qcom/gcc-msm8996.c +++ b/drivers/clk/qcom/gcc-msm8996.c @@ -3448,6 +3448,7 @@ static const struct qcom_reset_map gcc_msm8996_resets[] = { [GCC_MSMPU_BCR] = { 0x8d000 }, 
[GCC_MSS_Q6_BCR] = { 0x8e000 }, [GCC_QREFS_VBG_CAL_BCR] = { 0x88020 }, + [GCC_MSS_RESTART] = { 0x8f008 }, }; static const struct regmap_config gcc_msm8996_regmap_config = { diff --git a/drivers/clk/renesas/clk-mstp.c b/drivers/clk/renesas/clk-mstp.c index 9375777776d9..b533f99550e1 100644 --- a/drivers/clk/renesas/clk-mstp.c +++ b/drivers/clk/renesas/clk-mstp.c @@ -37,12 +37,14 @@ * @smstpcr: module stop control register * @mstpsr: module stop status register (optional) * @lock: protects writes to SMSTPCR + * @width_8bit: registers are 8-bit, not 32-bit */ struct mstp_clock_group { struct clk_onecell_data data; void __iomem *smstpcr; void __iomem *mstpsr; spinlock_t lock; + bool width_8bit; }; /** @@ -59,6 +61,18 @@ struct mstp_clock { #define to_mstp_clock(_hw) container_of(_hw, struct mstp_clock, hw) +static inline u32 cpg_mstp_read(struct mstp_clock_group *group, + u32 __iomem *reg) +{ + return group->width_8bit ? readb(reg) : clk_readl(reg); +} + +static inline void cpg_mstp_write(struct mstp_clock_group *group, u32 val, + u32 __iomem *reg) +{ + group->width_8bit ? writeb(val, reg) : clk_writel(val, reg); +} + static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable) { struct mstp_clock *clock = to_mstp_clock(hw); @@ -70,12 +84,12 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable) spin_lock_irqsave(&group->lock, flags); - value = clk_readl(group->smstpcr); + value = cpg_mstp_read(group, group->smstpcr); if (enable) value &= ~bitmask; else value |= bitmask; - clk_writel(value, group->smstpcr); + cpg_mstp_write(group, value, group->smstpcr); spin_unlock_irqrestore(&group->lock, flags); @@ -83,7 +97,7 @@ static int cpg_mstp_clock_endisable(struct clk_hw *hw, bool enable) return 0; for (i = 1000; i > 0; --i) { - if (!(clk_readl(group->mstpsr) & bitmask)) + if (!(cpg_mstp_read(group, group->mstpsr) & bitmask)) break; cpu_relax(); } @@ -114,9 +128,9 @@ static int cpg_mstp_clock_is_enabled(struct clk_hw *hw) u32 value; if (group->mstpsr) - value = clk_readl(group->mstpsr); + value = cpg_mstp_read(group, group->mstpsr); else - value = clk_readl(group->smstpcr); + value = cpg_mstp_read(group, group->smstpcr); return !(value & BIT(clock->bit_index)); } @@ -188,6 +202,9 @@ static void __init cpg_mstp_clocks_init(struct device_node *np) return; } + if (of_device_is_compatible(np, "renesas,r7s72100-mstp-clocks")) + group->width_8bit = true; + for (i = 0; i < MSTP_MAX_CLOCKS; ++i) clks[i] = ERR_PTR(-ENOENT); diff --git a/drivers/clk/zte/clk-zx296718.c b/drivers/clk/zte/clk-zx296718.c index 707d62956e9b..ad5d1dfb3682 100644 --- a/drivers/clk/zte/clk-zx296718.c +++ b/drivers/clk/zte/clk-zx296718.c @@ -610,9 +610,12 @@ static int __init top_clocks_init(struct device_node *np) } } - if (of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &top_hw_onecell_data)) - panic("could not register clk provider\n"); - pr_info("top clk init over, nr:%d\n", TOP_NR_CLKS); + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, + &top_hw_onecell_data); + if (ret) { + pr_err("failed to register top clk provider: %d\n", ret); + return ret; + } return 0; } @@ -776,9 +779,12 @@ static int __init lsp0_clocks_init(struct device_node *np) } } - if (of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &lsp0_hw_onecell_data)) - panic("could not register clk provider\n"); - pr_info("lsp0-clk init over:%d\n", LSP0_NR_CLKS); + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, + &lsp0_hw_onecell_data); + if (ret) { + pr_err("failed to register lsp0 clk provider: %d\n", ret); + return ret; + } 
return 0; } @@ -881,9 +887,138 @@ static int __init lsp1_clocks_init(struct device_node *np) } } - if (of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &lsp1_hw_onecell_data)) - panic("could not register clk provider\n"); - pr_info("lsp1-clk init over, nr:%d\n", LSP1_NR_CLKS); + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, + &lsp1_hw_onecell_data); + if (ret) { + pr_err("failed to register lsp1 clk provider: %d\n", ret); + return ret; + } + + return 0; +} + +PNAME(audio_wclk_common_p) = { + "audio_99m", + "audio_24m", +}; + +PNAME(audio_timer_p) = { + "audio_24m", + "audio_32k", +}; + +static struct zx_clk_mux audio_mux_clk[] = { + MUX(0, "i2s0_wclk_mux", audio_wclk_common_p, AUDIO_I2S0_CLK, 0, 1), + MUX(0, "i2s1_wclk_mux", audio_wclk_common_p, AUDIO_I2S1_CLK, 0, 1), + MUX(0, "i2s2_wclk_mux", audio_wclk_common_p, AUDIO_I2S2_CLK, 0, 1), + MUX(0, "i2s3_wclk_mux", audio_wclk_common_p, AUDIO_I2S3_CLK, 0, 1), + MUX(0, "i2c0_wclk_mux", audio_wclk_common_p, AUDIO_I2C0_CLK, 0, 1), + MUX(0, "spdif0_wclk_mux", audio_wclk_common_p, AUDIO_SPDIF0_CLK, 0, 1), + MUX(0, "spdif1_wclk_mux", audio_wclk_common_p, AUDIO_SPDIF1_CLK, 0, 1), + MUX(0, "timer_wclk_mux", audio_timer_p, AUDIO_TIMER_CLK, 0, 1), +}; + +static struct clk_zx_audio_divider audio_adiv_clk[] = { + AUDIO_DIV(0, "i2s0_wclk_div", "i2s0_wclk_mux", AUDIO_I2S0_DIV_CFG1), + AUDIO_DIV(0, "i2s1_wclk_div", "i2s1_wclk_mux", AUDIO_I2S1_DIV_CFG1), + AUDIO_DIV(0, "i2s2_wclk_div", "i2s2_wclk_mux", AUDIO_I2S2_DIV_CFG1), + AUDIO_DIV(0, "i2s3_wclk_div", "i2s3_wclk_mux", AUDIO_I2S3_DIV_CFG1), + AUDIO_DIV(0, "spdif0_wclk_div", "spdif0_wclk_mux", AUDIO_SPDIF0_DIV_CFG1), + AUDIO_DIV(0, "spdif1_wclk_div", "spdif1_wclk_mux", AUDIO_SPDIF1_DIV_CFG1), +}; + +static struct zx_clk_div audio_div_clk[] = { + DIV_T(0, "tdm_wclk_div", "audio_16m384", AUDIO_TDM_CLK, 8, 4, 0, common_div_table), +}; + +static struct zx_clk_gate audio_gate_clk[] = { + GATE(AUDIO_I2S0_WCLK, "i2s0_wclk", "i2s0_wclk_div", AUDIO_I2S0_CLK, 9, CLK_SET_RATE_PARENT, 0), + GATE(AUDIO_I2S1_WCLK, "i2s1_wclk", "i2s1_wclk_div", AUDIO_I2S1_CLK, 9, CLK_SET_RATE_PARENT, 0), + GATE(AUDIO_I2S2_WCLK, "i2s2_wclk", "i2s2_wclk_div", AUDIO_I2S2_CLK, 9, CLK_SET_RATE_PARENT, 0), + GATE(AUDIO_I2S3_WCLK, "i2s3_wclk", "i2s3_wclk_div", AUDIO_I2S3_CLK, 9, CLK_SET_RATE_PARENT, 0), + GATE(AUDIO_I2C0_WCLK, "i2c0_wclk", "i2c0_wclk_mux", AUDIO_I2C0_CLK, 9, CLK_SET_RATE_PARENT, 0), + GATE(AUDIO_SPDIF0_WCLK, "spdif0_wclk", "spdif0_wclk_div", AUDIO_SPDIF0_CLK, 9, CLK_SET_RATE_PARENT, 0), + GATE(AUDIO_SPDIF1_WCLK, "spdif1_wclk", "spdif1_wclk_div", AUDIO_SPDIF1_CLK, 9, CLK_SET_RATE_PARENT, 0), + GATE(AUDIO_TDM_WCLK, "tdm_wclk", "tdm_wclk_div", AUDIO_TDM_CLK, 17, CLK_SET_RATE_PARENT, 0), + GATE(AUDIO_TS_PCLK, "tempsensor_pclk", "clk49m5", AUDIO_TS_CLK, 1, 0, 0), +}; + +static struct clk_hw_onecell_data audio_hw_onecell_data = { + .num = AUDIO_NR_CLKS, + .hws = { + [AUDIO_NR_CLKS - 1] = NULL, + }, +}; + +static int __init audio_clocks_init(struct device_node *np) +{ + void __iomem *reg_base; + int i, ret; + + reg_base = of_iomap(np, 0); + if (!reg_base) { + pr_err("%s: Unable to map audio clk base\n", __func__); + return -ENXIO; + } + + for (i = 0; i < ARRAY_SIZE(audio_mux_clk); i++) { + if (audio_mux_clk[i].id) + audio_hw_onecell_data.hws[audio_mux_clk[i].id] = + &audio_mux_clk[i].mux.hw; + + audio_mux_clk[i].mux.reg += (uintptr_t)reg_base; + ret = clk_hw_register(NULL, &audio_mux_clk[i].mux.hw); + if (ret) { + pr_warn("audio clk %s init error!\n", + audio_mux_clk[i].mux.hw.init->name); + } + } + + for (i = 0; i < 
ARRAY_SIZE(audio_adiv_clk); i++) { + if (audio_adiv_clk[i].id) + audio_hw_onecell_data.hws[audio_adiv_clk[i].id] = + &audio_adiv_clk[i].hw; + + audio_adiv_clk[i].reg_base += (uintptr_t)reg_base; + ret = clk_hw_register(NULL, &audio_adiv_clk[i].hw); + if (ret) { + pr_warn("audio clk %s init error!\n", + audio_adiv_clk[i].hw.init->name); + } + } + + for (i = 0; i < ARRAY_SIZE(audio_div_clk); i++) { + if (audio_div_clk[i].id) + audio_hw_onecell_data.hws[audio_div_clk[i].id] = + &audio_div_clk[i].div.hw; + + audio_div_clk[i].div.reg += (uintptr_t)reg_base; + ret = clk_hw_register(NULL, &audio_div_clk[i].div.hw); + if (ret) { + pr_warn("audio clk %s init error!\n", + audio_div_clk[i].div.hw.init->name); + } + } + + for (i = 0; i < ARRAY_SIZE(audio_gate_clk); i++) { + if (audio_gate_clk[i].id) + audio_hw_onecell_data.hws[audio_gate_clk[i].id] = + &audio_gate_clk[i].gate.hw; + + audio_gate_clk[i].gate.reg += (uintptr_t)reg_base; + ret = clk_hw_register(NULL, &audio_gate_clk[i].gate.hw); + if (ret) { + pr_warn("audio clk %s init error!\n", + audio_gate_clk[i].gate.hw.init->name); + } + } + + ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, + &audio_hw_onecell_data); + if (ret) { + pr_err("failed to register audio clk provider: %d\n", ret); + return ret; + } return 0; } @@ -892,6 +1027,7 @@ static const struct of_device_id zx_clkc_match_table[] = { { .compatible = "zte,zx296718-topcrm", .data = &top_clocks_init }, { .compatible = "zte,zx296718-lsp0crm", .data = &lsp0_clocks_init }, { .compatible = "zte,zx296718-lsp1crm", .data = &lsp1_clocks_init }, + { .compatible = "zte,zx296718-audiocrm", .data = &audio_clocks_init }, { } }; diff --git a/drivers/clk/zte/clk.c b/drivers/clk/zte/clk.c index c4c1251bc1e7..878d879b23ff 100644 --- a/drivers/clk/zte/clk.c +++ b/drivers/clk/zte/clk.c @@ -9,6 +9,7 @@ #include <linux/clk-provider.h> #include <linux/err.h> +#include <linux/gcd.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/slab.h> @@ -310,3 +311,129 @@ struct clk *clk_register_zx_audio(const char *name, return clk; } + +#define CLK_AUDIO_DIV_FRAC BIT(0) +#define CLK_AUDIO_DIV_INT BIT(1) +#define CLK_AUDIO_DIV_UNCOMMON BIT(1) + +#define CLK_AUDIO_DIV_FRAC_NSHIFT 16 +#define CLK_AUDIO_DIV_INT_FRAC_RE BIT(16) +#define CLK_AUDIO_DIV_INT_FRAC_MAX (0xffff) +#define CLK_AUDIO_DIV_INT_FRAC_MIN (0x2) +#define CLK_AUDIO_DIV_INT_INT_SHIFT 24 +#define CLK_AUDIO_DIV_INT_INT_WIDTH 4 + +struct zx_clk_audio_div_table { + unsigned long rate; + unsigned int int_reg; + unsigned int frac_reg; +}; + +#define to_clk_zx_audio_div(_hw) container_of(_hw, struct clk_zx_audio_divider, hw) + +static unsigned long audio_calc_rate(struct clk_zx_audio_divider *audio_div, + u32 reg_frac, u32 reg_int, + unsigned long parent_rate) +{ + unsigned long rate, m, n; + + m = reg_frac & 0xffff; + n = (reg_frac >> 16) & 0xffff; + + m = (reg_int & 0xffff) * n + m; + rate = (parent_rate * n) / m; + + return rate; +} + +static void audio_calc_reg(struct clk_zx_audio_divider *audio_div, + struct zx_clk_audio_div_table *div_table, + unsigned long rate, unsigned long parent_rate) +{ + unsigned int reg_int, reg_frac; + unsigned long m, n, div; + + reg_int = parent_rate / rate; + + if (reg_int > CLK_AUDIO_DIV_INT_FRAC_MAX) + reg_int = CLK_AUDIO_DIV_INT_FRAC_MAX; + else if (reg_int < CLK_AUDIO_DIV_INT_FRAC_MIN) + reg_int = 0; + m = parent_rate - rate * reg_int; + n = rate; + + div = gcd(m, n); + m = m / div; + n = n / div; + + if ((m >> 16) || (n >> 16)) { + if (m > n) { + n = n * 0xffff / m; + m = 0xffff; + } else { + m = m * 
0xffff / n; + n = 0xffff; + } + } + reg_frac = m | (n << 16); + + div_table->rate = parent_rate * n / (reg_int * n + m); + div_table->int_reg = reg_int; + div_table->frac_reg = reg_frac; +} + +static unsigned long zx_audio_div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_zx_audio_divider *zx_audio_div = to_clk_zx_audio_div(hw); + u32 reg_frac, reg_int; + + reg_frac = readl_relaxed(zx_audio_div->reg_base); + reg_int = readl_relaxed(zx_audio_div->reg_base + 0x4); + + return audio_calc_rate(zx_audio_div, reg_frac, reg_int, parent_rate); +} + +static long zx_audio_div_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_zx_audio_divider *zx_audio_div = to_clk_zx_audio_div(hw); + struct zx_clk_audio_div_table divt; + + audio_calc_reg(zx_audio_div, &divt, rate, *prate); + + return audio_calc_rate(zx_audio_div, divt.frac_reg, divt.int_reg, *prate); +} + +static int zx_audio_div_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_zx_audio_divider *zx_audio_div = to_clk_zx_audio_div(hw); + struct zx_clk_audio_div_table divt; + unsigned int val; + + audio_calc_reg(zx_audio_div, &divt, rate, parent_rate); + if (divt.rate != rate) + pr_debug("the real rate is: %lu\n", divt.rate); + + writel_relaxed(divt.frac_reg, zx_audio_div->reg_base); + + val = readl_relaxed(zx_audio_div->reg_base + 0x4); + val &= ~0xffff; + val |= divt.int_reg | CLK_AUDIO_DIV_INT_FRAC_RE; + writel_relaxed(val, zx_audio_div->reg_base + 0x4); + + mdelay(1); + + val = readl_relaxed(zx_audio_div->reg_base + 0x4); + val &= ~CLK_AUDIO_DIV_INT_FRAC_RE; + writel_relaxed(val, zx_audio_div->reg_base + 0x4); + + return 0; +} + +const struct clk_ops zx_audio_div_ops = { + .recalc_rate = zx_audio_div_recalc_rate, + .round_rate = zx_audio_div_round_rate, + .set_rate = zx_audio_div_set_rate, +}; diff --git a/drivers/clk/zte/clk.h b/drivers/clk/zte/clk.h index 0df3474b2cf3..84a55a3e2bd4 100644 --- a/drivers/clk/zte/clk.h +++ b/drivers/clk/zte/clk.h @@ -153,6 +153,25 @@ struct zx_clk_div { .id = _id, \ } +struct clk_zx_audio_divider { + struct clk_hw hw; + void __iomem *reg_base; + unsigned int rate_count; + spinlock_t *lock; + u16 id; +}; + +#define AUDIO_DIV(_id, _name, _parent, _reg) \ +{ \ + .reg_base = (void __iomem *) _reg, \ + .lock = &clk_lock, \ + .hw.init = CLK_HW_INIT(_name, \ + _parent, \ + &zx_audio_div_ops, \ + 0), \ + .id = _id, \ +} + struct clk *clk_register_zx_pll(const char *name, const char *parent_name, unsigned long flags, void __iomem *reg_base, const struct zx_pll_config *lookup_table, int count, spinlock_t *lock); @@ -167,4 +186,6 @@ struct clk *clk_register_zx_audio(const char *name, unsigned long flags, void __iomem *reg_base); extern const struct clk_ops zx_pll_ops; +extern const struct clk_ops zx_audio_div_ops; + #endif diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h index a768da7138a1..b7872f62f674 100644 --- a/drivers/crypto/marvell/cesa.h +++ b/drivers/crypto/marvell/cesa.h @@ -273,7 +273,8 @@ struct mv_cesa_op_ctx { #define CESA_TDMA_SRC_IN_SRAM BIT(30) #define CESA_TDMA_END_OF_REQ BIT(29) #define CESA_TDMA_BREAK_CHAIN BIT(28) -#define CESA_TDMA_TYPE_MSK GENMASK(27, 0) +#define CESA_TDMA_SET_STATE BIT(27) +#define CESA_TDMA_TYPE_MSK GENMASK(26, 0) #define CESA_TDMA_DUMMY 0 #define CESA_TDMA_DATA 1 #define CESA_TDMA_OP 2 diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c index 317cf029c0cf..77c0fb936f47 100644 --- a/drivers/crypto/marvell/hash.c +++
b/drivers/crypto/marvell/hash.c @@ -280,13 +280,32 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req) sreq->offset = 0; } +static void mv_cesa_ahash_dma_step(struct ahash_request *req) +{ + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); + struct mv_cesa_req *base = &creq->base; + + /* We must explicitly set the digest state. */ + if (base->chain.first->flags & CESA_TDMA_SET_STATE) { + struct mv_cesa_engine *engine = base->engine; + int i; + + /* Set the hash state in the IVDIG regs. */ + for (i = 0; i < ARRAY_SIZE(creq->state); i++) + writel_relaxed(creq->state[i], engine->regs + + CESA_IVDIG(i)); + } + + mv_cesa_dma_step(base); +} + static void mv_cesa_ahash_step(struct crypto_async_request *req) { struct ahash_request *ahashreq = ahash_request_cast(req); struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq); if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) - mv_cesa_dma_step(&creq->base); + mv_cesa_ahash_dma_step(ahashreq); else mv_cesa_ahash_std_step(ahashreq); } @@ -584,12 +603,16 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) struct mv_cesa_ahash_dma_iter iter; struct mv_cesa_op_ctx *op = NULL; unsigned int frag_len; + bool set_state = false; int ret; u32 type; basereq->chain.first = NULL; basereq->chain.last = NULL; + if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl)) + set_state = true; + if (creq->src_nents) { ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE); @@ -683,6 +706,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req) if (type != CESA_TDMA_RESULT) basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN; + if (set_state) { + /* + * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to + * let the step logic know that the IVDIG registers should be + * explicitly set before launching a TDMA chain. + */ + basereq->chain.first->flags |= CESA_TDMA_SET_STATE; + } + return 0; err_free_tdma: diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c index 4416b88eca70..c76375ff376d 100644 --- a/drivers/crypto/marvell/tdma.c +++ b/drivers/crypto/marvell/tdma.c @@ -109,7 +109,14 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine, last->next = dreq->chain.first; engine->chain.last = dreq->chain.last; - if (!(last->flags & CESA_TDMA_BREAK_CHAIN)) + /* + * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on + * the last element of the current chain, or if the request + * being queued needs the IV regs to be set before launching + * the request.
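+ * If the two chains were linked unconditionally, the engine would + * fetch and execute the next descriptor on its own, before the CPU + * had a chance to program the IVDIG registers for that request.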
+ */ + if (!(last->flags & CESA_TDMA_BREAK_CHAIN) && + !(dreq->chain.first->flags & CESA_TDMA_SET_STATE)) last->next_dma = dreq->chain.first->cur_dma; } } diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index cbeea915f026..8037426ec50f 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work) DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR, &lp->rx_dma_regs->dmasm); - korina_free_ring(dev); - napi_disable(&lp->napi); + korina_free_ring(dev); + if (korina_init(dev) < 0) { printk(KERN_ERR "%s: cannot restart device\n", dev->name); return; @@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev) tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR; writel(tmp, &lp->rx_dma_regs->dmasm); - korina_free_ring(dev); - napi_disable(&lp->napi); cancel_work_sync(&lp->restart_task); + korina_free_ring(dev); + free_irq(lp->rx_irq, dev); free_irq(lp->tx_irq, dev); free_irq(lp->ovr_irq, dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index bcd955339058..edbe200ac2fa 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1638,7 +1638,8 @@ int mlx4_en_start_port(struct net_device *dev) /* Configure tx cq's and rings */ for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) { - u8 num_tx_rings_p_up = t == TX ? priv->num_tx_rings_p_up : 1; + u8 num_tx_rings_p_up = t == TX ? + priv->num_tx_rings_p_up : priv->tx_ring_num[t]; for (i = 0; i < priv->tx_ring_num[t]; i++) { /* Configure cq */ diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index f9b97f5946f8..44389c90056a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -326,6 +326,7 @@ enum cfg_version { static const struct pci_device_id rtl8169_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index fda01f770eff..b0344c213752 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -116,7 +116,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, unsigned int mii_address = priv->hw->mii.addr; unsigned int mii_data = priv->hw->mii.data; - u32 value = MII_WRITE | MII_BUSY; + u32 value = MII_BUSY; value |= (phyaddr << priv->hw->mii.addr_shift) & priv->hw->mii.addr_mask; @@ -126,6 +126,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg, & priv->hw->mii.clk_csr_mask; if (priv->plat->has_gmac4) value |= MII_GMAC4_WRITE; + else + value |= MII_WRITE; /* Wait until any existing MII operation is complete */ if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address)) diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index 031093e1c25f..dbfbb33ac66c 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h @@ -99,6 +99,11 @@ struct ipvl_port { int count; }; +struct ipvl_skb_cb { + bool tx_pkt; +}; +#define IPVL_SKB_CB(_skb) 
((struct ipvl_skb_cb *)&((_skb)->cb[0])) + static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d) { return rcu_dereference(d->rx_handler_data); } diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index b4e990743e1d..83ce74acf82d 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -198,7 +198,7 @@ void ipvlan_process_multicast(struct work_struct *work) unsigned int mac_hash; int ret; u8 pkt_type; - bool hlocal, dlocal; + bool tx_pkt; __skb_queue_head_init(&list); @@ -207,8 +207,11 @@ void ipvlan_process_multicast(struct work_struct *work) spin_unlock_bh(&port->backlog.lock); while ((skb = __skb_dequeue(&list)) != NULL) { + struct net_device *dev = skb->dev; + bool consumed = false; + ethh = eth_hdr(skb); - hlocal = ether_addr_equal(ethh->h_source, port->dev->dev_addr); + tx_pkt = IPVL_SKB_CB(skb)->tx_pkt; mac_hash = ipvlan_mac_hash(ethh->h_dest); if (ether_addr_equal(ethh->h_dest, port->dev->broadcast)) @@ -216,41 +219,45 @@ void ipvlan_process_multicast(struct work_struct *work) else pkt_type = PACKET_MULTICAST; - dlocal = false; rcu_read_lock(); list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) { - if (hlocal && (ipvlan->dev == skb->dev)) { - dlocal = true; + if (tx_pkt && (ipvlan->dev == skb->dev)) continue; - } if (!test_bit(mac_hash, ipvlan->mac_filters)) continue; - + if (!(ipvlan->dev->flags & IFF_UP)) + continue; ret = NET_RX_DROP; len = skb->len + ETH_HLEN; nskb = skb_clone(skb, GFP_ATOMIC); - if (!nskb) - goto acct; - - nskb->pkt_type = pkt_type; - nskb->dev = ipvlan->dev; - if (hlocal) - ret = dev_forward_skb(ipvlan->dev, nskb); - else - ret = netif_rx(nskb); -acct: + local_bh_disable(); + if (nskb) { + consumed = true; + nskb->pkt_type = pkt_type; + nskb->dev = ipvlan->dev; + if (tx_pkt) + ret = dev_forward_skb(ipvlan->dev, nskb); + else + ret = netif_rx(nskb); + } ipvlan_count_rx(ipvlan, len, ret == NET_RX_SUCCESS, true); + local_bh_enable(); } rcu_read_unlock(); - if (dlocal) { + if (tx_pkt) { /* If the packet originated here, send it out. */ skb->dev = port->dev; skb->pkt_type = pkt_type; dev_queue_xmit(skb); } else { - kfree_skb(skb); + if (consumed) + consume_skb(skb); + else + kfree_skb(skb); } + if (dev) + dev_put(dev); } } @@ -470,15 +477,24 @@ out: } static void ipvlan_multicast_enqueue(struct ipvl_port *port, - struct sk_buff *skb) + struct sk_buff *skb, bool tx_pkt) { if (skb->protocol == htons(ETH_P_PAUSE)) { kfree_skb(skb); return; } + /* Record whether the deferred packet is from the TX or the RX path. + * Deciding this by looking at the mac-addresses on the packet would + * lead to erroneous decisions. (This would be true for a loopback-mode + * on the master device or a hair-pin mode of the switch.)
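+ * In such setups a packet transmitted through this port can come back + * with its own source mac, so an RX packet would be mistaken for TX.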
+ */ + IPVL_SKB_CB(skb)->tx_pkt = tx_pkt; + spin_lock(&port->backlog.lock); if (skb_queue_len(&port->backlog) < IPVLAN_QBACKLOG_LIMIT) { + if (skb->dev) + dev_hold(skb->dev); __skb_queue_tail(&port->backlog, skb); spin_unlock(&port->backlog.lock); schedule_work(&port->wq); @@ -537,7 +553,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) } else if (is_multicast_ether_addr(eth->h_dest)) { ipvlan_skb_crossing_ns(skb, NULL); - ipvlan_multicast_enqueue(ipvlan->port, skb); + ipvlan_multicast_enqueue(ipvlan->port, skb, true); return NET_XMIT_SUCCESS; } @@ -634,7 +650,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb, */ if (nskb) { ipvlan_skb_crossing_ns(nskb, NULL); - ipvlan_multicast_enqueue(port, nskb); + ipvlan_multicast_enqueue(port, nskb, false); } } } else { diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 693ec5b66222..8b0f99300cbc 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -135,6 +135,7 @@ err: static void ipvlan_port_destroy(struct net_device *dev) { struct ipvl_port *port = ipvlan_port_get_rtnl(dev); + struct sk_buff *skb; dev->priv_flags &= ~IFF_IPVLAN_MASTER; if (port->mode == IPVLAN_MODE_L3S) { @@ -144,7 +145,11 @@ static void ipvlan_port_destroy(struct net_device *dev) } netdev_rx_handler_unregister(dev); cancel_work_sync(&port->wq); - __skb_queue_purge(&port->backlog); + while ((skb = __skb_dequeue(&port->backlog)) != NULL) { + if (skb->dev) + dev_put(skb->dev); + kfree_skb(skb); + } kfree(port); } @@ -451,16 +451,37 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping, __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key); } +static int __dax_invalidate_mapping_entry(struct address_space *mapping, + pgoff_t index, bool trunc) +{ + int ret = 0; + void *entry; + struct radix_tree_root *page_tree = &mapping->page_tree; + + spin_lock_irq(&mapping->tree_lock); + entry = get_unlocked_mapping_entry(mapping, index, NULL); + if (!entry || !radix_tree_exceptional_entry(entry)) + goto out; + if (!trunc && + (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) || + radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))) + goto out; + radix_tree_delete(page_tree, index); + mapping->nrexceptional--; + ret = 1; +out: + put_unlocked_mapping_entry(mapping, index, entry); + spin_unlock_irq(&mapping->tree_lock); + return ret; +} /* * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree * entry to get unlocked before deleting it. */ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) { - void *entry; + int ret = __dax_invalidate_mapping_entry(mapping, index, true); - spin_lock_irq(&mapping->tree_lock); - entry = get_unlocked_mapping_entry(mapping, index, NULL); /* * This gets called from truncate / punch_hole path. As such, the caller * must hold locks protecting against concurrent modifications of the @@ -468,16 +489,46 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) * caller has seen exceptional entry for this index, we better find it * at that index as well... */ - if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) { - spin_unlock_irq(&mapping->tree_lock); - return 0; - } - radix_tree_delete(&mapping->page_tree, index); + WARN_ON_ONCE(!ret); + return ret; +} + +/* + * Invalidate exceptional DAX entry if easily possible. This handles DAX + * entries for invalidate_inode_pages() so we evict the entry only if we can + * do so without blocking. 
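+ * Entries that are locked, dirty or tagged for writeback are left in + * place and 0 is returned, so the caller simply skips them.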
+ */ +int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index) +{ + int ret = 0; + void *entry, **slot; + struct radix_tree_root *page_tree = &mapping->page_tree; + + spin_lock_irq(&mapping->tree_lock); + entry = __radix_tree_lookup(page_tree, index, NULL, &slot); + if (!entry || !radix_tree_exceptional_entry(entry) || + slot_locked(mapping, slot)) + goto out; + if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) || + radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)) + goto out; + radix_tree_delete(page_tree, index); mapping->nrexceptional--; + ret = 1; +out: spin_unlock_irq(&mapping->tree_lock); - dax_wake_mapping_entry_waiter(mapping, index, entry, true); + if (ret) + dax_wake_mapping_entry_waiter(mapping, index, entry, true); + return ret; +} - return 1; +/* + * Invalidate exceptional DAX entry if it is clean. + */ +int dax_invalidate_mapping_entry_sync(struct address_space *mapping, + pgoff_t index) +{ + return __dax_invalidate_mapping_entry(mapping, index, false); } /* @@ -488,15 +539,16 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index) * otherwise it will simply fall out of the page cache under memory * pressure without ever having been dirtied. */ -static int dax_load_hole(struct address_space *mapping, void *entry, +static int dax_load_hole(struct address_space *mapping, void **entry, struct vm_fault *vmf) { struct page *page; + int ret; /* Hole page already exists? Return it... */ - if (!radix_tree_exceptional_entry(entry)) { - vmf->page = entry; - return VM_FAULT_LOCKED; + if (!radix_tree_exceptional_entry(*entry)) { + page = *entry; + goto out; } /* This will replace locked radix tree entry with a hole page */ @@ -504,8 +556,17 @@ static int dax_load_hole(struct address_space *mapping, void *entry, vmf->gfp_mask | __GFP_ZERO); if (!page) return VM_FAULT_OOM; + out: vmf->page = page; - return VM_FAULT_LOCKED; + ret = finish_fault(vmf); + vmf->page = NULL; + *entry = page; + if (!ret) { + /* Grab reference for PTE that is now referencing the page */ + get_page(page); + return VM_FAULT_NOPAGE; + } + return ret; } static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size, @@ -934,6 +995,17 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data, if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED)) return -EIO; + /* + * Write can allocate block for an area which has a hole page mapped + * into page tables. We have to tear down these mappings so that data + * written by write(2) is visible in mmap. + */ + if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) { + invalidate_inode_pages2_range(inode->i_mapping, + pos >> PAGE_SHIFT, + (end - 1) >> PAGE_SHIFT); + } + while (pos < end) { unsigned offset = pos & (PAGE_SIZE - 1); struct blk_dax_ctl dax = { 0 }; @@ -992,23 +1064,6 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, if (iov_iter_rw(iter) == WRITE) flags |= IOMAP_WRITE; - /* - * Yes, even DAX files can have page cache attached to them: A zeroed - * page is inserted into the pagecache when we have to serve a write - * fault on a hole. It should never be dirtied and can simply be - * dropped from the pagecache once we get real data for the page. - * - * XXX: This is racy against mmap, and there's nothing we can do about - * it. We'll eventually need to shift this down even further so that - * we can check if we allocated blocks over a hole first. 
- */ - if (mapping->nrpages) { - ret = invalidate_inode_pages2_range(mapping, - pos >> PAGE_SHIFT, - (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT); - WARN_ON_ONCE(ret); - } - while (iov_iter_count(iter)) { ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops, iter, dax_iomap_actor); @@ -1023,6 +1078,15 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, } EXPORT_SYMBOL_GPL(dax_iomap_rw); +static int dax_fault_return(int error) +{ + if (error == 0) + return VM_FAULT_NOPAGE; + if (error == -ENOMEM) + return VM_FAULT_OOM; + return VM_FAULT_SIGBUS; +} + /** * dax_iomap_fault - handle a page fault on a DAX file * @vma: The virtual memory area where the fault occurred @@ -1055,12 +1119,6 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, if (pos >= i_size_read(inode)) return VM_FAULT_SIGBUS; - entry = grab_mapping_entry(mapping, vmf->pgoff, 0); - if (IS_ERR(entry)) { - error = PTR_ERR(entry); - goto out; - } - if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page) flags |= IOMAP_WRITE; @@ -1071,9 +1129,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, */ error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap); if (error) - goto unlock_entry; + return dax_fault_return(error); if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) { - error = -EIO; /* fs corruption? */ + vmf_ret = dax_fault_return(-EIO); /* fs corruption? */ + goto finish_iomap; + } + + entry = grab_mapping_entry(mapping, vmf->pgoff, 0); + if (IS_ERR(entry)) { + vmf_ret = dax_fault_return(PTR_ERR(entry)); goto finish_iomap; } @@ -1096,13 +1160,13 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, } if (error) - goto finish_iomap; + goto error_unlock_entry; __SetPageUptodate(vmf->cow_page); vmf_ret = finish_fault(vmf); if (!vmf_ret) vmf_ret = VM_FAULT_DONE_COW; - goto finish_iomap; + goto unlock_entry; } switch (iomap.type) { @@ -1114,12 +1178,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, } error = dax_insert_mapping(mapping, iomap.bdev, sector, PAGE_SIZE, &entry, vma, vmf); + /* -EBUSY is fine, somebody else faulted on the same PTE */ + if (error == -EBUSY) + error = 0; break; case IOMAP_UNWRITTEN: case IOMAP_HOLE: if (!(vmf->flags & FAULT_FLAG_WRITE)) { - vmf_ret = dax_load_hole(mapping, entry, vmf); - break; + vmf_ret = dax_load_hole(mapping, &entry, vmf); + goto unlock_entry; } /*FALLTHRU*/ default: @@ -1128,31 +1195,25 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, break; } + error_unlock_entry: + vmf_ret = dax_fault_return(error) | major; + unlock_entry: + put_locked_mapping_entry(mapping, vmf->pgoff, entry); finish_iomap: if (ops->iomap_end) { - if (error || (vmf_ret & VM_FAULT_ERROR)) { - /* keep previous error */ - ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags, - &iomap); - } else { - error = ops->iomap_end(inode, pos, PAGE_SIZE, - PAGE_SIZE, flags, &iomap); - } - } - unlock_entry: - if (vmf_ret != VM_FAULT_LOCKED || error) - put_locked_mapping_entry(mapping, vmf->pgoff, entry); - out: - if (error == -ENOMEM) - return VM_FAULT_OOM | major; - /* -EBUSY is fine, somebody else faulted on the same PTE */ - if (error < 0 && error != -EBUSY) - return VM_FAULT_SIGBUS | major; - if (vmf_ret) { - WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */ - return vmf_ret; + int copied = PAGE_SIZE; + + if (vmf_ret & VM_FAULT_ERROR) + copied = 0; + /* + * The fault is done by now and there's no way back (other + * thread may be already happily using PTE we have installed). 
+ * Just ignore error from ->iomap_end since we cannot do much + * with it. + */ + ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap); } - return VM_FAULT_NOPAGE | major; + return vmf_ret; } EXPORT_SYMBOL_GPL(dax_iomap_fault); @@ -1277,16 +1338,6 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, goto fallback; /* - * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX - * PMD or a HZP entry. If it can't (because a 4k page is already in - * the tree, for instance), it will return -EEXIST and we just fall - * back to 4k entries. - */ - entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD); - if (IS_ERR(entry)) - goto fallback; - - /* * Note that we don't use iomap_apply here. We aren't doing I/O, only * setting up a mapping, so really we're using iomap_begin() as a way * to look up our filesystem block. @@ -1294,10 +1345,21 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, pos = (loff_t)pgoff << PAGE_SHIFT; error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap); if (error) - goto unlock_entry; + goto fallback; + if (iomap.offset + iomap.length < pos + PMD_SIZE) goto finish_iomap; + /* + * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX + * PMD or a HZP entry. If it can't (because a 4k page is already in + * the tree, for instance), it will return -EEXIST and we just fall + * back to 4k entries. + */ + entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD); + if (IS_ERR(entry)) + goto finish_iomap; + vmf.pgoff = pgoff; vmf.flags = flags; vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO; @@ -1310,7 +1372,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, case IOMAP_UNWRITTEN: case IOMAP_HOLE: if (WARN_ON_ONCE(write)) - goto finish_iomap; + goto unlock_entry; result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap, &entry); break; @@ -1319,20 +1381,23 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address, break; } + unlock_entry: + put_locked_mapping_entry(mapping, pgoff, entry); finish_iomap: if (ops->iomap_end) { - if (result == VM_FAULT_FALLBACK) { - ops->iomap_end(inode, pos, PMD_SIZE, 0, iomap_flags, - &iomap); - } else { - error = ops->iomap_end(inode, pos, PMD_SIZE, PMD_SIZE, - iomap_flags, &iomap); - if (error) - result = VM_FAULT_FALLBACK; - } + int copied = PMD_SIZE; + + if (result == VM_FAULT_FALLBACK) + copied = 0; + /* + * The fault is done by now and there's no way back (other + * thread may be already happily using PMD we have installed). + * Just ignore error from ->iomap_end since we cannot do much + * with it. 
+ */ + ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags, + &iomap); } - unlock_entry: - put_locked_mapping_entry(mapping, pgoff, entry); fallback: if (result == VM_FAULT_FALLBACK) { split_huge_pmd(vma, pmd, address); diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 0093ea2512a8..f073bfca694b 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -751,9 +751,8 @@ static int ext2_get_blocks(struct inode *inode, mutex_unlock(&ei->truncate_mutex); goto cleanup; } - } else { - *new = true; } + *new = true; ext2_splice_branch(inode, iblock, partial, indirect_blks, count); mutex_unlock(&ei->truncate_mutex); diff --git a/fs/ext4/file.c b/fs/ext4/file.c index b5f184493c57..d663d3d7c81c 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -258,7 +258,6 @@ out: static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { int result; - handle_t *handle = NULL; struct inode *inode = file_inode(vma->vm_file); struct super_block *sb = inode->i_sb; bool write = vmf->flags & FAULT_FLAG_WRITE; @@ -266,24 +265,12 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (write) { sb_start_pagefault(sb); file_update_time(vma->vm_file); - down_read(&EXT4_I(inode)->i_mmap_sem); - handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE, - EXT4_DATA_TRANS_BLOCKS(sb)); - } else - down_read(&EXT4_I(inode)->i_mmap_sem); - - if (IS_ERR(handle)) - result = VM_FAULT_SIGBUS; - else - result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops); - - if (write) { - if (!IS_ERR(handle)) - ext4_journal_stop(handle); - up_read(&EXT4_I(inode)->i_mmap_sem); + } + down_read(&EXT4_I(inode)->i_mmap_sem); + result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops); + up_read(&EXT4_I(inode)->i_mmap_sem); + if (write) sb_end_pagefault(sb); - } else - up_read(&EXT4_I(inode)->i_mmap_sem); return result; } @@ -292,7 +279,6 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, pmd_t *pmd, unsigned int flags) { int result; - handle_t *handle = NULL; struct inode *inode = file_inode(vma->vm_file); struct super_block *sb = inode->i_sb; bool write = flags & FAULT_FLAG_WRITE; @@ -300,27 +286,13 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, if (write) { sb_start_pagefault(sb); file_update_time(vma->vm_file); - down_read(&EXT4_I(inode)->i_mmap_sem); - handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE, - ext4_chunk_trans_blocks(inode, - PMD_SIZE / PAGE_SIZE)); - } else - down_read(&EXT4_I(inode)->i_mmap_sem); - - if (IS_ERR(handle)) - result = VM_FAULT_SIGBUS; - else { - result = dax_iomap_pmd_fault(vma, addr, pmd, flags, - &ext4_iomap_ops); } - - if (write) { - if (!IS_ERR(handle)) - ext4_journal_stop(handle); - up_read(&EXT4_I(inode)->i_mmap_sem); + down_read(&EXT4_I(inode)->i_mmap_sem); + result = dax_iomap_pmd_fault(vma, addr, pmd, flags, + &ext4_iomap_ops); + up_read(&EXT4_I(inode)->i_mmap_sem); + if (write) sb_end_pagefault(sb); - } else - up_read(&EXT4_I(inode)->i_mmap_sem); return result; } diff --git a/include/dt-bindings/clock/qcom,gcc-ipq4019.h b/include/dt-bindings/clock/qcom,gcc-ipq4019.h index 6240e5b0e900..7e8a7be6dcda 100644 --- a/include/dt-bindings/clock/qcom,gcc-ipq4019.h +++ b/include/dt-bindings/clock/qcom,gcc-ipq4019.h @@ -81,6 +81,17 @@ #define GCC_WCSS5G_CLK 62 #define GCC_WCSS5G_REF_CLK 63 #define GCC_WCSS5G_RTC_CLK 64 +#define GCC_APSS_DDRPLL_VCO 65 +#define GCC_SDCC_PLLDIV_CLK 66 +#define GCC_FEPLL_VCO 67 +#define GCC_FEPLL125_CLK 68 +#define GCC_FEPLL125DLY_CLK 69 +#define GCC_FEPLL200_CLK 70 +#define GCC_FEPLL500_CLK 
71 +#define GCC_FEPLL_WCSS2G_CLK 72 +#define GCC_FEPLL_WCSS5G_CLK 73 +#define GCC_APSS_CPU_PLLDIV_CLK 74 +#define GCC_PCNOC_AHB_CLK_SRC 75 #define WIFI0_CPU_INIT_RESET 0 #define WIFI0_RADIO_SRIF_RESET 1 diff --git a/include/dt-bindings/clock/qcom,gcc-msm8996.h b/include/dt-bindings/clock/qcom,gcc-msm8996.h index 1828723eb621..1f5c42254798 100644 --- a/include/dt-bindings/clock/qcom,gcc-msm8996.h +++ b/include/dt-bindings/clock/qcom,gcc-msm8996.h @@ -339,6 +339,7 @@ #define GCC_PCIE_PHY_COM_NOCSR_BCR 102 #define GCC_USB3_PHY_BCR 103 #define GCC_USB3PHY_PHY_BCR 104 +#define GCC_MSS_RESTART 105 /* Indexes for GDSCs */ diff --git a/include/dt-bindings/clock/qcom,rpmcc.h b/include/dt-bindings/clock/qcom,rpmcc.h index 5924cdb71336..96b63c00249e 100644 --- a/include/dt-bindings/clock/qcom,rpmcc.h +++ b/include/dt-bindings/clock/qcom,rpmcc.h @@ -14,7 +14,7 @@ #ifndef _DT_BINDINGS_CLK_MSM_RPMCC_H #define _DT_BINDINGS_CLK_MSM_RPMCC_H -/* apq8064 */ +/* RPM clocks */ #define RPM_PXO_CLK 0 #define RPM_PXO_A_CLK 1 #define RPM_CXO_CLK 2 @@ -38,7 +38,7 @@ #define RPM_SFPB_CLK 20 #define RPM_SFPB_A_CLK 21 -/* msm8916 */ +/* SMD RPM clocks */ #define RPM_SMD_XO_CLK_SRC 0 #define RPM_SMD_XO_A_CLK_SRC 1 #define RPM_SMD_PCNOC_CLK 2 @@ -65,5 +65,41 @@ #define RPM_SMD_RF_CLK1_A_PIN 23 #define RPM_SMD_RF_CLK2_PIN 24 #define RPM_SMD_RF_CLK2_A_PIN 25 +#define RPM_SMD_PNOC_CLK 26 +#define RPM_SMD_PNOC_A_CLK 27 +#define RPM_SMD_CNOC_CLK 28 +#define RPM_SMD_CNOC_A_CLK 29 +#define RPM_SMD_MMSSNOC_AHB_CLK 30 +#define RPM_SMD_MMSSNOC_AHB_A_CLK 31 +#define RPM_SMD_GFX3D_CLK_SRC 32 +#define RPM_SMD_GFX3D_A_CLK_SRC 33 +#define RPM_SMD_OCMEMGX_CLK 34 +#define RPM_SMD_OCMEMGX_A_CLK 35 +#define RPM_SMD_CXO_D0 36 +#define RPM_SMD_CXO_D0_A 37 +#define RPM_SMD_CXO_D1 38 +#define RPM_SMD_CXO_D1_A 39 +#define RPM_SMD_CXO_A0 40 +#define RPM_SMD_CXO_A0_A 41 +#define RPM_SMD_CXO_A1 42 +#define RPM_SMD_CXO_A1_A 43 +#define RPM_SMD_CXO_A2 44 +#define RPM_SMD_CXO_A2_A 45 +#define RPM_SMD_DIV_CLK1 46 +#define RPM_SMD_DIV_A_CLK1 47 +#define RPM_SMD_DIV_CLK2 48 +#define RPM_SMD_DIV_A_CLK2 49 +#define RPM_SMD_DIFF_CLK 50 +#define RPM_SMD_DIFF_A_CLK 51 +#define RPM_SMD_CXO_D0_PIN 52 +#define RPM_SMD_CXO_D0_A_PIN 53 +#define RPM_SMD_CXO_D1_PIN 54 +#define RPM_SMD_CXO_D1_A_PIN 55 +#define RPM_SMD_CXO_A0_PIN 56 +#define RPM_SMD_CXO_A0_A_PIN 57 +#define RPM_SMD_CXO_A1_PIN 58 +#define RPM_SMD_CXO_A1_A_PIN 59 +#define RPM_SMD_CXO_A2_PIN 60 +#define RPM_SMD_CXO_A2_A_PIN 61 #endif diff --git a/include/dt-bindings/clock/stm32fx-clock.h b/include/dt-bindings/clock/stm32fx-clock.h new file mode 100644 index 000000000000..08bcab61b714 --- /dev/null +++ b/include/dt-bindings/clock/stm32fx-clock.h @@ -0,0 +1,39 @@ +/* + * stm32fx-clock.h + * + * Copyright (C) 2016 STMicroelectronics + * Author: Gabriel Fernandez for STMicroelectronics. 
+ * License terms: GNU General Public License (GPL), version 2 + */ + +/* + * List of clocks which are not derived from system clock (SYSCLOCK) + * + * The index of these clocks is the secondary index of DT bindings + * (see Documentation/devicetree/bindings/clock/st,stm32-rcc.txt) + * + * e.g.: + <assigned-clocks = <&rcc 1 CLK_LSE>; +*/ + +#ifndef _DT_BINDINGS_CLK_STMFX_H +#define _DT_BINDINGS_CLK_STMFX_H + +#define SYSTICK 0 +#define FCLK 1 +#define CLK_LSI 2 +#define CLK_LSE 3 +#define CLK_HSE_RTC 4 +#define CLK_RTC 5 +#define PLL_VCO_I2S 6 +#define PLL_VCO_SAI 7 +#define CLK_LCD 8 +#define CLK_I2S 9 +#define CLK_SAI1 10 +#define CLK_SAI2 11 +#define CLK_I2SQ_PDIV 12 +#define CLK_SAIQ_PDIV 13 + +#define END_PRIMARY_CLK 14 + +#endif diff --git a/include/linux/dax.h b/include/linux/dax.h index f97bcfe79472..24ad71173995 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -41,6 +41,9 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf, struct iomap_ops *ops); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); +int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index); +int dax_invalidate_mapping_entry_sync(struct address_space *mapping, + pgoff_t index); void dax_wake_mapping_entry_waiter(struct address_space *mapping, pgoff_t index, void *entry, bool wake_all); diff --git a/include/linux/filter.h b/include/linux/filter.h index 702314253797..a0934e6c9bab 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -610,7 +610,6 @@ bool bpf_helper_changes_pkt_data(void *func); struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); void bpf_warn_invalid_xdp_action(u32 act); -void bpf_warn_invalid_xdp_buffer(void); #ifdef CONFIG_BPF_JIT extern int bpf_jit_enable; diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index c56b39890a41..6b5818d6de32 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -73,13 +73,13 @@ */ enum pageflags { PG_locked, /* Page is locked. Don't touch. */ - PG_waiters, /* Page has waiters, check its waitqueue */ PG_error, PG_referenced, PG_uptodate, PG_dirty, PG_lru, PG_active, + PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */ PG_slab, PG_owner_priv_1, /* Owner use.
If pagecache, fs may use*/ PG_arch_1, diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index f0cf5a1b777e..0378e88f6fd3 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -110,6 +110,7 @@ struct netns_ipv4 { int sysctl_tcp_orphan_retries; int sysctl_tcp_fin_timeout; unsigned int sysctl_tcp_notsent_lowat; + int sysctl_tcp_tw_reuse; int sysctl_igmp_max_memberships; int sysctl_igmp_max_msf; diff --git a/include/net/tcp.h b/include/net/tcp.h index 207147b4c6b2..6061963cca98 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -252,7 +252,6 @@ extern int sysctl_tcp_wmem[3]; extern int sysctl_tcp_rmem[3]; extern int sysctl_tcp_app_win; extern int sysctl_tcp_adv_win_scale; -extern int sysctl_tcp_tw_reuse; extern int sysctl_tcp_frto; extern int sysctl_tcp_low_latency; extern int sysctl_tcp_nometrics_save; diff --git a/kernel/cpu.c b/kernel/cpu.c index 042fd7e8e030..f75c4d031eeb 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1471,6 +1471,7 @@ int __cpuhp_setup_state(enum cpuhp_state state, bool multi_instance) { int cpu, ret = 0; + bool dynstate; if (cpuhp_cb_check(state) || !name) return -EINVAL; @@ -1480,6 +1481,12 @@ int __cpuhp_setup_state(enum cpuhp_state state, ret = cpuhp_store_callbacks(state, name, startup, teardown, multi_instance); + dynstate = state == CPUHP_AP_ONLINE_DYN; + if (ret > 0 && dynstate) { + state = ret; + ret = 0; + } + if (ret || !invoke || !startup) goto out; @@ -1508,7 +1515,7 @@ out: * If the requested state is CPUHP_AP_ONLINE_DYN, return the * dynamically allocated state in case of success. */ - if (!ret && state == CPUHP_AP_ONLINE_DYN) + if (!ret && dynstate) return state; return ret; } diff --git a/mm/filemap.c b/mm/filemap.c index 82f26cde830c..d0e4d1002059 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -912,6 +912,29 @@ void add_page_wait_queue(struct page *page, wait_queue_t *waiter) } EXPORT_SYMBOL_GPL(add_page_wait_queue); +#ifndef clear_bit_unlock_is_negative_byte + +/* + * PG_waiters is the high bit in the same byte as PG_locked. + * + * On x86 (and on many other architectures), we can clear PG_locked and + * test the sign bit at the same time. But if the architecture does + * not support that special operation, we just do this all by hand + * instead. + * + * The read of PG_waiters has to be after (or concurrently with) PG_locked + * being cleared, but a memory barrier should be unnecessary since it is + * in the same byte as PG_locked. + */ +static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem) +{ + clear_bit_unlock(nr, mem); + /* smp_mb__after_atomic(); */ + return test_bit(PG_waiters, mem); +} + +#endif + /** * unlock_page - unlock a locked page * @page: the page @@ -921,16 +944,19 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue); * mechanism between PageLocked pages and PageWriteback pages is shared. * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep. * - * The mb is necessary to enforce ordering between the clear_bit and the read - * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()). + * Note that this depends on PG_waiters being the sign bit in the byte + * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to + * clear the PG_locked bit and test PG_waiters at the same time fairly + * portably (architectures that do LL/SC can test any bit, while x86 can + * test the sign bit).
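+ * (On x86, for instance, this can be a single locked "andb" that clears + * PG_locked and reports the sign of the resulting byte, i.e. whether + * bit #7, PG_waiters, is set.)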
*/ void unlock_page(struct page *page) { + BUILD_BUG_ON(PG_waiters != 7); page = compound_head(page); VM_BUG_ON_PAGE(!PageLocked(page), page); - clear_bit_unlock(PG_locked, &page->flags); - smp_mb__after_atomic(); - wake_up_page(page, PG_locked); + if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags)) + wake_up_page_bit(page, PG_locked); } EXPORT_SYMBOL(unlock_page); diff --git a/mm/truncate.c b/mm/truncate.c index fd97f1dbce29..dd7b24e083c5 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -24,20 +24,12 @@ #include <linux/rmap.h> #include "internal.h" -static void clear_exceptional_entry(struct address_space *mapping, - pgoff_t index, void *entry) +static void clear_shadow_entry(struct address_space *mapping, pgoff_t index, + void *entry) { struct radix_tree_node *node; void **slot; - /* Handled by shmem itself */ - if (shmem_mapping(mapping)) - return; - - if (dax_mapping(mapping)) { - dax_delete_mapping_entry(mapping, index); - return; - } spin_lock_irq(&mapping->tree_lock); /* * Regular page slots are stabilized by the page lock even @@ -55,6 +47,56 @@ unlock: spin_unlock_irq(&mapping->tree_lock); } +/* + * Unconditionally remove exceptional entry. Usually called from truncate path. + */ +static void truncate_exceptional_entry(struct address_space *mapping, + pgoff_t index, void *entry) +{ + /* Handled by shmem itself */ + if (shmem_mapping(mapping)) + return; + + if (dax_mapping(mapping)) { + dax_delete_mapping_entry(mapping, index); + return; + } + clear_shadow_entry(mapping, index, entry); +} + +/* + * Invalidate exceptional entry if easily possible. This handles exceptional + * entries for invalidate_inode_pages() so for DAX it evicts only unlocked and + * clean entries. + */ +static int invalidate_exceptional_entry(struct address_space *mapping, + pgoff_t index, void *entry) +{ + /* Handled by shmem itself */ + if (shmem_mapping(mapping)) + return 1; + if (dax_mapping(mapping)) + return dax_invalidate_mapping_entry(mapping, index); + clear_shadow_entry(mapping, index, entry); + return 1; +} + +/* + * Invalidate exceptional entry if clean. This handles exceptional entries for + * invalidate_inode_pages2() so for DAX it evicts only clean entries. 
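+ * Returns 1 if the entry was invalidated or needed no special handling, + * 0 if a dirty DAX entry had to be kept.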
+ */ +static int invalidate_exceptional_entry2(struct address_space *mapping, + pgoff_t index, void *entry) +{ + /* Handled by shmem itself */ + if (shmem_mapping(mapping)) + return 1; + if (dax_mapping(mapping)) + return dax_invalidate_mapping_entry_sync(mapping, index); + clear_shadow_entry(mapping, index, entry); + return 1; +} + /** * do_invalidatepage - invalidate part or all of a page * @page: the page which is affected @@ -262,7 +304,8 @@ void truncate_inode_pages_range(struct address_space *mapping, break; if (radix_tree_exceptional_entry(page)) { - clear_exceptional_entry(mapping, index, page); + truncate_exceptional_entry(mapping, index, + page); continue; } @@ -351,7 +394,8 @@ void truncate_inode_pages_range(struct address_space *mapping, } if (radix_tree_exceptional_entry(page)) { - clear_exceptional_entry(mapping, index, page); + truncate_exceptional_entry(mapping, index, + page); continue; } @@ -470,7 +514,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping, break; if (radix_tree_exceptional_entry(page)) { - clear_exceptional_entry(mapping, index, page); + invalidate_exceptional_entry(mapping, index, + page); continue; } @@ -592,7 +637,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping, break; if (radix_tree_exceptional_entry(page)) { - clear_exceptional_entry(mapping, index, page); + if (!invalidate_exceptional_entry2(mapping, + index, page)) + ret = -EBUSY; continue; } diff --git a/net/core/filter.c b/net/core/filter.c index e6c412b94dec..1969b3f118c1 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -2972,12 +2972,6 @@ void bpf_warn_invalid_xdp_action(u32 act) } EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action); -void bpf_warn_invalid_xdp_buffer(void) -{ - WARN_ONCE(1, "Illegal XDP buffer encountered, expect throughput degradation\n"); -} -EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_buffer); - static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg, int src_reg, int ctx_off, struct bpf_insn *insn_buf, diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 80bc36b25de2..22cbd61079b5 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -433,13 +433,6 @@ static struct ctl_table ipv4_table[] = { .extra2 = &tcp_adv_win_scale_max, }, { - .procname = "tcp_tw_reuse", - .data = &sysctl_tcp_tw_reuse, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, - { .procname = "tcp_frto", .data = &sysctl_tcp_frto, .maxlen = sizeof(int), @@ -960,6 +953,13 @@ static struct ctl_table ipv4_net_table[] = { .mode = 0644, .proc_handler = proc_dointvec, }, + { + .procname = "tcp_tw_reuse", + .data = &init_net.ipv4.sysctl_tcp_tw_reuse, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec + }, #ifdef CONFIG_IP_ROUTE_MULTIPATH { .procname = "fib_multipath_use_neigh", diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 30d81f533ada..fe9da4fb96bf 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -84,7 +84,6 @@ #include <crypto/hash.h> #include <linux/scatterlist.h> -int sysctl_tcp_tw_reuse __read_mostly; int sysctl_tcp_low_latency __read_mostly; #ifdef CONFIG_TCP_MD5SIG @@ -120,7 +119,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) and use initial timestamp retrieved from peer table. 
*/ if (tcptw->tw_ts_recent_stamp && - (!twp || (sysctl_tcp_tw_reuse && + (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; if (tp->write_seq == 0) @@ -2456,6 +2455,7 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_orphan_retries = 0; net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT; net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX; + net->ipv4.sysctl_tcp_tw_reuse = 0; return 0; fail: diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 2d4c4d3911c0..9c62b6325f7a 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -606,7 +606,6 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) rcu_assign_pointer(flow->sf_acts, acts); packet->priority = flow->key.phy.priority; packet->mark = flow->key.phy.skb_mark; - packet->protocol = flow->key.eth.type; rcu_read_lock(); dp = get_dp_rcu(net, ovs_header->dp_ifindex); diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 08aa926cd5cf..2c0a00f7f1b7 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -312,7 +312,8 @@ static bool icmp6hdr_ok(struct sk_buff *skb) * Returns 0 if it encounters a non-vlan or incomplete packet. * Returns 1 after successfully parsing vlan tag. */ -static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh) +static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh, + bool untag_vlan) { struct vlan_head *vh = (struct vlan_head *)skb->data; @@ -330,7 +331,20 @@ static int parse_vlan_tag(struct sk_buff *skb, struct vlan_head *key_vh) key_vh->tci = vh->tci | htons(VLAN_TAG_PRESENT); key_vh->tpid = vh->tpid; - __skb_pull(skb, sizeof(struct vlan_head)); + if (unlikely(untag_vlan)) { + int offset = skb->data - skb_mac_header(skb); + u16 tci; + int err; + + __skb_push(skb, offset); + err = __skb_vlan_pop(skb, &tci); + __skb_pull(skb, offset); + if (err) + return err; + __vlan_hwaccel_put_tag(skb, key_vh->tpid, tci); + } else { + __skb_pull(skb, sizeof(struct vlan_head)); + } return 1; } @@ -351,13 +365,13 @@ static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key) key->eth.vlan.tpid = skb->vlan_proto; } else { /* Parse outer vlan tag in the non-accelerated case. */ - res = parse_vlan_tag(skb, &key->eth.vlan); + res = parse_vlan_tag(skb, &key->eth.vlan, true); if (res <= 0) return res; } /* Parse inner vlan tag. */ - res = parse_vlan_tag(skb, &key->eth.cvlan); + res = parse_vlan_tag(skb, &key->eth.cvlan, false); if (res <= 0) return res; @@ -800,29 +814,15 @@ int ovs_flow_key_extract_userspace(struct net *net, const struct nlattr *attr, if (err) return err; - if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) { - /* key_extract assumes that skb->protocol is set-up for - * layer 3 packets which is the case for other callers, - * in particular packets recieved from the network stack. - * Here the correct value can be set from the metadata - * extracted above. - */ - skb->protocol = key->eth.type; - } else { - struct ethhdr *eth; - - skb_reset_mac_header(skb); - eth = eth_hdr(skb); - - /* Normally, setting the skb 'protocol' field would be - * handled by a call to eth_type_trans(), but it assumes - * there's a sending device, which we may not have. 
- */ - if (eth_proto_is_802_3(eth->h_proto)) - skb->protocol = eth->h_proto; - else - skb->protocol = htons(ETH_P_802_2); - } + /* key_extract assumes that skb->protocol is set-up for + * layer 3 packets which is the case for other callers, + * in particular packets received from the network stack. + * Here the correct value can be set from the metadata + * extracted above. + * For L2 packets the key eth type would be zero; skb protocol + * would be set to the correct value later during key extraction. + */ + skb->protocol = key->eth.type; return key_extract(skb, key); } diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 3fbba79a4ef0..1ecdf809b5fa 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -148,13 +148,15 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) unsigned long cl; unsigned long fh; int err; - int tp_created = 0; + int tp_created; if ((n->nlmsg_type != RTM_GETTFILTER) && !netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) return -EPERM; replay: + tp_created = 0; + err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL); if (err < 0) return err; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 333c5dae0072..800caaa699a1 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -441,15 +441,19 @@ static void __tipc_shutdown(struct socket *sock, int error) while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (TIPC_SKB_CB(skb)->bytes_read) { kfree_skb(skb); - } else { - if (!tipc_sk_type_connectionless(sk) && - sk->sk_state != TIPC_DISCONNECTING) { - tipc_set_sk_state(sk, TIPC_DISCONNECTING); - tipc_node_remove_conn(net, dnode, tsk->portid); - } - tipc_sk_respond(sk, skb, error); + continue; + } + if (!tipc_sk_type_connectionless(sk) && + sk->sk_state != TIPC_DISCONNECTING) { + tipc_set_sk_state(sk, TIPC_DISCONNECTING); + tipc_node_remove_conn(net, dnode, tsk->portid); } + tipc_sk_respond(sk, skb, error); } + + if (tipc_sk_type_connectionless(sk)) + return; + if (sk->sk_state != TIPC_DISCONNECTING) { skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode, @@ -457,10 +461,8 @@ static void __tipc_shutdown(struct socket *sock, int error) tsk->portid, error); if (skb) tipc_node_xmit_skb(net, skb, dnode, tsk->portid); - if (!tipc_sk_type_connectionless(sk)) { - tipc_node_remove_conn(net, dnode, tsk->portid); - tipc_set_sk_state(sk, TIPC_DISCONNECTING); - } + tipc_node_remove_conn(net, dnode, tsk->portid); + tipc_set_sk_state(sk, TIPC_DISCONNECTING); } }
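A closing note on the cls_api hunk above: tc_ctl_tfilter can jump back to its replay label, so per-attempt state such as tp_created must be re-initialized after the label; an initializer at the declaration runs only once, at function entry, and a goto skips it. A minimal standalone sketch of that pattern, with hypothetical names rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical sketch of the "replay" pattern fixed in cls_api: state
 * describing what *this* attempt did must be reset after the label,
 * because a goto does not re-run the declaration's initializer.
 */
static int handle_request(void)
{
        bool created;   /* would go stale across retries if it were
                         * only initialized at the declaration */
        int pass = 0;

replay:
        created = false;        /* reset on every pass, as tc_ctl_tfilter
                                 * now resets tp_created */
        pass++;

        if (pass == 1) {
                created = true;         /* this pass allocated something */
                /* ... the allocation is torn down on failure ... */
                goto replay;            /* retry with a clean slate */
        }

        /* error handling keyed off 'created' must reflect THIS pass */
        return created ? -1 : 0;
}

int main(void)
{
        /* prints 0: the stale 'created' from pass 1 did not leak */
        printf("handle_request() = %d\n", handle_request());
        return 0;
}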