687 files changed, 30517 insertions, 7277 deletions
@@ -2740,6 +2740,10 @@ S: C/ Mieses 20, 9-B
 S: Valladolid 47009
 S: Spain
+N: Jens Osterkamp
+D: Maintainer of Spidernet network driver for Cell
+
 N: Gadi Oxman
 D: Original author and maintainer of IDE/ATAPI floppy/tape drivers
diff --git a/Documentation/ABI/testing/sysfs-driver-toshiba_haps b/Documentation/ABI/testing/sysfs-driver-toshiba_haps
new file mode 100644
index 000000000000..a662370b4dbf
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-driver-toshiba_haps
@@ -0,0 +1,20 @@
+What:           /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS620A:00/protection_level
+Date:           August 16, 2014
+KernelVersion:  3.17
+Contact:        Azael Avalos <[email protected]>
+Description:    This file controls the built-in accelerometer protection level,
+                valid values are:
+                  * 0 -> Disabled
+                  * 1 -> Low
+                  * 2 -> Medium
+                  * 3 -> High
+                The default protection value is set to 2 (Medium).
+Users:          KToshiba
+
+What:           /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS620A:00/reset_protection
+Date:           August 16, 2014
+KernelVersion:  3.17
+Contact:        Azael Avalos <[email protected]>
+Description:    This file turns off the built-in accelerometer for a few
+                seconds and then restores normal operation. It accepts 1 as the
+                only parameter.
diff --git a/Documentation/clk.txt b/Documentation/clk.txt
index 0e4f90aa1c13..f463bdc37f88 100644
--- a/Documentation/clk.txt
+++ b/Documentation/clk.txt
@@ -230,30 +230,7 @@ clk_register(...)
 See the basic clock types in drivers/clk/clk-*.c for examples.
-        Part 5 - static initialization of clock data
-
-For platforms with many clocks (often numbering into the hundreds) it
-may be desirable to statically initialize some clock data. This
-presents a problem since the definition of struct clk should be hidden
-from everyone except for the clock core in drivers/clk/clk.c.
-
-To get around this problem struct clk's definition is exposed in
-include/linux/clk-private.h along with some macros for more easily
-initializing instances of the basic clock types. These clocks must
-still be initialized with the common clock framework via a call to
-__clk_init.
-
-clk-private.h must NEVER be included by code which implements struct
-clk_ops callbacks, nor must it be included by any logic which pokes
-around inside of struct clk at run-time. To do so is a layering
-violation.
-
-To better enforce this policy, always follow this simple rule: any
-statically initialized clock data MUST be defined in a separate file
-from the logic that implements its ops. Basically separate the logic
-from the data and all is well.
-
-        Part 6 - Disabling clock gating of unused clocks
+        Part 5 - Disabling clock gating of unused clocks
 Sometimes during development it can be useful to be able to bypass the
 default disabling of unused clocks. For example, if drivers aren't enabling
@@ -264,7 +241,7 @@ are sorted out.
 To bypass this disabling, include "clk_ignore_unused" in the bootargs to the
 kernel.
-        Part 7 - Locking
+        Part 6 - Locking
 The common clock framework uses two global locks, the prepare lock and the
 enable lock.
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
new file mode 100644
index 000000000000..936166fbee09
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
@@ -0,0 +1,23 @@
+Mediatek apmixedsys controller
+==============================
+
+The Mediatek apmixedsys controller provides the PLLs to the system.
+ +Required Properties: + +- compatible: Should be: + - "mediatek,mt8135-apmixedsys" + - "mediatek,mt8173-apmixedsys" +- #clock-cells: Must be 1 + +The apmixedsys controller uses the common clk binding from +Documentation/devicetree/bindings/clock/clock-bindings.txt +The available clocks are defined in dt-bindings/clock/mt*-clk.h. + +Example: + +apmixedsys: clock-controller@10209000 { + compatible = "mediatek,mt8173-apmixedsys"; + reg = <0 0x10209000 0 0x1000>; + #clock-cells = <1>; +}; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt new file mode 100644 index 000000000000..f6cd3e4192ff --- /dev/null +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt @@ -0,0 +1,30 @@ +Mediatek infracfg controller +============================ + +The Mediatek infracfg controller provides various clocks and reset +outputs to the system. + +Required Properties: + +- compatible: Should be: + - "mediatek,mt8135-infracfg", "syscon" + - "mediatek,mt8173-infracfg", "syscon" +- #clock-cells: Must be 1 +- #reset-cells: Must be 1 + +The infracfg controller uses the common clk binding from +Documentation/devicetree/bindings/clock/clock-bindings.txt +The available clocks are defined in dt-bindings/clock/mt*-clk.h. +Also it uses the common reset controller binding from +Documentation/devicetree/bindings/reset/reset.txt. +The available reset outputs are defined in +dt-bindings/reset-controller/mt*-resets.h + +Example: + +infracfg: power-controller@10001000 { + compatible = "mediatek,mt8173-infracfg", "syscon"; + reg = <0 0x10001000 0 0x1000>; + #clock-cells = <1>; + #reset-cells = <1>; +}; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt new file mode 100644 index 000000000000..f25b85499a6f --- /dev/null +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,pericfg.txt @@ -0,0 +1,30 @@ +Mediatek pericfg controller +=========================== + +The Mediatek pericfg controller provides various clocks and reset +outputs to the system. + +Required Properties: + +- compatible: Should be: + - "mediatek,mt8135-pericfg", "syscon" + - "mediatek,mt8173-pericfg", "syscon" +- #clock-cells: Must be 1 +- #reset-cells: Must be 1 + +The pericfg controller uses the common clk binding from +Documentation/devicetree/bindings/clock/clock-bindings.txt +The available clocks are defined in dt-bindings/clock/mt*-clk.h. +Also it uses the common reset controller binding from +Documentation/devicetree/bindings/reset/reset.txt. +The available reset outputs are defined in +dt-bindings/reset-controller/mt*-resets.h + +Example: + +pericfg: power-controller@10003000 { + compatible = "mediatek,mt8173-pericfg", "syscon"; + reg = <0 0x10003000 0 0x1000>; + #clock-cells = <1>; + #reset-cells = <1>; +}; diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt new file mode 100644 index 000000000000..f9e917994ced --- /dev/null +++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt @@ -0,0 +1,23 @@ +Mediatek topckgen controller +============================ + +The Mediatek topckgen controller provides various clocks to the system. 
+ +Required Properties: + +- compatible: Should be: + - "mediatek,mt8135-topckgen" + - "mediatek,mt8173-topckgen" +- #clock-cells: Must be 1 + +The topckgen controller uses the common clk binding from +Documentation/devicetree/bindings/clock/clock-bindings.txt +The available clocks are defined in dt-bindings/clock/mt*-clk.h. + +Example: + +topckgen: power-controller@10000000 { + compatible = "mediatek,mt8173-topckgen"; + reg = <0 0x10000000 0 0x1000>; + #clock-cells = <1>; +}; diff --git a/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt new file mode 100644 index 000000000000..2b7b3fa588d7 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/amlogic,meson8b-clkc.txt @@ -0,0 +1,40 @@ +* Amlogic Meson8b Clock and Reset Unit + +The Amlogic Meson8b clock controller generates and supplies clock to various +controllers within the SoC. + +Required Properties: + +- compatible: should be "amlogic,meson8b-clkc" +- reg: it must be composed by two tuples: + 0) physical base address of the xtal register and length of memory + mapped region. + 1) physical base address of the clock controller and length of memory + mapped region. + +- #clock-cells: should be 1. + +Each clock is assigned an identifier and client nodes can use this identifier +to specify the clock which they consume. All available clocks are defined as +preprocessor macros in the dt-bindings/clock/meson8b-clkc.h header and can be +used in device tree sources. + +Example: Clock controller node: + + clkc: clock-controller@c1104000 { + #clock-cells = <1>; + compatible = "amlogic,meson8b-clkc"; + reg = <0xc1108000 0x4>, <0xc1104000 0x460>; + }; + + +Example: UART controller node that consumes the clock generated by the clock + controller: + + uart_AO: serial@c81004c0 { + compatible = "amlogic,meson-uart"; + reg = <0xc81004c0 0x14>; + interrupts = <0 90 1>; + clocks = <&clkc CLKID_CLK81>; + status = "disabled"; + }; diff --git a/Documentation/devicetree/bindings/clock/bcm-cygnus-clock.txt b/Documentation/devicetree/bindings/clock/bcm-cygnus-clock.txt deleted file mode 100644 index 00d26edec8bc..000000000000 --- a/Documentation/devicetree/bindings/clock/bcm-cygnus-clock.txt +++ /dev/null @@ -1,34 +0,0 @@ -Broadcom Cygnus Clocks - -This binding uses the common clock binding: -Documentation/devicetree/bindings/clock/clock-bindings.txt - -Currently various "fixed" clocks are declared for peripheral drivers that use -the common clock framework to reference their core clocks. 
Proper support of -these clocks will be added later - -Device tree example: - - clocks { - #address-cells = <1>; - #size-cells = <1>; - ranges; - - osc: oscillator { - compatible = "fixed-clock"; - #clock-cells = <1>; - clock-frequency = <25000000>; - }; - - apb_clk: apb_clk { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <1000000000>; - }; - - periph_clk: periph_clk { - compatible = "fixed-clock"; - #clock-cells = <0>; - clock-frequency = <500000000>; - }; - }; diff --git a/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt new file mode 100644 index 000000000000..da8d9bb5751c --- /dev/null +++ b/Documentation/devicetree/bindings/clock/brcm,iproc-clocks.txt @@ -0,0 +1,132 @@ +Broadcom iProc Family Clocks + +This binding uses the common clock binding: + Documentation/devicetree/bindings/clock/clock-bindings.txt + +The iProc clock controller manages clocks that are common to the iProc family. +An SoC from the iProc family may have several PPLs, e.g., ARMPLL, GENPLL, +LCPLL0, MIPIPLL, and etc., all derived from an onboard crystal. Each PLL +comprises of several leaf clocks + +Required properties for a PLL and its leaf clocks: + +- compatible: + Should have a value of the form "brcm,<soc>-<pll>". For example, GENPLL on +Cygnus has a compatible string of "brcm,cygnus-genpll" + +- #clock-cells: + Have a value of <1> since there are more than 1 leaf clock of a given PLL + +- reg: + Define the base and range of the I/O address space that contain the iProc +clock control registers required for the PLL + +- clocks: + The input parent clock phandle for the PLL. For most iProc PLLs, this is an +onboard crystal with a fixed rate + +- clock-output-names: + An ordered list of strings defining the names of the clocks + +Example: + + osc: oscillator { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <25000000>; + }; + + genpll: genpll { + #clock-cells = <1>; + compatible = "brcm,cygnus-genpll"; + reg = <0x0301d000 0x2c>, <0x0301c020 0x4>; + clocks = <&osc>; + clock-output-names = "genpll", "axi21", "250mhz", "ihost_sys", + "enet_sw", "audio_125", "can"; + }; + +Required properties for ASIU clocks: + +ASIU clocks are a special case. These clocks are derived directly from the +reference clock of the onboard crystal + +- compatible: + Should have a value of the form "brcm,<soc>-asiu-clk". For example, ASIU +clocks for Cygnus have a compatible string of "brcm,cygnus-asiu-clk" + +- #clock-cells: + Have a value of <1> since there are more than 1 ASIU clocks + +- reg: + Define the base and range of the I/O address space that contain the iProc +clock control registers required for ASIU clocks + +- clocks: + The input parent clock phandle for the ASIU clock, i.e., the onboard +crystal + +- clock-output-names: + An ordered list of strings defining the names of the ASIU clocks + +Example: + + osc: oscillator { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <25000000>; + }; + + asiu_clks: asiu_clks { + #clock-cells = <1>; + compatible = "brcm,cygnus-asiu-clk"; + reg = <0x0301d048 0xc>, <0x180aa024 0x4>; + clocks = <&osc>; + clock-output-names = "keypad", "adc/touch", "pwm"; + }; + +Cygnus +------ +PLL and leaf clock compatible strings for Cygnus are: + "brcm,cygnus-armpll" + "brcm,cygnus-genpll" + "brcm,cygnus-lcpll0" + "brcm,cygnus-mipipll" + "brcm,cygnus-asiu-clk" + +The following table defines the set of PLL/clock index and ID for Cygnus. 
+These clock IDs are defined in: + "include/dt-bindings/clock/bcm-cygnus.h" + + Clock Source (Parent) Index ID + --- ----- ----- --------- + crystal N/A N/A N/A + + armpll crystal N/A N/A + + keypad crystal (ASIU) 0 BCM_CYGNUS_ASIU_KEYPAD_CLK + adc/tsc crystal (ASIU) 1 BCM_CYGNUS_ASIU_ADC_CLK + pwm crystal (ASIU) 2 BCM_CYGNUS_ASIU_PWM_CLK + + genpll crystal 0 BCM_CYGNUS_GENPLL + axi21 genpll 1 BCM_CYGNUS_GENPLL_AXI21_CLK + 250mhz genpll 2 BCM_CYGNUS_GENPLL_250MHZ_CLK + ihost_sys genpll 3 BCM_CYGNUS_GENPLL_IHOST_SYS_CLK + enet_sw genpll 4 BCM_CYGNUS_GENPLL_ENET_SW_CLK + audio_125 genpll 5 BCM_CYGNUS_GENPLL_AUDIO_125_CLK + can genpll 6 BCM_CYGNUS_GENPLL_CAN_CLK + + lcpll0 crystal 0 BCM_CYGNUS_LCPLL0 + pcie_phy lcpll0 1 BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK + ddr_phy lcpll0 2 BCM_CYGNUS_LCPLL0_DDR_PHY_CLK + sdio lcpll0 3 BCM_CYGNUS_LCPLL0_SDIO_CLK + usb_phy lcpll0 4 BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK + smart_card lcpll0 5 BCM_CYGNUS_LCPLL0_SMART_CARD_CLK + ch5_unused lcpll0 6 BCM_CYGNUS_LCPLL0_CH5_UNUSED + + mipipll crystal 0 BCM_CYGNUS_MIPIPLL + ch0_unused mipipll 1 BCM_CYGNUS_MIPIPLL_CH0_UNUSED + ch1_lcd mipipll 2 BCM_CYGNUS_MIPIPLL_CH1_LCD + ch2_v3d mipipll 3 BCM_CYGNUS_MIPIPLL_CH2_V3D + ch3_unused mipipll 4 BCM_CYGNUS_MIPIPLL_CH3_UNUSED + ch4_unused mipipll 5 BCM_CYGNUS_MIPIPLL_CH4_UNUSED + ch5_unused mipipll 6 BCM_CYGNUS_MIPIPLL_CH5_UNUSED diff --git a/Documentation/devicetree/bindings/clock/clock-bindings.txt b/Documentation/devicetree/bindings/clock/clock-bindings.txt index 06fc6d541c89..2ec489eebe72 100644 --- a/Documentation/devicetree/bindings/clock/clock-bindings.txt +++ b/Documentation/devicetree/bindings/clock/clock-bindings.txt @@ -138,9 +138,10 @@ Some platforms may require initial configuration of default parent clocks and clock frequencies. Such a configuration can be specified in a device tree node through assigned-clocks, assigned-clock-parents and assigned-clock-rates properties. The assigned-clock-parents property should contain a list of parent -clocks in form of phandle and clock specifier pairs, the assigned-clock-parents -property the list of assigned clock frequency values - corresponding to clocks -listed in the assigned-clocks property. +clocks in the form of a phandle and clock specifier pair and the +assigned-clock-rates property should contain a list of frequencies in Hz. Both +these properties should correspond to the clocks listed in the assigned-clocks +property. To skip setting parent or rate of a clock its corresponding entry should be set to 0, or can be omitted if it is not followed by any non-zero entry. diff --git a/Documentation/devicetree/bindings/clock/csr,atlas7-car.txt b/Documentation/devicetree/bindings/clock/csr,atlas7-car.txt new file mode 100644 index 000000000000..54d6d1358339 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/csr,atlas7-car.txt @@ -0,0 +1,55 @@ +* Clock and reset bindings for CSR atlas7 + +Required properties: +- compatible: Should be "sirf,atlas7-car" +- reg: Address and length of the register set +- #clock-cells: Should be <1> +- #reset-cells: Should be <1> + +The clock consumer should specify the desired clock by having the clock +ID in its "clocks" phandle cell. +The ID list atlas7_clks defined in drivers/clk/sirf/clk-atlas7.c + +The reset consumer should specify the desired reset by having the reset +ID in its "reset" phandle cell. 
+The ID list atlas7_reset_unit defined in drivers/clk/sirf/clk-atlas7.c + +Examples: Clock and reset controller node: + +car: clock-controller@18620000 { + compatible = "sirf,atlas7-car"; + reg = <0x18620000 0x1000>; + #clock-cells = <1>; + #reset-cells = <1>; +}; + +Examples: Consumers using clock or reset: + +timer@10dc0000 { + compatible = "sirf,macro-tick"; + reg = <0x10dc0000 0x1000>; + clocks = <&car 54>; + interrupts = <0 0 0>, + <0 1 0>, + <0 2 0>, + <0 49 0>, + <0 50 0>, + <0 51 0>; +}; + +uart1: uart@18020000 { + cell-index = <1>; + compatible = "sirf,macro-uart"; + reg = <0x18020000 0x1000>; + clocks = <&clks 95>; + interrupts = <0 18 0>; + fifosize = <32>; +}; + +vpp@13110000 { + compatible = "sirf,prima2-vpp"; + reg = <0x13110000 0x10000>; + interrupts = <0 31 0>; + clocks = <&car 85>; + resets = <&car 29>; +}; diff --git a/Documentation/devicetree/bindings/clock/emev2-clock.txt b/Documentation/devicetree/bindings/clock/emev2-clock.txt index 60bbb1a8c69a..268ca615459e 100644 --- a/Documentation/devicetree/bindings/clock/emev2-clock.txt +++ b/Documentation/devicetree/bindings/clock/emev2-clock.txt @@ -52,7 +52,7 @@ usia_u0_sclk: usia_u0_sclk { Example of consumer: -uart@e1020000 { +serial@e1020000 { compatible = "renesas,em-uart"; reg = <0xe1020000 0x38>; interrupts = <0 8 0>; diff --git a/Documentation/devicetree/bindings/clock/keystone-pll.txt b/Documentation/devicetree/bindings/clock/keystone-pll.txt index 225990f79b7c..47570d207215 100644 --- a/Documentation/devicetree/bindings/clock/keystone-pll.txt +++ b/Documentation/devicetree/bindings/clock/keystone-pll.txt @@ -15,8 +15,8 @@ Required properties: - compatible : shall be "ti,keystone,main-pll-clock" or "ti,keystone,pll-clock" - clocks : parent clock phandle - reg - pll control0 and pll multipler registers -- reg-names : control and multiplier. The multiplier is applicable only for - main pll clock +- reg-names : control, multiplier and post-divider. The multiplier and + post-divider registers are applicable only for main pll clock - fixed-postdiv : fixed post divider value. If absent, use clkod register bits for postdiv @@ -25,8 +25,8 @@ Example: #clock-cells = <0>; compatible = "ti,keystone,main-pll-clock"; clocks = <&refclksys>; - reg = <0x02620350 4>, <0x02310110 4>; - reg-names = "control", "multiplier"; + reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>; + reg-names = "control", "multiplier", "post-divider"; fixed-postdiv = <2>; }; diff --git a/Documentation/devicetree/bindings/clock/lpc1850-ccu.txt b/Documentation/devicetree/bindings/clock/lpc1850-ccu.txt new file mode 100644 index 000000000000..fa97c12014ac --- /dev/null +++ b/Documentation/devicetree/bindings/clock/lpc1850-ccu.txt @@ -0,0 +1,77 @@ +* NXP LPC1850 Clock Control Unit (CCU) + +Each CGU base clock has several clock branches which can be turned on +or off independently by the Clock Control Units CCU1 or CCU2. The +branch clocks are distributed between CCU1 and CCU2. + + - Above text taken from NXP LPC1850 User Manual. + +This binding uses the common clock binding: + Documentation/devicetree/bindings/clock/clock-bindings.txt + +Required properties: +- compatible: + Should be "nxp,lpc1850-ccu" +- reg: + Shall define the base and range of the address space + containing clock control registers +- #clock-cells: + Shall have value <1>. The permitted clock-specifier values + are the branch clock names defined in table below. +- clocks: + Shall contain a list of phandles for the base clocks routed + from the CGU to the specific CCU. 
See mapping of base clocks + and CCU in table below. +- clock-names: + Shall contain a list of names for the base clock routed + from the CGU to the specific CCU. Valid CCU clock names: + "base_usb0_clk", "base_periph_clk", "base_usb1_clk", + "base_cpu_clk", "base_spifi_clk", "base_spi_clk", + "base_apb1_clk", "base_apb3_clk", "base_adchs_clk", + "base_sdio_clk", "base_ssp0_clk", "base_ssp1_clk", + "base_uart0_clk", "base_uart1_clk", "base_uart2_clk", + "base_uart3_clk", "base_audio_clk" + +Which branch clocks that are available on the CCU depends on the +specific LPC part. Check the user manual for your specific part. + +A list of CCU clocks can be found in dt-bindings/clock/lpc18xx-ccu.h. + +Example board file: + +soc { + ccu1: clock-controller@40051000 { + compatible = "nxp,lpc1850-ccu"; + reg = <0x40051000 0x1000>; + #clock-cells = <1>; + clocks = <&cgu BASE_APB3_CLK>, <&cgu BASE_APB1_CLK>, + <&cgu BASE_SPIFI_CLK>, <&cgu BASE_CPU_CLK>, + <&cgu BASE_PERIPH_CLK>, <&cgu BASE_USB0_CLK>, + <&cgu BASE_USB1_CLK>, <&cgu BASE_SPI_CLK>; + clock-names = "base_apb3_clk", "base_apb1_clk", + "base_spifi_clk", "base_cpu_clk", + "base_periph_clk", "base_usb0_clk", + "base_usb1_clk", "base_spi_clk"; + }; + + ccu2: clock-controller@40052000 { + compatible = "nxp,lpc1850-ccu"; + reg = <0x40052000 0x1000>; + #clock-cells = <1>; + clocks = <&cgu BASE_AUDIO_CLK>, <&cgu BASE_UART3_CLK>, + <&cgu BASE_UART2_CLK>, <&cgu BASE_UART1_CLK>, + <&cgu BASE_UART0_CLK>, <&cgu BASE_SSP1_CLK>, + <&cgu BASE_SSP0_CLK>, <&cgu BASE_SDIO_CLK>; + clock-names = "base_audio_clk", "base_uart3_clk", + "base_uart2_clk", "base_uart1_clk", + "base_uart0_clk", "base_ssp1_clk", + "base_ssp0_clk", "base_sdio_clk"; + }; + + /* A user of CCU brach clocks */ + uart1: serial@40082000 { + ... + clocks = <&ccu2 CLK_APB0_UART1>, <&ccu1 CLK_CPU_UART1>; + ... + }; +}; diff --git a/Documentation/devicetree/bindings/clock/lpc1850-cgu.txt b/Documentation/devicetree/bindings/clock/lpc1850-cgu.txt new file mode 100644 index 000000000000..2cc32a9a945a --- /dev/null +++ b/Documentation/devicetree/bindings/clock/lpc1850-cgu.txt @@ -0,0 +1,131 @@ +* NXP LPC1850 Clock Generation Unit (CGU) + +The CGU generates multiple independent clocks for the core and the +peripheral blocks of the LPC18xx. Each independent clock is called +a base clock and itself is one of the inputs to the two Clock +Control Units (CCUs) which control the branch clocks to the +individual peripherals. + +The CGU selects the inputs to the clock generators from multiple +clock sources, controls the clock generation, and routes the outputs +of the clock generators through the clock source bus to the output +stages. Each output stage provides an independent clock source and +corresponds to one of the base clocks for the LPC18xx. + + - Above text taken from NXP LPC1850 User Manual. + + +This binding uses the common clock binding: + Documentation/devicetree/bindings/clock/clock-bindings.txt + +Required properties: +- compatible: + Should be "nxp,lpc1850-cgu" +- reg: + Shall define the base and range of the address space + containing clock control registers +- #clock-cells: + Shall have value <1>. The permitted clock-specifier values + are the base clock numbers defined below. +- clocks: + Shall contain a list of phandles for the external input + sources to the CGU. The list shall be in the following + order: xtal, 32khz, enet_rx_clk, enet_tx_clk, gp_clkin. +- clock-indices: + Shall be an ordered list of numbers defining the base clock + number provided by the CGU. 
+- clock-output-names: + Shall be an ordered list of strings defining the names of + the clocks provided by the CGU. + +Which base clocks that are available on the CGU depends on the +specific LPC part. Base clocks are numbered from 0 to 27. + +Number: Name: Description: + 0 BASE_SAFE_CLK Base safe clock (always on) for WWDT + 1 BASE_USB0_CLK Base clock for USB0 + 2 BASE_PERIPH_CLK Base clock for Cortex-M0SUB subsystem, + SPI, and SGPIO + 3 BASE_USB1_CLK Base clock for USB1 + 4 BASE_CPU_CLK System base clock for ARM Cortex-M core + and APB peripheral blocks #0 and #2 + 5 BASE_SPIFI_CLK Base clock for SPIFI + 6 BASE_SPI_CLK Base clock for SPI + 7 BASE_PHY_RX_CLK Base clock for Ethernet PHY Receive clock + 8 BASE_PHY_TX_CLK Base clock for Ethernet PHY Transmit clock + 9 BASE_APB1_CLK Base clock for APB peripheral block # 1 +10 BASE_APB3_CLK Base clock for APB peripheral block # 3 +11 BASE_LCD_CLK Base clock for LCD +12 BASE_ADCHS_CLK Base clock for ADCHS +13 BASE_SDIO_CLK Base clock for SD/MMC +14 BASE_SSP0_CLK Base clock for SSP0 +15 BASE_SSP1_CLK Base clock for SSP1 +16 BASE_UART0_CLK Base clock for UART0 +17 BASE_UART1_CLK Base clock for UART1 +18 BASE_UART2_CLK Base clock for UART2 +19 BASE_UART3_CLK Base clock for UART3 +20 BASE_OUT_CLK Base clock for CLKOUT pin +24-21 - Reserved +25 BASE_AUDIO_CLK Base clock for audio system (I2S) +26 BASE_CGU_OUT0_CLK Base clock for CGU_OUT0 clock output +27 BASE_CGU_OUT1_CLK Base clock for CGU_OUT1 clock output + +BASE_PERIPH_CLK and BASE_SPI_CLK is only available on LPC43xx. +BASE_ADCHS_CLK is only available on LPC4370. + + +Example board file: + +/ { + clocks { + xtal: xtal { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <12000000>; + }; + + xtal32: xtal32 { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <32768>; + }; + + enet_rx_clk: enet_rx_clk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <0>; + clock-output-names = "enet_rx_clk"; + }; + + enet_tx_clk: enet_tx_clk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <0>; + clock-output-names = "enet_tx_clk"; + }; + + gp_clkin: gp_clkin { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <0>; + clock-output-names = "gp_clkin"; + }; + }; + + soc { + cgu: clock-controller@40050000 { + compatible = "nxp,lpc1850-cgu"; + reg = <0x40050000 0x1000>; + #clock-cells = <1>; + clocks = <&xtal>, <&creg_clk 1>, <&enet_rx_clk>, <&enet_tx_clk>, <&gp_clkin>; + }; + + /* A CGU and CCU clock consumer */ + lcdc: lcdc@40008000 { + ... + clocks = <&cgu BASE_LCD_CLK>, <&ccu1 CLK_CPU_LCD>; + clock-names = "clcdclk", "apb_pclk"; + ... + }; + }; +}; diff --git a/Documentation/devicetree/bindings/clock/marvell,pxa1928.txt b/Documentation/devicetree/bindings/clock/marvell,pxa1928.txt new file mode 100644 index 000000000000..809c5a2d8d9d --- /dev/null +++ b/Documentation/devicetree/bindings/clock/marvell,pxa1928.txt @@ -0,0 +1,21 @@ +* Marvell PXA1928 Clock Controllers + +The PXA1928 clock subsystem generates and supplies clock to various +controllers within the PXA1928 SoC. The PXA1928 contains 3 clock controller +blocks called APMU, MPMU, and APBC roughly corresponding to internal buses. + +Required Properties: + +- compatible: should be one of the following. 
+ - "marvell,pxa1928-apmu" - APMU controller compatible + - "marvell,pxa1928-mpmu" - MPMU controller compatible + - "marvell,pxa1928-apbc" - APBC controller compatible +- reg: physical base address of the clock controller and length of memory mapped + region. +- #clock-cells: should be 1. +- #reset-cells: should be 1. + +Each clock is assigned an identifier and client nodes use the clock controller +phandle and this identifier to specify the clock which they consume. + +All these identifiers can be found in <dt-bindings/clock/marvell,pxa1928.h>. diff --git a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt index 31c7c0c1ce8f..660e64912cce 100644 --- a/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt +++ b/Documentation/devicetree/bindings/clock/mvebu-gated-clock.txt @@ -19,6 +19,7 @@ ID Clock Peripheral 9 pex1 PCIe Cntrl 1 15 sata0 SATA Host 0 17 sdio SDHCI Host +23 crypto CESA (crypto engine) 25 tdm Time Division Mplx 28 ddr DDR Cntrl 30 sata1 SATA Host 0 diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt b/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt index c6620bc96703..7f02fb4ca4ad 100644 --- a/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt +++ b/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt @@ -20,15 +20,38 @@ Required properties : - #reset-cells : Should be 1. In clock consumers, this cell represents the bit number in the CAR's array of CLK_RST_CONTROLLER_RST_DEVICES_* registers. +- nvidia,external-memory-controller : phandle of the EMC driver. + +The node should contain a "emc-timings" subnode for each supported RAM type (see +field RAM_CODE in register PMC_STRAPPING_OPT_A). + +Required properties for "emc-timings" nodes : +- nvidia,ram-code : Should contain the value of RAM_CODE this timing set + is used for. + +Each "emc-timings" node should contain a "timing" subnode for every supported +EMC clock rate. + +Required properties for "timing" nodes : +- clock-frequency : Should contain the memory clock rate to which this timing +relates. +- nvidia,parent-clock-frequency : Should contain the rate at which the current +parent of the EMC clock should be running at this timing. +- clocks : Must contain an entry for each entry in clock-names. + See ../clocks/clock-bindings.txt for details. +- clock-names : Must include the following entries: + - emc-parent : the clock that should be the parent of the EMC clock at this +timing. 
Example SoC include file: / { - tegra_car: clock { + tegra_car: clock@60006000 { compatible = "nvidia,tegra124-car"; reg = <0x60006000 0x1000>; #clock-cells = <1>; #reset-cells = <1>; + nvidia,external-memory-controller = <&emc>; }; usb@c5004000 { @@ -62,4 +85,23 @@ Example board file: &tegra_car { clocks = <&clk_32k> <&osc>; }; + + clock@60006000 { + emc-timings-3 { + nvidia,ram-code = <3>; + + timing-12750000 { + clock-frequency = <12750000>; + nvidia,parent-clock-frequency = <408000000>; + clocks = <&tegra_car TEGRA124_CLK_PLL_P>; + clock-names = "emc-parent"; + }; + timing-20400000 { + clock-frequency = <20400000>; + nvidia,parent-clock-frequency = <408000000>; + clocks = <&tegra_car TEGRA124_CLK_PLL_P>; + clock-names = "emc-parent"; + }; + }; + }; }; diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt index 054f65f9319c..5ddb68418655 100644 --- a/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt +++ b/Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt @@ -10,9 +10,11 @@ Required Properties: - "renesas,r8a73a4-div6-clock" for R8A73A4 (R-Mobile APE6) DIV6 clocks - "renesas,r8a7740-div6-clock" for R8A7740 (R-Mobile A1) DIV6 clocks - "renesas,r8a7790-div6-clock" for R8A7790 (R-Car H2) DIV6 clocks - - "renesas,r8a7791-div6-clock" for R8A7791 (R-Car M2) DIV6 clocks + - "renesas,r8a7791-div6-clock" for R8A7791 (R-Car M2-W) DIV6 clocks + - "renesas,r8a7793-div6-clock" for R8A7793 (R-Car M2-N) DIV6 clocks + - "renesas,r8a7794-div6-clock" for R8A7794 (R-Car E2) DIV6 clocks - "renesas,sh73a0-div6-clock" for SH73A0 (SH-Mobile AG5) DIV6 clocks - - "renesas,cpg-div6-clock" for generic DIV6 clocks + and "renesas,cpg-div6-clock" as a fallback. - reg: Base address and length of the memory resource used by the DIV6 clock - clocks: Reference to the parent clock(s); either one, four, or eight clocks must be specified. For clocks with multiple parents, invalid diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt index 0a80fa70ca26..16ed18155160 100644 --- a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt +++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt @@ -13,12 +13,14 @@ Required Properties: - "renesas,r7s72100-mstp-clocks" for R7S72100 (RZ) MSTP gate clocks - "renesas,r8a73a4-mstp-clocks" for R8A73A4 (R-Mobile APE6) MSTP gate clocks - "renesas,r8a7740-mstp-clocks" for R8A7740 (R-Mobile A1) MSTP gate clocks + - "renesas,r8a7778-mstp-clocks" for R8A7778 (R-Car M1) MSTP gate clocks - "renesas,r8a7779-mstp-clocks" for R8A7779 (R-Car H1) MSTP gate clocks - "renesas,r8a7790-mstp-clocks" for R8A7790 (R-Car H2) MSTP gate clocks - - "renesas,r8a7791-mstp-clocks" for R8A7791 (R-Car M2) MSTP gate clocks + - "renesas,r8a7791-mstp-clocks" for R8A7791 (R-Car M2-W) MSTP gate clocks + - "renesas,r8a7793-mstp-clocks" for R8A7793 (R-Car M2-N) MSTP gate clocks - "renesas,r8a7794-mstp-clocks" for R8A7794 (R-Car E2) MSTP gate clocks - "renesas,sh73a0-mstp-clocks" for SH73A0 (SH-MobileAG5) MSTP gate clocks - - "renesas,cpg-mstp-clock" for generic MSTP gate clocks + and "renesas,cpg-mstp-clocks" as a fallback. - reg: Base address and length of the I/O mapped registers used by the MSTP clocks. The first register is the clock control register and is mandatory. 
The second register is the clock status register and is optional when not diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt index b02944fba9de..56f111bd3e45 100644 --- a/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt +++ b/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt @@ -10,7 +10,7 @@ Required Properties: - "renesas,r8a7791-cpg-clocks" for the r8a7791 CPG - "renesas,r8a7793-cpg-clocks" for the r8a7793 CPG - "renesas,r8a7794-cpg-clocks" for the r8a7794 CPG - - "renesas,rcar-gen2-cpg-clocks" for the generic R-Car Gen2 CPG + and "renesas,rcar-gen2-cpg-clocks" as a fallback. - reg: Base address and length of the memory resource used by the CPG diff --git a/Documentation/devicetree/bindings/clock/renesas,rz-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,rz-cpg-clocks.txt index 98a257492522..b0f7ddb8cdb1 100644 --- a/Documentation/devicetree/bindings/clock/renesas,rz-cpg-clocks.txt +++ b/Documentation/devicetree/bindings/clock/renesas,rz-cpg-clocks.txt @@ -7,7 +7,7 @@ Required Properties: - compatible: Must be one of - "renesas,r7s72100-cpg-clocks" for the r7s72100 CPG - - "renesas,rz-cpg-clocks" for the generic RZ CPG + and "renesas,rz-cpg-clocks" as a fallback. - reg: Base address and length of the memory resource used by the CPG - clocks: References to possible parent clocks. Order must match clock modes in the datasheet. For the r7s72100, this is extal, usb_x1. diff --git a/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt b/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt new file mode 100644 index 000000000000..fee3205cdff9 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/st,stm32-rcc.txt @@ -0,0 +1,65 @@ +STMicroelectronics STM32 Reset and Clock Controller +=================================================== + +The RCC IP is both a reset and a clock controller. This documentation only +describes the clock part. + +Please also refer to clock-bindings.txt in this directory for common clock +controller binding usage. + +Required properties: +- compatible: Should be "st,stm32f42xx-rcc" +- reg: should be register base and length as documented in the + datasheet +- #clock-cells: 2, device nodes should specify the clock in their "clocks" + property, containing a phandle to the clock device node, an index selecting + between gated clocks and other clocks and an index specifying the clock to + use. + +Example: + + rcc: rcc@40023800 { + #clock-cells = <2> + compatible = "st,stm32f42xx-rcc", "st,stm32-rcc"; + reg = <0x40023800 0x400>; + }; + +Specifying gated clocks +======================= + +The primary index must be set to 0. + +The secondary index is the bit number within the RCC register bank, starting +from the first RCC clock enable register (RCC_AHB1ENR, address offset 0x30). + +It is calculated as: index = register_offset / 4 * 32 + bit_offset. +Where bit_offset is the bit offset within the register (LSB is 0, MSB is 31). + +Example: + + /* Gated clock, AHB1 bit 0 (GPIOA) */ + ... { + clocks = <&rcc 0 0> + }; + + /* Gated clock, AHB2 bit 4 (CRYP) */ + ... { + clocks = <&rcc 0 36> + }; + +Specifying other clocks +======================= + +The primary index must be set to 1. + +The secondary index is bound with the following magic numbers: + + 0 SYSTICK + 1 FCLK + +Example: + + /* Misc clock, FCLK */ + ... 
{ + clocks = <&rcc 1 1> + }; diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt index 4fa11af3d378..8a47b77abfca 100644 --- a/Documentation/devicetree/bindings/clock/sunxi.txt +++ b/Documentation/devicetree/bindings/clock/sunxi.txt @@ -67,6 +67,7 @@ Required properties: "allwinner,sun4i-a10-usb-clk" - for usb gates + resets on A10 / A20 "allwinner,sun5i-a13-usb-clk" - for usb gates + resets on A13 "allwinner,sun6i-a31-usb-clk" - for usb gates + resets on A31 + "allwinner,sun8i-a23-usb-clk" - for usb gates + resets on A23 "allwinner,sun9i-a80-usb-mod-clk" - for usb gates + resets on A80 "allwinner,sun9i-a80-usb-phy-clk" - for usb phy gates + resets on A80 diff --git a/Documentation/devicetree/bindings/clock/ti,cdce925.txt b/Documentation/devicetree/bindings/clock/ti,cdce925.txt new file mode 100644 index 000000000000..4c7669ad681b --- /dev/null +++ b/Documentation/devicetree/bindings/clock/ti,cdce925.txt @@ -0,0 +1,42 @@ +Binding for TO CDCE925 programmable I2C clock synthesizers. + +Reference +This binding uses the common clock binding[1]. + +[1] Documentation/devicetree/bindings/clock/clock-bindings.txt +[2] http://www.ti.com/product/cdce925 + +The driver provides clock sources for each output Y1 through Y5. + +Required properties: + - compatible: Shall be "ti,cdce925" + - reg: I2C device address. + - clocks: Points to a fixed parent clock that provides the input frequency. + - #clock-cells: From common clock bindings: Shall be 1. + +Optional properties: + - xtal-load-pf: Crystal load-capacitor value to fine-tune performance on a + board, or to compensate for external influences. + +For both PLL1 and PLL2 an optional child node can be used to specify spread +spectrum clocking parameters for a board. + - spread-spectrum: SSC mode as defined in the data sheet. + - spread-spectrum-center: Use "centered" mode instead of "max" mode. When + present, the clock runs at the requested frequency on average. Otherwise + the requested frequency is the maximum value of the SCC range. + + +Example: + + clockgen: cdce925pw@64 { + compatible = "cdce925"; + reg = <0x64>; + clocks = <&xtal_27Mhz>; + #clock-cells = <1>; + xtal-load-pf = <5>; + /* PLL options to get SSC 1% centered */ + PLL2 { + spread-spectrum = <4>; + spread-spectrum-center; + }; + }; diff --git a/Documentation/devicetree/bindings/leds/leds-aat1290.txt b/Documentation/devicetree/bindings/leds/leds-aat1290.txt new file mode 100644 index 000000000000..c05ed91a4e42 --- /dev/null +++ b/Documentation/devicetree/bindings/leds/leds-aat1290.txt @@ -0,0 +1,73 @@ +* Skyworks Solutions, Inc. AAT1290 Current Regulator for Flash LEDs + +The device is controlled through two pins: FL_EN and EN_SET. The pins when, +asserted high, enable flash strobe and movie mode (max 1/2 of flash current) +respectively. In order to add a capability of selecting the strobe signal source +(e.g. CPU or camera sensor) there is an additional switch required, independent +of the flash chip. The switch is controlled with pin control. + +Required properties: + +- compatible : Must be "skyworks,aat1290". +- flen-gpios : Must be device tree identifier of the flash device FL_EN pin. +- enset-gpios : Must be device tree identifier of the flash device EN_SET pin. + +Optional properties: +- pinctrl-names : Must contain entries: "default", "host", "isp". Entries + "default" and "host" must refer to the same pin configuration + node, which sets the host as a strobe signal provider. 
Entry + "isp" must refer to the pin configuration node, which sets the + ISP as a strobe signal provider. + +A discrete LED element connected to the device must be represented by a child +node - see Documentation/devicetree/bindings/leds/common.txt. + +Required properties of the LED child node: +- led-max-microamp : see Documentation/devicetree/bindings/leds/common.txt +- flash-max-microamp : see Documentation/devicetree/bindings/leds/common.txt + Maximum flash LED supply current can be calculated using + following formula: I = 1A * 162kohm / Rset. +- flash-timeout-us : see Documentation/devicetree/bindings/leds/common.txt + Maximum flash timeout can be calculated using following + formula: T = 8.82 * 10^9 * Ct. + +Optional properties of the LED child node: +- label : see Documentation/devicetree/bindings/leds/common.txt + +Example (by Ct = 220nF, Rset = 160kohm and exynos4412-trats2 board with +a switch that allows for routing strobe signal either from the host or from +the camera sensor): + +#include "exynos4412.dtsi" + +aat1290 { + compatible = "skyworks,aat1290"; + flen-gpios = <&gpj1 1 GPIO_ACTIVE_HIGH>; + enset-gpios = <&gpj1 2 GPIO_ACTIVE_HIGH>; + + pinctrl-names = "default", "host", "isp"; + pinctrl-0 = <&camera_flash_host>; + pinctrl-1 = <&camera_flash_host>; + pinctrl-2 = <&camera_flash_isp>; + + camera_flash: flash-led { + label = "aat1290-flash"; + led-max-microamp = <520833>; + flash-max-microamp = <1012500>; + flash-timeout-us = <1940000>; + }; +}; + +&pinctrl_0 { + camera_flash_host: camera-flash-host { + samsung,pins = "gpj1-0"; + samsung,pin-function = <1>; + samsung,pin-val = <0>; + }; + + camera_flash_isp: camera-flash-isp { + samsung,pins = "gpj1-0"; + samsung,pin-function = <1>; + samsung,pin-val = <1>; + }; +}; diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6328.txt b/Documentation/devicetree/bindings/leds/leds-bcm6328.txt new file mode 100644 index 000000000000..f9e36adc0ebf --- /dev/null +++ b/Documentation/devicetree/bindings/leds/leds-bcm6328.txt @@ -0,0 +1,309 @@ +LEDs connected to Broadcom BCM6328 controller + +This controller is present on BCM6318, BCM6328, BCM6362 and BCM63268. +In these SoCs it's possible to control LEDs both as GPIOs or by hardware. +However, on some devices there are Serial LEDs (LEDs connected to a 74x164 +controller), which can either be controlled by software (exporting the 74x164 +as spi-gpio. See Documentation/devicetree/bindings/gpio/gpio-74x164.txt), or +by hardware using this driver. +Some of these Serial LEDs are hardware controlled (e.g. ethernet LEDs) and +exporting the 74x164 as spi-gpio prevents those LEDs to be hardware +controlled, so the only chance to keep them working is by using this driver. + +BCM6328 LED controller has a HWDIS register, which controls whether a LED +should be controlled by a hardware signal instead of the MODE register value, +with 0 meaning hardware control enabled and 1 hardware control disabled. This +is usually 1:1 for hardware to LED signals, but through the activity/link +registers you have some limited control over rerouting the LEDs (as +explained later in brcm,link-signal-sources). Even if a LED is hardware +controlled you are still able to make it blink or light it up if it isn't, +but you can't turn it off if the hardware decides to light it up. For this +reason, hardware controlled LEDs aren't registered as LED class devices. + +Required properties: + - compatible : should be "brcm,bcm6328-leds". + - #address-cells : must be 1. + - #size-cells : must be 0. 
+ - reg : BCM6328 LED controller address and size. + +Optional properties: + - brcm,serial-leds : Boolean, enables Serial LEDs. + Default : false + +Each LED is represented as a sub-node of the brcm,bcm6328-leds device. + +LED sub-node required properties: + - reg : LED pin number (only LEDs 0 to 23 are valid). + +LED sub-node optional properties: + a) Optional properties for sub-nodes related to software controlled LEDs: + - label : see Documentation/devicetree/bindings/leds/common.txt + - active-low : Boolean, makes LED active low. + Default : false + - default-state : see + Documentation/devicetree/bindings/leds/leds-gpio.txt + - linux,default-trigger : see + Documentation/devicetree/bindings/leds/common.txt + + b) Optional properties for sub-nodes related to hardware controlled LEDs: + - brcm,hardware-controlled : Boolean, makes this LED hardware controlled. + Default : false + - brcm,link-signal-sources : An array of hardware link + signal sources. Up to four link hardware signals can get muxed into + these LEDs. Only valid for LEDs 0 to 7, where LED signals 0 to 3 may + be muxed to LEDs 0 to 3, and signals 4 to 7 may be muxed to LEDs + 4 to 7. A signal can be muxed to more than one LED, and one LED can + have more than one source signal. + - brcm,activity-signal-sources : An array of hardware activity + signal sources. Up to four activity hardware signals can get muxed into + these LEDs. Only valid for LEDs 0 to 7, where LED signals 0 to 3 may + be muxed to LEDs 0 to 3, and signals 4 to 7 may be muxed to LEDs + 4 to 7. A signal can be muxed to more than one LED, and one LED can + have more than one source signal. + +Examples: +Scenario 1 : BCM6328 with 4 EPHY LEDs + leds0: led-controller@10000800 { + compatible = "brcm,bcm6328-leds"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x10000800 0x24>; + + alarm_red@2 { + reg = <2>; + active-low; + label = "red:alarm"; + }; + inet_green@3 { + reg = <3>; + active-low; + label = "green:inet"; + }; + power_green@4 { + reg = <4>; + active-low; + label = "green:power"; + default-state = "on"; + }; + ephy0_spd@17 { + reg = <17>; + brcm,hardware-controlled; + }; + ephy1_spd@18 { + reg = <18>; + brcm,hardware-controlled; + }; + ephy2_spd@19 { + reg = <19>; + brcm,hardware-controlled; + }; + ephy3_spd@20 { + reg = <20>; + brcm,hardware-controlled; + }; + }; + +Scenario 2 : BCM63268 with Serial/GPHY0 LEDs + leds0: led-controller@10001900 { + compatible = "brcm,bcm6328-leds"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x10001900 0x24>; + brcm,serial-leds; + + gphy0_spd0@0 { + reg = <0>; + brcm,hardware-controlled; + brcm,link-signal-sources = <0>; + }; + gphy0_spd1@1 { + reg = <1>; + brcm,hardware-controlled; + brcm,link-signal-sources = <1>; + }; + inet_red@2 { + reg = <2>; + active-low; + label = "red:inet"; + }; + dsl_green@3 { + reg = <3>; + active-low; + label = "green:dsl"; + }; + usb_green@4 { + reg = <4>; + active-low; + label = "green:usb"; + }; + wps_green@7 { + reg = <7>; + active-low; + label = "green:wps"; + }; + inet_green@8 { + reg = <8>; + active-low; + label = "green:inet"; + }; + ephy0_act@9 { + reg = <9>; + brcm,hardware-controlled; + }; + ephy1_act@10 { + reg = <10>; + brcm,hardware-controlled; + }; + ephy2_act@11 { + reg = <11>; + brcm,hardware-controlled; + }; + gphy0_act@12 { + reg = <12>; + brcm,hardware-controlled; + }; + ephy0_spd@13 { + reg = <13>; + brcm,hardware-controlled; + }; + ephy1_spd@14 { + reg = <14>; + brcm,hardware-controlled; + }; + ephy2_spd@15 { + reg = <15>; + brcm,hardware-controlled; + 
}; + power_green@20 { + reg = <20>; + active-low; + label = "green:power"; + default-state = "on"; + }; + }; + +Scenario 3 : BCM6362 with 1 LED for each EPHY + leds0: led-controller@10001900 { + compatible = "brcm,bcm6328-leds"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x10001900 0x24>; + + usb@0 { + reg = <0>; + brcm,hardware-controlled; + brcm,link-signal-sources = <0>; + brcm,activity-signal-sources = <0>; + /* USB link/activity routed to USB LED */ + }; + inet@1 { + reg = <1>; + brcm,hardware-controlled; + brcm,activity-signal-sources = <1>; + /* INET activity routed to INET LED */ + }; + ephy0@4 { + reg = <4>; + brcm,hardware-controlled; + brcm,link-signal-sources = <4>; + /* EPHY0 link routed to EPHY0 LED */ + }; + ephy1@5 { + reg = <5>; + brcm,hardware-controlled; + brcm,link-signal-sources = <5>; + /* EPHY1 link routed to EPHY1 LED */ + }; + ephy2@6 { + reg = <6>; + brcm,hardware-controlled; + brcm,link-signal-sources = <6>; + /* EPHY2 link routed to EPHY2 LED */ + }; + ephy3@7 { + reg = <7>; + brcm,hardware-controlled; + brcm,link-signal-sources = <7>; + /* EPHY3 link routed to EPHY3 LED */ + }; + power_green@20 { + reg = <20>; + active-low; + label = "green:power"; + default-state = "on"; + }; + }; + +Scenario 4 : BCM6362 with 1 LED for all EPHYs + leds0: led-controller@10001900 { + compatible = "brcm,bcm6328-leds"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x10001900 0x24>; + + usb@0 { + reg = <0>; + brcm,hardware-controlled; + brcm,link-signal-sources = <0 1>; + brcm,activity-signal-sources = <0 1>; + /* USB/INET link/activity routed to USB LED */ + }; + ephy@4 { + reg = <4>; + brcm,hardware-controlled; + brcm,link-signal-sources = <4 5 6 7>; + /* EPHY0/1/2/3 link routed to EPHY0 LED */ + }; + power_green@20 { + reg = <20>; + active-low; + label = "green:power"; + default-state = "on"; + }; + }; + +Scenario 5 : BCM6362 with EPHY LEDs swapped + leds0: led-controller@10001900 { + compatible = "brcm,bcm6328-leds"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x10001900 0x24>; + + usb@0 { + reg = <0>; + brcm,hardware-controlled; + brcm,link-signal-sources = <0>; + brcm,activity-signal-sources = <0 1>; + /* USB link/act and INET act routed to USB LED */ + }; + ephy0@4 { + reg = <4>; + brcm,hardware-controlled; + brcm,link-signal-sources = <7>; + /* EPHY3 link routed to EPHY0 LED */ + }; + ephy1@5 { + reg = <5>; + brcm,hardware-controlled; + brcm,link-signal-sources = <6>; + /* EPHY2 link routed to EPHY1 LED */ + }; + ephy2@6 { + reg = <6>; + brcm,hardware-controlled; + brcm,link-signal-sources = <5>; + /* EPHY1 link routed to EPHY2 LED */ + }; + ephy3@7 { + reg = <7>; + brcm,hardware-controlled; + brcm,link-signal-sources = <4>; + /* EPHY0 link routed to EPHY3 LED */ + }; + power_green@20 { + reg = <20>; + active-low; + label = "green:power"; + default-state = "on"; + }; + }; diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6358.txt b/Documentation/devicetree/bindings/leds/leds-bcm6358.txt new file mode 100644 index 000000000000..b22a55bcc65d --- /dev/null +++ b/Documentation/devicetree/bindings/leds/leds-bcm6358.txt @@ -0,0 +1,145 @@ +LEDs connected to Broadcom BCM6358 controller + +This controller is present on BCM6358 and BCM6368. +In these SoCs there are Serial LEDs (LEDs connected to a 74x164 controller), +which can either be controlled by software (exporting the 74x164 as spi-gpio. +See Documentation/devicetree/bindings/gpio/gpio-74x164.txt), or +by hardware using this driver. 
+ +Required properties: + - compatible : should be "brcm,bcm6358-leds". + - #address-cells : must be 1. + - #size-cells : must be 0. + - reg : BCM6358 LED controller address and size. + +Optional properties: + - brcm,clk-div : SCK signal divider. Possible values are 1, 2, 4 and 8. + Default : 1 + - brcm,clk-dat-low : Boolean, makes clock and data signals active low. + Default : false + +Each LED is represented as a sub-node of the brcm,bcm6358-leds device. + +LED sub-node required properties: + - reg : LED pin number (only LEDs 0 to 31 are valid). + +LED sub-node optional properties: + - label : see Documentation/devicetree/bindings/leds/common.txt + - active-low : Boolean, makes LED active low. + Default : false + - default-state : see + Documentation/devicetree/bindings/leds/leds-gpio.txt + - linux,default-trigger : see + Documentation/devicetree/bindings/leds/common.txt + +Examples: +Scenario 1 : BCM6358 + leds0: led-controller@fffe00d0 { + compatible = "brcm,bcm6358-leds"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0xfffe00d0 0x8>; + + alarm_white { + reg = <0>; + active-low; + label = "white:alarm"; + }; + tv_white { + reg = <2>; + active-low; + label = "white:tv"; + }; + tel_white { + reg = <3>; + active-low; + label = "white:tel"; + }; + adsl_white { + reg = <4>; + active-low; + label = "white:adsl"; + }; + }; + +Scenario 2 : BCM6368 + leds0: led-controller@100000d0 { + compatible = "brcm,bcm6358-leds"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x100000d0 0x8>; + brcm,pol-low; + brcm,clk-div = <4>; + + power_red { + reg = <0>; + active-low; + label = "red:power"; + }; + power_green { + reg = <1>; + active-low; + label = "green:power"; + default-state = "on"; + }; + power_blue { + reg = <2>; + label = "blue:power"; + }; + broadband_red { + reg = <3>; + active-low; + label = "red:broadband"; + }; + broadband_green { + reg = <4>; + label = "green:broadband"; + }; + broadband_blue { + reg = <5>; + active-low; + label = "blue:broadband"; + }; + wireless_red { + reg = <6>; + active-low; + label = "red:wireless"; + }; + wireless_green { + reg = <7>; + active-low; + label = "green:wireless"; + }; + wireless_blue { + reg = <8>; + label = "blue:wireless"; + }; + phone_red { + reg = <9>; + active-low; + label = "red:phone"; + }; + phone_green { + reg = <10>; + active-low; + label = "green:phone"; + }; + phone_blue { + reg = <11>; + label = "blue:phone"; + }; + upgrading_red { + reg = <12>; + active-low; + label = "red:upgrading"; + }; + upgrading_green { + reg = <13>; + active-low; + label = "green:upgrading"; + }; + upgrading_blue { + reg = <14>; + label = "blue:upgrading"; + }; + }; diff --git a/Documentation/devicetree/bindings/leds/leds-ktd2692.txt b/Documentation/devicetree/bindings/leds/leds-ktd2692.txt new file mode 100644 index 000000000000..853737452580 --- /dev/null +++ b/Documentation/devicetree/bindings/leds/leds-ktd2692.txt @@ -0,0 +1,50 @@ +* Kinetic Technologies - KTD2692 Flash LED Driver + +KTD2692 is the ideal power solution for high-power flash LEDs. +It uses ExpressWire single-wire programming for maximum flexibility. + +The ExpressWire interface through CTRL pin can control LED on/off and +enable/disable the IC, Movie(max 1/3 of Flash current) / Flash mode current, +Flash timeout, LVP(low voltage protection). + +Also, When the AUX pin is pulled high while CTRL pin is high, +LED current will be ramped up to the flash-mode current level. + +Required properties: +- compatible : Should be "kinetic,ktd2692". 
+- ctrl-gpios : Specifier of the GPIO connected to CTRL pin. +- aux-gpios : Specifier of the GPIO connected to AUX pin. + +Optional properties: +- vin-supply : "vin" LED supply (2.7V to 5.5V). + See Documentation/devicetree/bindings/regulator/regulator.txt + +A discrete LED element connected to the device must be represented by a child +node - See Documentation/devicetree/bindings/leds/common.txt + +Required properties for flash LED child nodes: + See Documentation/devicetree/bindings/leds/common.txt +- led-max-microamp : Minimum Threshold for Timer protection + is defined internally (Maximum 300mA). +- flash-max-microamp : Flash LED maximum current + Formula : I(mA) = 15000 / Rset. +- flash-max-timeout-us : Flash LED maximum timeout. + +Optional properties for flash LED child nodes: +- label : See Documentation/devicetree/bindings/leds/common.txt + +Example: + +ktd2692 { + compatible = "kinetic,ktd2692"; + ctrl-gpios = <&gpc0 1 0>; + aux-gpios = <&gpc0 2 0>; + vin-supply = <&vbat>; + + flash-led { + label = "ktd2692-flash"; + led-max-microamp = <300000>; + flash-max-microamp = <1500000>; + flash-max-timeout-us = <1835000>; + }; +}; diff --git a/Documentation/devicetree/bindings/leds/leds-tlc591xx.txt b/Documentation/devicetree/bindings/leds/leds-tlc591xx.txt new file mode 100644 index 000000000000..3bbbf7024411 --- /dev/null +++ b/Documentation/devicetree/bindings/leds/leds-tlc591xx.txt @@ -0,0 +1,40 @@ +LEDs connected to tlc59116 or tlc59108 + +Required properties +- compatible: should be "ti,tlc59116" or "ti,tlc59108" +- #address-cells: must be 1 +- #size-cells: must be 0 +- reg: typically 0x68 + +Each led is represented as a sub-node of the ti,tlc59116. +See Documentation/devicetree/bindings/leds/common.txt + +LED sub-node properties: +- reg: number of LED line, 0 to 15 or 0 to 7 +- label: (optional) name of LED +- linux,default-trigger : (optional) + +Examples: + +tlc59116@68 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "ti,tlc59116"; + reg = <0x68>; + + wan@0 { + label = "wrt1900ac:amber:wan"; + reg = <0x0>; + }; + + 2g@2 { + label = "wrt1900ac:white:2g"; + reg = <0x2>; + }; + + alive@9 { + label = "wrt1900ac:green:alive"; + reg = <0x9>; + linux,default_trigger = "heartbeat"; + }; +}; diff --git a/Documentation/devicetree/bindings/mfd/tps6507x.txt b/Documentation/devicetree/bindings/mfd/tps6507x.txt index 8fffa3c5ed40..8fffa3c5ed40 100755..100644 --- a/Documentation/devicetree/bindings/mfd/tps6507x.txt +++ b/Documentation/devicetree/bindings/mfd/tps6507x.txt diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt index 750d577e8083..f5a8ca29aff0 100644 --- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt +++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt @@ -1,7 +1,7 @@ * Marvell Armada 370 / Armada XP Ethernet Controller (NETA) Required properties: -- compatible: should be "marvell,armada-370-neta". +- compatible: "marvell,armada-370-neta" or "marvell,armada-xp-neta". - reg: address and length of the register set for the device. - interrupts: interrupt for the device - phy: See ethernet.txt file in the same directory. 
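Illustrative only, not part of the patch: a minimal consumer node using the newly documented "marvell,armada-xp-neta" compatible. The unit address, register size, interrupt specifier and PHY phandle are placeholders, and a real node also carries the remaining properties described in ethernet.txt.

	ethernet@70000 {
		compatible = "marvell,armada-xp-neta";
		reg = <0x70000 0x4000>;		/* placeholder address/size */
		interrupts = <8>;		/* placeholder interrupt */
		phy = <&phy0>;			/* placeholder PHY phandle */
	};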
diff --git a/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt b/Documentation/devicetree/bindings/pinctrl/lantiq,pinctrl-falcon.txt index ac4da9fe07bd..ac4da9fe07bd 100644 --- a/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt +++ b/Documentation/devicetree/bindings/pinctrl/lantiq,pinctrl-falcon.txt diff --git a/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt b/Documentation/devicetree/bindings/pinctrl/lantiq,pinctrl-xway.txt index e89b4677567d..e89b4677567d 100644 --- a/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt +++ b/Documentation/devicetree/bindings/pinctrl/lantiq,pinctrl-xway.txt diff --git a/Documentation/devicetree/bindings/usb/atmel-usb.txt b/Documentation/devicetree/bindings/usb/atmel-usb.txt index 1be8d7a26c15..5883b73ea1b5 100644 --- a/Documentation/devicetree/bindings/usb/atmel-usb.txt +++ b/Documentation/devicetree/bindings/usb/atmel-usb.txt @@ -79,9 +79,9 @@ Atmel High-Speed USB device controller Required properties: - compatible: Should be one of the following - "at91sam9rl-udc" - "at91sam9g45-udc" - "sama5d3-udc" + "atmel,at91sam9rl-udc" + "atmel,at91sam9g45-udc" + "atmel,sama5d3-udc" - reg: Address and length of the register set for the device - interrupts: Should contain usba interrupt - clocks: Should reference the peripheral and host clocks diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index 7b607761b774..d444757c4d9e 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -114,6 +114,7 @@ isee ISEE 2007 S.L. isil Intersil karo Ka-Ro electronics GmbH keymile Keymile GmbH +kinetic Kinetic Technologies lacie LaCie lantiq Lantiq Semiconductor lenovo Lenovo Group Ltd. @@ -226,4 +227,5 @@ xillybus Xillybus Ltd. xlnx Xilinx zyxel ZyXEL Communications Corp. zarlink Zarlink Semiconductor +zii Zodiac Inflight Innovations zte ZTE Corp. diff --git a/Documentation/devicetree/bindings/watchdog/digicolor-wdt.txt b/Documentation/devicetree/bindings/watchdog/digicolor-wdt.txt new file mode 100644 index 000000000000..a882967e17d4 --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/digicolor-wdt.txt @@ -0,0 +1,25 @@ +Conexant Digicolor SoCs Watchdog timer + +The watchdog functionality in Conexant Digicolor SoCs relies on the so called +"Agent Communication" block. This block includes the eight programmable system +timer counters. The first timer (called "Timer A") is the only one that can be +used as watchdog. 
+ +Required properties: + +- compatible : Should be "cnxt,cx92755-wdt" +- reg : Specifies base physical address and size of the registers +- clocks : phandle; specifies the clock that drives the timer + +Optional properties: + +- timeout-sec : Contains the watchdog timeout in seconds + +Example: + + watchdog@f0000fc0 { + compatible = "cnxt,cx92755-wdt"; + reg = <0xf0000fc0 0x8>; + clocks = <&main_clk>; + timeout-sec = <15>; + }; diff --git a/Documentation/devicetree/bindings/watchdog/omap-wdt.txt b/Documentation/devicetree/bindings/watchdog/omap-wdt.txt index c227970671ea..1fa20e453a2d 100644 --- a/Documentation/devicetree/bindings/watchdog/omap-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/omap-wdt.txt @@ -1,10 +1,11 @@ TI Watchdog Timer (WDT) Controller for OMAP Required properties: -compatible: -- "ti,omap3-wdt" for OMAP3 -- "ti,omap4-wdt" for OMAP4 -- ti,hwmods: Name of the hwmod associated to the WDT +- compatible : "ti,omap3-wdt" for OMAP3 or "ti,omap4-wdt" for OMAP4 +- ti,hwmods : Name of the hwmod associated to the WDT + +Optional properties: +- timeout-sec : default watchdog timeout in seconds Examples: diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt index 51f4221657bf..611c52267d24 100644 --- a/Documentation/ioctl/ioctl-number.txt +++ b/Documentation/ioctl/ioctl-number.txt @@ -321,6 +321,7 @@ Code Seq#(hex) Include File Comments 0xDB 00-0F drivers/char/mwave/mwavepub.h 0xDD 00-3F ZFCP device driver see drivers/s390/scsi/ <mailto:[email protected]> +0xE5 00-3F linux/fuse.h 0xEC 00-01 drivers/platform/chrome/cros_ec_dev.h ChromeOS EC driver 0xF3 00-3F drivers/usb/misc/sisusbvga/sisusb.h sisfb (in development) <mailto:[email protected]> diff --git a/Documentation/leds/leds-class-flash.txt b/Documentation/leds/leds-class-flash.txt index 19bb67355424..8da3c6f4b60b 100644 --- a/Documentation/leds/leds-class-flash.txt +++ b/Documentation/leds/leds-class-flash.txt @@ -20,3 +20,54 @@ Following sysfs attributes are exposed for controlling flash LED devices: - max_flash_timeout - flash_strobe - flash_fault + + +V4L2 flash wrapper for flash LEDs +================================= + +A LED subsystem driver can be controlled also from the level of VideoForLinux2 +subsystem. In order to enable this CONFIG_V4L2_FLASH_LED_CLASS symbol has to +be defined in the kernel config. + +The driver must call the v4l2_flash_init function to get registered in the +V4L2 subsystem. The function takes six arguments: +- dev : flash device, e.g. an I2C device +- of_node : of_node of the LED, may be NULL if the same as device's +- fled_cdev : LED flash class device to wrap +- iled_cdev : LED flash class device representing indicator LED associated with + fled_cdev, may be NULL +- ops : V4L2 specific ops + * external_strobe_set - defines the source of the flash LED strobe - + V4L2_CID_FLASH_STROBE control or external source, typically + a sensor, which makes it possible to synchronise the flash + strobe start with exposure start, + * intensity_to_led_brightness and led_brightness_to_intensity - perform + enum led_brightness <-> V4L2 intensity conversion in a device + specific manner - they can be used for devices with non-linear + LED current scale. 
+- config : configuration for V4L2 Flash sub-device + * dev_name - the name of the media entity, unique in the system, + * flash_faults - bitmask of flash faults that the LED flash class + device can report; corresponding LED_FAULT* bit definitions are + available in <linux/led-class-flash.h>, + * torch_intensity - constraints for the LED in TORCH mode + in microamperes, + * indicator_intensity - constraints for the indicator LED + in microamperes, + * has_external_strobe - determines whether the flash strobe source + can be switched to external, + +On remove the v4l2_flash_release function has to be called, which takes one +argument - struct v4l2_flash pointer returned previously by v4l2_flash_init. +This function can be safely called with NULL or error pointer argument. + +Please refer to drivers/leds/leds-max77693.c for an exemplary usage of the +v4l2 flash wrapper. + +Once the V4L2 sub-device is registered by the driver which created the Media +controller device, the sub-device node acts just as a node of a native V4L2 +flash API device would. The calls are simply routed to the LED flash API. + +Opening the V4L2 flash sub-device makes the LED subsystem sysfs interface +unavailable. The interface is re-enabled after the V4L2 flash sub-device +is closed. diff --git a/Documentation/leds/leds-lp5523.txt b/Documentation/leds/leds-lp5523.txt index 5b3e91d4ac59..0dbbd279c9b9 100644 --- a/Documentation/leds/leds-lp5523.txt +++ b/Documentation/leds/leds-lp5523.txt @@ -49,6 +49,36 @@ There are two ways to run LED patterns. 2) Firmware interface - LP55xx common interface For the details, please refer to 'firmware' section in leds-lp55xx.txt +LP5523 has three master faders. If a channel is mapped to one of +the master faders, its output is dimmed based on the value of the master +fader. + +For example, + + echo "123000123" > master_fader_leds + +creates the following channel-fader mappings: + + channel 0,6 to master_fader1 + channel 1,7 to master_fader2 + channel 2,8 to master_fader3 + +Then, to have 25% of the original output on channel 0,6: + + echo 64 > master_fader1 + +To have 0% of the original output (i.e. no output) channel 1,7: + + echo 0 > master_fader2 + +To have 100% of the original output (i.e. no dimming) on channel 2,8: + + echo 255 > master_fader3 + +To clear all master fader controls: + + echo "000000000" > master_fader_leds + Selftest uses always the current from the platform data. Each channel contains led current settings. diff --git a/Documentation/watchdog/watchdog-kernel-api.txt b/Documentation/watchdog/watchdog-kernel-api.txt index a0438f3957ca..d8b0d3367706 100644 --- a/Documentation/watchdog/watchdog-kernel-api.txt +++ b/Documentation/watchdog/watchdog-kernel-api.txt @@ -36,6 +36,10 @@ The watchdog_unregister_device routine deregisters a registered watchdog timer device. The parameter of this routine is the pointer to the registered watchdog_device structure. +The watchdog subsystem includes an registration deferral mechanism, +which allows you to register an watchdog as early as you wish during +the boot process. + The watchdog device structure looks like this: struct watchdog_device { @@ -52,6 +56,7 @@ struct watchdog_device { void *driver_data; struct mutex lock; unsigned long status; + struct list_head deferred; }; It contains following fields: @@ -80,6 +85,8 @@ It contains following fields: information about the status of the device (Like: is the watchdog timer running/active, is the nowayout bit set, is the device opened via the /dev/watchdog interface or not, ...). 
+* deferred: entry in wtd_deferred_reg_list which is used to + register early initialized watchdogs. The list of watchdog operations is defined as: diff --git a/Documentation/watchdog/watchdog-parameters.txt b/Documentation/watchdog/watchdog-parameters.txt index 692791cc674c..9f9ec9f76039 100644 --- a/Documentation/watchdog/watchdog-parameters.txt +++ b/Documentation/watchdog/watchdog-parameters.txt @@ -208,6 +208,9 @@ nowayout: Watchdog cannot be stopped once started ------------------------------------------------- omap_wdt: timer_margin: initial watchdog timeout (in seconds) +early_enable: Watchdog is started on module insertion (default=0 +nowayout: Watchdog cannot be stopped once started + (default=kernel config parameter) ------------------------------------------------- orion_wdt: heartbeat: Initial watchdog heartbeat in seconds @@ -52,7 +52,6 @@ $(obj)/$(bounds-file): kernel/bounds.s FORCE timeconst-file := include/generated/timeconst.h -#always += $(timeconst-file) targets += $(timeconst-file) quiet_cmd_gentimeconst = GEN $@ diff --git a/MAINTAINERS b/MAINTAINERS index ab6fb58b3873..86ea2084bc58 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2026,10 +2026,10 @@ S: Maintained F: drivers/net/hamradio/baycom* BCACHE (BLOCK LAYER CACHE) -M: Kent Overstreet <[email protected]> +M: Kent Overstreet <[email protected]> W: http://bcache.evilpiepirate.org -S: Maintained: +S: Maintained F: drivers/md/bcache/ BDISP ST MEDIA DRIVER @@ -2280,7 +2280,7 @@ S: Maintained F: arch/mips/bmips/* F: arch/mips/include/asm/mach-bmips/* F: arch/mips/kernel/*bmips* -F: arch/mips/boot/dts/bcm*.dts* +F: arch/mips/boot/dts/brcm/bcm*.dts* F: drivers/irqchip/irq-bcm7* F: drivers/irqchip/irq-brcmstb* @@ -2339,7 +2339,7 @@ M: Ray Jui <[email protected]> S: Supported F: drivers/gpio/gpio-bcm-kona.c -F: Documentation/devicetree/bindings/gpio/gpio-bcm-kona.txt +F: Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt BROADCOM NVRAM DRIVER M: Rafał Miłecki <[email protected]> @@ -2763,7 +2763,7 @@ F: Documentation/devicetree/bindings/media/coda.txt F: drivers/media/platform/coda/ COMMON CLK FRAMEWORK -M: Mike Turquette <[email protected]> +M: Michael Turquette <[email protected]> M: Stephen Boyd <[email protected]> T: git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git @@ -3216,6 +3216,11 @@ L: [email protected] S: Maintained F: drivers/platform/x86/dell-laptop.c +DELL LAPTOP RBTN DRIVER +M: Pali Rohár <[email protected]> +S: Maintained +F: drivers/platform/x86/dell-rbtn.* + DELL LAPTOP FREEFALL DRIVER M: Pali Rohár <[email protected]> S: Maintained @@ -4425,9 +4430,11 @@ FUSE: FILESYSTEM IN USERSPACE M: Miklos Szeredi <[email protected]> W: http://fuse.sourceforge.net/ +T: git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git S: Maintained F: fs/fuse/ F: include/uapi/linux/fuse.h +F: Documentation/filesystems/fuse.txt FUTURE DOMAIN TMC-16x0 SCSI DRIVER (16-bit) M: Rik Faith <[email protected]> @@ -5285,11 +5292,10 @@ INTEL ASoC BDW/HSW DRIVERS M: Jie Yang <[email protected]> L: [email protected] (moderated for non-subscribers) S: Supported -F: sound/soc/intel/sst-haswell* -F: sound/soc/intel/sst-dsp* -F: sound/soc/intel/sst-firmware.c -F: sound/soc/intel/broadwell.c -F: sound/soc/intel/haswell.c +F: sound/soc/intel/common/sst-dsp* +F: sound/soc/intel/common/sst-firmware.c +F: sound/soc/intel/boards/broadwell.c +F: sound/soc/intel/haswell/ INTEL C600 SERIES SAS CONTROLLER DRIVER M: Intel SCU Linux support <[email protected]> @@ -6787,9 +6793,8 @@ S: Maintained F: 
drivers/platform/x86/msi-laptop.c MSI WMI SUPPORT -M: Anisse Astier <[email protected]> -S: Supported +S: Orphan F: drivers/platform/x86/msi-wmi.c MSI001 MEDIA DRIVER @@ -7019,7 +7024,6 @@ L: [email protected] T: git git://git.pengutronix.de/git/mpa/linux-nbd.git F: Documentation/blockdev/nbd.txt F: drivers/block/nbd.c -F: include/linux/nbd.h F: include/uapi/linux/nbd.h NETWORK DROP MONITOR @@ -7647,7 +7651,6 @@ F: arch/*/include/asm/paravirt.h PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES M: Tim Waugh <[email protected]> L: [email protected] (subscribers-only) -W: http://www.torque.net/linux-pp.html S: Maintained F: Documentation/blockdev/paride.txt F: drivers/block/paride/ @@ -9091,9 +9094,9 @@ S: Supported F: drivers/net/ethernet/emulex/benet/ EMULEX ONECONNECT ROCE DRIVER -M: Selvin Xavier <[email protected]> -M: Devesh Sharma <[email protected]> -M: Mitesh Ahuja <[email protected]> +M: Selvin Xavier <[email protected]> +M: Devesh Sharma <[email protected]> +M: Mitesh Ahuja <[email protected]> W: http://www.emulex.com S: Supported @@ -9593,7 +9596,6 @@ F: include/uapi/linux/spi/ SPIDERNET NETWORK DRIVER for CELL M: Ishizaki Kou <[email protected]> -M: Jens Osterkamp <[email protected]> S: Supported F: Documentation/networking/spider_net.txt @@ -11354,6 +11356,13 @@ L: [email protected] (subscribers-only) S: Maintained F: drivers/net/wireless/zd1211rw/ +ZPOOL COMPRESSED PAGE STORAGE API +M: Dan Streetman <[email protected]> +S: Maintained +F: mm/zpool.c +F: include/linux/zpool.h + ZR36067 VIDEO FOR LINUX DRIVER diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h index fd6cdb56d4fd..2d28ba939d8e 100644 --- a/arch/arc/include/asm/dma-mapping.h +++ b/arch/arc/include/asm/dma-mapping.h @@ -157,22 +157,24 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, } static inline void -dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, +dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir) { int i; + struct scatterlist *sg; - for (i = 0; i < nelems; i++, sg++) + for_each_sg(sglist, sg, nelems, i) _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); } static inline void -dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, - enum dma_data_direction dir) +dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, + int nelems, enum dma_data_direction dir) { int i; + struct scatterlist *sg; - for (i = 0; i < nelems; i++, sg++) + for_each_sg(sglist, sg, nelems, i) _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir); } diff --git a/arch/arm/boot/compressed/libfdt_env.h b/arch/arm/boot/compressed/libfdt_env.h index 1f4e71876b00..17ae0f3efac8 100644 --- a/arch/arm/boot/compressed/libfdt_env.h +++ b/arch/arm/boot/compressed/libfdt_env.h @@ -5,6 +5,10 @@ #include <linux/string.h> #include <asm/byteorder.h> +typedef __be16 fdt16_t; +typedef __be32 fdt32_t; +typedef __be64 fdt64_t; + #define fdt16_to_cpu(x) be16_to_cpu(x) #define cpu_to_fdt16(x) cpu_to_be16(x) #define fdt32_to_cpu(x) be32_to_cpu(x) diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi index 7f0252c580e4..a718866ba52d 100644 --- a/arch/arm/boot/dts/armada-370-xp.dtsi +++ b/arch/arm/boot/dts/armada-370-xp.dtsi @@ -268,7 +268,6 @@ }; eth0: ethernet@70000 { - compatible = "marvell,armada-370-neta"; reg = <0x70000 0x4000>; interrupts = <8>; clocks = <&gateclk 4>; @@ -284,7 +283,6 @@ }; eth1: ethernet@74000 { - compatible = 
"marvell,armada-370-neta"; reg = <0x74000 0x4000>; interrupts = <10>; clocks = <&gateclk 3>; diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi index 3f036bd635f4..53a1a5abe147 100644 --- a/arch/arm/boot/dts/armada-370.dtsi +++ b/arch/arm/boot/dts/armada-370.dtsi @@ -311,6 +311,14 @@ dmacap,memset; }; }; + + ethernet@70000 { + compatible = "marvell,armada-370-neta"; + }; + + ethernet@74000 { + compatible = "marvell,armada-370-neta"; + }; }; }; }; diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi index 8479fdc9e9c2..c5fdc99f0dbe 100644 --- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi @@ -318,7 +318,7 @@ }; eth3: ethernet@34000 { - compatible = "marvell,armada-370-neta"; + compatible = "marvell,armada-xp-neta"; reg = <0x34000 0x4000>; interrupts = <14>; clocks = <&gateclk 1>; diff --git a/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/arch/arm/boot/dts/armada-xp-mv78460.dtsi index 661d54c81580..0e24f1a38540 100644 --- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi +++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi @@ -356,7 +356,7 @@ }; eth3: ethernet@34000 { - compatible = "marvell,armada-370-neta"; + compatible = "marvell,armada-xp-neta"; reg = <0x34000 0x4000>; interrupts = <14>; clocks = <&gateclk 1>; diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi index e78ce4ab6b75..3de9b761cc1a 100644 --- a/arch/arm/boot/dts/armada-xp.dtsi +++ b/arch/arm/boot/dts/armada-xp.dtsi @@ -185,7 +185,7 @@ }; eth2: ethernet@30000 { - compatible = "marvell,armada-370-neta"; + compatible = "marvell,armada-xp-neta"; reg = <0x30000 0x4000>; interrupts = <12>; clocks = <&gateclk 2>; @@ -228,6 +228,14 @@ }; }; + ethernet@70000 { + compatible = "marvell,armada-xp-neta"; + }; + + ethernet@74000 { + compatible = "marvell,armada-xp-neta"; + }; + xor@f0900 { compatible = "marvell,orion-xor"; reg = <0xF0900 0x100 @@ -297,7 +305,7 @@ spi0_pins: spi0-pins { marvell,pins = "mpp36", "mpp37", "mpp38", "mpp39"; - marvell,function = "spi"; + marvell,function = "spi0"; }; uart2_pins: uart2-pins { diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi index d260ba779ae5..18177f5a7464 100644 --- a/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/arch/arm/boot/dts/at91sam9g45.dtsi @@ -1148,7 +1148,7 @@ usb2: gadget@fff78000 { #address-cells = <1>; #size-cells = <0>; - compatible = "atmel,at91sam9rl-udc"; + compatible = "atmel,at91sam9g45-udc"; reg = <0x00600000 0x80000 0xfff78000 0x400>; interrupts = <27 IRQ_TYPE_LEVEL_HIGH 0>; diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi index 7521bdf17ef2..b6c8df8d380e 100644 --- a/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/arch/arm/boot/dts/at91sam9x5.dtsi @@ -1108,7 +1108,7 @@ usb2: gadget@f803c000 { #address-cells = <1>; #size-cells = <0>; - compatible = "atmel,at91sam9rl-udc"; + compatible = "atmel,at91sam9g45-udc"; reg = <0x00500000 0x80000 0xf803c000 0x400>; interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>; diff --git a/arch/arm/boot/dts/atlas7.dtsi b/arch/arm/boot/dts/atlas7.dtsi index a753178abc85..5dfd3a44bf82 100644 --- a/arch/arm/boot/dts/atlas7.dtsi +++ b/arch/arm/boot/dts/atlas7.dtsi @@ -38,6 +38,21 @@ }; }; + clocks { + xinw { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <32768>; + clock-output-names = "xinw"; + }; + xin { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <26000000>; + clock-output-names = "xin"; + }; + }; + noc { compatible = 
"simple-bus"; #address-cells = <1>; diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi index 5ab7548e04e1..9e2444b07bce 100644 --- a/arch/arm/boot/dts/sama5d3.dtsi +++ b/arch/arm/boot/dts/sama5d3.dtsi @@ -1321,7 +1321,7 @@ usb0: gadget@00500000 { #address-cells = <1>; #size-cells = <0>; - compatible = "atmel,at91sam9rl-udc"; + compatible = "atmel,sama5d3-udc"; reg = <0x00500000 0x100000 0xf8030000 0x4000>; interrupts = <33 IRQ_TYPE_LEVEL_HIGH 2>; diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi index 653a1f851f2b..3ee22ee13c5a 100644 --- a/arch/arm/boot/dts/sama5d4.dtsi +++ b/arch/arm/boot/dts/sama5d4.dtsi @@ -127,7 +127,7 @@ usb0: gadget@00400000 { #address-cells = <1>; #size-cells = <0>; - compatible = "atmel,at91sam9rl-udc"; + compatible = "atmel,sama5d3-udc"; reg = <0x00400000 0x100000 0xfc02c000 0x4000>; interrupts = <47 IRQ_TYPE_LEVEL_HIGH 2>; diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index fd6a6d23bc20..6d83a1bf0c74 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -169,6 +169,7 @@ CONFIG_MTD_BLOCK=y CONFIG_MTD_M25P80=y CONFIG_MTD_NAND=y CONFIG_MTD_NAND_ATMEL=y +CONFIG_MTD_NAND_BRCMNAND=y CONFIG_MTD_NAND_DAVINCI=y CONFIG_MTD_SPI_NOR=y CONFIG_MTD_UBI=y diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index f8f7398c74c2..7dac3086e361 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -15,6 +15,8 @@ * that causes it to save wrong values... Be aware! */ +#include <linux/init.h> + #include <asm/assembler.h> #include <asm/memory.h> #include <asm/glue-df.h> diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 1e184767c3be..e24df77abd79 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -369,7 +369,7 @@ static void __init at91_pm_sram_init(void) return; } - sram_pool = dev_get_gen_pool(&pdev->dev); + sram_pool = gen_pool_get(&pdev->dev); if (!sram_pool) { pr_warn("%s: sram pool unavailable!\n", __func__); return; diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig index e9184feffc4e..0ac9e4b3b265 100644 --- a/arch/arm/mach-bcm/Kconfig +++ b/arch/arm/mach-bcm/Kconfig @@ -19,7 +19,6 @@ config ARCH_BCM_IPROC select ARCH_REQUIRE_GPIOLIB select ARM_AMBA select PINCTRL - select MTD_NAND_BRCMNAND help This enables support for systems based on Broadcom IPROC architected SoCs. 
The IPROC complex contains one or more ARM CPUs along with common diff --git a/arch/arm/mach-dove/include/mach/irqs.h b/arch/arm/mach-dove/include/mach/irqs.h index 03d401d20453..3f29e6bca058 100644 --- a/arch/arm/mach-dove/include/mach/irqs.h +++ b/arch/arm/mach-dove/include/mach/irqs.h @@ -14,73 +14,73 @@ /* * Dove Low Interrupt Controller */ -#define IRQ_DOVE_BRIDGE 0 -#define IRQ_DOVE_H2C 1 -#define IRQ_DOVE_C2H 2 -#define IRQ_DOVE_NAND 3 -#define IRQ_DOVE_PDMA 4 -#define IRQ_DOVE_SPI1 5 -#define IRQ_DOVE_SPI0 6 -#define IRQ_DOVE_UART_0 7 -#define IRQ_DOVE_UART_1 8 -#define IRQ_DOVE_UART_2 9 -#define IRQ_DOVE_UART_3 10 -#define IRQ_DOVE_I2C 11 -#define IRQ_DOVE_GPIO_0_7 12 -#define IRQ_DOVE_GPIO_8_15 13 -#define IRQ_DOVE_GPIO_16_23 14 -#define IRQ_DOVE_PCIE0_ERR 15 -#define IRQ_DOVE_PCIE0 16 -#define IRQ_DOVE_PCIE1_ERR 17 -#define IRQ_DOVE_PCIE1 18 -#define IRQ_DOVE_I2S0 19 -#define IRQ_DOVE_I2S0_ERR 20 -#define IRQ_DOVE_I2S1 21 -#define IRQ_DOVE_I2S1_ERR 22 -#define IRQ_DOVE_USB_ERR 23 -#define IRQ_DOVE_USB0 24 -#define IRQ_DOVE_USB1 25 -#define IRQ_DOVE_GE00_RX 26 -#define IRQ_DOVE_GE00_TX 27 -#define IRQ_DOVE_GE00_MISC 28 -#define IRQ_DOVE_GE00_SUM 29 -#define IRQ_DOVE_GE00_ERR 30 -#define IRQ_DOVE_CRYPTO 31 +#define IRQ_DOVE_BRIDGE (1 + 0) +#define IRQ_DOVE_H2C (1 + 1) +#define IRQ_DOVE_C2H (1 + 2) +#define IRQ_DOVE_NAND (1 + 3) +#define IRQ_DOVE_PDMA (1 + 4) +#define IRQ_DOVE_SPI1 (1 + 5) +#define IRQ_DOVE_SPI0 (1 + 6) +#define IRQ_DOVE_UART_0 (1 + 7) +#define IRQ_DOVE_UART_1 (1 + 8) +#define IRQ_DOVE_UART_2 (1 + 9) +#define IRQ_DOVE_UART_3 (1 + 10) +#define IRQ_DOVE_I2C (1 + 11) +#define IRQ_DOVE_GPIO_0_7 (1 + 12) +#define IRQ_DOVE_GPIO_8_15 (1 + 13) +#define IRQ_DOVE_GPIO_16_23 (1 + 14) +#define IRQ_DOVE_PCIE0_ERR (1 + 15) +#define IRQ_DOVE_PCIE0 (1 + 16) +#define IRQ_DOVE_PCIE1_ERR (1 + 17) +#define IRQ_DOVE_PCIE1 (1 + 18) +#define IRQ_DOVE_I2S0 (1 + 19) +#define IRQ_DOVE_I2S0_ERR (1 + 20) +#define IRQ_DOVE_I2S1 (1 + 21) +#define IRQ_DOVE_I2S1_ERR (1 + 22) +#define IRQ_DOVE_USB_ERR (1 + 23) +#define IRQ_DOVE_USB0 (1 + 24) +#define IRQ_DOVE_USB1 (1 + 25) +#define IRQ_DOVE_GE00_RX (1 + 26) +#define IRQ_DOVE_GE00_TX (1 + 27) +#define IRQ_DOVE_GE00_MISC (1 + 28) +#define IRQ_DOVE_GE00_SUM (1 + 29) +#define IRQ_DOVE_GE00_ERR (1 + 30) +#define IRQ_DOVE_CRYPTO (1 + 31) /* * Dove High Interrupt Controller */ -#define IRQ_DOVE_AC97 32 -#define IRQ_DOVE_PMU 33 -#define IRQ_DOVE_CAM 34 -#define IRQ_DOVE_SDIO0 35 -#define IRQ_DOVE_SDIO1 36 -#define IRQ_DOVE_SDIO0_WAKEUP 37 -#define IRQ_DOVE_SDIO1_WAKEUP 38 -#define IRQ_DOVE_XOR_00 39 -#define IRQ_DOVE_XOR_01 40 -#define IRQ_DOVE_XOR0_ERR 41 -#define IRQ_DOVE_XOR_10 42 -#define IRQ_DOVE_XOR_11 43 -#define IRQ_DOVE_XOR1_ERR 44 -#define IRQ_DOVE_LCD_DCON 45 -#define IRQ_DOVE_LCD1 46 -#define IRQ_DOVE_LCD0 47 -#define IRQ_DOVE_GPU 48 -#define IRQ_DOVE_PERFORM_MNTR 49 -#define IRQ_DOVE_VPRO_DMA1 51 -#define IRQ_DOVE_SSP_TIMER 54 -#define IRQ_DOVE_SSP 55 -#define IRQ_DOVE_MC_L2_ERR 56 -#define IRQ_DOVE_CRYPTO_ERR 59 -#define IRQ_DOVE_GPIO_24_31 60 -#define IRQ_DOVE_HIGH_GPIO 61 -#define IRQ_DOVE_SATA 62 +#define IRQ_DOVE_AC97 (1 + 32) +#define IRQ_DOVE_PMU (1 + 33) +#define IRQ_DOVE_CAM (1 + 34) +#define IRQ_DOVE_SDIO0 (1 + 35) +#define IRQ_DOVE_SDIO1 (1 + 36) +#define IRQ_DOVE_SDIO0_WAKEUP (1 + 37) +#define IRQ_DOVE_SDIO1_WAKEUP (1 + 38) +#define IRQ_DOVE_XOR_00 (1 + 39) +#define IRQ_DOVE_XOR_01 (1 + 40) +#define IRQ_DOVE_XOR0_ERR (1 + 41) +#define IRQ_DOVE_XOR_10 (1 + 42) +#define IRQ_DOVE_XOR_11 (1 + 43) +#define IRQ_DOVE_XOR1_ERR (1 + 44) 
+#define IRQ_DOVE_LCD_DCON (1 + 45) +#define IRQ_DOVE_LCD1 (1 + 46) +#define IRQ_DOVE_LCD0 (1 + 47) +#define IRQ_DOVE_GPU (1 + 48) +#define IRQ_DOVE_PERFORM_MNTR (1 + 49) +#define IRQ_DOVE_VPRO_DMA1 (1 + 51) +#define IRQ_DOVE_SSP_TIMER (1 + 54) +#define IRQ_DOVE_SSP (1 + 55) +#define IRQ_DOVE_MC_L2_ERR (1 + 56) +#define IRQ_DOVE_CRYPTO_ERR (1 + 59) +#define IRQ_DOVE_GPIO_24_31 (1 + 60) +#define IRQ_DOVE_HIGH_GPIO (1 + 61) +#define IRQ_DOVE_SATA (1 + 62) /* * DOVE General Purpose Pins */ -#define IRQ_DOVE_GPIO_START 64 +#define IRQ_DOVE_GPIO_START 65 #define NR_GPIO_IRQS 64 /* diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c index 4a5a7aedcb76..df0223f76fa9 100644 --- a/arch/arm/mach-dove/irq.c +++ b/arch/arm/mach-dove/irq.c @@ -126,14 +126,14 @@ __exception_irq_entry dove_legacy_handle_irq(struct pt_regs *regs) stat = readl_relaxed(dove_irq_base + IRQ_CAUSE_LOW_OFF); stat &= readl_relaxed(dove_irq_base + IRQ_MASK_LOW_OFF); if (stat) { - unsigned int hwirq = __fls(stat); + unsigned int hwirq = 1 + __fls(stat); handle_IRQ(hwirq, regs); return; } stat = readl_relaxed(dove_irq_base + IRQ_CAUSE_HIGH_OFF); stat &= readl_relaxed(dove_irq_base + IRQ_MASK_HIGH_OFF); if (stat) { - unsigned int hwirq = 32 + __fls(stat); + unsigned int hwirq = 33 + __fls(stat); handle_IRQ(hwirq, regs); return; } @@ -144,8 +144,8 @@ void __init dove_init_irq(void) { int i; - orion_irq_init(0, IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF); - orion_irq_init(32, IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF); + orion_irq_init(1, IRQ_VIRT_BASE + IRQ_MASK_LOW_OFF); + orion_irq_init(33, IRQ_VIRT_BASE + IRQ_MASK_HIGH_OFF); #ifdef CONFIG_MULTI_IRQ_HANDLER set_handle_irq(dove_legacy_handle_irq); diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c index 4bd8b7653817..5f8ddcdeeacf 100644 --- a/arch/arm/mach-exynos/exynos.c +++ b/arch/arm/mach-exynos/exynos.c @@ -224,6 +224,25 @@ static void __init exynos_init_irq(void) exynos_map_pmu(); } +static const struct of_device_id exynos_cpufreq_matches[] = { + { .compatible = "samsung,exynos4210", .data = "cpufreq-dt" }, + { /* sentinel */ } +}; + +static void __init exynos_cpufreq_init(void) +{ + struct device_node *root = of_find_node_by_path("/"); + const struct of_device_id *match; + + match = of_match_node(exynos_cpufreq_matches, root); + if (!match) { + platform_device_register_simple("exynos-cpufreq", -1, NULL, 0); + return; + } + + platform_device_register_simple(match->data, -1, NULL, 0); +} + static void __init exynos_dt_machine_init(void) { /* @@ -246,7 +265,7 @@ static void __init exynos_dt_machine_init(void) of_machine_is_compatible("samsung,exynos5250")) platform_device_register(&exynos_cpuidle); - platform_device_register_simple("exynos-cpufreq", -1, NULL, 0); + exynos_cpufreq_init(); of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } diff --git a/arch/arm/mach-imx/pm-imx5.c b/arch/arm/mach-imx/pm-imx5.c index 0309ccda36a9..1885676c23c0 100644 --- a/arch/arm/mach-imx/pm-imx5.c +++ b/arch/arm/mach-imx/pm-imx5.c @@ -297,7 +297,7 @@ static int __init imx_suspend_alloc_ocram( goto put_node; } - ocram_pool = dev_get_gen_pool(&pdev->dev); + ocram_pool = gen_pool_get(&pdev->dev); if (!ocram_pool) { pr_warn("%s: ocram pool unavailable!\n", __func__); ret = -ENODEV; diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c index b01650d94f91..93ecf559d06d 100644 --- a/arch/arm/mach-imx/pm-imx6.c +++ b/arch/arm/mach-imx/pm-imx6.c @@ -451,7 +451,7 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata) goto 
put_node; } - ocram_pool = dev_get_gen_pool(&pdev->dev); + ocram_pool = gen_pool_get(&pdev->dev); if (!ocram_pool) { pr_warn("%s: ocram pool unavailable!\n", __func__); ret = -ENODEV; diff --git a/arch/arm/mach-lpc32xx/irq.c b/arch/arm/mach-lpc32xx/irq.c index 9ecb8f9c4ef5..d4f7dc87042b 100644 --- a/arch/arm/mach-lpc32xx/irq.c +++ b/arch/arm/mach-lpc32xx/irq.c @@ -283,25 +283,25 @@ static int lpc32xx_set_irq_type(struct irq_data *d, unsigned int type) case IRQ_TYPE_EDGE_RISING: /* Rising edge sensitive */ __lpc32xx_set_irq_type(d->hwirq, 1, 1); - __irq_set_handler_locked(d->hwirq, handle_edge_irq); + __irq_set_handler_locked(d->irq, handle_edge_irq); break; case IRQ_TYPE_EDGE_FALLING: /* Falling edge sensitive */ __lpc32xx_set_irq_type(d->hwirq, 0, 1); - __irq_set_handler_locked(d->hwirq, handle_edge_irq); + __irq_set_handler_locked(d->irq, handle_edge_irq); break; case IRQ_TYPE_LEVEL_LOW: /* Low level sensitive */ __lpc32xx_set_irq_type(d->hwirq, 0, 0); - __irq_set_handler_locked(d->hwirq, handle_level_irq); + __irq_set_handler_locked(d->irq, handle_level_irq); break; case IRQ_TYPE_LEVEL_HIGH: /* High level sensitive */ __lpc32xx_set_irq_type(d->hwirq, 1, 0); - __irq_set_handler_locked(d->hwirq, handle_level_irq); + __irq_set_handler_locked(d->irq, handle_level_irq); break; /* Other modes are not supported */ diff --git a/arch/arm/mach-mvebu/headsmp-a9.S b/arch/arm/mach-mvebu/headsmp-a9.S index 48e4c4b3cd1c..b093a196e801 100644 --- a/arch/arm/mach-mvebu/headsmp-a9.S +++ b/arch/arm/mach-mvebu/headsmp-a9.S @@ -13,12 +13,9 @@ */ #include <linux/linkage.h> -#include <linux/init.h> #include <asm/assembler.h> - __CPUINIT - ENTRY(mvebu_cortex_a9_secondary_startup) ARM_BE8(setend be) bl armada_38x_scu_power_up diff --git a/arch/arm/mach-mvebu/platsmp-a9.c b/arch/arm/mach-mvebu/platsmp-a9.c index df0a9cc5da59..3d5000481c11 100644 --- a/arch/arm/mach-mvebu/platsmp-a9.c +++ b/arch/arm/mach-mvebu/platsmp-a9.c @@ -24,7 +24,7 @@ extern void mvebu_cortex_a9_secondary_startup(void); -static int __cpuinit mvebu_cortex_a9_boot_secondary(unsigned int cpu, +static int mvebu_cortex_a9_boot_secondary(unsigned int cpu, struct task_struct *idle) { int ret, hw_cpu; diff --git a/arch/arm/mach-mvebu/pm-board.c b/arch/arm/mach-mvebu/pm-board.c index 6dfd4ab97b2a..301ab38d38ba 100644 --- a/arch/arm/mach-mvebu/pm-board.c +++ b/arch/arm/mach-mvebu/pm-board.c @@ -43,6 +43,9 @@ static void mvebu_armada_xp_gp_pm_enter(void __iomem *sdram_reg, u32 srcmd) for (i = 0; i < ARMADA_XP_GP_PIC_NR_GPIOS; i++) ackcmd |= BIT(pic_raw_gpios[i]); + srcmd = cpu_to_le32(srcmd); + ackcmd = cpu_to_le32(ackcmd); + /* * Wait a while, the PIC needs quite a bit of time between the * two GPIO commands. 
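
The SRAM/OCRAM suspend helpers touched in this series (imx5 and imx6 above, socfpga just below) all switch from dev_get_gen_pool() to the renamed gen_pool_get(). A minimal, hypothetical sketch of the calling pattern — the helper name and surrounding code are invented, only the genalloc calls are real — looks like this:

	#include <linux/genalloc.h>
	#include <linux/platform_device.h>

	/* Hypothetical helper showing the gen_pool_get() calling pattern. */
	static unsigned long example_ocram_alloc(struct platform_device *pdev,
						 size_t size)
	{
		struct gen_pool *pool;

		/* Renamed lookup; this used to be dev_get_gen_pool(&pdev->dev). */
		pool = gen_pool_get(&pdev->dev);
		if (!pool)
			return 0;

		/* gen_pool_alloc() returns 0 if the pool cannot satisfy the request. */
		return gen_pool_alloc(pool, size);
	}

Only the lookup call changes in the hunks above; the rest of each suspend path is left untouched.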
diff --git a/arch/arm/mach-rockchip/platsmp.c b/arch/arm/mach-rockchip/platsmp.c index 2e6ab67e2284..8fcec1cc101e 100644 --- a/arch/arm/mach-rockchip/platsmp.c +++ b/arch/arm/mach-rockchip/platsmp.c @@ -119,8 +119,7 @@ static int pmu_set_power_domain(int pd, bool on) * Handling of CPU cores */ -static int __cpuinit rockchip_boot_secondary(unsigned int cpu, - struct task_struct *idle) +static int rockchip_boot_secondary(unsigned int cpu, struct task_struct *idle) { int ret; diff --git a/arch/arm/mach-socfpga/pm.c b/arch/arm/mach-socfpga/pm.c index 1ed89fc2b7a8..6a4199f2bffb 100644 --- a/arch/arm/mach-socfpga/pm.c +++ b/arch/arm/mach-socfpga/pm.c @@ -56,7 +56,7 @@ static int socfpga_setup_ocram_self_refresh(void) goto put_node; } - ocram_pool = dev_get_gen_pool(&pdev->dev); + ocram_pool = gen_pool_get(&pdev->dev); if (!ocram_pool) { pr_warn("%s: ocram pool unavailable!\n", __func__); ret = -ENODEV; diff --git a/arch/arm/mach-vexpress/spc.c b/arch/arm/mach-vexpress/spc.c index f61158c6ce71..5766ce2be32b 100644 --- a/arch/arm/mach-vexpress/spc.c +++ b/arch/arm/mach-vexpress/spc.c @@ -589,4 +589,4 @@ static int __init ve_spc_clk_init(void) platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0); return 0; } -module_init(ve_spc_clk_init); +device_initcall(ve_spc_clk_init); diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi index 0bb287ca0a98..0689c3fb56e3 100644 --- a/arch/arm64/boot/dts/apm/apm-storm.dtsi +++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi @@ -717,6 +717,19 @@ phy-names = "sata-phy"; }; + sbgpio: sbgpio@17001000{ + compatible = "apm,xgene-gpio-sb"; + reg = <0x0 0x17001000 0x0 0x400>; + #gpio-cells = <2>; + gpio-controller; + interrupts = <0x0 0x28 0x1>, + <0x0 0x29 0x1>, + <0x0 0x2a 0x1>, + <0x0 0x2b 0x1>, + <0x0 0x2c 0x1>, + <0x0 0x2d 0x1>; + }; + rtc: rtc@10510000 { compatible = "apm,xgene-rtc"; reg = <0x0 0x10510000 0x0 0x400>; diff --git a/arch/avr32/mach-at32ap/extint.c b/arch/avr32/mach-at32ap/extint.c index cfb298d66305..2d48b6a46166 100644 --- a/arch/avr32/mach-at32ap/extint.c +++ b/arch/avr32/mach-at32ap/extint.c @@ -231,8 +231,7 @@ static int __init eic_probe(struct platform_device *pdev) irq_set_chip_data(eic->first_irq + i, eic); } - irq_set_chained_handler(int_irq, demux_eic_irq); - irq_set_handler_data(int_irq, eic); + irq_set_chained_handler_and_data(int_irq, demux_eic_irq, eic); if (pdev->id == 0) { nmi_eic = eic; diff --git a/arch/cris/arch-v10/drivers/eeprom.c b/arch/cris/arch-v10/drivers/eeprom.c index 5047a33043bd..f679a19dfeb8 100644 --- a/arch/cris/arch-v10/drivers/eeprom.c +++ b/arch/cris/arch-v10/drivers/eeprom.c @@ -848,5 +848,4 @@ static void eeprom_disable_write_protect(void) /* Write protect disabled */ } } - -module_init(eeprom_init); +device_initcall(eeprom_init); diff --git a/arch/cris/arch-v32/mm/intmem.c b/arch/cris/arch-v32/mm/intmem.c index 1b17d92cef8e..9ef56092a4c5 100644 --- a/arch/cris/arch-v32/mm/intmem.c +++ b/arch/cris/arch-v32/mm/intmem.c @@ -145,6 +145,5 @@ unsigned long crisv32_intmem_virt_to_phys(void* addr) (unsigned long)intmem_virtual + MEM_INTMEM_START + RESERVED_SIZE); } - -module_init(crisv32_intmem_init); +device_initcall(crisv32_intmem_init); diff --git a/arch/frv/mb93090-mb00/flash.c b/arch/frv/mb93090-mb00/flash.c index c0e3707c2299..e1cf802d1639 100644 --- a/arch/frv/mb93090-mb00/flash.c +++ b/arch/frv/mb93090-mb00/flash.c @@ -9,7 +9,7 @@ * 2 of the Licence, or (at your option) any later version. 
*/ -#include <linux/init.h> +#include <linux/module.h> #include <linux/platform_device.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c index 3a428f19a001..085047f3a545 100644 --- a/arch/ia64/hp/sim/simscsi.c +++ b/arch/ia64/hp/sim/simscsi.c @@ -368,13 +368,4 @@ simscsi_init(void) scsi_host_put(host); return error; } - -static void __exit -simscsi_exit(void) -{ - scsi_remove_host(host); - scsi_host_put(host); -} - -module_init(simscsi_init); -module_exit(simscsi_exit); +device_initcall(simscsi_init); diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 7f3028965064..97e48b0eefc7 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -215,10 +215,6 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot) pmd_t *pmd; pte_t *pte; - if (!PageReserved(page)) - printk(KERN_ERR "put_kernel_page: page at 0x%p not in reserved memory\n", - page_address(page)); - pgd = pgd_offset_k(address); /* note: this is NOT pgd_offset()! */ { diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c index ea21d4cad540..aa19b7ac8222 100644 --- a/arch/ia64/mm/numa.c +++ b/arch/ia64/mm/numa.c @@ -58,27 +58,22 @@ paddr_to_nid(unsigned long paddr) * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where * the section resides. */ -int __meminit __early_pfn_to_nid(unsigned long pfn) +int __meminit __early_pfn_to_nid(unsigned long pfn, + struct mminit_pfnnid_cache *state) { int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec; - /* - * NOTE: The following SMP-unsafe globals are only used early in boot - * when the kernel is running single-threaded. - */ - static int __meminitdata last_ssec, last_esec; - static int __meminitdata last_nid; - if (section >= last_ssec && section < last_esec) - return last_nid; + if (section >= state->last_start && section < state->last_end) + return state->last_nid; for (i = 0; i < num_node_memblks; i++) { ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT; esec = (node_memblk[i].start_paddr + node_memblk[i].size + ((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT; if (section >= ssec && section < esec) { - last_ssec = ssec; - last_esec = esec; - last_nid = node_memblk[i].nid; + state->last_start = ssec; + state->last_end = esec; + state->last_nid = node_memblk[i].nid; return node_memblk[i].nid; } } diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c index 27793f7aa99c..5b799d4deb74 100644 --- a/arch/ia64/sn/kernel/mca.c +++ b/arch/ia64/sn/kernel/mca.c @@ -142,5 +142,4 @@ static int __init sn_salinfo_init(void) salinfo_platform_oemdata = &sn_salinfo_platform_oemdata; return 0; } - -module_init(sn_salinfo_init) +device_initcall(sn_salinfo_init); diff --git a/arch/m68k/mac/psc.c b/arch/m68k/mac/psc.c index 835fa04511c8..272dde481d17 100644 --- a/arch/m68k/mac/psc.c +++ b/arch/m68k/mac/psc.c @@ -148,14 +148,10 @@ static void psc_irq(unsigned int irq, struct irq_desc *desc) void __init psc_register_interrupts(void) { - irq_set_chained_handler(IRQ_AUTO_3, psc_irq); - irq_set_handler_data(IRQ_AUTO_3, (void *)0x30); - irq_set_chained_handler(IRQ_AUTO_4, psc_irq); - irq_set_handler_data(IRQ_AUTO_4, (void *)0x40); - irq_set_chained_handler(IRQ_AUTO_5, psc_irq); - irq_set_handler_data(IRQ_AUTO_5, (void *)0x50); - irq_set_chained_handler(IRQ_AUTO_6, psc_irq); - irq_set_handler_data(IRQ_AUTO_6, (void *)0x60); + irq_set_chained_handler_and_data(IRQ_AUTO_3, psc_irq, (void *)0x30); + irq_set_chained_handler_and_data(IRQ_AUTO_4, psc_irq, (void 
*)0x40); + irq_set_chained_handler_and_data(IRQ_AUTO_5, psc_irq, (void *)0x50); + irq_set_chained_handler_and_data(IRQ_AUTO_6, psc_irq, (void *)0x60); } void psc_irq_enable(int irq) { diff --git a/arch/mips/ath25/ar2315.c b/arch/mips/ath25/ar2315.c index 8742e1cee492..ec9a371f1e62 100644 --- a/arch/mips/ath25/ar2315.c +++ b/arch/mips/ath25/ar2315.c @@ -161,8 +161,8 @@ void __init ar2315_arch_init_irq(void) irq = irq_create_mapping(domain, AR2315_MISC_IRQ_AHB); setup_irq(irq, &ar2315_ahb_err_interrupt); - irq_set_chained_handler(AR2315_IRQ_MISC, ar2315_misc_irq_handler); - irq_set_handler_data(AR2315_IRQ_MISC, domain); + irq_set_chained_handler_and_data(AR2315_IRQ_MISC, + ar2315_misc_irq_handler, domain); ar2315_misc_irq_domain = domain; } diff --git a/arch/mips/ath25/ar5312.c b/arch/mips/ath25/ar5312.c index 094b938fd603..e63e38fa4880 100644 --- a/arch/mips/ath25/ar5312.c +++ b/arch/mips/ath25/ar5312.c @@ -156,8 +156,8 @@ void __init ar5312_arch_init_irq(void) irq = irq_create_mapping(domain, AR5312_MISC_IRQ_AHB_PROC); setup_irq(irq, &ar5312_ahb_err_interrupt); - irq_set_chained_handler(AR5312_IRQ_MISC, ar5312_misc_irq_handler); - irq_set_handler_data(AR5312_IRQ_MISC, domain); + irq_set_chained_handler_and_data(AR5312_IRQ_MISC, + ar5312_misc_irq_handler, domain); ar5312_misc_irq_domain = domain; } diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile index 69a8a8dabc2b..2a5926578841 100644 --- a/arch/mips/cavium-octeon/Makefile +++ b/arch/mips/cavium-octeon/Makefile @@ -9,9 +9,6 @@ # Copyright (C) 2005-2009 Cavium Networks # -CFLAGS_octeon-platform.o = -I$(src)/../../../scripts/dtc/libfdt -CFLAGS_setup.o = -I$(src)/../../../scripts/dtc/libfdt - obj-y := cpu.o setup.o octeon-platform.o octeon-irq.o csrc-octeon.o obj-y += dma-octeon.o obj-y += octeon-memcpy.o diff --git a/arch/mips/mti-sead3/Makefile b/arch/mips/mti-sead3/Makefile index ecd71db6258b..2e52cbd20ceb 100644 --- a/arch/mips/mti-sead3/Makefile +++ b/arch/mips/mti-sead3/Makefile @@ -15,5 +15,3 @@ obj-y := sead3-lcd.o sead3-display.o sead3-init.o \ obj-y += leds-sead3.o obj-$(CONFIG_EARLY_PRINTK) += sead3-console.o - -CFLAGS_sead3-setup.o = -I$(src)/../../../scripts/dtc/libfdt diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c index dadb30306a0a..f8d0acb4f973 100644 --- a/arch/mips/pci/pci-ar2315.c +++ b/arch/mips/pci/pci-ar2315.c @@ -384,8 +384,8 @@ static void ar2315_pci_irq_init(struct ar2315_pci_ctrl *apc) apc->irq_ext = irq_create_mapping(apc->domain, AR2315_PCI_IRQ_EXT); - irq_set_chained_handler(apc->irq, ar2315_pci_irq_handler); - irq_set_handler_data(apc->irq, apc); + irq_set_chained_handler_and_data(apc->irq, ar2315_pci_irq_handler, + apc); /* Clear any pending Abort or external Interrupts * and enable interrupt processing */ diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c index da301e0a2f1f..53707aacc0f8 100644 --- a/arch/mips/ralink/irq.c +++ b/arch/mips/ralink/irq.c @@ -184,8 +184,7 @@ static int __init intc_of_init(struct device_node *node, rt_intc_w32(INTC_INT_GLOBAL, INTC_REG_ENABLE); - irq_set_chained_handler(irq, ralink_intc_irq_handler); - irq_set_handler_data(irq, domain); + irq_set_chained_handler_and_data(irq, ralink_intc_irq_handler, domain); /* tell the kernel which irq is used for performance monitoring */ rt_perfcount_irq = irq_create_mapping(domain, 9); diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c index 6ab3b73efcf8..480de70f4059 100644 --- a/arch/mn10300/kernel/irq.c +++ b/arch/mn10300/kernel/irq.c @@ -320,11 +320,11 @@ void 
migrate_irqs(void) if (irqd_is_per_cpu(data)) continue; - if (cpumask_test_cpu(self, &data->affinity) && + if (cpumask_test_cpu(self, data->affinity) && !cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) { int cpu_id; cpu_id = cpumask_first(cpu_online_mask); - cpumask_set_cpu(cpu_id, &data->affinity); + cpumask_set_cpu(cpu_id, data->affinity); } /* We need to operate irq_affinity_online atomically. */ arch_local_cli_save(flags); @@ -335,7 +335,7 @@ void migrate_irqs(void) GxICR(irq) = x & GxICR_LEVEL; tmp = GxICR(irq); - new = cpumask_any_and(&data->affinity, + new = cpumask_any_and(data->affinity, cpu_online_mask); irq_affinity_online[irq] = new; diff --git a/arch/mn10300/unit-asb2303/flash.c b/arch/mn10300/unit-asb2303/flash.c index 17fe083fcb6f..b03d8738d67c 100644 --- a/arch/mn10300/unit-asb2303/flash.c +++ b/arch/mn10300/unit-asb2303/flash.c @@ -96,5 +96,4 @@ static int __init asb2303_mtd_init(void) platform_device_register(&asb2303_sysflash); return 0; } - -module_init(asb2303_mtd_init); +device_initcall(asb2303_mtd_init); diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c index d5cae55195ec..10a5ae9553fd 100644 --- a/arch/parisc/kernel/pdc_cons.c +++ b/arch/parisc/kernel/pdc_cons.c @@ -207,8 +207,7 @@ static int __init pdc_console_tty_driver_init(void) return 0; } - -module_init(pdc_console_tty_driver_init); +device_initcall(pdc_console_tty_driver_init); static struct tty_driver * pdc_console_device (struct console *c, int *index) { diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c index ba0c053e25ae..518f4f5f1f43 100644 --- a/arch/parisc/kernel/perf.c +++ b/arch/parisc/kernel/perf.c @@ -543,6 +543,7 @@ static int __init perf_init(void) return 0; } +device_initcall(perf_init); /* * perf_start_counters(void) @@ -847,5 +848,3 @@ printk("perf_rdr_write\n"); } printk("perf_rdr_write done\n"); } - -module_init(perf_init); diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h index 8dcd744e5728..7e3789ea396b 100644 --- a/arch/powerpc/boot/libfdt_env.h +++ b/arch/powerpc/boot/libfdt_env.h @@ -10,6 +10,10 @@ typedef u32 uint32_t; typedef u64 uint64_t; typedef unsigned long uintptr_t; +typedef __be16 fdt16_t; +typedef __be32 fdt32_t; +typedef __be64 fdt64_t; + #define fdt16_to_cpu(x) be16_to_cpu(x) #define cpu_to_fdt16(x) cpu_to_be16(x) #define fdt32_to_cpu(x) be32_to_cpu(x) diff --git a/arch/powerpc/boot/of.h b/arch/powerpc/boot/of.h index 5603320dce07..53f8f27f94e4 100644 --- a/arch/powerpc/boot/of.h +++ b/arch/powerpc/boot/of.h @@ -21,7 +21,9 @@ int of_setprop(const void *phandle, const char *name, const void *buf, /* Console functions */ void of_console_init(void); +typedef u16 __be16; typedef u32 __be32; +typedef u64 __be64; #ifdef __LITTLE_ENDIAN__ #define cpu_to_be16(x) swab16(x) diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 87c7d1473488..12868b1c4e05 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -2,7 +2,6 @@ # Makefile for the linux kernel. 
# -CFLAGS_prom.o = -I$(src)/../../../scripts/dtc/libfdt CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 56f44848b044..43922509a483 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -1124,4 +1124,4 @@ static int __init rtc_init(void) return PTR_ERR_OR_ZERO(pdev); } -module_init(rtc_init); +device_initcall(rtc_init); diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 1f614d778a8b..bb0bd7025cb8 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -928,7 +928,7 @@ static int __init hugetlbpage_init(void) return 0; } #endif -module_init(hugetlbpage_init); +arch_initcall(hugetlbpage_init); void flush_dcache_icache_hugepage(struct page *page) { diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index c9adbfb65006..fcbea4b51a78 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c @@ -445,5 +445,4 @@ static int pmc_init(void) { return platform_driver_register(&pmc_driver); } - -module_init(pmc_init); +device_initcall(pmc_init); diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c index ce73ce865613..791c6142c4a7 100644 --- a/arch/powerpc/platforms/ps3/time.c +++ b/arch/powerpc/platforms/ps3/time.c @@ -92,5 +92,4 @@ static int __init ps3_rtc_init(void) return PTR_ERR_OR_ZERO(pdev); } - -module_init(ps3_rtc_init); +device_initcall(ps3_rtc_init); diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c index d631022ffb4b..38138cf8d33e 100644 --- a/arch/powerpc/sysdev/fsl_lbc.c +++ b/arch/powerpc/sysdev/fsl_lbc.c @@ -407,4 +407,4 @@ static int __init fsl_lbc_init(void) { return platform_driver_register(&fsl_lbc_ctrl_driver); } -module_init(fsl_lbc_init); +subsys_initcall(fsl_lbc_init); diff --git a/arch/sh/boards/mach-highlander/psw.c b/arch/sh/boards/mach-highlander/psw.c index 522786318d36..40e2b585d488 100644 --- a/arch/sh/boards/mach-highlander/psw.c +++ b/arch/sh/boards/mach-highlander/psw.c @@ -10,7 +10,7 @@ * for more details. 
*/ #include <linux/io.h> -#include <linux/init.h> +#include <linux/module.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <mach/highlander.h> diff --git a/arch/sh/boards/mach-landisk/psw.c b/arch/sh/boards/mach-landisk/psw.c index bef83522f958..5192b1f43ada 100644 --- a/arch/sh/boards/mach-landisk/psw.c +++ b/arch/sh/boards/mach-landisk/psw.c @@ -140,4 +140,4 @@ static int __init psw_init(void) { return platform_add_devices(psw_devices, ARRAY_SIZE(psw_devices)); } -module_init(psw_init); +device_initcall(psw_init); diff --git a/arch/tile/kernel/usb.c b/arch/tile/kernel/usb.c index 5af8debc6a71..f0da5a237e94 100644 --- a/arch/tile/kernel/usb.c +++ b/arch/tile/kernel/usb.c @@ -21,6 +21,7 @@ #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/usb/tilegx.h> +#include <linux/init.h> #include <linux/types.h> static u64 ehci_dmamask = DMA_BIT_MASK(32); diff --git a/arch/unicore32/kernel/fpu-ucf64.c b/arch/unicore32/kernel/fpu-ucf64.c index 282a60ac82ba..a53343a90ca2 100644 --- a/arch/unicore32/kernel/fpu-ucf64.c +++ b/arch/unicore32/kernel/fpu-ucf64.c @@ -90,8 +90,8 @@ void ucf64_exchandler(u32 inst, u32 fpexc, struct pt_regs *regs) tmp &= ~(FPSCR_CON); exc &= ~(FPSCR_CMPINSTR_BIT | FPSCR_CON); } else { - pr_debug(KERN_ERR "UniCore-F64 Error: unhandled exceptions\n"); - pr_debug(KERN_ERR "UniCore-F64 FPSCR 0x%08x INST 0x%08x\n", + pr_debug("UniCore-F64 Error: unhandled exceptions\n"); + pr_debug("UniCore-F64 FPSCR 0x%08x INST 0x%08x\n", cff(FPSCR), inst); ucf64_raise_sigfpe(0, regs); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index d05a42357ef0..55bced17dc95 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -34,6 +34,7 @@ config X86 select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO select ARCH_SUPPORTS_ATOMIC_RMW + select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT select ARCH_SUPPORTS_INT128 if X86_64 select ARCH_SUPPORTS_NUMA_BALANCING if X86_64 select ARCH_USE_BUILTIN_BSWAP @@ -87,6 +88,7 @@ config X86 select HAVE_CMPXCHG_DOUBLE select HAVE_CMPXCHG_LOCAL select HAVE_CONTEXT_TRACKING if X86_64 + select HAVE_COPY_THREAD_TLS select HAVE_C_RECORDMCOUNT select HAVE_DEBUG_KMEMLEAK select HAVE_DEBUG_STACKOVERFLOW diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 2bfc8a7c88c1..dccad38b59a8 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -1537,7 +1537,7 @@ static void __exit aesni_exit(void) crypto_fpu_exit(); } -module_init(aesni_init); +late_initcall(aesni_init); module_exit(aesni_exit); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized"); diff --git a/arch/x86/kernel/bootflag.c b/arch/x86/kernel/bootflag.c index 5de7f4c56971..52c8e3c7789d 100644 --- a/arch/x86/kernel/bootflag.c +++ b/arch/x86/kernel/bootflag.c @@ -98,4 +98,4 @@ static int __init sbf_init(void) return 0; } -module_init(sbf_init); +arch_initcall(sbf_init); diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c index 7795f3f8b1d5..43dd672d788b 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_bts.c +++ b/arch/x86/kernel/cpu/perf_event_intel_bts.c @@ -530,5 +530,4 @@ static __init int bts_init(void) return perf_pmu_register(&bts_pmu, "intel_bts", -1); } - -module_init(bts_init); +arch_initcall(bts_init); diff --git a/arch/x86/kernel/cpu/perf_event_intel_pt.c b/arch/x86/kernel/cpu/perf_event_intel_pt.c index 159887c3a89d..183de719628d 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_pt.c 
+++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c @@ -1106,5 +1106,4 @@ static __init int pt_init(void) return ret; } - -module_init(pt_init); +arch_initcall(pt_init); diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 5ee771859b6f..1f4acd68b98b 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -65,7 +65,7 @@ static int __init add_bus_probe(void) return of_platform_bus_probe(NULL, ce4100_ids, NULL); } -module_init(add_bus_probe); +device_initcall(add_bus_probe); #ifdef CONFIG_PCI struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus) diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index ca05f86481aa..ca83f7ac388b 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -72,15 +72,16 @@ static int setup_cmdline(struct kimage *image, struct boot_params *params, unsigned long cmdline_len) { char *cmdline_ptr = ((char *)params) + cmdline_offset; - unsigned long cmdline_ptr_phys, len; + unsigned long cmdline_ptr_phys, len = 0; uint32_t cmdline_low_32, cmdline_ext_32; - memcpy(cmdline_ptr, cmdline, cmdline_len); if (image->type == KEXEC_TYPE_CRASH) { - len = sprintf(cmdline_ptr + cmdline_len - 1, - " elfcorehdr=0x%lx", image->arch.elf_load_addr); - cmdline_len += len; + len = sprintf(cmdline_ptr, + "elfcorehdr=0x%lx ", image->arch.elf_load_addr); } + memcpy(cmdline_ptr + len, cmdline, cmdline_len); + cmdline_len += len; + cmdline_ptr[cmdline_len - 1] = '\0'; pr_debug("Final command line is: %s\n", cmdline_ptr); diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index c09c99ccf3e3..f73c962fe636 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -128,8 +128,8 @@ void release_thread(struct task_struct *dead_task) release_vm86_irqs(dead_task); } -int copy_thread(unsigned long clone_flags, unsigned long sp, - unsigned long arg, struct task_struct *p) +int copy_thread_tls(unsigned long clone_flags, unsigned long sp, + unsigned long arg, struct task_struct *p, unsigned long tls) { struct pt_regs *childregs = task_pt_regs(p); struct task_struct *tsk; @@ -184,7 +184,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, */ if (clone_flags & CLONE_SETTLS) err = do_set_thread_area(p, -1, - (struct user_desc __user *)childregs->si, 0); + (struct user_desc __user *)tls, 0); if (err && p->thread.io_bitmap_ptr) { kfree(p->thread.io_bitmap_ptr); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 843f92e4c711..71d7849a07f7 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -150,8 +150,8 @@ static inline u32 read_32bit_tls(struct task_struct *t, int tls) return get_desc_base(&t->thread.tls_array[tls]); } -int copy_thread(unsigned long clone_flags, unsigned long sp, - unsigned long arg, struct task_struct *p) +int copy_thread_tls(unsigned long clone_flags, unsigned long sp, + unsigned long arg, struct task_struct *p, unsigned long tls) { int err; struct pt_regs *childregs; @@ -207,10 +207,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, #ifdef CONFIG_IA32_EMULATION if (is_ia32_task()) err = do_set_thread_area(p, -1, - (struct user_desc __user *)childregs->si, 0); + (struct user_desc __user *)tls, 0); else #endif - err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); + err = do_arch_prctl(p, ARCH_SET_FS, tls); if (err) goto out; } diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index ee22c1d93ae5..b034b1b14b9c 100644 --- 
a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c @@ -72,7 +72,7 @@ asmlinkage __visible void vsmp_irq_enable(void) } PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable); -static unsigned __init_or_module vsmp_patch(u8 type, u16 clobbers, void *ibuf, +static unsigned __init vsmp_patch(u8 type, u16 clobbers, void *ibuf, unsigned long addr, unsigned len) { switch (type) { diff --git a/arch/x86/platform/intel-mid/intel_mid_vrtc.c b/arch/x86/platform/intel-mid/intel_mid_vrtc.c index 32947ba0f62d..ee40fcb6e54d 100644 --- a/arch/x86/platform/intel-mid/intel_mid_vrtc.c +++ b/arch/x86/platform/intel-mid/intel_mid_vrtc.c @@ -173,5 +173,4 @@ static int __init intel_mid_device_create(void) return platform_device_register(&vrtc_device); } - -module_init(intel_mid_device_create); +device_initcall(intel_mid_device_create); diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c index 17b1ef3232e4..8ab021b1f141 100644 --- a/arch/xtensa/platforms/iss/network.c +++ b/arch/xtensa/platforms/iss/network.c @@ -681,6 +681,4 @@ static int iss_net_init(void) return 1; } - -module_init(iss_net_init); - +device_initcall(iss_net_init); diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c b/crypto/asymmetric_keys/pkcs7_key_type.c index 751f8fd7335d..3d13b042da73 100644 --- a/crypto/asymmetric_keys/pkcs7_key_type.c +++ b/crypto/asymmetric_keys/pkcs7_key_type.c @@ -12,6 +12,7 @@ #define pr_fmt(fmt) "PKCS7key: "fmt #include <linux/key.h> #include <linux/err.h> +#include <linux/module.h> #include <linux/key-type.h> #include <crypto/pkcs7.h> #include <keys/user-type.h> diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index fcb7807ea8b7..10561ce16ed1 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -660,8 +660,10 @@ static int add_region_before(u64 start, u64 end, u8 space_id, return -ENOMEM; error = request_range(start, end, space_id, flags, desc); - if (error) + if (error) { + kfree(reg); return error; + } reg->start = start; reg->end = end; diff --git a/drivers/base/node.c b/drivers/base/node.c index a2aa65b4215d..31df474d72f4 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -359,12 +359,16 @@ int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE #define page_initialized(page) (page->lru.next) -static int get_nid_for_pfn(unsigned long pfn) +static int __init_refok get_nid_for_pfn(unsigned long pfn) { struct page *page; if (!pfn_valid_within(pfn)) return -1; +#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT + if (system_state == SYSTEM_BOOTING) + return early_pfn_to_nid(pfn); +#endif page = pfn_to_page(pfn); if (!page_initialized(page)) return -1; diff --git a/drivers/base/property.c b/drivers/base/property.c index e645852396ba..f3f6d167f3f1 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -129,9 +129,9 @@ EXPORT_SYMBOL_GPL(device_property_present); bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname) { if (is_of_node(fwnode)) - return of_property_read_bool(of_node(fwnode), propname); + return of_property_read_bool(to_of_node(fwnode), propname); else if (is_acpi_node(fwnode)) - return !acpi_dev_prop_get(acpi_node(fwnode), propname, NULL); + return !acpi_dev_prop_get(to_acpi_node(fwnode), propname, NULL); return !!pset_prop_get(to_pset(fwnode), propname); } @@ -286,10 +286,10 @@ EXPORT_SYMBOL_GPL(device_property_read_string); ({ \ int _ret_; \ if (is_of_node(_fwnode_)) \ - _ret_ = OF_DEV_PROP_READ_ARRAY(of_node(_fwnode_), _propname_, \ + _ret_ = 
OF_DEV_PROP_READ_ARRAY(to_of_node(_fwnode_), _propname_, \ _type_, _val_, _nval_); \ else if (is_acpi_node(_fwnode_)) \ - _ret_ = acpi_dev_prop_read(acpi_node(_fwnode_), _propname_, \ + _ret_ = acpi_dev_prop_read(to_acpi_node(_fwnode_), _propname_, \ _proptype_, _val_, _nval_); \ else \ _ret_ = pset_prop_read_array(to_pset(_fwnode_), _propname_, \ @@ -425,11 +425,11 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode, { if (is_of_node(fwnode)) return val ? - of_property_read_string_array(of_node(fwnode), propname, - val, nval) : - of_property_count_strings(of_node(fwnode), propname); + of_property_read_string_array(to_of_node(fwnode), + propname, val, nval) : + of_property_count_strings(to_of_node(fwnode), propname); else if (is_acpi_node(fwnode)) - return acpi_dev_prop_read(acpi_node(fwnode), propname, + return acpi_dev_prop_read(to_acpi_node(fwnode), propname, DEV_PROP_STRING, val, nval); return pset_prop_read_array(to_pset(fwnode), propname, @@ -456,9 +456,9 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode, const char *propname, const char **val) { if (is_of_node(fwnode)) - return of_property_read_string(of_node(fwnode), propname, val); + return of_property_read_string(to_of_node(fwnode), propname, val); else if (is_acpi_node(fwnode)) - return acpi_dev_prop_read(acpi_node(fwnode), propname, + return acpi_dev_prop_read(to_acpi_node(fwnode), propname, DEV_PROP_STRING, val, 1); return -ENXIO; @@ -476,13 +476,13 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev, if (IS_ENABLED(CONFIG_OF) && dev->of_node) { struct device_node *node; - node = of_get_next_available_child(dev->of_node, of_node(child)); + node = of_get_next_available_child(dev->of_node, to_of_node(child)); if (node) return &node->fwnode; } else if (IS_ENABLED(CONFIG_ACPI)) { struct acpi_device *node; - node = acpi_get_next_child(dev, acpi_node(child)); + node = acpi_get_next_child(dev, to_acpi_node(child)); if (node) return acpi_fwnode_handle(node); } @@ -501,7 +501,7 @@ EXPORT_SYMBOL_GPL(device_get_next_child_node); void fwnode_handle_put(struct fwnode_handle *fwnode) { if (is_of_node(fwnode)) - of_node_put(of_node(fwnode)); + of_node_put(to_of_node(fwnode)); } EXPORT_SYMBOL_GPL(fwnode_handle_put); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index ec6c5c6e1ac9..d94529d5c8e9 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -346,6 +346,7 @@ struct rbd_device { struct rbd_image_header header; unsigned long flags; /* possibly lock protected */ struct rbd_spec *spec; + struct rbd_options *opts; char *header_name; @@ -724,34 +725,36 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts) } /* - * mount options + * (Per device) rbd map options */ enum { + Opt_queue_depth, Opt_last_int, /* int args above */ Opt_last_string, /* string args above */ Opt_read_only, Opt_read_write, - /* Boolean args above */ - Opt_last_bool, + Opt_err }; static match_table_t rbd_opts_tokens = { + {Opt_queue_depth, "queue_depth=%d"}, /* int args above */ /* string args above */ {Opt_read_only, "read_only"}, {Opt_read_only, "ro"}, /* Alternate spelling */ {Opt_read_write, "read_write"}, {Opt_read_write, "rw"}, /* Alternate spelling */ - /* Boolean args above */ - {-1, NULL} + {Opt_err, NULL} }; struct rbd_options { + int queue_depth; bool read_only; }; +#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ #define RBD_READ_ONLY_DEFAULT false static int parse_rbd_opts_token(char *c, void *private) @@ -761,27 +764,27 @@ static int parse_rbd_opts_token(char *c, void 
*private) int token, intval, ret; token = match_token(c, rbd_opts_tokens, argstr); - if (token < 0) - return -EINVAL; - if (token < Opt_last_int) { ret = match_int(&argstr[0], &intval); if (ret < 0) { - pr_err("bad mount option arg (not int) " - "at '%s'\n", c); + pr_err("bad mount option arg (not int) at '%s'\n", c); return ret; } dout("got int token %d val %d\n", token, intval); } else if (token > Opt_last_int && token < Opt_last_string) { - dout("got string token %d val %s\n", token, - argstr[0].from); - } else if (token > Opt_last_string && token < Opt_last_bool) { - dout("got Boolean token %d\n", token); + dout("got string token %d val %s\n", token, argstr[0].from); } else { dout("got token %d\n", token); } switch (token) { + case Opt_queue_depth: + if (intval < 1) { + pr_err("queue_depth out of range\n"); + return -EINVAL; + } + rbd_opts->queue_depth = intval; + break; case Opt_read_only: rbd_opts->read_only = true; break; @@ -789,9 +792,10 @@ static int parse_rbd_opts_token(char *c, void *private) rbd_opts->read_only = false; break; default: - rbd_assert(false); - break; + /* libceph prints "bad option" msg */ + return -EINVAL; } + return 0; } @@ -1563,22 +1567,39 @@ static void rbd_obj_request_end(struct rbd_obj_request *obj_request) /* * Wait for an object request to complete. If interrupted, cancel the * underlying osd request. + * + * @timeout: in jiffies, 0 means "wait forever" */ -static int rbd_obj_request_wait(struct rbd_obj_request *obj_request) +static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request, + unsigned long timeout) { - int ret; + long ret; dout("%s %p\n", __func__, obj_request); - - ret = wait_for_completion_interruptible(&obj_request->completion); - if (ret < 0) { - dout("%s %p interrupted\n", __func__, obj_request); + ret = wait_for_completion_interruptible_timeout( + &obj_request->completion, + ceph_timeout_jiffies(timeout)); + if (ret <= 0) { + if (ret == 0) + ret = -ETIMEDOUT; rbd_obj_request_end(obj_request); - return ret; + } else { + ret = 0; } - dout("%s %p done\n", __func__, obj_request); - return 0; + dout("%s %p ret %d\n", __func__, obj_request, (int)ret); + return ret; +} + +static int rbd_obj_request_wait(struct rbd_obj_request *obj_request) +{ + return __rbd_obj_request_wait(obj_request, 0); +} + +static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request, + unsigned long timeout) +{ + return __rbd_obj_request_wait(obj_request, timeout); } static void rbd_img_request_complete(struct rbd_img_request *img_request) @@ -2001,11 +2022,11 @@ static struct rbd_obj_request *rbd_obj_request_create(const char *object_name, rbd_assert(obj_request_type_valid(type)); size = strlen(object_name) + 1; - name = kmalloc(size, GFP_KERNEL); + name = kmalloc(size, GFP_NOIO); if (!name) return NULL; - obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL); + obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO); if (!obj_request) { kfree(name); return NULL; @@ -2376,7 +2397,7 @@ static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request, } if (opcode == CEPH_OSD_OP_DELETE) - osd_req_op_init(osd_request, num_ops, opcode); + osd_req_op_init(osd_request, num_ops, opcode, 0); else osd_req_op_extent_init(osd_request, num_ops, opcode, offset, length, 0, 0); @@ -2848,7 +2869,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request) goto out; stat_request->callback = rbd_img_obj_exists_callback; - osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT); + 
osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0); osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0, false, false); rbd_osd_req_format_read(stat_request); @@ -3122,6 +3143,7 @@ static struct rbd_obj_request *rbd_obj_watch_request_helper( bool watch) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; + struct ceph_options *opts = osdc->client->options; struct rbd_obj_request *obj_request; int ret; @@ -3148,7 +3170,7 @@ static struct rbd_obj_request *rbd_obj_watch_request_helper( if (ret) goto out; - ret = rbd_obj_request_wait(obj_request); + ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout); if (ret) goto out; @@ -3750,10 +3772,9 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set)); rbd_dev->tag_set.ops = &rbd_mq_ops; - rbd_dev->tag_set.queue_depth = BLKDEV_MAX_RQ; + rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth; rbd_dev->tag_set.numa_node = NUMA_NO_NODE; - rbd_dev->tag_set.flags = - BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; + rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; rbd_dev->tag_set.nr_hw_queues = 1; rbd_dev->tag_set.cmd_size = sizeof(struct work_struct); @@ -3773,6 +3794,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) /* set io sizes to object size */ segment_size = rbd_obj_bytes(&rbd_dev->header); blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE); + blk_queue_max_segments(q, segment_size / SECTOR_SIZE); blk_queue_max_segment_size(q, segment_size); blk_queue_io_min(q, segment_size); blk_queue_io_opt(q, segment_size); @@ -4044,7 +4066,8 @@ static void rbd_spec_free(struct kref *kref) } static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, - struct rbd_spec *spec) + struct rbd_spec *spec, + struct rbd_options *opts) { struct rbd_device *rbd_dev; @@ -4058,8 +4081,9 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, INIT_LIST_HEAD(&rbd_dev->node); init_rwsem(&rbd_dev->header_rwsem); - rbd_dev->spec = spec; rbd_dev->rbd_client = rbdc; + rbd_dev->spec = spec; + rbd_dev->opts = opts; /* Initialize the layout used for all rbd requests */ @@ -4075,6 +4099,7 @@ static void rbd_dev_destroy(struct rbd_device *rbd_dev) { rbd_put_client(rbd_dev->rbd_client); rbd_spec_put(rbd_dev->spec); + kfree(rbd_dev->opts); kfree(rbd_dev); } @@ -4933,6 +4958,7 @@ static int rbd_add_parse_args(const char *buf, goto out_mem; rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; + rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT; copts = ceph_parse_options(options, mon_addrs, mon_addrs + mon_addrs_size - 1, @@ -4963,8 +4989,8 @@ out_err: */ static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name) { + struct ceph_options *opts = rbdc->client->options; u64 newest_epoch; - unsigned long timeout = rbdc->client->options->mount_timeout * HZ; int tries = 0; int ret; @@ -4979,7 +5005,8 @@ again: if (rbdc->client->osdc.osdmap->epoch < newest_epoch) { ceph_monc_request_next_osdmap(&rbdc->client->monc); (void) ceph_monc_wait_osdmap(&rbdc->client->monc, - newest_epoch, timeout); + newest_epoch, + opts->mount_timeout); goto again; } else { /* the osdmap we have is new enough */ @@ -5148,7 +5175,7 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev) rbdc = __rbd_get_client(rbd_dev->rbd_client); ret = -ENOMEM; - parent = rbd_dev_create(rbdc, parent_spec); + parent = rbd_dev_create(rbdc, parent_spec, NULL); if (!parent) goto out_err; @@ -5394,9 +5421,6 @@ static ssize_t do_rbd_add(struct bus_type *bus, 
rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec); if (rc < 0) goto err_out_module; - read_only = rbd_opts->read_only; - kfree(rbd_opts); - rbd_opts = NULL; /* done with this */ rbdc = rbd_get_client(ceph_opts); if (IS_ERR(rbdc)) { @@ -5422,11 +5446,12 @@ static ssize_t do_rbd_add(struct bus_type *bus, goto err_out_client; } - rbd_dev = rbd_dev_create(rbdc, spec); + rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts); if (!rbd_dev) goto err_out_client; rbdc = NULL; /* rbd_dev now owns this */ spec = NULL; /* rbd_dev now owns this */ + rbd_opts = NULL; /* rbd_dev now owns this */ rc = rbd_dev_image_probe(rbd_dev, true); if (rc < 0) @@ -5434,6 +5459,7 @@ static ssize_t do_rbd_add(struct bus_type *bus, /* If we are mapping a snapshot it must be marked read-only */ + read_only = rbd_dev->opts->read_only; if (rbd_dev->spec->snap_id != CEPH_NOSNAP) read_only = true; rbd_dev->mapping.read_only = read_only; @@ -5458,6 +5484,7 @@ err_out_client: rbd_put_client(rbdc); err_out_args: rbd_spec_put(spec); + kfree(rbd_opts); err_out_module: module_put(THIS_MODULE); diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 0b4188b9af7c..c6dea3f6917b 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -581,7 +581,7 @@ static inline int needs_ilk_vtd_wa(void) /* Query intel_iommu to see if we need the workaround. Presumably that * was loaded first. */ - if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || + if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG || gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && intel_iommu_gfx_mapped) return 1; diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 9897f353bf1a..42f7120ca9ce 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -78,6 +78,23 @@ config COMMON_CLK_SI570 This driver supports Silicon Labs 570/571/598/599 programmable clock generators. +config COMMON_CLK_CDCE925 + tristate "Clock driver for TI CDCE925 devices" + depends on I2C + depends on OF + select REGMAP_I2C + help + ---help--- + This driver supports the TI CDCE925 programmable clock synthesizer. + The chip contains two PLLs with spread-spectrum clocking support and + five output dividers. The driver only supports the following setup, + and uses a fixed setting for the output muxes. + Y1 is derived from the input clock + Y2 and Y3 derive from PLL1 + Y4 and Y5 derive from PLL2 + Given a target output frequency, the driver will set the PLL and + divider to best approximate the desired output. + config COMMON_CLK_S2MPS11 tristate "Clock driver for S2MPS1X/S5M8767 MFD" depends on MFD_SEC_CORE @@ -150,11 +167,13 @@ config COMMON_CLK_CDCE706 ---help--- This driver supports TI CDCE706 programmable 3-PLL clock synthesizer. 
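The COMMON_CLK_CDCE925 help text above says the driver picks PLL and output-divider settings that best approximate a requested frequency. The stand-alone sketch below shows one way such a search can work; the VCO window, the divider range and the function names are assumptions made for the example and are not taken from clk-cdce925.c.

#include <stdio.h>
#include <limits.h>

/* Assumed, illustrative hardware limits (not from the CDCE925 datasheet). */
#define VCO_MIN_HZ	80000000ULL
#define VCO_MAX_HZ	230000000ULL
#define PDIV_MAX	127U

/*
 * Return the closest achievable output rate for 'target', assuming the PLL
 * multiplies 'parent' by an integer and a single integer post-divider
 * follows it.  This only sketches the "best approximation" idea that the
 * Kconfig text describes.
 */
static unsigned long long approx_output(unsigned long long parent,
					unsigned long long target,
					unsigned int *best_div)
{
	unsigned long long best_rate = 0, best_diff = ULLONG_MAX;
	unsigned int div;

	for (div = 1; div <= PDIV_MAX; div++) {
		unsigned long long vco = target * div;
		unsigned long long rate, diff;

		if (vco < VCO_MIN_HZ || vco > VCO_MAX_HZ)
			continue;

		vco = (vco / parent) * parent;	/* integer multiple of parent */
		rate = vco / div;
		diff = rate > target ? rate - target : target - rate;

		if (diff < best_diff) {
			best_diff = diff;
			best_rate = rate;
			*best_div = div;
		}
	}
	return best_rate;
}

int main(void)
{
	unsigned int div = 0;
	unsigned long long rate = approx_output(27000000ULL, 33333333ULL, &div);

	printf("closest rate: %llu Hz (post-divider %u)\n", rate, div);
	return 0;
}

Minimising the absolute error over every legal divider is the usual approach when only integer multipliers and dividers are available, which matches the "best approximate" wording in the help text.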
+source "drivers/clk/bcm/Kconfig" +source "drivers/clk/hisilicon/Kconfig" source "drivers/clk/qcom/Kconfig" endmenu -source "drivers/clk/bcm/Kconfig" source "drivers/clk/mvebu/Kconfig" source "drivers/clk/samsung/Kconfig" +source "drivers/clk/tegra/Kconfig" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 8732e4c5bf3c..c4cf075a2320 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -38,6 +38,8 @@ obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o +obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o +obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o obj-$(CONFIG_ARCH_U300) += clk-u300.o obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o @@ -45,19 +47,20 @@ obj-$(CONFIG_COMMON_CLK_WM831X) += clk-wm831x.o obj-$(CONFIG_COMMON_CLK_XGENE) += clk-xgene.o obj-$(CONFIG_COMMON_CLK_PWM) += clk-pwm.o obj-$(CONFIG_COMMON_CLK_AT91) += at91/ -obj-$(CONFIG_ARCH_BCM_MOBILE) += bcm/ +obj-$(CONFIG_ARCH_BCM) += bcm/ obj-$(CONFIG_ARCH_BERLIN) += berlin/ -obj-$(CONFIG_ARCH_HI3xxx) += hisilicon/ -obj-$(CONFIG_ARCH_HIP04) += hisilicon/ -obj-$(CONFIG_ARCH_HIX5HD2) += hisilicon/ +obj-$(CONFIG_ARCH_HISI) += hisilicon/ obj-$(CONFIG_ARCH_MXC) += imx/ obj-$(CONFIG_MACH_INGENIC) += ingenic/ obj-$(CONFIG_COMMON_CLK_KEYSTONE) += keystone/ +obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ ifeq ($(CONFIG_COMMON_CLK), y) obj-$(CONFIG_ARCH_MMP) += mmp/ endif obj-$(CONFIG_PLAT_ORION) += mvebu/ +obj-$(CONFIG_ARCH_MESON) += meson/ obj-$(CONFIG_ARCH_MXS) += mxs/ +obj-$(CONFIG_ARCH_LPC18XX) += nxp/ obj-$(CONFIG_MACH_PISTACHIO) += pistachio/ obj-$(CONFIG_COMMON_CLK_PXA) += pxa/ obj-$(CONFIG_COMMON_CLK_QCOM) += qcom/ diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c index 59fa3cc96c9e..c2400456a044 100644 --- a/drivers/clk/at91/clk-main.c +++ b/drivers/clk/at91/clk-main.c @@ -614,7 +614,7 @@ void __init of_at91sam9x5_clk_main_setup(struct device_node *np, const char *name = np->name; int i; - num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + num_parents = of_clk_get_parent_count(np); if (num_parents <= 0 || num_parents > 2) return; diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c index c1af80bcdf20..f98eafe9b12d 100644 --- a/drivers/clk/at91/clk-master.c +++ b/drivers/clk/at91/clk-master.c @@ -224,7 +224,7 @@ of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc, const char *name = np->name; struct clk_master_characteristics *characteristics; - num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + num_parents = of_clk_get_parent_count(np); if (num_parents <= 0 || num_parents > MASTER_SOURCE_MAX) return; diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c index 86c8a073dcc3..8c86c0f7847a 100644 --- a/drivers/clk/at91/clk-programmable.c +++ b/drivers/clk/at91/clk-programmable.c @@ -237,7 +237,7 @@ of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc, const char *name; struct device_node *progclknp; - num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + num_parents = of_clk_get_parent_count(np); if (num_parents <= 0 || num_parents > PROG_SOURCE_MAX) return; diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c index 2f13bd5246b5..98a84a865fe1 100644 --- a/drivers/clk/at91/clk-slow.c +++ b/drivers/clk/at91/clk-slow.c @@ -373,7 +373,7 @@ void __init 
of_at91sam9x5_clk_slow_setup(struct device_node *np, const char *name = np->name; int i; - num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + num_parents = of_clk_get_parent_count(np); if (num_parents <= 0 || num_parents > 2) return; @@ -451,7 +451,7 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np, const char *name = np->name; int i; - num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + num_parents = of_clk_get_parent_count(np); if (num_parents != 2) return; diff --git a/drivers/clk/at91/clk-smd.c b/drivers/clk/at91/clk-smd.c index 144d47ecfe63..3817ea865ca2 100644 --- a/drivers/clk/at91/clk-smd.c +++ b/drivers/clk/at91/clk-smd.c @@ -150,7 +150,7 @@ void __init of_at91sam9x5_clk_smd_setup(struct device_node *np, const char *parent_names[SMD_SOURCE_MAX]; const char *name = np->name; - num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + num_parents = of_clk_get_parent_count(np); if (num_parents <= 0 || num_parents > SMD_SOURCE_MAX) return; diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c index 0b7c3e8840ba..b0cbd2b1ff59 100644 --- a/drivers/clk/at91/clk-usb.c +++ b/drivers/clk/at91/clk-usb.c @@ -378,7 +378,7 @@ void __init of_at91sam9x5_clk_usb_setup(struct device_node *np, const char *parent_names[USB_SOURCE_MAX]; const char *name = np->name; - num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + num_parents = of_clk_get_parent_count(np); if (num_parents <= 0 || num_parents > USB_SOURCE_MAX) return; diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c index 3f27d21fb729..39be2be82b0a 100644 --- a/drivers/clk/at91/pmc.c +++ b/drivers/clk/at91/pmc.c @@ -153,7 +153,7 @@ static int pmc_irq_domain_xlate(struct irq_domain *d, return 0; } -static struct irq_domain_ops pmc_irq_ops = { +static const struct irq_domain_ops pmc_irq_ops = { .map = pmc_irq_map, .xlate = pmc_irq_domain_xlate, }; diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig index 75506e53075b..88febf53b276 100644 --- a/drivers/clk/bcm/Kconfig +++ b/drivers/clk/bcm/Kconfig @@ -7,3 +7,12 @@ config CLK_BCM_KONA Enable common clock framework support for Broadcom SoCs using "Kona" style clock control units, including those in the BCM281xx and BCM21664 families. + +config COMMON_CLK_IPROC + bool "Broadcom iProc clock support" + depends on ARCH_BCM_IPROC + depends on COMMON_CLK + default ARCH_BCM_IPROC + help + Enable common clock framework support for Broadcom SoCs + based on the iProc architecture diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile index 6297d05a9a10..8a7a477862c7 100644 --- a/drivers/clk/bcm/Makefile +++ b/drivers/clk/bcm/Makefile @@ -2,3 +2,5 @@ obj-$(CONFIG_CLK_BCM_KONA) += clk-kona.o obj-$(CONFIG_CLK_BCM_KONA) += clk-kona-setup.o obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm281xx.o obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm21664.o +obj-$(CONFIG_COMMON_CLK_IPROC) += clk-iproc-armpll.o clk-iproc-pll.o clk-iproc-asiu.o +obj-$(CONFIG_ARCH_BCM_CYGNUS) += clk-cygnus.o diff --git a/drivers/clk/bcm/clk-cygnus.c b/drivers/clk/bcm/clk-cygnus.c new file mode 100644 index 000000000000..316c60337661 --- /dev/null +++ b/drivers/clk/bcm/clk-cygnus.c @@ -0,0 +1,265 @@ +/* + * Copyright (C) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. 
+ * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/clkdev.h> +#include <linux/of_address.h> +#include <linux/delay.h> + +#include <dt-bindings/clock/bcm-cygnus.h> +#include "clk-iproc.h" + +#define reg_val(o, s, w) { .offset = o, .shift = s, .width = w, } + +#define aon_val(o, pw, ps, is) { .offset = o, .pwr_width = pw, \ + .pwr_shift = ps, .iso_shift = is } + +#define sw_ctrl_val(o, s) { .offset = o, .shift = s, } + +#define asiu_div_val(o, es, hs, hw, ls, lw) \ + { .offset = o, .en_shift = es, .high_shift = hs, \ + .high_width = hw, .low_shift = ls, .low_width = lw } + +#define reset_val(o, rs, prs, kis, kiw, kps, kpw, kas, kaw) { .offset = o, \ + .reset_shift = rs, .p_reset_shift = prs, .ki_shift = kis, \ + .ki_width = kiw, .kp_shift = kps, .kp_width = kpw, .ka_shift = kas, \ + .ka_width = kaw } + +#define vco_ctrl_val(uo, lo) { .u_offset = uo, .l_offset = lo } + +#define enable_val(o, es, hs, bs) { .offset = o, .enable_shift = es, \ + .hold_shift = hs, .bypass_shift = bs } + +#define asiu_gate_val(o, es) { .offset = o, .en_shift = es } + +static void __init cygnus_armpll_init(struct device_node *node) +{ + iproc_armpll_setup(node); +} +CLK_OF_DECLARE(cygnus_armpll, "brcm,cygnus-armpll", cygnus_armpll_init); + +static const struct iproc_pll_ctrl genpll = { + .flags = IPROC_CLK_AON | IPROC_CLK_PLL_HAS_NDIV_FRAC | + IPROC_CLK_PLL_NEEDS_SW_CFG, + .aon = aon_val(0x0, 2, 1, 0), + .reset = reset_val(0x0, 11, 10, 4, 3, 0, 4, 7, 3), + .sw_ctrl = sw_ctrl_val(0x10, 31), + .ndiv_int = reg_val(0x10, 20, 10), + .ndiv_frac = reg_val(0x10, 0, 20), + .pdiv = reg_val(0x14, 0, 4), + .vco_ctrl = vco_ctrl_val(0x18, 0x1c), + .status = reg_val(0x28, 12, 1), +}; + +static const struct iproc_clk_ctrl genpll_clk[] = { + [BCM_CYGNUS_GENPLL_AXI21_CLK] = { + .channel = BCM_CYGNUS_GENPLL_AXI21_CLK, + .flags = IPROC_CLK_AON, + .enable = enable_val(0x4, 6, 0, 12), + .mdiv = reg_val(0x20, 0, 8), + }, + [BCM_CYGNUS_GENPLL_250MHZ_CLK] = { + .channel = BCM_CYGNUS_GENPLL_250MHZ_CLK, + .flags = IPROC_CLK_AON, + .enable = enable_val(0x4, 7, 1, 13), + .mdiv = reg_val(0x20, 10, 8), + }, + [BCM_CYGNUS_GENPLL_IHOST_SYS_CLK] = { + .channel = BCM_CYGNUS_GENPLL_IHOST_SYS_CLK, + .flags = IPROC_CLK_AON, + .enable = enable_val(0x4, 8, 2, 14), + .mdiv = reg_val(0x20, 20, 8), + }, + [BCM_CYGNUS_GENPLL_ENET_SW_CLK] = { + .channel = BCM_CYGNUS_GENPLL_ENET_SW_CLK, + .flags = IPROC_CLK_AON, + .enable = enable_val(0x4, 9, 3, 15), + .mdiv = reg_val(0x24, 0, 8), + }, + [BCM_CYGNUS_GENPLL_AUDIO_125_CLK] = { + .channel = BCM_CYGNUS_GENPLL_AUDIO_125_CLK, + .flags = IPROC_CLK_AON, + .enable = enable_val(0x4, 10, 4, 16), + .mdiv = reg_val(0x24, 10, 8), + }, + [BCM_CYGNUS_GENPLL_CAN_CLK] = { + .channel = BCM_CYGNUS_GENPLL_CAN_CLK, + .flags = IPROC_CLK_AON, + .enable = enable_val(0x4, 11, 5, 17), + .mdiv = reg_val(0x24, 20, 8), + }, +}; + +static void __init cygnus_genpll_clk_init(struct device_node *node) +{ + iproc_pll_clk_setup(node, &genpll, NULL, 0, genpll_clk, + ARRAY_SIZE(genpll_clk)); +} +CLK_OF_DECLARE(cygnus_genpll, "brcm,cygnus-genpll", cygnus_genpll_clk_init); + +static const struct iproc_pll_ctrl lcpll0 = { + .flags = IPROC_CLK_AON | IPROC_CLK_PLL_NEEDS_SW_CFG, + .aon = 
aon_val(0x0, 2, 5, 4), + .reset = reset_val(0x0, 31, 30, 27, 3, 23, 4, 19, 4), + .sw_ctrl = sw_ctrl_val(0x4, 31), + .ndiv_int = reg_val(0x4, 16, 10), + .pdiv = reg_val(0x4, 26, 4), + .vco_ctrl = vco_ctrl_val(0x10, 0x14), + .status = reg_val(0x18, 12, 1), +}; + +static const struct iproc_clk_ctrl lcpll0_clk[] = { + [BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK] = { + .channel = BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK, + .flags = IPROC_CLK_AON, + .enable = enable_val(0x0, 7, 1, 13), + .mdiv = reg_val(0x8, 0, 8), + }, + [BCM_CYGNUS_LCPLL0_DDR_PHY_CLK] = { + .channel = BCM_CYGNUS_LCPLL0_DDR_PHY_CLK, + .flags = IPROC_CLK_AON, + .enable = enable_val(0x0, 8, 2, 14), + .mdiv = reg_val(0x8, 10, 8), + }, + [BCM_CYGNUS_LCPLL0_SDIO_CLK] = { + .channel = BCM_CYGNUS_LCPLL0_SDIO_CLK, + .flags = IPROC_CLK_AON, + .enable = enable_val(0x0, 9, 3, 15), + .mdiv = reg_val(0x8, 20, 8), + }, + [BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK] = { + .channel = BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK, + .flags = IPROC_CLK_AON, + .enable = enable_val(0x0, 10, 4, 16), + .mdiv = reg_val(0xc, 0, 8), + }, + [BCM_CYGNUS_LCPLL0_SMART_CARD_CLK] = { + .channel = BCM_CYGNUS_LCPLL0_SMART_CARD_CLK, + .flags = IPROC_CLK_AON, + .enable = enable_val(0x0, 11, 5, 17), + .mdiv = reg_val(0xc, 10, 8), + }, + [BCM_CYGNUS_LCPLL0_CH5_UNUSED] = { + .channel = BCM_CYGNUS_LCPLL0_CH5_UNUSED, + .flags = IPROC_CLK_AON, + .enable = enable_val(0x0, 12, 6, 18), + .mdiv = reg_val(0xc, 20, 8), + }, +}; + +static void __init cygnus_lcpll0_clk_init(struct device_node *node) +{ + iproc_pll_clk_setup(node, &lcpll0, NULL, 0, lcpll0_clk, + ARRAY_SIZE(lcpll0_clk)); +} +CLK_OF_DECLARE(cygnus_lcpll0, "brcm,cygnus-lcpll0", cygnus_lcpll0_clk_init); + +/* + * MIPI PLL VCO frequency parameter table + */ +static const struct iproc_pll_vco_param mipipll_vco_params[] = { + /* rate (Hz) ndiv_int ndiv_frac pdiv */ + { 750000000UL, 30, 0, 1 }, + { 1000000000UL, 40, 0, 1 }, + { 1350000000ul, 54, 0, 1 }, + { 2000000000UL, 80, 0, 1 }, + { 2100000000UL, 84, 0, 1 }, + { 2250000000UL, 90, 0, 1 }, + { 2500000000UL, 100, 0, 1 }, + { 2700000000UL, 54, 0, 0 }, + { 2975000000UL, 119, 0, 1 }, + { 3100000000UL, 124, 0, 1 }, + { 3150000000UL, 126, 0, 1 }, +}; + +static const struct iproc_pll_ctrl mipipll = { + .flags = IPROC_CLK_PLL_ASIU | IPROC_CLK_PLL_HAS_NDIV_FRAC | + IPROC_CLK_NEEDS_READ_BACK, + .aon = aon_val(0x0, 4, 17, 16), + .asiu = asiu_gate_val(0x0, 3), + .reset = reset_val(0x0, 11, 10, 4, 3, 0, 4, 7, 4), + .ndiv_int = reg_val(0x10, 20, 10), + .ndiv_frac = reg_val(0x10, 0, 20), + .pdiv = reg_val(0x14, 0, 4), + .vco_ctrl = vco_ctrl_val(0x18, 0x1c), + .status = reg_val(0x28, 12, 1), +}; + +static const struct iproc_clk_ctrl mipipll_clk[] = { + [BCM_CYGNUS_MIPIPLL_CH0_UNUSED] = { + .channel = BCM_CYGNUS_MIPIPLL_CH0_UNUSED, + .flags = IPROC_CLK_NEEDS_READ_BACK, + .enable = enable_val(0x4, 12, 6, 18), + .mdiv = reg_val(0x20, 0, 8), + }, + [BCM_CYGNUS_MIPIPLL_CH1_LCD] = { + .channel = BCM_CYGNUS_MIPIPLL_CH1_LCD, + .flags = IPROC_CLK_NEEDS_READ_BACK, + .enable = enable_val(0x4, 13, 7, 19), + .mdiv = reg_val(0x20, 10, 8), + }, + [BCM_CYGNUS_MIPIPLL_CH2_V3D] = { + .channel = BCM_CYGNUS_MIPIPLL_CH2_V3D, + .flags = IPROC_CLK_NEEDS_READ_BACK, + .enable = enable_val(0x4, 14, 8, 20), + .mdiv = reg_val(0x20, 20, 8), + }, + [BCM_CYGNUS_MIPIPLL_CH3_UNUSED] = { + .channel = BCM_CYGNUS_MIPIPLL_CH3_UNUSED, + .flags = IPROC_CLK_NEEDS_READ_BACK, + .enable = enable_val(0x4, 15, 9, 21), + .mdiv = reg_val(0x24, 0, 8), + }, + [BCM_CYGNUS_MIPIPLL_CH4_UNUSED] = { + .channel = BCM_CYGNUS_MIPIPLL_CH4_UNUSED, + .flags = 
IPROC_CLK_NEEDS_READ_BACK, + .enable = enable_val(0x4, 16, 10, 22), + .mdiv = reg_val(0x24, 10, 8), + }, + [BCM_CYGNUS_MIPIPLL_CH5_UNUSED] = { + .channel = BCM_CYGNUS_MIPIPLL_CH5_UNUSED, + .flags = IPROC_CLK_NEEDS_READ_BACK, + .enable = enable_val(0x4, 17, 11, 23), + .mdiv = reg_val(0x24, 20, 8), + }, +}; + +static void __init cygnus_mipipll_clk_init(struct device_node *node) +{ + iproc_pll_clk_setup(node, &mipipll, mipipll_vco_params, + ARRAY_SIZE(mipipll_vco_params), mipipll_clk, + ARRAY_SIZE(mipipll_clk)); +} +CLK_OF_DECLARE(cygnus_mipipll, "brcm,cygnus-mipipll", cygnus_mipipll_clk_init); + +static const struct iproc_asiu_div asiu_div[] = { + [BCM_CYGNUS_ASIU_KEYPAD_CLK] = asiu_div_val(0x0, 31, 16, 10, 0, 10), + [BCM_CYGNUS_ASIU_ADC_CLK] = asiu_div_val(0x4, 31, 16, 10, 0, 10), + [BCM_CYGNUS_ASIU_PWM_CLK] = asiu_div_val(0x8, 31, 16, 10, 0, 10), +}; + +static const struct iproc_asiu_gate asiu_gate[] = { + [BCM_CYGNUS_ASIU_KEYPAD_CLK] = asiu_gate_val(0x0, 7), + [BCM_CYGNUS_ASIU_ADC_CLK] = asiu_gate_val(0x0, 9), + [BCM_CYGNUS_ASIU_PWM_CLK] = asiu_gate_val(IPROC_CLK_INVALID_OFFSET, 0), +}; + +static void __init cygnus_asiu_init(struct device_node *node) +{ + iproc_asiu_setup(node, asiu_div, asiu_gate, ARRAY_SIZE(asiu_div)); +} +CLK_OF_DECLARE(cygnus_asiu_clk, "brcm,cygnus-asiu-clk", cygnus_asiu_init); diff --git a/drivers/clk/bcm/clk-iproc-armpll.c b/drivers/clk/bcm/clk-iproc-armpll.c new file mode 100644 index 000000000000..a196ee28a17a --- /dev/null +++ b/drivers/clk/bcm/clk-iproc-armpll.c @@ -0,0 +1,282 @@ +/* + * Copyright (C) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
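The Cygnus tables above describe every programmable PLL and channel field as an (offset, shift, width) triplet built by the reg_val()-style macros. The stand-alone sketch below shows how such a table-driven descriptor is typically consumed; the struct and helper names are invented for the example (the driver itself uses bit_mask() from clk-iproc.h).

#include <stdint.h>
#include <stdio.h>

/* Mirrors the (offset, shift, width) triplets built by reg_val() above. */
struct field_desc {
	unsigned int offset;	/* register offset from the block base */
	unsigned int shift;	/* LSB position of the field */
	unsigned int width;	/* field width in bits */
};

static inline uint32_t field_mask(const struct field_desc *f)
{
	return ((1U << f->width) - 1) << f->shift;
}

/* Extract a field from a register value already read from hardware. */
static inline uint32_t field_get(const struct field_desc *f, uint32_t reg)
{
	return (reg & field_mask(f)) >> f->shift;
}

/* Return the register value with the field replaced by 'val'. */
static inline uint32_t field_set(const struct field_desc *f, uint32_t reg,
				 uint32_t val)
{
	return (reg & ~field_mask(f)) | ((val << f->shift) & field_mask(f));
}

int main(void)
{
	/* e.g. the genpll ndiv_int entry above: reg_val(0x10, 20, 10) */
	const struct field_desc ndiv_int = { .offset = 0x10, .shift = 20, .width = 10 };
	uint32_t reg = 0;

	reg = field_set(&ndiv_int, reg, 80);
	printf("reg@0x%x = 0x%08x, ndiv_int = %u\n", ndiv_int.offset,
	       (unsigned int)reg, (unsigned int)field_get(&ndiv_int, reg));
	return 0;
}

Keeping the per-SoC differences in data tables like this lets one generic read-modify-write path serve every PLL and channel, which is the design the new clk-iproc code follows.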
+ */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/err.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/clkdev.h> +#include <linux/of_address.h> + +#define IPROC_CLK_MAX_FREQ_POLICY 0x3 +#define IPROC_CLK_POLICY_FREQ_OFFSET 0x008 +#define IPROC_CLK_POLICY_FREQ_POLICY_FREQ_SHIFT 8 +#define IPROC_CLK_POLICY_FREQ_POLICY_FREQ_MASK 0x7 + +#define IPROC_CLK_PLLARMA_OFFSET 0xc00 +#define IPROC_CLK_PLLARMA_LOCK_SHIFT 28 +#define IPROC_CLK_PLLARMA_PDIV_SHIFT 24 +#define IPROC_CLK_PLLARMA_PDIV_MASK 0xf +#define IPROC_CLK_PLLARMA_NDIV_INT_SHIFT 8 +#define IPROC_CLK_PLLARMA_NDIV_INT_MASK 0x3ff + +#define IPROC_CLK_PLLARMB_OFFSET 0xc04 +#define IPROC_CLK_PLLARMB_NDIV_FRAC_MASK 0xfffff + +#define IPROC_CLK_PLLARMC_OFFSET 0xc08 +#define IPROC_CLK_PLLARMC_BYPCLK_EN_SHIFT 8 +#define IPROC_CLK_PLLARMC_MDIV_MASK 0xff + +#define IPROC_CLK_PLLARMCTL5_OFFSET 0xc20 +#define IPROC_CLK_PLLARMCTL5_H_MDIV_MASK 0xff + +#define IPROC_CLK_PLLARM_OFFSET_OFFSET 0xc24 +#define IPROC_CLK_PLLARM_SW_CTL_SHIFT 29 +#define IPROC_CLK_PLLARM_NDIV_INT_OFFSET_SHIFT 20 +#define IPROC_CLK_PLLARM_NDIV_INT_OFFSET_MASK 0xff +#define IPROC_CLK_PLLARM_NDIV_FRAC_OFFSET_MASK 0xfffff + +#define IPROC_CLK_ARM_DIV_OFFSET 0xe00 +#define IPROC_CLK_ARM_DIV_PLL_SELECT_OVERRIDE_SHIFT 4 +#define IPROC_CLK_ARM_DIV_ARM_PLL_SELECT_MASK 0xf + +#define IPROC_CLK_POLICY_DBG_OFFSET 0xec0 +#define IPROC_CLK_POLICY_DBG_ACT_FREQ_SHIFT 12 +#define IPROC_CLK_POLICY_DBG_ACT_FREQ_MASK 0x7 + +enum iproc_arm_pll_fid { + ARM_PLL_FID_CRYSTAL_CLK = 0, + ARM_PLL_FID_SYS_CLK = 2, + ARM_PLL_FID_CH0_SLOW_CLK = 6, + ARM_PLL_FID_CH1_FAST_CLK = 7 +}; + +struct iproc_arm_pll { + struct clk_hw hw; + void __iomem *base; + unsigned long rate; +}; + +#define to_iproc_arm_pll(hw) container_of(hw, struct iproc_arm_pll, hw) + +static unsigned int __get_fid(struct iproc_arm_pll *pll) +{ + u32 val; + unsigned int policy, fid, active_fid; + + val = readl(pll->base + IPROC_CLK_ARM_DIV_OFFSET); + if (val & (1 << IPROC_CLK_ARM_DIV_PLL_SELECT_OVERRIDE_SHIFT)) + policy = val & IPROC_CLK_ARM_DIV_ARM_PLL_SELECT_MASK; + else + policy = 0; + + /* something is seriously wrong */ + BUG_ON(policy > IPROC_CLK_MAX_FREQ_POLICY); + + val = readl(pll->base + IPROC_CLK_POLICY_FREQ_OFFSET); + fid = (val >> (IPROC_CLK_POLICY_FREQ_POLICY_FREQ_SHIFT * policy)) & + IPROC_CLK_POLICY_FREQ_POLICY_FREQ_MASK; + + val = readl(pll->base + IPROC_CLK_POLICY_DBG_OFFSET); + active_fid = IPROC_CLK_POLICY_DBG_ACT_FREQ_MASK & + (val >> IPROC_CLK_POLICY_DBG_ACT_FREQ_SHIFT); + if (fid != active_fid) { + pr_debug("%s: fid override %u->%u\n", __func__, fid, + active_fid); + fid = active_fid; + } + + pr_debug("%s: active fid: %u\n", __func__, fid); + + return fid; +} + +/* + * Determine the mdiv (post divider) based on the frequency ID being used. 
+ * There are 4 sources that can be used to derive the output clock rate: + * - 25 MHz Crystal + * - System clock + * - PLL channel 0 (slow clock) + * - PLL channel 1 (fast clock) + */ +static int __get_mdiv(struct iproc_arm_pll *pll) +{ + unsigned int fid; + int mdiv; + u32 val; + + fid = __get_fid(pll); + + switch (fid) { + case ARM_PLL_FID_CRYSTAL_CLK: + case ARM_PLL_FID_SYS_CLK: + mdiv = 1; + break; + + case ARM_PLL_FID_CH0_SLOW_CLK: + val = readl(pll->base + IPROC_CLK_PLLARMC_OFFSET); + mdiv = val & IPROC_CLK_PLLARMC_MDIV_MASK; + if (mdiv == 0) + mdiv = 256; + break; + + case ARM_PLL_FID_CH1_FAST_CLK: + val = readl(pll->base + IPROC_CLK_PLLARMCTL5_OFFSET); + mdiv = val & IPROC_CLK_PLLARMCTL5_H_MDIV_MASK; + if (mdiv == 0) + mdiv = 256; + break; + + default: + mdiv = -EFAULT; + } + + return mdiv; +} + +static unsigned int __get_ndiv(struct iproc_arm_pll *pll) +{ + u32 val; + unsigned int ndiv_int, ndiv_frac, ndiv; + + val = readl(pll->base + IPROC_CLK_PLLARM_OFFSET_OFFSET); + if (val & (1 << IPROC_CLK_PLLARM_SW_CTL_SHIFT)) { + /* + * offset mode is active. Read the ndiv from the PLLARM OFFSET + * register + */ + ndiv_int = (val >> IPROC_CLK_PLLARM_NDIV_INT_OFFSET_SHIFT) & + IPROC_CLK_PLLARM_NDIV_INT_OFFSET_MASK; + if (ndiv_int == 0) + ndiv_int = 256; + + ndiv_frac = val & IPROC_CLK_PLLARM_NDIV_FRAC_OFFSET_MASK; + } else { + /* offset mode not active */ + val = readl(pll->base + IPROC_CLK_PLLARMA_OFFSET); + ndiv_int = (val >> IPROC_CLK_PLLARMA_NDIV_INT_SHIFT) & + IPROC_CLK_PLLARMA_NDIV_INT_MASK; + if (ndiv_int == 0) + ndiv_int = 1024; + + val = readl(pll->base + IPROC_CLK_PLLARMB_OFFSET); + ndiv_frac = val & IPROC_CLK_PLLARMB_NDIV_FRAC_MASK; + } + + ndiv = (ndiv_int << 20) | ndiv_frac; + + return ndiv; +} + +/* + * The output frequency of the ARM PLL is calculated based on the ARM PLL + * divider values: + * pdiv = ARM PLL pre-divider + * ndiv = ARM PLL multiplier + * mdiv = ARM PLL post divider + * + * The frequency is calculated by: + * ((ndiv * parent clock rate) / pdiv) / mdiv + */ +static unsigned long iproc_arm_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct iproc_arm_pll *pll = to_iproc_arm_pll(hw); + u32 val; + int mdiv; + u64 ndiv; + unsigned int pdiv; + + /* in bypass mode, use parent rate */ + val = readl(pll->base + IPROC_CLK_PLLARMC_OFFSET); + if (val & (1 << IPROC_CLK_PLLARMC_BYPCLK_EN_SHIFT)) { + pll->rate = parent_rate; + return pll->rate; + } + + /* PLL needs to be locked */ + val = readl(pll->base + IPROC_CLK_PLLARMA_OFFSET); + if (!(val & (1 << IPROC_CLK_PLLARMA_LOCK_SHIFT))) { + pll->rate = 0; + return 0; + } + + pdiv = (val >> IPROC_CLK_PLLARMA_PDIV_SHIFT) & + IPROC_CLK_PLLARMA_PDIV_MASK; + if (pdiv == 0) + pdiv = 16; + + ndiv = __get_ndiv(pll); + mdiv = __get_mdiv(pll); + if (mdiv <= 0) { + pll->rate = 0; + return 0; + } + pll->rate = (ndiv * parent_rate) >> 20; + pll->rate = (pll->rate / pdiv) / mdiv; + + pr_debug("%s: ARM PLL rate: %lu. 
parent rate: %lu\n", __func__, + pll->rate, parent_rate); + pr_debug("%s: ndiv_int: %u, pdiv: %u, mdiv: %d\n", __func__, + (unsigned int)(ndiv >> 20), pdiv, mdiv); + + return pll->rate; +} + +static const struct clk_ops iproc_arm_pll_ops = { + .recalc_rate = iproc_arm_pll_recalc_rate, +}; + +void __init iproc_armpll_setup(struct device_node *node) +{ + int ret; + struct clk *clk; + struct iproc_arm_pll *pll; + struct clk_init_data init; + const char *parent_name; + + pll = kzalloc(sizeof(*pll), GFP_KERNEL); + if (WARN_ON(!pll)) + return; + + pll->base = of_iomap(node, 0); + if (WARN_ON(!pll->base)) + goto err_free_pll; + + init.name = node->name; + init.ops = &iproc_arm_pll_ops; + init.flags = 0; + parent_name = of_clk_get_parent_name(node, 0); + init.parent_names = (parent_name ? &parent_name : NULL); + init.num_parents = (parent_name ? 1 : 0); + pll->hw.init = &init; + + clk = clk_register(NULL, &pll->hw); + if (WARN_ON(IS_ERR(clk))) + goto err_iounmap; + + ret = of_clk_add_provider(node, of_clk_src_simple_get, clk); + if (WARN_ON(ret)) + goto err_clk_unregister; + + return; + +err_clk_unregister: + clk_unregister(clk); +err_iounmap: + iounmap(pll->base); +err_free_pll: + kfree(pll); +} diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c new file mode 100644 index 000000000000..e19c09cd9645 --- /dev/null +++ b/drivers/clk/bcm/clk-iproc-asiu.c @@ -0,0 +1,276 @@ +/* + * Copyright (C) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/clkdev.h> +#include <linux/of_address.h> +#include <linux/delay.h> + +#include "clk-iproc.h" + +struct iproc_asiu; + +struct iproc_asiu_clk { + struct clk_hw hw; + const char *name; + struct iproc_asiu *asiu; + unsigned long rate; + struct iproc_asiu_div div; + struct iproc_asiu_gate gate; +}; + +struct iproc_asiu { + void __iomem *div_base; + void __iomem *gate_base; + + struct clk_onecell_data clk_data; + struct iproc_asiu_clk *clks; +}; + +#define to_asiu_clk(hw) container_of(hw, struct iproc_asiu_clk, hw) + +static int iproc_asiu_clk_enable(struct clk_hw *hw) +{ + struct iproc_asiu_clk *clk = to_asiu_clk(hw); + struct iproc_asiu *asiu = clk->asiu; + u32 val; + + /* some clocks at the ASIU level are always enabled */ + if (clk->gate.offset == IPROC_CLK_INVALID_OFFSET) + return 0; + + val = readl(asiu->gate_base + clk->gate.offset); + val |= (1 << clk->gate.en_shift); + writel(val, asiu->gate_base + clk->gate.offset); + + return 0; +} + +static void iproc_asiu_clk_disable(struct clk_hw *hw) +{ + struct iproc_asiu_clk *clk = to_asiu_clk(hw); + struct iproc_asiu *asiu = clk->asiu; + u32 val; + + /* some clocks at the ASIU level are always enabled */ + if (clk->gate.offset == IPROC_CLK_INVALID_OFFSET) + return; + + val = readl(asiu->gate_base + clk->gate.offset); + val &= ~(1 << clk->gate.en_shift); + writel(val, asiu->gate_base + clk->gate.offset); +} + +static unsigned long iproc_asiu_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct iproc_asiu_clk *clk = to_asiu_clk(hw); + struct iproc_asiu *asiu = clk->asiu; + u32 val; + unsigned int div_h, div_l; + + if (parent_rate == 0) { + clk->rate = 0; + return 0; + } + + /* if clock divisor is not enabled, simply return parent rate */ + val = readl(asiu->div_base + clk->div.offset); + if ((val & (1 << clk->div.en_shift)) == 0) { + clk->rate = parent_rate; + return parent_rate; + } + + /* clock rate = parent rate / (high_div + 1) + (low_div + 1) */ + div_h = (val >> clk->div.high_shift) & bit_mask(clk->div.high_width); + div_h++; + div_l = (val >> clk->div.low_shift) & bit_mask(clk->div.low_width); + div_l++; + + clk->rate = parent_rate / (div_h + div_l); + pr_debug("%s: rate: %lu. 
parent rate: %lu div_h: %u div_l: %u\n", + __func__, clk->rate, parent_rate, div_h, div_l); + + return clk->rate; +} + +static long iproc_asiu_clk_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + unsigned int div; + + if (rate == 0 || *parent_rate == 0) + return -EINVAL; + + if (rate == *parent_rate) + return *parent_rate; + + div = DIV_ROUND_UP(*parent_rate, rate); + if (div < 2) + return *parent_rate; + + return *parent_rate / div; +} + +static int iproc_asiu_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct iproc_asiu_clk *clk = to_asiu_clk(hw); + struct iproc_asiu *asiu = clk->asiu; + unsigned int div, div_h, div_l; + u32 val; + + if (rate == 0 || parent_rate == 0) + return -EINVAL; + + /* simply disable the divisor if one wants the same rate as parent */ + if (rate == parent_rate) { + val = readl(asiu->div_base + clk->div.offset); + val &= ~(1 << clk->div.en_shift); + writel(val, asiu->div_base + clk->div.offset); + return 0; + } + + div = DIV_ROUND_UP(parent_rate, rate); + if (div < 2) + return -EINVAL; + + div_h = div_l = div >> 1; + div_h--; + div_l--; + + val = readl(asiu->div_base + clk->div.offset); + val |= 1 << clk->div.en_shift; + if (div_h) { + val &= ~(bit_mask(clk->div.high_width) + << clk->div.high_shift); + val |= div_h << clk->div.high_shift; + } else { + val &= ~(bit_mask(clk->div.high_width) + << clk->div.high_shift); + } + if (div_l) { + val &= ~(bit_mask(clk->div.low_width) << clk->div.low_shift); + val |= div_l << clk->div.low_shift; + } else { + val &= ~(bit_mask(clk->div.low_width) << clk->div.low_shift); + } + writel(val, asiu->div_base + clk->div.offset); + + return 0; +} + +static const struct clk_ops iproc_asiu_ops = { + .enable = iproc_asiu_clk_enable, + .disable = iproc_asiu_clk_disable, + .recalc_rate = iproc_asiu_clk_recalc_rate, + .round_rate = iproc_asiu_clk_round_rate, + .set_rate = iproc_asiu_clk_set_rate, +}; + +void __init iproc_asiu_setup(struct device_node *node, + const struct iproc_asiu_div *div, + const struct iproc_asiu_gate *gate, + unsigned int num_clks) +{ + int i, ret; + struct iproc_asiu *asiu; + + if (WARN_ON(!gate || !div)) + return; + + asiu = kzalloc(sizeof(*asiu), GFP_KERNEL); + if (WARN_ON(!asiu)) + return; + + asiu->clk_data.clk_num = num_clks; + asiu->clk_data.clks = kcalloc(num_clks, sizeof(*asiu->clk_data.clks), + GFP_KERNEL); + if (WARN_ON(!asiu->clk_data.clks)) + goto err_clks; + + asiu->clks = kcalloc(num_clks, sizeof(*asiu->clks), GFP_KERNEL); + if (WARN_ON(!asiu->clks)) + goto err_asiu_clks; + + asiu->div_base = of_iomap(node, 0); + if (WARN_ON(!asiu->div_base)) + goto err_iomap_div; + + asiu->gate_base = of_iomap(node, 1); + if (WARN_ON(!asiu->gate_base)) + goto err_iomap_gate; + + for (i = 0; i < num_clks; i++) { + struct clk_init_data init; + struct clk *clk; + const char *parent_name; + struct iproc_asiu_clk *asiu_clk; + const char *clk_name; + + clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL); + if (WARN_ON(!clk_name)) + goto err_clk_register; + + ret = of_property_read_string_index(node, "clock-output-names", + i, &clk_name); + if (WARN_ON(ret)) + goto err_clk_register; + + asiu_clk = &asiu->clks[i]; + asiu_clk->name = clk_name; + asiu_clk->asiu = asiu; + asiu_clk->div = div[i]; + asiu_clk->gate = gate[i]; + init.name = clk_name; + init.ops = &iproc_asiu_ops; + init.flags = 0; + parent_name = of_clk_get_parent_name(node, 0); + init.parent_names = (parent_name ? &parent_name : NULL); + init.num_parents = (parent_name ? 
1 : 0); + asiu_clk->hw.init = &init; + + clk = clk_register(NULL, &asiu_clk->hw); + if (WARN_ON(IS_ERR(clk))) + goto err_clk_register; + asiu->clk_data.clks[i] = clk; + } + + ret = of_clk_add_provider(node, of_clk_src_onecell_get, + &asiu->clk_data); + if (WARN_ON(ret)) + goto err_clk_register; + + return; + +err_clk_register: + for (i = 0; i < num_clks; i++) + kfree(asiu->clks[i].name); + iounmap(asiu->gate_base); + +err_iomap_gate: + iounmap(asiu->div_base); + +err_iomap_div: + kfree(asiu->clks); + +err_asiu_clks: + kfree(asiu->clk_data.clks); + +err_clks: + kfree(asiu); +} diff --git a/drivers/clk/bcm/clk-iproc-pll.c b/drivers/clk/bcm/clk-iproc-pll.c new file mode 100644 index 000000000000..46fb84bc2674 --- /dev/null +++ b/drivers/clk/bcm/clk-iproc-pll.c @@ -0,0 +1,716 @@ +/* + * Copyright (C) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/clkdev.h> +#include <linux/of_address.h> +#include <linux/delay.h> + +#include "clk-iproc.h" + +#define PLL_VCO_HIGH_SHIFT 19 +#define PLL_VCO_LOW_SHIFT 30 + +/* number of delay loops waiting for PLL to lock */ +#define LOCK_DELAY 100 + +/* number of VCO frequency bands */ +#define NUM_FREQ_BANDS 8 + +#define NUM_KP_BANDS 3 +enum kp_band { + KP_BAND_MID = 0, + KP_BAND_HIGH, + KP_BAND_HIGH_HIGH +}; + +static const unsigned int kp_table[NUM_KP_BANDS][NUM_FREQ_BANDS] = { + { 5, 6, 6, 7, 7, 8, 9, 10 }, + { 4, 4, 5, 5, 6, 7, 8, 9 }, + { 4, 5, 5, 6, 7, 8, 9, 10 }, +}; + +static const unsigned long ref_freq_table[NUM_FREQ_BANDS][2] = { + { 10000000, 12500000 }, + { 12500000, 15000000 }, + { 15000000, 20000000 }, + { 20000000, 25000000 }, + { 25000000, 50000000 }, + { 50000000, 75000000 }, + { 75000000, 100000000 }, + { 100000000, 125000000 }, +}; + +enum vco_freq_range { + VCO_LOW = 700000000U, + VCO_MID = 1200000000U, + VCO_HIGH = 2200000000U, + VCO_HIGH_HIGH = 3100000000U, + VCO_MAX = 4000000000U, +}; + +struct iproc_pll; + +struct iproc_clk { + struct clk_hw hw; + const char *name; + struct iproc_pll *pll; + unsigned long rate; + const struct iproc_clk_ctrl *ctrl; +}; + +struct iproc_pll { + void __iomem *pll_base; + void __iomem *pwr_base; + void __iomem *asiu_base; + + const struct iproc_pll_ctrl *ctrl; + const struct iproc_pll_vco_param *vco_param; + unsigned int num_vco_entries; + + struct clk_onecell_data clk_data; + struct iproc_clk *clks; +}; + +#define to_iproc_clk(hw) container_of(hw, struct iproc_clk, hw) + +/* + * Based on the target frequency, find a match from the VCO frequency parameter + * table and return its index + */ +static int pll_get_rate_index(struct iproc_pll *pll, unsigned int target_rate) +{ + int i; + + for (i = 0; i < pll->num_vco_entries; i++) + if (target_rate == pll->vco_param[i].rate) + break; + + if (i >= pll->num_vco_entries) + return -EINVAL; + + return i; +} + +static int get_kp(unsigned long ref_freq, enum kp_band kp_index) +{ + int i; + + if (ref_freq < ref_freq_table[0][0]) + return -EINVAL; + + for (i = 0; i < NUM_FREQ_BANDS; i++) { + if 
(ref_freq >= ref_freq_table[i][0] && + ref_freq < ref_freq_table[i][1]) + return kp_table[kp_index][i]; + } + return -EINVAL; +} + +static int pll_wait_for_lock(struct iproc_pll *pll) +{ + int i; + const struct iproc_pll_ctrl *ctrl = pll->ctrl; + + for (i = 0; i < LOCK_DELAY; i++) { + u32 val = readl(pll->pll_base + ctrl->status.offset); + + if (val & (1 << ctrl->status.shift)) + return 0; + udelay(10); + } + + return -EIO; +} + +static void __pll_disable(struct iproc_pll *pll) +{ + const struct iproc_pll_ctrl *ctrl = pll->ctrl; + u32 val; + + if (ctrl->flags & IPROC_CLK_PLL_ASIU) { + val = readl(pll->asiu_base + ctrl->asiu.offset); + val &= ~(1 << ctrl->asiu.en_shift); + writel(val, pll->asiu_base + ctrl->asiu.offset); + } + + /* latch input value so core power can be shut down */ + val = readl(pll->pwr_base + ctrl->aon.offset); + val |= (1 << ctrl->aon.iso_shift); + writel(val, pll->pwr_base + ctrl->aon.offset); + + /* power down the core */ + val &= ~(bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift); + writel(val, pll->pwr_base + ctrl->aon.offset); +} + +static int __pll_enable(struct iproc_pll *pll) +{ + const struct iproc_pll_ctrl *ctrl = pll->ctrl; + u32 val; + + /* power up the PLL and make sure it's not latched */ + val = readl(pll->pwr_base + ctrl->aon.offset); + val |= bit_mask(ctrl->aon.pwr_width) << ctrl->aon.pwr_shift; + val &= ~(1 << ctrl->aon.iso_shift); + writel(val, pll->pwr_base + ctrl->aon.offset); + + /* certain PLLs also need to be ungated from the ASIU top level */ + if (ctrl->flags & IPROC_CLK_PLL_ASIU) { + val = readl(pll->asiu_base + ctrl->asiu.offset); + val |= (1 << ctrl->asiu.en_shift); + writel(val, pll->asiu_base + ctrl->asiu.offset); + } + + return 0; +} + +static void __pll_put_in_reset(struct iproc_pll *pll) +{ + u32 val; + const struct iproc_pll_ctrl *ctrl = pll->ctrl; + const struct iproc_pll_reset_ctrl *reset = &ctrl->reset; + + val = readl(pll->pll_base + reset->offset); + val &= ~(1 << reset->reset_shift | 1 << reset->p_reset_shift); + writel(val, pll->pll_base + reset->offset); + if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK)) + readl(pll->pll_base + reset->offset); +} + +static void __pll_bring_out_reset(struct iproc_pll *pll, unsigned int kp, + unsigned int ka, unsigned int ki) +{ + u32 val; + const struct iproc_pll_ctrl *ctrl = pll->ctrl; + const struct iproc_pll_reset_ctrl *reset = &ctrl->reset; + + val = readl(pll->pll_base + reset->offset); + val &= ~(bit_mask(reset->ki_width) << reset->ki_shift | + bit_mask(reset->kp_width) << reset->kp_shift | + bit_mask(reset->ka_width) << reset->ka_shift); + val |= ki << reset->ki_shift | kp << reset->kp_shift | + ka << reset->ka_shift; + val |= 1 << reset->reset_shift | 1 << reset->p_reset_shift; + writel(val, pll->pll_base + reset->offset); + if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK)) + readl(pll->pll_base + reset->offset); +} + +static int pll_set_rate(struct iproc_clk *clk, unsigned int rate_index, + unsigned long parent_rate) +{ + struct iproc_pll *pll = clk->pll; + const struct iproc_pll_vco_param *vco = &pll->vco_param[rate_index]; + const struct iproc_pll_ctrl *ctrl = pll->ctrl; + int ka = 0, ki, kp, ret; + unsigned long rate = vco->rate; + u32 val; + enum kp_band kp_index; + unsigned long ref_freq; + + /* + * reference frequency = parent frequency / PDIV + * If PDIV = 0, then it becomes a multiplier (x2) + */ + if (vco->pdiv == 0) + ref_freq = parent_rate * 2; + else + ref_freq = parent_rate / vco->pdiv; + + /* determine Ki and Kp index based on target VCO frequency */ + if 
(rate >= VCO_LOW && rate < VCO_HIGH) { + ki = 4; + kp_index = KP_BAND_MID; + } else if (rate >= VCO_HIGH && rate && rate < VCO_HIGH_HIGH) { + ki = 3; + kp_index = KP_BAND_HIGH; + } else if (rate >= VCO_HIGH_HIGH && rate < VCO_MAX) { + ki = 3; + kp_index = KP_BAND_HIGH_HIGH; + } else { + pr_err("%s: pll: %s has invalid rate: %lu\n", __func__, + clk->name, rate); + return -EINVAL; + } + + kp = get_kp(ref_freq, kp_index); + if (kp < 0) { + pr_err("%s: pll: %s has invalid kp\n", __func__, clk->name); + return kp; + } + + ret = __pll_enable(pll); + if (ret) { + pr_err("%s: pll: %s fails to enable\n", __func__, clk->name); + return ret; + } + + /* put PLL in reset */ + __pll_put_in_reset(pll); + + writel(0, pll->pll_base + ctrl->vco_ctrl.u_offset); + if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK)) + readl(pll->pll_base + ctrl->vco_ctrl.u_offset); + val = readl(pll->pll_base + ctrl->vco_ctrl.l_offset); + + if (rate >= VCO_LOW && rate < VCO_MID) + val |= (1 << PLL_VCO_LOW_SHIFT); + + if (rate < VCO_HIGH) + val &= ~(1 << PLL_VCO_HIGH_SHIFT); + else + val |= (1 << PLL_VCO_HIGH_SHIFT); + + writel(val, pll->pll_base + ctrl->vco_ctrl.l_offset); + if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK)) + readl(pll->pll_base + ctrl->vco_ctrl.l_offset); + + /* program integer part of NDIV */ + val = readl(pll->pll_base + ctrl->ndiv_int.offset); + val &= ~(bit_mask(ctrl->ndiv_int.width) << ctrl->ndiv_int.shift); + val |= vco->ndiv_int << ctrl->ndiv_int.shift; + writel(val, pll->pll_base + ctrl->ndiv_int.offset); + if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK)) + readl(pll->pll_base + ctrl->ndiv_int.offset); + + /* program fractional part of NDIV */ + if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) { + val = readl(pll->pll_base + ctrl->ndiv_frac.offset); + val &= ~(bit_mask(ctrl->ndiv_frac.width) << + ctrl->ndiv_frac.shift); + val |= vco->ndiv_frac << ctrl->ndiv_frac.shift; + writel(val, pll->pll_base + ctrl->ndiv_frac.offset); + if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK)) + readl(pll->pll_base + ctrl->ndiv_frac.offset); + } + + /* program PDIV */ + val = readl(pll->pll_base + ctrl->pdiv.offset); + val &= ~(bit_mask(ctrl->pdiv.width) << ctrl->pdiv.shift); + val |= vco->pdiv << ctrl->pdiv.shift; + writel(val, pll->pll_base + ctrl->pdiv.offset); + if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK)) + readl(pll->pll_base + ctrl->pdiv.offset); + + __pll_bring_out_reset(pll, kp, ka, ki); + + ret = pll_wait_for_lock(pll); + if (ret < 0) { + pr_err("%s: pll: %s failed to lock\n", __func__, clk->name); + return ret; + } + + return 0; +} + +static int iproc_pll_enable(struct clk_hw *hw) +{ + struct iproc_clk *clk = to_iproc_clk(hw); + struct iproc_pll *pll = clk->pll; + + return __pll_enable(pll); +} + +static void iproc_pll_disable(struct clk_hw *hw) +{ + struct iproc_clk *clk = to_iproc_clk(hw); + struct iproc_pll *pll = clk->pll; + const struct iproc_pll_ctrl *ctrl = pll->ctrl; + + if (ctrl->flags & IPROC_CLK_AON) + return; + + __pll_disable(pll); +} + +static unsigned long iproc_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct iproc_clk *clk = to_iproc_clk(hw); + struct iproc_pll *pll = clk->pll; + const struct iproc_pll_ctrl *ctrl = pll->ctrl; + u32 val; + u64 ndiv; + unsigned int ndiv_int, ndiv_frac, pdiv; + + if (parent_rate == 0) + return 0; + + /* PLL needs to be locked */ + val = readl(pll->pll_base + ctrl->status.offset); + if ((val & (1 << ctrl->status.shift)) == 0) { + clk->rate = 0; + return 0; + } + + /* + * PLL output frequency = + * + * 
((ndiv_int + ndiv_frac / 2^20) * (parent clock rate / pdiv) + */ + val = readl(pll->pll_base + ctrl->ndiv_int.offset); + ndiv_int = (val >> ctrl->ndiv_int.shift) & + bit_mask(ctrl->ndiv_int.width); + ndiv = ndiv_int << ctrl->ndiv_int.shift; + + if (ctrl->flags & IPROC_CLK_PLL_HAS_NDIV_FRAC) { + val = readl(pll->pll_base + ctrl->ndiv_frac.offset); + ndiv_frac = (val >> ctrl->ndiv_frac.shift) & + bit_mask(ctrl->ndiv_frac.width); + + if (ndiv_frac != 0) + ndiv = (ndiv_int << ctrl->ndiv_int.shift) | ndiv_frac; + } + + val = readl(pll->pll_base + ctrl->pdiv.offset); + pdiv = (val >> ctrl->pdiv.shift) & bit_mask(ctrl->pdiv.width); + + clk->rate = (ndiv * parent_rate) >> ctrl->ndiv_int.shift; + + if (pdiv == 0) + clk->rate *= 2; + else + clk->rate /= pdiv; + + return clk->rate; +} + +static long iproc_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + unsigned i; + struct iproc_clk *clk = to_iproc_clk(hw); + struct iproc_pll *pll = clk->pll; + + if (rate == 0 || *parent_rate == 0 || !pll->vco_param) + return -EINVAL; + + for (i = 0; i < pll->num_vco_entries; i++) { + if (rate <= pll->vco_param[i].rate) + break; + } + + if (i == pll->num_vco_entries) + i--; + + return pll->vco_param[i].rate; +} + +static int iproc_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct iproc_clk *clk = to_iproc_clk(hw); + struct iproc_pll *pll = clk->pll; + int rate_index, ret; + + rate_index = pll_get_rate_index(pll, rate); + if (rate_index < 0) + return rate_index; + + ret = pll_set_rate(clk, rate_index, parent_rate); + return ret; +} + +static const struct clk_ops iproc_pll_ops = { + .enable = iproc_pll_enable, + .disable = iproc_pll_disable, + .recalc_rate = iproc_pll_recalc_rate, + .round_rate = iproc_pll_round_rate, + .set_rate = iproc_pll_set_rate, +}; + +static int iproc_clk_enable(struct clk_hw *hw) +{ + struct iproc_clk *clk = to_iproc_clk(hw); + const struct iproc_clk_ctrl *ctrl = clk->ctrl; + struct iproc_pll *pll = clk->pll; + u32 val; + + /* channel enable is active low */ + val = readl(pll->pll_base + ctrl->enable.offset); + val &= ~(1 << ctrl->enable.enable_shift); + writel(val, pll->pll_base + ctrl->enable.offset); + + /* also make sure channel is not held */ + val = readl(pll->pll_base + ctrl->enable.offset); + val &= ~(1 << ctrl->enable.hold_shift); + writel(val, pll->pll_base + ctrl->enable.offset); + if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK)) + readl(pll->pll_base + ctrl->enable.offset); + + return 0; +} + +static void iproc_clk_disable(struct clk_hw *hw) +{ + struct iproc_clk *clk = to_iproc_clk(hw); + const struct iproc_clk_ctrl *ctrl = clk->ctrl; + struct iproc_pll *pll = clk->pll; + u32 val; + + if (ctrl->flags & IPROC_CLK_AON) + return; + + val = readl(pll->pll_base + ctrl->enable.offset); + val |= 1 << ctrl->enable.enable_shift; + writel(val, pll->pll_base + ctrl->enable.offset); + if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK)) + readl(pll->pll_base + ctrl->enable.offset); +} + +static unsigned long iproc_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct iproc_clk *clk = to_iproc_clk(hw); + const struct iproc_clk_ctrl *ctrl = clk->ctrl; + struct iproc_pll *pll = clk->pll; + u32 val; + unsigned int mdiv; + + if (parent_rate == 0) + return 0; + + val = readl(pll->pll_base + ctrl->mdiv.offset); + mdiv = (val >> ctrl->mdiv.shift) & bit_mask(ctrl->mdiv.width); + if (mdiv == 0) + mdiv = 256; + + clk->rate = parent_rate / mdiv; + + return clk->rate; +} + +static long 
iproc_clk_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + unsigned int div; + + if (rate == 0 || *parent_rate == 0) + return -EINVAL; + + if (rate == *parent_rate) + return *parent_rate; + + div = DIV_ROUND_UP(*parent_rate, rate); + if (div < 2) + return *parent_rate; + + if (div > 256) + div = 256; + + return *parent_rate / div; +} + +static int iproc_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct iproc_clk *clk = to_iproc_clk(hw); + const struct iproc_clk_ctrl *ctrl = clk->ctrl; + struct iproc_pll *pll = clk->pll; + u32 val; + unsigned int div; + + if (rate == 0 || parent_rate == 0) + return -EINVAL; + + div = DIV_ROUND_UP(parent_rate, rate); + if (div > 256) + return -EINVAL; + + val = readl(pll->pll_base + ctrl->mdiv.offset); + if (div == 256) { + val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift); + } else { + val &= ~(bit_mask(ctrl->mdiv.width) << ctrl->mdiv.shift); + val |= div << ctrl->mdiv.shift; + } + writel(val, pll->pll_base + ctrl->mdiv.offset); + if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK)) + readl(pll->pll_base + ctrl->mdiv.offset); + clk->rate = parent_rate / div; + + return 0; +} + +static const struct clk_ops iproc_clk_ops = { + .enable = iproc_clk_enable, + .disable = iproc_clk_disable, + .recalc_rate = iproc_clk_recalc_rate, + .round_rate = iproc_clk_round_rate, + .set_rate = iproc_clk_set_rate, +}; + +/** + * Some PLLs require the PLL SW override bit to be set before changes can be + * applied to the PLL + */ +static void iproc_pll_sw_cfg(struct iproc_pll *pll) +{ + const struct iproc_pll_ctrl *ctrl = pll->ctrl; + + if (ctrl->flags & IPROC_CLK_PLL_NEEDS_SW_CFG) { + u32 val; + + val = readl(pll->pll_base + ctrl->sw_ctrl.offset); + val |= BIT(ctrl->sw_ctrl.shift); + writel(val, pll->pll_base + ctrl->sw_ctrl.offset); + if (unlikely(ctrl->flags & IPROC_CLK_NEEDS_READ_BACK)) + readl(pll->pll_base + ctrl->sw_ctrl.offset); + } +} + +void __init iproc_pll_clk_setup(struct device_node *node, + const struct iproc_pll_ctrl *pll_ctrl, + const struct iproc_pll_vco_param *vco, + unsigned int num_vco_entries, + const struct iproc_clk_ctrl *clk_ctrl, + unsigned int num_clks) +{ + int i, ret; + struct clk *clk; + struct iproc_pll *pll; + struct iproc_clk *iclk; + struct clk_init_data init; + const char *parent_name; + + if (WARN_ON(!pll_ctrl) || WARN_ON(!clk_ctrl)) + return; + + pll = kzalloc(sizeof(*pll), GFP_KERNEL); + if (WARN_ON(!pll)) + return; + + pll->clk_data.clk_num = num_clks; + pll->clk_data.clks = kcalloc(num_clks, sizeof(*pll->clk_data.clks), + GFP_KERNEL); + if (WARN_ON(!pll->clk_data.clks)) + goto err_clk_data; + + pll->clks = kcalloc(num_clks, sizeof(*pll->clks), GFP_KERNEL); + if (WARN_ON(!pll->clks)) + goto err_clks; + + pll->pll_base = of_iomap(node, 0); + if (WARN_ON(!pll->pll_base)) + goto err_pll_iomap; + + pll->pwr_base = of_iomap(node, 1); + if (WARN_ON(!pll->pwr_base)) + goto err_pwr_iomap; + + /* some PLLs require gating control at the top ASIU level */ + if (pll_ctrl->flags & IPROC_CLK_PLL_ASIU) { + pll->asiu_base = of_iomap(node, 2); + if (WARN_ON(!pll->asiu_base)) + goto err_asiu_iomap; + } + + /* initialize and register the PLL itself */ + pll->ctrl = pll_ctrl; + + iclk = &pll->clks[0]; + iclk->pll = pll; + iclk->name = node->name; + + init.name = node->name; + init.ops = &iproc_pll_ops; + init.flags = 0; + parent_name = of_clk_get_parent_name(node, 0); + init.parent_names = (parent_name ? &parent_name : NULL); + init.num_parents = (parent_name ? 
1 : 0); + iclk->hw.init = &init; + + if (vco) { + pll->num_vco_entries = num_vco_entries; + pll->vco_param = vco; + } + + iproc_pll_sw_cfg(pll); + + clk = clk_register(NULL, &iclk->hw); + if (WARN_ON(IS_ERR(clk))) + goto err_pll_register; + + pll->clk_data.clks[0] = clk; + + /* now initialize and register all leaf clocks */ + for (i = 1; i < num_clks; i++) { + const char *clk_name; + + memset(&init, 0, sizeof(init)); + parent_name = node->name; + + clk_name = kzalloc(IPROC_CLK_NAME_LEN, GFP_KERNEL); + if (WARN_ON(!clk_name)) + goto err_clk_register; + + ret = of_property_read_string_index(node, "clock-output-names", + i, &clk_name); + if (WARN_ON(ret)) + goto err_clk_register; + + iclk = &pll->clks[i]; + iclk->name = clk_name; + iclk->pll = pll; + iclk->ctrl = &clk_ctrl[i]; + + init.name = clk_name; + init.ops = &iproc_clk_ops; + init.flags = 0; + init.parent_names = (parent_name ? &parent_name : NULL); + init.num_parents = (parent_name ? 1 : 0); + iclk->hw.init = &init; + + clk = clk_register(NULL, &iclk->hw); + if (WARN_ON(IS_ERR(clk))) + goto err_clk_register; + + pll->clk_data.clks[i] = clk; + } + + ret = of_clk_add_provider(node, of_clk_src_onecell_get, &pll->clk_data); + if (WARN_ON(ret)) + goto err_clk_register; + + return; + +err_clk_register: + for (i = 0; i < num_clks; i++) { + kfree(pll->clks[i].name); + clk_unregister(pll->clk_data.clks[i]); + } + +err_pll_register: + if (pll->asiu_base) + iounmap(pll->asiu_base); + +err_asiu_iomap: + iounmap(pll->pwr_base); + +err_pwr_iomap: + iounmap(pll->pll_base); + +err_pll_iomap: + kfree(pll->clks); + +err_clks: + kfree(pll->clk_data.clks); + +err_clk_data: + kfree(pll); +} diff --git a/drivers/clk/bcm/clk-iproc.h b/drivers/clk/bcm/clk-iproc.h new file mode 100644 index 000000000000..d834b7abd5c6 --- /dev/null +++ b/drivers/clk/bcm/clk-iproc.h @@ -0,0 +1,178 @@ +/* + * Copyright (C) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _CLK_IPROC_H +#define _CLK_IPROC_H + +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/device.h> +#include <linux/of.h> +#include <linux/clk-provider.h> + +#define IPROC_CLK_NAME_LEN 25 +#define IPROC_CLK_INVALID_OFFSET 0xffffffff +#define bit_mask(width) ((1 << (width)) - 1) + +/* clocks that should not be disabled at runtime */ +#define IPROC_CLK_AON BIT(0) + +/* PLL that requires gating through ASIU */ +#define IPROC_CLK_PLL_ASIU BIT(1) + +/* PLL that has fractional part of the NDIV */ +#define IPROC_CLK_PLL_HAS_NDIV_FRAC BIT(2) + +/* + * Some of the iProc PLL/clocks may have an ASIC bug that requires read back + * of the same register following the write to flush the write transaction into + * the intended register + */ +#define IPROC_CLK_NEEDS_READ_BACK BIT(3) + +/* + * Some PLLs require the PLL SW override bit to be set before changes can be + * applied to the PLL + */ +#define IPROC_CLK_PLL_NEEDS_SW_CFG BIT(4) + +/* + * Parameters for VCO frequency configuration + * + * VCO frequency = + * ((ndiv_int + ndiv_frac / 2^20) * (ref freqeuncy / pdiv) + */ +struct iproc_pll_vco_param { + unsigned long rate; + unsigned int ndiv_int; + unsigned int ndiv_frac; + unsigned int pdiv; +}; + +struct iproc_clk_reg_op { + unsigned int offset; + unsigned int shift; + unsigned int width; +}; + +/* + * Clock gating control at the top ASIU level + */ +struct iproc_asiu_gate { + unsigned int offset; + unsigned int en_shift; +}; + +/* + * Control of powering on/off of a PLL + * + * Before powering off a PLL, input isolation (ISO) needs to be enabled + */ +struct iproc_pll_aon_pwr_ctrl { + unsigned int offset; + unsigned int pwr_width; + unsigned int pwr_shift; + unsigned int iso_shift; +}; + +/* + * Control of the PLL reset, with Ki, Kp, and Ka parameters + */ +struct iproc_pll_reset_ctrl { + unsigned int offset; + unsigned int reset_shift; + unsigned int p_reset_shift; + unsigned int ki_shift; + unsigned int ki_width; + unsigned int kp_shift; + unsigned int kp_width; + unsigned int ka_shift; + unsigned int ka_width; +}; + +/* + * To enable SW control of the PLL + */ +struct iproc_pll_sw_ctrl { + unsigned int offset; + unsigned int shift; +}; + +struct iproc_pll_vco_ctrl { + unsigned int u_offset; + unsigned int l_offset; +}; + +/* + * Main PLL control parameters + */ +struct iproc_pll_ctrl { + unsigned long flags; + struct iproc_pll_aon_pwr_ctrl aon; + struct iproc_asiu_gate asiu; + struct iproc_pll_reset_ctrl reset; + struct iproc_pll_sw_ctrl sw_ctrl; + struct iproc_clk_reg_op ndiv_int; + struct iproc_clk_reg_op ndiv_frac; + struct iproc_clk_reg_op pdiv; + struct iproc_pll_vco_ctrl vco_ctrl; + struct iproc_clk_reg_op status; +}; + +/* + * Controls enabling/disabling a PLL derived clock + */ +struct iproc_clk_enable_ctrl { + unsigned int offset; + unsigned int enable_shift; + unsigned int hold_shift; + unsigned int bypass_shift; +}; + +/* + * Main clock control parameters for clocks derived from the PLLs + */ +struct iproc_clk_ctrl { + unsigned int channel; + unsigned long flags; + struct iproc_clk_enable_ctrl enable; + struct iproc_clk_reg_op mdiv; +}; + +/* + * Divisor of the ASIU clocks + */ +struct iproc_asiu_div { + unsigned int offset; + unsigned int en_shift; + unsigned int high_shift; + unsigned int high_width; + unsigned int low_shift; + unsigned int low_width; +}; + +void __init iproc_armpll_setup(struct device_node *node); +void __init iproc_pll_clk_setup(struct device_node *node, + const struct 
iproc_pll_ctrl *pll_ctrl, + const struct iproc_pll_vco_param *vco, + unsigned int num_vco_entries, + const struct iproc_clk_ctrl *clk_ctrl, + unsigned int num_clks); +void __init iproc_asiu_setup(struct device_node *node, + const struct iproc_asiu_div *div, + const struct iproc_asiu_gate *gate, + unsigned int num_clks); + +#endif /* _CLK_IPROC_H */ diff --git a/drivers/clk/bcm/clk-kona-setup.c b/drivers/clk/bcm/clk-kona-setup.c index e5aededdd322..deaa7f962b84 100644 --- a/drivers/clk/bcm/clk-kona-setup.c +++ b/drivers/clk/bcm/clk-kona-setup.c @@ -21,8 +21,6 @@ #define selector_clear_exists(sel) ((sel)->width = 0) #define trigger_clear_exists(trig) FLAG_CLEAR(trig, TRIG, EXISTS) -LIST_HEAD(ccu_list); /* The list of set up CCUs */ - /* Validity checking */ static bool ccu_data_offsets_valid(struct ccu_data *ccu) @@ -773,7 +771,6 @@ static void kona_ccu_teardown(struct ccu_data *ccu) of_clk_del_provider(ccu->node); /* safe if never added */ ccu_clks_teardown(ccu); - list_del(&ccu->links); of_node_put(ccu->node); ccu->node = NULL; iounmap(ccu->base); @@ -847,7 +844,6 @@ void __init kona_dt_ccu_setup(struct ccu_data *ccu, goto out_err; } ccu->node = of_node_get(node); - list_add_tail(&ccu->links, &ccu_list); /* * Set up each defined kona clock and save the result in diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c index a0ef4f75d457..79a98506c433 100644 --- a/drivers/clk/bcm/clk-kona.c +++ b/drivers/clk/bcm/clk-kona.c @@ -1240,7 +1240,7 @@ static bool __kona_clk_init(struct kona_clk *bcm_clk) default: BUG(); } - return -EINVAL; + return false; } /* Set a CCU and all its clocks into their desired initial state */ diff --git a/drivers/clk/bcm/clk-kona.h b/drivers/clk/bcm/clk-kona.h index 6849a64baf6d..906576ec97b6 100644 --- a/drivers/clk/bcm/clk-kona.h +++ b/drivers/clk/bcm/clk-kona.h @@ -480,7 +480,6 @@ struct ccu_data { spinlock_t lock; /* serialization lock */ bool write_enabled; /* write access is currently enabled */ struct ccu_policy policy; - struct list_head links; /* for ccu_list */ struct device_node *node; struct clk_onecell_data clk_data; const char *name; @@ -492,7 +491,6 @@ struct ccu_data { #define KONA_CCU_COMMON(_prefix, _name, _ccuname) \ .name = #_name "_ccu", \ .lock = __SPIN_LOCK_UNLOCKED(_name ## _ccu_data.lock), \ - .links = LIST_HEAD_INIT(_name ## _ccu_data.links), \ .clk_data = { \ .clk_num = _prefix ## _ ## _ccuname ## _CCU_CLOCK_COUNT, \ } diff --git a/drivers/clk/berlin/berlin2-pll.c b/drivers/clk/berlin/berlin2-pll.c index bdc506b03824..f4b8d324b083 100644 --- a/drivers/clk/berlin/berlin2-pll.c +++ b/drivers/clk/berlin/berlin2-pll.c @@ -25,14 +25,7 @@ #include <asm/div64.h> #include "berlin2-div.h" - -struct berlin2_pll_map { - const u8 vcodiv[16]; - u8 mult; - u8 fbdiv_shift; - u8 rfdiv_shift; - u8 divsel_shift; -}; +#include "berlin2-pll.h" struct berlin2_pll { struct clk_hw hw; diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c index 88f4ff6916fe..90897af8d9f7 100644 --- a/drivers/clk/clk-asm9260.c +++ b/drivers/clk/clk-asm9260.c @@ -274,7 +274,7 @@ static void __init asm9260_acc_init(struct device_node *np) u32 accuracy = 0; base = of_io_request_and_map(np, 0, np->name); - if (!base) + if (IS_ERR(base)) panic("%s: unable to map resource", np->name); /* register pll */ diff --git a/drivers/clk/clk-axm5516.c b/drivers/clk/clk-axm5516.c index 0f6368ceec4c..c7c91a5ecf8b 100644 --- a/drivers/clk/clk-axm5516.c +++ b/drivers/clk/clk-axm5516.c @@ -556,7 +556,7 @@ static int axmclk_probe(struct platform_device *pdev) return 
PTR_ERR(regmap); num_clks = ARRAY_SIZE(axmclk_clocks); - pr_info("axmclk: supporting %u clocks\n", num_clks); + pr_info("axmclk: supporting %zu clocks\n", num_clks); priv = devm_kzalloc(dev, sizeof(*priv) + sizeof(*priv->clks) * num_clks, GFP_KERNEL); if (!priv) diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c index b8e4f8a822e9..f01164fada5d 100644 --- a/drivers/clk/clk-cdce706.c +++ b/drivers/clk/clk-cdce706.c @@ -94,7 +94,7 @@ static const char * const cdce706_source_name[] = { "clk_in0", "clk_in1", }; -static const char *cdce706_clkin_name[] = { +static const char * const cdce706_clkin_name[] = { "clk_in", }; @@ -102,7 +102,7 @@ static const char * const cdce706_pll_name[] = { "pll1", "pll2", "pll3", }; -static const char *cdce706_divider_parent_name[] = { +static const char * const cdce706_divider_parent_name[] = { "clk_in", "pll1", "pll2", "pll2", "pll3", }; @@ -666,6 +666,7 @@ static int cdce706_probe(struct i2c_client *client, static int cdce706_remove(struct i2c_client *client) { + of_clk_del_provider(client->dev.of_node); return 0; } diff --git a/drivers/clk/clk-cdce925.c b/drivers/clk/clk-cdce925.c new file mode 100644 index 000000000000..85fafb41e6ca --- /dev/null +++ b/drivers/clk/clk-cdce925.c @@ -0,0 +1,749 @@ +/* + * Driver for TI Dual PLL CDCE925 clock synthesizer + * + * This driver always connects the Y1 to the input clock, Y2/Y3 to PLL1 + * and Y4/Y5 to PLL2. PLL frequency is set on a first-come-first-serve + * basis. Clients can directly request any frequency that the chip can + * deliver using the standard clk framework. In addition, the device can + * be configured and activated via the devicetree. + * + * Copyright (C) 2014, Topic Embedded Products + * Licenced under GPL + */ +#include <linux/clk-provider.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/gcd.h> + +/* The chip has 2 PLLs which can be routed through dividers to 5 outputs. + * Model this as 2 PLL clocks which are parents to the outputs. 
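+ *
+ * The resulting clock tree, as modeled here, is roughly (an illustrative
+ * sketch only; the exact clock names come from the DT node at
+ * registration time):
+ *
+ *   clk-in --> Y1                (10-bit post-divider)
+ *          --> PLL1 --> Y2, Y3   (7-bit post-dividers)
+ *          --> PLL2 --> Y4, Y5   (7-bit post-dividers)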
+ */ +#define NUMBER_OF_PLLS 2 +#define NUMBER_OF_OUTPUTS 5 + +#define CDCE925_REG_GLOBAL1 0x01 +#define CDCE925_REG_Y1SPIPDIVH 0x02 +#define CDCE925_REG_PDIVL 0x03 +#define CDCE925_REG_XCSEL 0x05 +/* PLL parameters start at 0x10, steps of 0x10 */ +#define CDCE925_OFFSET_PLL 0x10 +/* Add CDCE925_OFFSET_PLL * (pll) to these registers before sending */ +#define CDCE925_PLL_MUX_OUTPUTS 0x14 +#define CDCE925_PLL_MULDIV 0x18 + +#define CDCE925_PLL_FREQUENCY_MIN 80000000ul +#define CDCE925_PLL_FREQUENCY_MAX 230000000ul +struct clk_cdce925_chip; + +struct clk_cdce925_output { + struct clk_hw hw; + struct clk_cdce925_chip *chip; + u8 index; + u16 pdiv; /* 1..127 for Y2-Y5; 1..1023 for Y1 */ +}; +#define to_clk_cdce925_output(_hw) \ + container_of(_hw, struct clk_cdce925_output, hw) + +struct clk_cdce925_pll { + struct clk_hw hw; + struct clk_cdce925_chip *chip; + u8 index; + u16 m; /* 1..511 */ + u16 n; /* 1..4095 */ +}; +#define to_clk_cdce925_pll(_hw) container_of(_hw, struct clk_cdce925_pll, hw) + +struct clk_cdce925_chip { + struct regmap *regmap; + struct i2c_client *i2c_client; + struct clk_cdce925_pll pll[NUMBER_OF_PLLS]; + struct clk_cdce925_output clk[NUMBER_OF_OUTPUTS]; + struct clk *dt_clk[NUMBER_OF_OUTPUTS]; + struct clk_onecell_data onecell; +}; + +/* ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** ** */ + +static unsigned long cdce925_pll_calculate_rate(unsigned long parent_rate, + u16 n, u16 m) +{ + if ((!m || !n) || (m == n)) + return parent_rate; /* In bypass mode runs at same frequency */ + return mult_frac(parent_rate, (unsigned long)n, (unsigned long)m); +} + +static unsigned long cdce925_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + /* Output frequency of PLL is Fout = (Fin/Pdiv)*(N/M) */ + struct clk_cdce925_pll *data = to_clk_cdce925_pll(hw); + + return cdce925_pll_calculate_rate(parent_rate, data->n, data->m); +} + +static void cdce925_pll_find_rate(unsigned long rate, + unsigned long parent_rate, u16 *n, u16 *m) +{ + unsigned long un; + unsigned long um; + unsigned long g; + + if (rate <= parent_rate) { + /* Can always deliver parent_rate in bypass mode */ + rate = parent_rate; + *n = 0; + *m = 0; + } else { + /* In PLL mode, need to apply min/max range */ + if (rate < CDCE925_PLL_FREQUENCY_MIN) + rate = CDCE925_PLL_FREQUENCY_MIN; + else if (rate > CDCE925_PLL_FREQUENCY_MAX) + rate = CDCE925_PLL_FREQUENCY_MAX; + + g = gcd(rate, parent_rate); + um = parent_rate / g; + un = rate / g; + /* When outside hw range, reduce to fit (rounding errors) */ + while ((un > 4095) || (um > 511)) { + un >>= 1; + um >>= 1; + } + if (un == 0) + un = 1; + if (um == 0) + um = 1; + + *n = un; + *m = um; + } +} + +static long cdce925_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + u16 n, m; + + cdce925_pll_find_rate(rate, *parent_rate, &n, &m); + return (long)cdce925_pll_calculate_rate(*parent_rate, n, m); +} + +static int cdce925_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_cdce925_pll *data = to_clk_cdce925_pll(hw); + + if (!rate || (rate == parent_rate)) { + data->m = 0; /* Bypass mode */ + data->n = 0; + return 0; + } + + if ((rate < CDCE925_PLL_FREQUENCY_MIN) || + (rate > CDCE925_PLL_FREQUENCY_MAX)) { + pr_debug("%s: rate %lu outside PLL range.\n", __func__, rate); + return -EINVAL; + } + + if (rate < parent_rate) { + pr_debug("%s: rate %lu less than parent rate %lu.\n", __func__, + rate, parent_rate); + return -EINVAL; + } + + cdce925_pll_find_rate(rate, 
parent_rate, &data->n, &data->m); + return 0; +} + + +/* calculate p = max(0, 4 - int(log2 (n/m))) */ +static u8 cdce925_pll_calc_p(u16 n, u16 m) +{ + u8 p; + u16 r = n / m; + + if (r >= 16) + return 0; + p = 4; + while (r > 1) { + r >>= 1; + --p; + } + return p; +} + +/* Returns VCO range bits for VCO1_0_RANGE */ +static u8 cdce925_pll_calc_range_bits(struct clk_hw *hw, u16 n, u16 m) +{ + struct clk *parent = clk_get_parent(hw->clk); + unsigned long rate = clk_get_rate(parent); + + rate = mult_frac(rate, (unsigned long)n, (unsigned long)m); + if (rate >= 175000000) + return 0x3; + if (rate >= 150000000) + return 0x02; + if (rate >= 125000000) + return 0x01; + return 0x00; +} + +/* I2C clock, hence everything must happen in (un)prepare because this + * may sleep */ +static int cdce925_pll_prepare(struct clk_hw *hw) +{ + struct clk_cdce925_pll *data = to_clk_cdce925_pll(hw); + u16 n = data->n; + u16 m = data->m; + u16 r; + u8 q; + u8 p; + u16 nn; + u8 pll[4]; /* Bits are spread out over 4 byte registers */ + u8 reg_ofs = data->index * CDCE925_OFFSET_PLL; + unsigned i; + + if ((!m || !n) || (m == n)) { + /* Set PLL mux to bypass mode, leave the rest as is */ + regmap_update_bits(data->chip->regmap, + reg_ofs + CDCE925_PLL_MUX_OUTPUTS, 0x80, 0x80); + } else { + /* According to data sheet: */ + /* p = max(0, 4 - int(log2 (n/m))) */ + p = cdce925_pll_calc_p(n, m); + /* nn = n * 2^p */ + nn = n * BIT(p); + /* q = int(nn/m) */ + q = nn / m; + if ((q < 16) || (1 > 64)) { + pr_debug("%s invalid q=%d\n", __func__, q); + return -EINVAL; + } + r = nn - (m*q); + if (r > 511) { + pr_debug("%s invalid r=%d\n", __func__, r); + return -EINVAL; + } + pr_debug("%s n=%d m=%d p=%d q=%d r=%d\n", __func__, + n, m, p, q, r); + /* encode into register bits */ + pll[0] = n >> 4; + pll[1] = ((n & 0x0F) << 4) | ((r >> 5) & 0x0F); + pll[2] = ((r & 0x1F) << 3) | ((q >> 3) & 0x07); + pll[3] = ((q & 0x07) << 5) | (p << 2) | + cdce925_pll_calc_range_bits(hw, n, m); + /* Write to registers */ + for (i = 0; i < ARRAY_SIZE(pll); ++i) + regmap_write(data->chip->regmap, + reg_ofs + CDCE925_PLL_MULDIV + i, pll[i]); + /* Enable PLL */ + regmap_update_bits(data->chip->regmap, + reg_ofs + CDCE925_PLL_MUX_OUTPUTS, 0x80, 0x00); + } + + return 0; +} + +static void cdce925_pll_unprepare(struct clk_hw *hw) +{ + struct clk_cdce925_pll *data = to_clk_cdce925_pll(hw); + u8 reg_ofs = data->index * CDCE925_OFFSET_PLL; + + regmap_update_bits(data->chip->regmap, + reg_ofs + CDCE925_PLL_MUX_OUTPUTS, 0x80, 0x80); +} + +static const struct clk_ops cdce925_pll_ops = { + .prepare = cdce925_pll_prepare, + .unprepare = cdce925_pll_unprepare, + .recalc_rate = cdce925_pll_recalc_rate, + .round_rate = cdce925_pll_round_rate, + .set_rate = cdce925_pll_set_rate, +}; + + +static void cdce925_clk_set_pdiv(struct clk_cdce925_output *data, u16 pdiv) +{ + switch (data->index) { + case 0: + regmap_update_bits(data->chip->regmap, + CDCE925_REG_Y1SPIPDIVH, + 0x03, (pdiv >> 8) & 0x03); + regmap_write(data->chip->regmap, 0x03, pdiv & 0xFF); + break; + case 1: + regmap_update_bits(data->chip->regmap, 0x16, 0x7F, pdiv); + break; + case 2: + regmap_update_bits(data->chip->regmap, 0x17, 0x7F, pdiv); + break; + case 3: + regmap_update_bits(data->chip->regmap, 0x26, 0x7F, pdiv); + break; + case 4: + regmap_update_bits(data->chip->regmap, 0x27, 0x7F, pdiv); + break; + } +} + +static void cdce925_clk_activate(struct clk_cdce925_output *data) +{ + switch (data->index) { + case 0: + regmap_update_bits(data->chip->regmap, + CDCE925_REG_Y1SPIPDIVH, 0x0c, 0x0c); + break; + 
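+	/*
+	 * Y2/Y3 share the enable bits in PLL1's mux/output register (0x14)
+	 * and Y4/Y5 share the corresponding bits in PLL2's (0x24), so
+	 * activating either clock of a pair also enables its sibling.
+	 */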
case 1: + case 2: + regmap_update_bits(data->chip->regmap, 0x14, 0x03, 0x03); + break; + case 3: + case 4: + regmap_update_bits(data->chip->regmap, 0x24, 0x03, 0x03); + break; + } +} + +static int cdce925_clk_prepare(struct clk_hw *hw) +{ + struct clk_cdce925_output *data = to_clk_cdce925_output(hw); + + cdce925_clk_set_pdiv(data, data->pdiv); + cdce925_clk_activate(data); + return 0; +} + +static void cdce925_clk_unprepare(struct clk_hw *hw) +{ + struct clk_cdce925_output *data = to_clk_cdce925_output(hw); + + /* Disable clock by setting divider to "0" */ + cdce925_clk_set_pdiv(data, 0); +} + +static unsigned long cdce925_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_cdce925_output *data = to_clk_cdce925_output(hw); + + if (data->pdiv) + return parent_rate / data->pdiv; + return 0; +} + +static u16 cdce925_calc_divider(unsigned long rate, + unsigned long parent_rate) +{ + unsigned long divider; + + if (!rate) + return 0; + if (rate >= parent_rate) + return 1; + + divider = DIV_ROUND_CLOSEST(parent_rate, rate); + if (divider > 0x7F) + divider = 0x7F; + + return (u16)divider; +} + +static unsigned long cdce925_clk_best_parent_rate( + struct clk_hw *hw, unsigned long rate) +{ + struct clk *pll = clk_get_parent(hw->clk); + struct clk *root = clk_get_parent(pll); + unsigned long root_rate = clk_get_rate(root); + unsigned long best_rate_error = rate; + u16 pdiv_min; + u16 pdiv_max; + u16 pdiv_best; + u16 pdiv_now; + + if (root_rate % rate == 0) + return root_rate; /* Don't need the PLL, use bypass */ + + pdiv_min = (u16)max(1ul, DIV_ROUND_UP(CDCE925_PLL_FREQUENCY_MIN, rate)); + pdiv_max = (u16)min(127ul, CDCE925_PLL_FREQUENCY_MAX / rate); + + if (pdiv_min > pdiv_max) + return 0; /* No can do? */ + + pdiv_best = pdiv_min; + for (pdiv_now = pdiv_min; pdiv_now < pdiv_max; ++pdiv_now) { + unsigned long target_rate = rate * pdiv_now; + long pll_rate = clk_round_rate(pll, target_rate); + unsigned long actual_rate; + unsigned long rate_error; + + if (pll_rate <= 0) + continue; + actual_rate = pll_rate / pdiv_now; + rate_error = abs((long)actual_rate - (long)rate); + if (rate_error < best_rate_error) { + pdiv_best = pdiv_now; + best_rate_error = rate_error; + } + /* TODO: Consider PLL frequency based on smaller n/m values + * and pick the better one if the error is equal */ + } + + return rate * pdiv_best; +} + +static long cdce925_clk_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + unsigned long l_parent_rate = *parent_rate; + u16 divider = cdce925_calc_divider(rate, l_parent_rate); + + if (l_parent_rate / divider != rate) { + l_parent_rate = cdce925_clk_best_parent_rate(hw, rate); + divider = cdce925_calc_divider(rate, l_parent_rate); + *parent_rate = l_parent_rate; + } + + if (divider) + return (long)(l_parent_rate / divider); + return 0; +} + +static int cdce925_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_cdce925_output *data = to_clk_cdce925_output(hw); + + data->pdiv = cdce925_calc_divider(rate, parent_rate); + + return 0; +} + +static const struct clk_ops cdce925_clk_ops = { + .prepare = cdce925_clk_prepare, + .unprepare = cdce925_clk_unprepare, + .recalc_rate = cdce925_clk_recalc_rate, + .round_rate = cdce925_clk_round_rate, + .set_rate = cdce925_clk_set_rate, +}; + + +static u16 cdce925_y1_calc_divider(unsigned long rate, + unsigned long parent_rate) +{ + unsigned long divider; + + if (!rate) + return 0; + if (rate >= parent_rate) + return 1; + + divider = 
DIV_ROUND_CLOSEST(parent_rate, rate); + if (divider > 0x3FF) /* Y1 has 10-bit divider */ + divider = 0x3FF; + + return (u16)divider; +} + +static long cdce925_clk_y1_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + unsigned long l_parent_rate = *parent_rate; + u16 divider = cdce925_y1_calc_divider(rate, l_parent_rate); + + if (divider) + return (long)(l_parent_rate / divider); + return 0; +} + +static int cdce925_clk_y1_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_cdce925_output *data = to_clk_cdce925_output(hw); + + data->pdiv = cdce925_y1_calc_divider(rate, parent_rate); + + return 0; +} + +static const struct clk_ops cdce925_clk_y1_ops = { + .prepare = cdce925_clk_prepare, + .unprepare = cdce925_clk_unprepare, + .recalc_rate = cdce925_clk_recalc_rate, + .round_rate = cdce925_clk_y1_round_rate, + .set_rate = cdce925_clk_y1_set_rate, +}; + + +static struct regmap_config cdce925_regmap_config = { + .name = "configuration0", + .reg_bits = 8, + .val_bits = 8, + .cache_type = REGCACHE_RBTREE, + .max_register = 0x2F, +}; + +#define CDCE925_I2C_COMMAND_BLOCK_TRANSFER 0x00 +#define CDCE925_I2C_COMMAND_BYTE_TRANSFER 0x80 + +static int cdce925_regmap_i2c_write( + void *context, const void *data, size_t count) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + int ret; + u8 reg_data[2]; + + if (count != 2) + return -ENOTSUPP; + + /* First byte is command code */ + reg_data[0] = CDCE925_I2C_COMMAND_BYTE_TRANSFER | ((u8 *)data)[0]; + reg_data[1] = ((u8 *)data)[1]; + + dev_dbg(&i2c->dev, "%s(%zu) %#x %#x\n", __func__, count, + reg_data[0], reg_data[1]); + + ret = i2c_master_send(i2c, reg_data, count); + if (likely(ret == count)) + return 0; + else if (ret < 0) + return ret; + else + return -EIO; +} + +static int cdce925_regmap_i2c_read(void *context, + const void *reg, size_t reg_size, void *val, size_t val_size) +{ + struct device *dev = context; + struct i2c_client *i2c = to_i2c_client(dev); + struct i2c_msg xfer[2]; + int ret; + u8 reg_data[2]; + + if (reg_size != 1) + return -ENOTSUPP; + + xfer[0].addr = i2c->addr; + xfer[0].flags = 0; + xfer[0].buf = reg_data; + if (val_size == 1) { + reg_data[0] = + CDCE925_I2C_COMMAND_BYTE_TRANSFER | ((u8 *)reg)[0]; + xfer[0].len = 1; + } else { + reg_data[0] = + CDCE925_I2C_COMMAND_BLOCK_TRANSFER | ((u8 *)reg)[0]; + reg_data[1] = val_size; + xfer[0].len = 2; + } + + xfer[1].addr = i2c->addr; + xfer[1].flags = I2C_M_RD; + xfer[1].len = val_size; + xfer[1].buf = val; + + ret = i2c_transfer(i2c->adapter, xfer, 2); + if (likely(ret == 2)) { + dev_dbg(&i2c->dev, "%s(%zu, %zu) %#x %#x\n", __func__, + reg_size, val_size, reg_data[0], *((u8 *)val)); + return 0; + } else if (ret < 0) + return ret; + else + return -EIO; +} + +/* The CDCE925 uses a funky way to read/write registers. Bulk mode is + * just weird, so just use the single byte mode exclusively. 
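+ * Writes are therefore issued one register at a time, with the "byte
+ * transfer" command code (0x80) OR-ed into the register offset; see
+ * cdce925_regmap_i2c_write()/_read() above for the exact framing.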
*/ +static struct regmap_bus regmap_cdce925_bus = { + .write = cdce925_regmap_i2c_write, + .read = cdce925_regmap_i2c_read, +}; + +static int cdce925_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct clk_cdce925_chip *data; + struct device_node *node = client->dev.of_node; + const char *parent_name; + const char *pll_clk_name[NUMBER_OF_PLLS] = {NULL,}; + struct clk_init_data init; + struct clk *clk; + u32 value; + int i; + int err; + struct device_node *np_output; + char child_name[6]; + + dev_dbg(&client->dev, "%s\n", __func__); + data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->i2c_client = client; + data->regmap = devm_regmap_init(&client->dev, ®map_cdce925_bus, + &client->dev, &cdce925_regmap_config); + if (IS_ERR(data->regmap)) { + dev_err(&client->dev, "failed to allocate register map\n"); + return PTR_ERR(data->regmap); + } + i2c_set_clientdata(client, data); + + parent_name = of_clk_get_parent_name(node, 0); + if (!parent_name) { + dev_err(&client->dev, "missing parent clock\n"); + return -ENODEV; + } + dev_dbg(&client->dev, "parent is: %s\n", parent_name); + + if (of_property_read_u32(node, "xtal-load-pf", &value) == 0) + regmap_write(data->regmap, + CDCE925_REG_XCSEL, (value << 3) & 0xF8); + /* PWDN bit */ + regmap_update_bits(data->regmap, CDCE925_REG_GLOBAL1, BIT(4), 0); + + /* Set input source for Y1 to be the XTAL */ + regmap_update_bits(data->regmap, 0x02, BIT(7), 0); + + init.ops = &cdce925_pll_ops; + init.flags = 0; + init.parent_names = &parent_name; + init.num_parents = parent_name ? 1 : 0; + + /* Register PLL clocks */ + for (i = 0; i < NUMBER_OF_PLLS; ++i) { + pll_clk_name[i] = kasprintf(GFP_KERNEL, "%s.pll%d", + client->dev.of_node->name, i); + init.name = pll_clk_name[i]; + data->pll[i].chip = data; + data->pll[i].hw.init = &init; + data->pll[i].index = i; + clk = devm_clk_register(&client->dev, &data->pll[i].hw); + if (IS_ERR(clk)) { + dev_err(&client->dev, "Failed register PLL %d\n", i); + err = PTR_ERR(clk); + goto error; + } + sprintf(child_name, "PLL%d", i+1); + np_output = of_get_child_by_name(node, child_name); + if (!np_output) + continue; + if (!of_property_read_u32(np_output, + "clock-frequency", &value)) { + err = clk_set_rate(clk, value); + if (err) + dev_err(&client->dev, + "unable to set PLL frequency %ud\n", + value); + } + if (!of_property_read_u32(np_output, + "spread-spectrum", &value)) { + u8 flag = of_property_read_bool(np_output, + "spread-spectrum-center") ? 0x80 : 0x00; + regmap_update_bits(data->regmap, + 0x16 + (i*CDCE925_OFFSET_PLL), + 0x80, flag); + regmap_update_bits(data->regmap, + 0x12 + (i*CDCE925_OFFSET_PLL), + 0x07, value & 0x07); + } + } + + /* Register output clock Y1 */ + init.ops = &cdce925_clk_y1_ops; + init.flags = 0; + init.num_parents = 1; + init.parent_names = &parent_name; /* Mux Y1 to input */ + init.name = kasprintf(GFP_KERNEL, "%s.Y1", client->dev.of_node->name); + data->clk[0].chip = data; + data->clk[0].hw.init = &init; + data->clk[0].index = 0; + data->clk[0].pdiv = 1; + clk = devm_clk_register(&client->dev, &data->clk[0].hw); + kfree(init.name); /* clock framework made a copy of the name */ + if (IS_ERR(clk)) { + dev_err(&client->dev, "clock registration Y1 failed\n"); + err = PTR_ERR(clk); + goto error; + } + data->dt_clk[0] = clk; + + /* Register output clocks Y2 .. 
Y5*/ + init.ops = &cdce925_clk_ops; + init.flags = CLK_SET_RATE_PARENT; + init.num_parents = 1; + for (i = 1; i < NUMBER_OF_OUTPUTS; ++i) { + init.name = kasprintf(GFP_KERNEL, "%s.Y%d", + client->dev.of_node->name, i+1); + data->clk[i].chip = data; + data->clk[i].hw.init = &init; + data->clk[i].index = i; + data->clk[i].pdiv = 1; + switch (i) { + case 1: + case 2: + /* Mux Y2/3 to PLL1 */ + init.parent_names = &pll_clk_name[0]; + break; + case 3: + case 4: + /* Mux Y4/5 to PLL2 */ + init.parent_names = &pll_clk_name[1]; + break; + } + clk = devm_clk_register(&client->dev, &data->clk[i].hw); + kfree(init.name); /* clock framework made a copy of the name */ + if (IS_ERR(clk)) { + dev_err(&client->dev, "clock registration failed\n"); + err = PTR_ERR(clk); + goto error; + } + data->dt_clk[i] = clk; + } + + /* Register the output clocks */ + data->onecell.clk_num = NUMBER_OF_OUTPUTS; + data->onecell.clks = data->dt_clk; + err = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get, + &data->onecell); + if (err) + dev_err(&client->dev, "unable to add OF clock provider\n"); + + err = 0; + +error: + for (i = 0; i < NUMBER_OF_PLLS; ++i) + /* clock framework made a copy of the name */ + kfree(pll_clk_name[i]); + + return err; +} + +static const struct i2c_device_id cdce925_id[] = { + { "cdce925", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, cdce925_id); + +static const struct of_device_id clk_cdce925_of_match[] = { + { .compatible = "ti,cdce925" }, + { }, +}; +MODULE_DEVICE_TABLE(of, clk_cdce925_of_match); + +static struct i2c_driver cdce925_driver = { + .driver = { + .name = "cdce925", + .of_match_table = of_match_ptr(clk_cdce925_of_match), + }, + .probe = cdce925_probe, + .id_table = cdce925_id, +}; +module_i2c_driver(cdce925_driver); + +MODULE_AUTHOR("Mike Looijmans <[email protected]>"); +MODULE_DESCRIPTION("cdce925 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c index 956b7e54fa1c..616f5aef3c26 100644 --- a/drivers/clk/clk-composite.c +++ b/drivers/clk/clk-composite.c @@ -188,7 +188,7 @@ static void clk_composite_disable(struct clk_hw *hw) } struct clk *clk_register_composite(struct device *dev, const char *name, - const char **parent_names, int num_parents, + const char * const *parent_names, int num_parents, struct clk_hw *mux_hw, const struct clk_ops *mux_ops, struct clk_hw *rate_hw, const struct clk_ops *rate_ops, struct clk_hw *gate_hw, const struct clk_ops *gate_ops, @@ -200,10 +200,8 @@ struct clk *clk_register_composite(struct device *dev, const char *name, struct clk_ops *clk_composite_ops; composite = kzalloc(sizeof(*composite), GFP_KERNEL); - if (!composite) { - pr_err("%s: could not allocate composite clk\n", __func__); + if (!composite) return ERR_PTR(-ENOMEM); - } init.name = name; init.flags = flags | CLK_IS_BASIC; diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c index 48a65b2b4027..43a218f35b19 100644 --- a/drivers/clk/clk-conf.c +++ b/drivers/clk/clk-conf.c @@ -106,8 +106,9 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier) rc = clk_set_rate(clk, rate); if (rc < 0) - pr_err("clk: couldn't set %s clock rate: %d\n", - __clk_get_name(clk), rc); + pr_err("clk: couldn't set %s clk rate to %d (%d), current rate: %ld\n", + __clk_get_name(clk), rate, rc, + clk_get_rate(clk)); clk_put(clk); } index++; @@ -124,7 +125,7 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier) * and sets any specified clock parents and rates. 
The @clk_supplier argument * should be set to true if @node may be also a clock supplier of any clock * listed in its 'assigned-clocks' or 'assigned-clock-parents' properties. - * If @clk_supplier is false the function exits returnning 0 as soon as it + * If @clk_supplier is false the function exits returning 0 as soon as it * determines the @node is also a supplier of any of the clocks. */ int of_clk_set_defaults(struct device_node *node, bool clk_supplier) diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index 25006a8bb8e6..706b5783c360 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -430,11 +430,9 @@ static struct clk *_register_divider(struct device *dev, const char *name, } /* allocate the divider */ - div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL); - if (!div) { - pr_err("%s: could not allocate divider clk\n", __func__); + div = kzalloc(sizeof(*div), GFP_KERNEL); + if (!div) return ERR_PTR(-ENOMEM); - } init.name = name; init.ops = &clk_divider_ops; diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c index d9e3f671c2ea..fccabe497f6e 100644 --- a/drivers/clk/clk-fixed-factor.c +++ b/drivers/clk/clk-fixed-factor.c @@ -55,10 +55,16 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate, static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { + /* + * We must report success but we can do so unconditionally because + * clk_factor_round_rate returns values that ensure this call is a + * nop. + */ + return 0; } -struct clk_ops clk_fixed_factor_ops = { +const struct clk_ops clk_fixed_factor_ops = { .round_rate = clk_factor_round_rate, .set_rate = clk_factor_set_rate, .recalc_rate = clk_factor_recalc_rate, @@ -74,10 +80,8 @@ struct clk *clk_register_fixed_factor(struct device *dev, const char *name, struct clk *clk; fix = kmalloc(sizeof(*fix), GFP_KERNEL); - if (!fix) { - pr_err("%s: could not allocate fixed factor clk\n", __func__); + if (!fix) return ERR_PTR(-ENOMEM); - } /* struct clk_fixed_factor assignments */ fix->mult = mult; diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c index 0fc56ab6e844..f85ec8d1711f 100644 --- a/drivers/clk/clk-fixed-rate.c +++ b/drivers/clk/clk-fixed-rate.c @@ -65,11 +65,9 @@ struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev, struct clk_init_data init; /* allocate fixed-rate clock */ - fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL); - if (!fixed) { - pr_err("%s: could not allocate fixed clk\n", __func__); + fixed = kzalloc(sizeof(*fixed), GFP_KERNEL); + if (!fixed) return ERR_PTR(-ENOMEM); - } init.name = name; init.ops = &clk_fixed_rate_ops; diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c index 6aa72d9d79ba..140eb5844dc4 100644 --- a/drivers/clk/clk-fractional-divider.c +++ b/drivers/clk/clk-fractional-divider.c @@ -109,10 +109,8 @@ struct clk *clk_register_fractional_divider(struct device *dev, struct clk *clk; fd = kzalloc(sizeof(*fd), GFP_KERNEL); - if (!fd) { - dev_err(dev, "could not allocate fractional divider clk\n"); + if (!fd) return ERR_PTR(-ENOMEM); - } init.name = name; init.ops = &clk_fractional_divider_ops; diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c index 3f0e4200cb5d..551dd0672794 100644 --- a/drivers/clk/clk-gate.c +++ b/drivers/clk/clk-gate.c @@ -135,11 +135,9 @@ struct clk *clk_register_gate(struct device *dev, const char *name, } /* allocate the gate */ - gate = kzalloc(sizeof(struct 
clk_gate), GFP_KERNEL); - if (!gate) { - pr_err("%s: could not allocate gated clk\n", __func__); + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) return ERR_PTR(-ENOMEM); - } init.name = name; init.ops = &clk_gate_ops; diff --git a/drivers/clk/clk-gpio-gate.c b/drivers/clk/clk-gpio-gate.c index a71cabedda93..f564e624fb93 100644 --- a/drivers/clk/clk-gpio-gate.c +++ b/drivers/clk/clk-gpio-gate.c @@ -189,7 +189,7 @@ static struct clk *of_clk_gpio_gate_delayed_register_get( /** * of_gpio_gate_clk_setup() - Setup function for gpio controlled clock */ -void __init of_gpio_gate_clk_setup(struct device_node *node) +static void __init of_gpio_gate_clk_setup(struct device_node *node) { struct clk_gpio_gate_delayed_register_data *data; @@ -203,6 +203,5 @@ void __init of_gpio_gate_clk_setup(struct device_node *node) of_clk_add_provider(node, of_clk_gpio_gate_delayed_register_get, data); } -EXPORT_SYMBOL_GPL(of_gpio_gate_clk_setup); CLK_OF_DECLARE(gpio_gate_clk, "gpio-gate-clock", of_gpio_gate_clk_setup); #endif diff --git a/drivers/clk/clk-ls1x.c b/drivers/clk/clk-ls1x.c index ca80103ac188..d4c61985f448 100644 --- a/drivers/clk/clk-ls1x.c +++ b/drivers/clk/clk-ls1x.c @@ -80,9 +80,9 @@ static struct clk *__init clk_register_pll(struct device *dev, return clk; } -static const char const *cpu_parents[] = { "cpu_clk_div", "osc_33m_clk", }; -static const char const *ahb_parents[] = { "ahb_clk_div", "osc_33m_clk", }; -static const char const *dc_parents[] = { "dc_clk_div", "osc_33m_clk", }; +static const char * const cpu_parents[] = { "cpu_clk_div", "osc_33m_clk", }; +static const char * const ahb_parents[] = { "ahb_clk_div", "osc_33m_clk", }; +static const char * const dc_parents[] = { "dc_clk_div", "osc_33m_clk", }; void __init ls1x_clk_init(void) { diff --git a/drivers/clk/clk-max-gen.c b/drivers/clk/clk-max-gen.c index 6505049d50f1..35af9cb6da4f 100644 --- a/drivers/clk/clk-max-gen.c +++ b/drivers/clk/clk-max-gen.c @@ -31,6 +31,8 @@ #include <linux/of.h> #include <linux/export.h> +#include "clk-max-gen.h" + struct max_gen_clk { struct regmap *regmap; u32 mask; diff --git a/drivers/clk/clk-max77686.c b/drivers/clk/clk-max77686.c index 86cdb3a28629..446c2fe76dc2 100644 --- a/drivers/clk/clk-max77686.c +++ b/drivers/clk/clk-max77686.c @@ -23,6 +23,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/err.h> +#include <linux/module.h> #include <linux/platform_device.h> #include <linux/mfd/max77686.h> #include <linux/mfd/max77686-private.h> diff --git a/drivers/clk/clk-max77802.c b/drivers/clk/clk-max77802.c index 0729dc723a8f..74c49b93a6eb 100644 --- a/drivers/clk/clk-max77802.c +++ b/drivers/clk/clk-max77802.c @@ -22,6 +22,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/err.h> +#include <linux/module.h> #include <linux/platform_device.h> #include <linux/mfd/max77686-private.h> #include <linux/clk-provider.h> diff --git a/drivers/clk/clk-moxart.c b/drivers/clk/clk-moxart.c index 30a3b6999e10..5181b89c3cb2 100644 --- a/drivers/clk/clk-moxart.c +++ b/drivers/clk/clk-moxart.c @@ -15,7 +15,7 @@ #include <linux/of_address.h> #include <linux/clkdev.h> -void __init moxart_of_pll_clk_init(struct device_node *node) +static void __init moxart_of_pll_clk_init(struct device_node *node) { static void __iomem *base; struct clk *clk, *ref_clk; @@ -53,7 +53,7 @@ void __init moxart_of_pll_clk_init(struct device_node *node) CLK_OF_DECLARE(moxart_pll_clock, "moxa,moxart-pll-clock", moxart_of_pll_clk_init); -void __init moxart_of_apb_clk_init(struct device_node *node) +static 
void __init moxart_of_apb_clk_init(struct device_node *node) { static void __iomem *base; struct clk *clk, *pll_clk; diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c index 69a094c3783d..6066a01b20ea 100644 --- a/drivers/clk/clk-mux.c +++ b/drivers/clk/clk-mux.c @@ -114,7 +114,8 @@ const struct clk_ops clk_mux_ro_ops = { EXPORT_SYMBOL_GPL(clk_mux_ro_ops); struct clk *clk_register_mux_table(struct device *dev, const char *name, - const char **parent_names, u8 num_parents, unsigned long flags, + const char * const *parent_names, u8 num_parents, + unsigned long flags, void __iomem *reg, u8 shift, u32 mask, u8 clk_mux_flags, u32 *table, spinlock_t *lock) { @@ -166,7 +167,8 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name, EXPORT_SYMBOL_GPL(clk_register_mux_table); struct clk *clk_register_mux(struct device *dev, const char *name, - const char **parent_names, u8 num_parents, unsigned long flags, + const char * const *parent_names, u8 num_parents, + unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_mux_flags, spinlock_t *lock) { diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c index 05e04ce0f148..c9487179f25f 100644 --- a/drivers/clk/clk-nomadik.c +++ b/drivers/clk/clk-nomadik.c @@ -503,8 +503,7 @@ static int __init nomadik_src_clk_init_debugfs(void) NULL, NULL, &nomadik_src_clk_debugfs_ops); return 0; } - -module_init(nomadik_src_clk_init_debugfs); +device_initcall(nomadik_src_clk_init_debugfs); #endif diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c index 30335d3b99af..e39e1e680b3c 100644 --- a/drivers/clk/clk-si5351.c +++ b/drivers/clk/clk-si5351.c @@ -552,7 +552,8 @@ static const struct clk_ops si5351_pll_ops = { * MSx_P2[19:0] = 128 * b - c * floor(128 * b/c) = (128*b) mod c * MSx_P3[19:0] = c * - * MS[6,7] are integer (P1) divide only, P2 = 0, P3 = 0 + * MS[6,7] are integer (P1) divide only, P1 = divide value, + * P2 and P3 are not applicable * * for 150MHz < fOUT <= 160MHz: * @@ -606,9 +607,6 @@ static unsigned long si5351_msynth_recalc_rate(struct clk_hw *hw, if (!hwdata->params.valid) si5351_read_parameters(hwdata->drvdata, reg, &hwdata->params); - if (hwdata->params.p3 == 0) - return parent_rate; - /* * multisync0-5: fOUT = (128 * P3 * fIN) / (P1*P3 + P2 + 512*P3) * multisync6-7: fOUT = fIN / P1 @@ -616,6 +614,8 @@ static unsigned long si5351_msynth_recalc_rate(struct clk_hw *hw, rate = parent_rate; if (hwdata->num > 5) { m = hwdata->params.p1; + } else if (hwdata->params.p3 == 0) { + return parent_rate; } else if ((si5351_reg_read(hwdata->drvdata, reg + 2) & SI5351_OUTPUT_CLK_DIVBY4) == SI5351_OUTPUT_CLK_DIVBY4) { m = 4; @@ -679,6 +679,16 @@ static long si5351_msynth_round_rate(struct clk_hw *hw, unsigned long rate, c = 1; *parent_rate = a * rate; + } else if (hwdata->num >= 6) { + /* determine the closest integer divider */ + a = DIV_ROUND_CLOSEST(*parent_rate, rate); + if (a < SI5351_MULTISYNTH_A_MIN) + a = SI5351_MULTISYNTH_A_MIN; + if (a > SI5351_MULTISYNTH67_A_MAX) + a = SI5351_MULTISYNTH67_A_MAX; + + b = 0; + c = 1; } else { unsigned long rfrac, denom; @@ -692,9 +702,7 @@ static long si5351_msynth_round_rate(struct clk_hw *hw, unsigned long rate, a = *parent_rate / rate; if (a < SI5351_MULTISYNTH_A_MIN) a = SI5351_MULTISYNTH_A_MIN; - if (hwdata->num >= 6 && a > SI5351_MULTISYNTH67_A_MAX) - a = SI5351_MULTISYNTH67_A_MAX; - else if (a > SI5351_MULTISYNTH_A_MAX) + if (a > SI5351_MULTISYNTH_A_MAX) a = SI5351_MULTISYNTH_A_MAX; /* find best approximation for b/c = fVCO mod fOUT */ @@ -723,6 +731,10 
@@ static long si5351_msynth_round_rate(struct clk_hw *hw, unsigned long rate, hwdata->params.p3 = 1; hwdata->params.p2 = 0; hwdata->params.p1 = 0; + } else if (hwdata->num >= 6) { + hwdata->params.p3 = 0; + hwdata->params.p2 = 0; + hwdata->params.p1 = a; } else { hwdata->params.p3 = c; hwdata->params.p2 = (128 * b) % c; diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c new file mode 100644 index 000000000000..b9b12a742970 --- /dev/null +++ b/drivers/clk/clk-stm32f4.c @@ -0,0 +1,380 @@ +/* + * Author: Daniel Thompson <[email protected]> + * + * Inspired by clk-asm9260.c . + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/clk-provider.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/of.h> +#include <linux/of_address.h> + +#define STM32F4_RCC_PLLCFGR 0x04 +#define STM32F4_RCC_CFGR 0x08 +#define STM32F4_RCC_AHB1ENR 0x30 +#define STM32F4_RCC_AHB2ENR 0x34 +#define STM32F4_RCC_AHB3ENR 0x38 +#define STM32F4_RCC_APB1ENR 0x40 +#define STM32F4_RCC_APB2ENR 0x44 + +struct stm32f4_gate_data { + u8 offset; + u8 bit_idx; + const char *name; + const char *parent_name; + unsigned long flags; +}; + +static const struct stm32f4_gate_data stm32f4_gates[] __initconst = { + { STM32F4_RCC_AHB1ENR, 0, "gpioa", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 1, "gpiob", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 2, "gpioc", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 3, "gpiod", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 4, "gpioe", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 5, "gpiof", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 6, "gpiog", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 7, "gpioh", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 8, "gpioi", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 9, "gpioj", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 10, "gpiok", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 12, "crc", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 18, "bkpsra", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 20, "ccmdatam", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 21, "dma1", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 22, "dma2", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 23, "dma2d", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 25, "ethmac", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 26, "ethmactx", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 27, "ethmacrx", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 28, "ethmacptp", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 29, "otghs", "ahb_div" }, + { STM32F4_RCC_AHB1ENR, 30, "otghsulpi", "ahb_div" }, + + { STM32F4_RCC_AHB2ENR, 0, "dcmi", "ahb_div" }, + { STM32F4_RCC_AHB2ENR, 4, "cryp", "ahb_div" }, + { STM32F4_RCC_AHB2ENR, 5, "hash", "ahb_div" }, + { STM32F4_RCC_AHB2ENR, 6, "rng", "pll48" }, + { STM32F4_RCC_AHB2ENR, 7, "otgfs", "pll48" }, + + { STM32F4_RCC_AHB3ENR, 0, "fmc", "ahb_div", + CLK_IGNORE_UNUSED }, + + { STM32F4_RCC_APB1ENR, 0, "tim2", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 1, "tim3", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 2, "tim4", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 3, "tim5", "apb1_mul" }, + { 
STM32F4_RCC_APB1ENR, 4, "tim6", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 5, "tim7", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 6, "tim12", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 7, "tim13", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 8, "tim14", "apb1_mul" }, + { STM32F4_RCC_APB1ENR, 11, "wwdg", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 14, "spi2", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 15, "spi3", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 17, "uart2", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 18, "uart3", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 19, "uart4", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 20, "uart5", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 21, "i2c1", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 22, "i2c2", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 23, "i2c3", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 25, "can1", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 26, "can2", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 28, "pwr", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 29, "dac", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 30, "uart7", "apb1_div" }, + { STM32F4_RCC_APB1ENR, 31, "uart8", "apb1_div" }, + + { STM32F4_RCC_APB2ENR, 0, "tim1", "apb2_mul" }, + { STM32F4_RCC_APB2ENR, 1, "tim8", "apb2_mul" }, + { STM32F4_RCC_APB2ENR, 4, "usart1", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 5, "usart6", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 8, "adc1", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 9, "adc2", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 10, "adc3", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 11, "sdio", "pll48" }, + { STM32F4_RCC_APB2ENR, 12, "spi1", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 13, "spi4", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 14, "syscfg", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 16, "tim9", "apb2_mul" }, + { STM32F4_RCC_APB2ENR, 17, "tim10", "apb2_mul" }, + { STM32F4_RCC_APB2ENR, 18, "tim11", "apb2_mul" }, + { STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" }, + { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" }, +}; + +/* + * MAX_CLKS is the maximum value in the enumeration below plus the combined + * hweight of stm32f42xx_gate_map (plus one). + */ +#define MAX_CLKS 74 + +enum { SYSTICK, FCLK }; + +/* + * This bitmask tells us which bit offsets (0..192) on STM32F4[23]xxx + * have gate bits associated with them. Its combined hweight is 71. + */ +static const u64 stm32f42xx_gate_map[] = { 0x000000f17ef417ffull, + 0x0000000000000001ull, + 0x04777f33f6fec9ffull }; + +static struct clk *clks[MAX_CLKS]; +static DEFINE_SPINLOCK(stm32f4_clk_lock); +static void __iomem *base; + +/* + * "Multiplier" device for APBx clocks. + * + * The APBx dividers are power-of-two dividers and, if *not* running in 1:1 + * mode, they also tap out the one of the low order state bits to run the + * timers. ST datasheets represent this feature as a (conditional) clock + * multiplier. 
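+ *
+ * For example, with an APB prescaler of /4 the bus clock runs at SYSCLK/4
+ * while the timer kernel clocks run at twice the bus clock; this is
+ * modeled below by returning parent_rate * 2 whenever the tapped
+ * RCC_CFGR bit is set.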
+ */ +struct clk_apb_mul { + struct clk_hw hw; + u8 bit_idx; +}; + +#define to_clk_apb_mul(_hw) container_of(_hw, struct clk_apb_mul, hw) + +static unsigned long clk_apb_mul_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_apb_mul *am = to_clk_apb_mul(hw); + + if (readl(base + STM32F4_RCC_CFGR) & BIT(am->bit_idx)) + return parent_rate * 2; + + return parent_rate; +} + +static long clk_apb_mul_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_apb_mul *am = to_clk_apb_mul(hw); + unsigned long mult = 1; + + if (readl(base + STM32F4_RCC_CFGR) & BIT(am->bit_idx)) + mult = 2; + + if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) { + unsigned long best_parent = rate / mult; + + *prate = + __clk_round_rate(__clk_get_parent(hw->clk), best_parent); + } + + return *prate * mult; +} + +static int clk_apb_mul_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + /* + * We must report success but we can do so unconditionally because + * clk_apb_mul_round_rate returns values that ensure this call is a + * nop. + */ + + return 0; +} + +static const struct clk_ops clk_apb_mul_factor_ops = { + .round_rate = clk_apb_mul_round_rate, + .set_rate = clk_apb_mul_set_rate, + .recalc_rate = clk_apb_mul_recalc_rate, +}; + +static struct clk *clk_register_apb_mul(struct device *dev, const char *name, + const char *parent_name, + unsigned long flags, u8 bit_idx) +{ + struct clk_apb_mul *am; + struct clk_init_data init; + struct clk *clk; + + am = kzalloc(sizeof(*am), GFP_KERNEL); + if (!am) + return ERR_PTR(-ENOMEM); + + am->bit_idx = bit_idx; + am->hw.init = &init; + + init.name = name; + init.ops = &clk_apb_mul_factor_ops; + init.flags = flags; + init.parent_names = &parent_name; + init.num_parents = 1; + + clk = clk_register(dev, &am->hw); + + if (IS_ERR(clk)) + kfree(am); + + return clk; +} + +/* + * Decode current PLL state and (statically) model the state we inherit from + * the bootloader. + */ +static void stm32f4_rcc_register_pll(const char *hse_clk, const char *hsi_clk) +{ + unsigned long pllcfgr = readl(base + STM32F4_RCC_PLLCFGR); + + unsigned long pllm = pllcfgr & 0x3f; + unsigned long plln = (pllcfgr >> 6) & 0x1ff; + unsigned long pllp = BIT(((pllcfgr >> 16) & 3) + 1); + const char *pllsrc = pllcfgr & BIT(22) ? hse_clk : hsi_clk; + unsigned long pllq = (pllcfgr >> 24) & 0xf; + + clk_register_fixed_factor(NULL, "vco", pllsrc, 0, plln, pllm); + clk_register_fixed_factor(NULL, "pll", "vco", 0, 1, pllp); + clk_register_fixed_factor(NULL, "pll48", "vco", 0, 1, pllq); +} + +/* + * Converts the primary and secondary indices (as they appear in DT) to an + * offset into our struct clock array. + */ +static int stm32f4_rcc_lookup_clk_idx(u8 primary, u8 secondary) +{ + u64 table[ARRAY_SIZE(stm32f42xx_gate_map)]; + + if (primary == 1) { + if (WARN_ON(secondary > FCLK)) + return -EINVAL; + return secondary; + } + + memcpy(table, stm32f42xx_gate_map, sizeof(table)); + + /* only bits set in table can be used as indices */ + if (WARN_ON(secondary > 8 * sizeof(table) || + 0 == (table[BIT_ULL_WORD(secondary)] & + BIT_ULL_MASK(secondary)))) + return -EINVAL; + + /* mask out bits above our current index */ + table[BIT_ULL_WORD(secondary)] &= + GENMASK_ULL(secondary % BITS_PER_LONG_LONG, 0); + + return FCLK + hweight64(table[0]) + + (BIT_ULL_WORD(secondary) >= 1 ? hweight64(table[1]) : 0) + + (BIT_ULL_WORD(secondary) >= 2 ? 
hweight64(table[2]) : 0); +} + +static struct clk * +stm32f4_rcc_lookup_clk(struct of_phandle_args *clkspec, void *data) +{ + int i = stm32f4_rcc_lookup_clk_idx(clkspec->args[0], clkspec->args[1]); + + if (i < 0) + return ERR_PTR(-EINVAL); + + return clks[i]; +} + +static const char *sys_parents[] __initdata = { "hsi", NULL, "pll" }; + +static const struct clk_div_table ahb_div_table[] = { + { 0x0, 1 }, { 0x1, 1 }, { 0x2, 1 }, { 0x3, 1 }, + { 0x4, 1 }, { 0x5, 1 }, { 0x6, 1 }, { 0x7, 1 }, + { 0x8, 2 }, { 0x9, 4 }, { 0xa, 8 }, { 0xb, 16 }, + { 0xc, 64 }, { 0xd, 128 }, { 0xe, 256 }, { 0xf, 512 }, + { 0 }, +}; + +static const struct clk_div_table apb_div_table[] = { + { 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 }, + { 4, 2 }, { 5, 4 }, { 6, 8 }, { 7, 16 }, + { 0 }, +}; + +static void __init stm32f4_rcc_init(struct device_node *np) +{ + const char *hse_clk; + int n; + + base = of_iomap(np, 0); + if (!base) { + pr_err("%s: unable to map resource", np->name); + return; + } + + hse_clk = of_clk_get_parent_name(np, 0); + + clk_register_fixed_rate_with_accuracy(NULL, "hsi", NULL, 0, + 16000000, 160000); + stm32f4_rcc_register_pll(hse_clk, "hsi"); + + sys_parents[1] = hse_clk; + clk_register_mux_table( + NULL, "sys", sys_parents, ARRAY_SIZE(sys_parents), 0, + base + STM32F4_RCC_CFGR, 0, 3, 0, NULL, &stm32f4_clk_lock); + + clk_register_divider_table(NULL, "ahb_div", "sys", + CLK_SET_RATE_PARENT, base + STM32F4_RCC_CFGR, + 4, 4, 0, ahb_div_table, &stm32f4_clk_lock); + + clk_register_divider_table(NULL, "apb1_div", "ahb_div", + CLK_SET_RATE_PARENT, base + STM32F4_RCC_CFGR, + 10, 3, 0, apb_div_table, &stm32f4_clk_lock); + clk_register_apb_mul(NULL, "apb1_mul", "apb1_div", + CLK_SET_RATE_PARENT, 12); + + clk_register_divider_table(NULL, "apb2_div", "ahb_div", + CLK_SET_RATE_PARENT, base + STM32F4_RCC_CFGR, + 13, 3, 0, apb_div_table, &stm32f4_clk_lock); + clk_register_apb_mul(NULL, "apb2_mul", "apb2_div", + CLK_SET_RATE_PARENT, 15); + + clks[SYSTICK] = clk_register_fixed_factor(NULL, "systick", "ahb_div", + 0, 1, 8); + clks[FCLK] = clk_register_fixed_factor(NULL, "fclk", "ahb_div", + 0, 1, 1); + + for (n = 0; n < ARRAY_SIZE(stm32f4_gates); n++) { + const struct stm32f4_gate_data *gd = &stm32f4_gates[n]; + unsigned int secondary = + 8 * (gd->offset - STM32F4_RCC_AHB1ENR) + gd->bit_idx; + int idx = stm32f4_rcc_lookup_clk_idx(0, secondary); + + if (idx < 0) + goto fail; + + clks[idx] = clk_register_gate( + NULL, gd->name, gd->parent_name, gd->flags, + base + gd->offset, gd->bit_idx, 0, &stm32f4_clk_lock); + + if (IS_ERR(clks[n])) { + pr_err("%s: Unable to register leaf clock %s\n", + np->full_name, gd->name); + goto fail; + } + } + + of_clk_add_provider(np, stm32f4_rcc_lookup_clk, NULL); + return; +fail: + iounmap(base); +} +CLK_OF_DECLARE(stm32f4_rcc, "st,stm32f42xx-rcc", stm32f4_rcc_init); diff --git a/drivers/clk/clk-u300.c b/drivers/clk/clk-u300.c index 406bfc1375b2..18bf5e576b93 100644 --- a/drivers/clk/clk-u300.c +++ b/drivers/clk/clk-u300.c @@ -12,6 +12,7 @@ #include <linux/clk-provider.h> #include <linux/spinlock.h> #include <linux/of.h> +#include <linux/platform_data/clk-u300.h> /* APP side SYSCON registers */ /* CLK Control Register 16bit (R/W) */ diff --git a/drivers/clk/clk-xgene.c b/drivers/clk/clk-xgene.c index dd8a62d8f11f..f26b3ac36b27 100644 --- a/drivers/clk/clk-xgene.c +++ b/drivers/clk/clk-xgene.c @@ -42,12 +42,12 @@ static DEFINE_SPINLOCK(clk_lock); -static inline u32 xgene_clk_read(void *csr) +static inline u32 xgene_clk_read(void __iomem *csr) { return readl_relaxed(csr); } -static inline void 
xgene_clk_write(u32 data, void *csr) +static inline void xgene_clk_write(u32 data, void __iomem *csr) { return writel_relaxed(data, csr); } @@ -119,7 +119,7 @@ static unsigned long xgene_clk_pll_recalc_rate(struct clk_hw *hw, return fvco / nout; } -const struct clk_ops xgene_clk_pll_ops = { +static const struct clk_ops xgene_clk_pll_ops = { .is_enabled = xgene_clk_pll_is_enabled, .recalc_rate = xgene_clk_pll_recalc_rate, }; @@ -167,7 +167,7 @@ static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_ty { const char *clk_name = np->full_name; struct clk *clk; - void *reg; + void __iomem *reg; reg = of_iomap(np, 0); if (reg == NULL) { @@ -222,20 +222,22 @@ static int xgene_clk_enable(struct clk_hw *hw) struct xgene_clk *pclk = to_xgene_clk(hw); unsigned long flags = 0; u32 data; + phys_addr_t reg; if (pclk->lock) spin_lock_irqsave(pclk->lock, flags); if (pclk->param.csr_reg != NULL) { pr_debug("%s clock enabled\n", pclk->name); + reg = __pa(pclk->param.csr_reg); /* First enable the clock */ data = xgene_clk_read(pclk->param.csr_reg + pclk->param.reg_clk_offset); data |= pclk->param.reg_clk_mask; xgene_clk_write(data, pclk->param.csr_reg + pclk->param.reg_clk_offset); - pr_debug("%s clock PADDR base 0x%016LX clk offset 0x%08X mask 0x%08X value 0x%08X\n", - pclk->name, __pa(pclk->param.csr_reg), + pr_debug("%s clock PADDR base %pa clk offset 0x%08X mask 0x%08X value 0x%08X\n", + pclk->name, ®, pclk->param.reg_clk_offset, pclk->param.reg_clk_mask, data); @@ -245,8 +247,8 @@ static int xgene_clk_enable(struct clk_hw *hw) data &= ~pclk->param.reg_csr_mask; xgene_clk_write(data, pclk->param.csr_reg + pclk->param.reg_csr_offset); - pr_debug("%s CSR RESET PADDR base 0x%016LX csr offset 0x%08X mask 0x%08X value 0x%08X\n", - pclk->name, __pa(pclk->param.csr_reg), + pr_debug("%s CSR RESET PADDR base %pa csr offset 0x%08X mask 0x%08X value 0x%08X\n", + pclk->name, ®, pclk->param.reg_csr_offset, pclk->param.reg_csr_mask, data); } @@ -386,7 +388,7 @@ static long xgene_clk_round_rate(struct clk_hw *hw, unsigned long rate, return parent_rate / divider; } -const struct clk_ops xgene_clk_ops = { +static const struct clk_ops xgene_clk_ops = { .enable = xgene_clk_enable, .disable = xgene_clk_disable, .is_enabled = xgene_clk_is_enabled, @@ -456,7 +458,7 @@ static void __init xgene_devclk_init(struct device_node *np) parameters.csr_reg = NULL; parameters.divider_reg = NULL; for (i = 0; i < 2; i++) { - void *map_res; + void __iomem *map_res; rc = of_address_to_resource(np, i, &res); if (rc != 0) { if (i == 0) { diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 5b0f41868b42..ddb4b541016f 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -21,6 +21,7 @@ #include <linux/device.h> #include <linux/init.h> #include <linux/sched.h> +#include <linux/clkdev.h> #include "clk.h" @@ -37,13 +38,6 @@ static HLIST_HEAD(clk_root_list); static HLIST_HEAD(clk_orphan_list); static LIST_HEAD(clk_notifier_list); -static long clk_core_get_accuracy(struct clk_core *clk); -static unsigned long clk_core_get_rate(struct clk_core *clk); -static int clk_core_get_phase(struct clk_core *clk); -static bool clk_core_is_prepared(struct clk_core *clk); -static bool clk_core_is_enabled(struct clk_core *clk); -static struct clk_core *clk_core_lookup(const char *name); - /*** private data structures ***/ struct clk_core { @@ -68,11 +62,11 @@ struct clk_core { int phase; struct hlist_head children; struct hlist_node child_node; - struct hlist_node debug_node; struct hlist_head clks; unsigned int notifier_count; 
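The xgene hunk above adds __iomem annotations to the CSR accessors and switches the physical-address prints to the %pa specifier, which takes a pointer to a phys_addr_t. A minimal sketch of that pattern, with the register offset, names and log text invented for illustration:

#include <linux/io.h>
#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical helper: read a mapped register and log its physical base. */
static u32 example_read_and_log(void __iomem *csr, phys_addr_t pa_base,
                                u32 offset)
{
        u32 val = readl_relaxed(csr + offset);

        /* %pa takes a pointer to phys_addr_t and prints it at native width */
        pr_debug("CSR base %pa offset 0x%08X value 0x%08X\n",
                 &pa_base, offset, val);

        return val;
}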
#ifdef CONFIG_DEBUG_FS struct dentry *dentry; + struct hlist_node debug_node; #endif struct kref ref; }; @@ -145,382 +139,71 @@ static void clk_enable_unlock(unsigned long flags) spin_unlock_irqrestore(&enable_lock, flags); } -/*** debugfs support ***/ - -#ifdef CONFIG_DEBUG_FS -#include <linux/debugfs.h> - -static struct dentry *rootdir; -static int inited = 0; -static DEFINE_MUTEX(clk_debug_lock); -static HLIST_HEAD(clk_debug_list); - -static struct hlist_head *all_lists[] = { - &clk_root_list, - &clk_orphan_list, - NULL, -}; - -static struct hlist_head *orphan_list[] = { - &clk_orphan_list, - NULL, -}; - -static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, - int level) -{ - if (!c) - return; - - seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n", - level * 3 + 1, "", - 30 - level * 3, c->name, - c->enable_count, c->prepare_count, clk_core_get_rate(c), - clk_core_get_accuracy(c), clk_core_get_phase(c)); -} - -static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, - int level) -{ - struct clk_core *child; - - if (!c) - return; - - clk_summary_show_one(s, c, level); - - hlist_for_each_entry(child, &c->children, child_node) - clk_summary_show_subtree(s, child, level + 1); -} - -static int clk_summary_show(struct seq_file *s, void *data) -{ - struct clk_core *c; - struct hlist_head **lists = (struct hlist_head **)s->private; - - seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n"); - seq_puts(s, "----------------------------------------------------------------------------------------\n"); - - clk_prepare_lock(); - - for (; *lists; lists++) - hlist_for_each_entry(c, *lists, child_node) - clk_summary_show_subtree(s, c, 0); - - clk_prepare_unlock(); - - return 0; -} - - -static int clk_summary_open(struct inode *inode, struct file *file) -{ - return single_open(file, clk_summary_show, inode->i_private); -} - -static const struct file_operations clk_summary_fops = { - .open = clk_summary_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) -{ - if (!c) - return; - - seq_printf(s, "\"%s\": { ", c->name); - seq_printf(s, "\"enable_count\": %d,", c->enable_count); - seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); - seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c)); - seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c)); - seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); -} - -static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) -{ - struct clk_core *child; - - if (!c) - return; - - clk_dump_one(s, c, level); - - hlist_for_each_entry(child, &c->children, child_node) { - seq_printf(s, ","); - clk_dump_subtree(s, child, level + 1); - } - - seq_printf(s, "}"); -} - -static int clk_dump(struct seq_file *s, void *data) -{ - struct clk_core *c; - bool first_node = true; - struct hlist_head **lists = (struct hlist_head **)s->private; - - seq_printf(s, "{"); - - clk_prepare_lock(); - - for (; *lists; lists++) { - hlist_for_each_entry(c, *lists, child_node) { - if (!first_node) - seq_puts(s, ","); - first_node = false; - clk_dump_subtree(s, c, 0); - } - } - - clk_prepare_unlock(); - - seq_printf(s, "}"); - return 0; -} - - -static int clk_dump_open(struct inode *inode, struct file *file) -{ - return single_open(file, clk_dump, inode->i_private); -} - -static const struct file_operations clk_dump_fops = { - .open = clk_dump_open, - .read = seq_read, - .llseek = seq_lseek, - .release = 
single_release, -}; - -static int clk_debug_create_one(struct clk_core *clk, struct dentry *pdentry) -{ - struct dentry *d; - int ret = -ENOMEM; - - if (!clk || !pdentry) { - ret = -EINVAL; - goto out; - } - - d = debugfs_create_dir(clk->name, pdentry); - if (!d) - goto out; - - clk->dentry = d; - - d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry, - (u32 *)&clk->rate); - if (!d) - goto err_out; - - d = debugfs_create_u32("clk_accuracy", S_IRUGO, clk->dentry, - (u32 *)&clk->accuracy); - if (!d) - goto err_out; - - d = debugfs_create_u32("clk_phase", S_IRUGO, clk->dentry, - (u32 *)&clk->phase); - if (!d) - goto err_out; - - d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry, - (u32 *)&clk->flags); - if (!d) - goto err_out; - - d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry, - (u32 *)&clk->prepare_count); - if (!d) - goto err_out; - - d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry, - (u32 *)&clk->enable_count); - if (!d) - goto err_out; - - d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry, - (u32 *)&clk->notifier_count); - if (!d) - goto err_out; - - if (clk->ops->debug_init) { - ret = clk->ops->debug_init(clk->hw, clk->dentry); - if (ret) - goto err_out; - } - - ret = 0; - goto out; - -err_out: - debugfs_remove_recursive(clk->dentry); - clk->dentry = NULL; -out: - return ret; -} - -/** - * clk_debug_register - add a clk node to the debugfs clk tree - * @clk: the clk being added to the debugfs clk tree - * - * Dynamically adds a clk to the debugfs clk tree if debugfs has been - * initialized. Otherwise it bails out early since the debugfs clk tree - * will be created lazily by clk_debug_init as part of a late_initcall. - */ -static int clk_debug_register(struct clk_core *clk) -{ - int ret = 0; - - mutex_lock(&clk_debug_lock); - hlist_add_head(&clk->debug_node, &clk_debug_list); - - if (!inited) - goto unlock; - - ret = clk_debug_create_one(clk, rootdir); -unlock: - mutex_unlock(&clk_debug_lock); - - return ret; -} - - /** - * clk_debug_unregister - remove a clk node from the debugfs clk tree - * @clk: the clk being removed from the debugfs clk tree - * - * Dynamically removes a clk and all it's children clk nodes from the - * debugfs clk tree if clk->dentry points to debugfs created by - * clk_debug_register in __clk_init. - */ -static void clk_debug_unregister(struct clk_core *clk) +static bool clk_core_is_prepared(struct clk_core *core) { - mutex_lock(&clk_debug_lock); - hlist_del_init(&clk->debug_node); - debugfs_remove_recursive(clk->dentry); - clk->dentry = NULL; - mutex_unlock(&clk_debug_lock); -} - -struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode, - void *data, const struct file_operations *fops) -{ - struct dentry *d = NULL; - - if (hw->core->dentry) - d = debugfs_create_file(name, mode, hw->core->dentry, data, - fops); + /* + * .is_prepared is optional for clocks that can prepare + * fall back to software usage counter if it is missing + */ + if (!core->ops->is_prepared) + return core->prepare_count; - return d; + return core->ops->is_prepared(core->hw); } -EXPORT_SYMBOL_GPL(clk_debugfs_add_file); -/** - * clk_debug_init - lazily create the debugfs clk tree visualization - * - * clks are often initialized very early during boot before memory can - * be dynamically allocated and well before debugfs is setup. 
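clk_core_is_prepared above (and clk_core_is_enabled just below) only falls back to the framework's software counters when a provider does not supply the callback; gate-style hardware normally reports its real state instead. A minimal sketch of such a callback, with the struct, names and register layout invented:

#include <linux/bitops.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>

struct example_gate {
        struct clk_hw hw;
        void __iomem *reg;
        u8 bit_idx;
};

#define to_example_gate(_hw) container_of(_hw, struct example_gate, hw)

static int example_gate_is_enabled(struct clk_hw *hw)
{
        struct example_gate *gate = to_example_gate(hw);

        /* Report the real hardware state rather than the enable_count */
        return !!(readl_relaxed(gate->reg) & BIT(gate->bit_idx));
}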
- * clk_debug_init walks the clk tree hierarchy while holding - * prepare_lock and creates the topology as part of a late_initcall, - * thus insuring that clks initialized very early will still be - * represented in the debugfs clk tree. This function should only be - * called once at boot-time, and all other clks added dynamically will - * be done so with clk_debug_register. - */ -static int __init clk_debug_init(void) +static bool clk_core_is_enabled(struct clk_core *core) { - struct clk_core *clk; - struct dentry *d; - - rootdir = debugfs_create_dir("clk", NULL); - - if (!rootdir) - return -ENOMEM; - - d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists, - &clk_summary_fops); - if (!d) - return -ENOMEM; - - d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists, - &clk_dump_fops); - if (!d) - return -ENOMEM; - - d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir, - &orphan_list, &clk_summary_fops); - if (!d) - return -ENOMEM; - - d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir, - &orphan_list, &clk_dump_fops); - if (!d) - return -ENOMEM; - - mutex_lock(&clk_debug_lock); - hlist_for_each_entry(clk, &clk_debug_list, debug_node) - clk_debug_create_one(clk, rootdir); - - inited = 1; - mutex_unlock(&clk_debug_lock); + /* + * .is_enabled is only mandatory for clocks that gate + * fall back to software usage counter if .is_enabled is missing + */ + if (!core->ops->is_enabled) + return core->enable_count; - return 0; -} -late_initcall(clk_debug_init); -#else -static inline int clk_debug_register(struct clk_core *clk) { return 0; } -static inline void clk_debug_reparent(struct clk_core *clk, - struct clk_core *new_parent) -{ + return core->ops->is_enabled(core->hw); } -static inline void clk_debug_unregister(struct clk_core *clk) -{ -} -#endif -/* caller must hold prepare_lock */ -static void clk_unprepare_unused_subtree(struct clk_core *clk) +static void clk_unprepare_unused_subtree(struct clk_core *core) { struct clk_core *child; lockdep_assert_held(&prepare_lock); - hlist_for_each_entry(child, &clk->children, child_node) + hlist_for_each_entry(child, &core->children, child_node) clk_unprepare_unused_subtree(child); - if (clk->prepare_count) + if (core->prepare_count) return; - if (clk->flags & CLK_IGNORE_UNUSED) + if (core->flags & CLK_IGNORE_UNUSED) return; - if (clk_core_is_prepared(clk)) { - trace_clk_unprepare(clk); - if (clk->ops->unprepare_unused) - clk->ops->unprepare_unused(clk->hw); - else if (clk->ops->unprepare) - clk->ops->unprepare(clk->hw); - trace_clk_unprepare_complete(clk); + if (clk_core_is_prepared(core)) { + trace_clk_unprepare(core); + if (core->ops->unprepare_unused) + core->ops->unprepare_unused(core->hw); + else if (core->ops->unprepare) + core->ops->unprepare(core->hw); + trace_clk_unprepare_complete(core); } } -/* caller must hold prepare_lock */ -static void clk_disable_unused_subtree(struct clk_core *clk) +static void clk_disable_unused_subtree(struct clk_core *core) { struct clk_core *child; unsigned long flags; lockdep_assert_held(&prepare_lock); - hlist_for_each_entry(child, &clk->children, child_node) + hlist_for_each_entry(child, &core->children, child_node) clk_disable_unused_subtree(child); flags = clk_enable_lock(); - if (clk->enable_count) + if (core->enable_count) goto unlock_out; - if (clk->flags & CLK_IGNORE_UNUSED) + if (core->flags & CLK_IGNORE_UNUSED) goto unlock_out; /* @@ -528,13 +211,13 @@ static void clk_disable_unused_subtree(struct clk_core *clk) * sequence. 
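clk_unprepare_unused_subtree and clk_disable_unused_subtree above skip clocks flagged CLK_IGNORE_UNUSED, which is how a provider keeps a clock running across the clk_disable_unused() pass late in boot even though no Linux driver claims it. A minimal registration sketch; the clock name, parent, register and bit index are placeholders:

#include <linux/clk-provider.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_gate_lock);

/* Keep this gate untouched by clk_disable_unused(), e.g. it feeds a coprocessor */
static struct clk *example_register_keepalive(void __iomem *reg)
{
        return clk_register_gate(NULL, "coproc_gate", "ahb_div",
                                 CLK_IGNORE_UNUSED, reg, 3, 0,
                                 &example_gate_lock);
}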
call .disable_unused if available, otherwise fall * back to .disable */ - if (clk_core_is_enabled(clk)) { - trace_clk_disable(clk); - if (clk->ops->disable_unused) - clk->ops->disable_unused(clk->hw); - else if (clk->ops->disable) - clk->ops->disable(clk->hw); - trace_clk_disable_complete(clk); + if (clk_core_is_enabled(core)) { + trace_clk_disable(core); + if (core->ops->disable_unused) + core->ops->disable_unused(core->hw); + else if (core->ops->disable) + core->ops->disable(core->hw); + trace_clk_disable_complete(core); } unlock_out: @@ -551,7 +234,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup); static int clk_disable_unused(void) { - struct clk_core *clk; + struct clk_core *core; if (clk_ignore_unused) { pr_warn("clk: Not disabling unused clocks\n"); @@ -560,17 +243,17 @@ static int clk_disable_unused(void) clk_prepare_lock(); - hlist_for_each_entry(clk, &clk_root_list, child_node) - clk_disable_unused_subtree(clk); + hlist_for_each_entry(core, &clk_root_list, child_node) + clk_disable_unused_subtree(core); - hlist_for_each_entry(clk, &clk_orphan_list, child_node) - clk_disable_unused_subtree(clk); + hlist_for_each_entry(core, &clk_orphan_list, child_node) + clk_disable_unused_subtree(core); - hlist_for_each_entry(clk, &clk_root_list, child_node) - clk_unprepare_unused_subtree(clk); + hlist_for_each_entry(core, &clk_root_list, child_node) + clk_unprepare_unused_subtree(core); - hlist_for_each_entry(clk, &clk_orphan_list, child_node) - clk_unprepare_unused_subtree(clk); + hlist_for_each_entry(core, &clk_orphan_list, child_node) + clk_unprepare_unused_subtree(core); clk_prepare_unlock(); @@ -608,18 +291,61 @@ struct clk *__clk_get_parent(struct clk *clk) } EXPORT_SYMBOL_GPL(__clk_get_parent); -static struct clk_core *clk_core_get_parent_by_index(struct clk_core *clk, +static struct clk_core *__clk_lookup_subtree(const char *name, + struct clk_core *core) +{ + struct clk_core *child; + struct clk_core *ret; + + if (!strcmp(core->name, name)) + return core; + + hlist_for_each_entry(child, &core->children, child_node) { + ret = __clk_lookup_subtree(name, child); + if (ret) + return ret; + } + + return NULL; +} + +static struct clk_core *clk_core_lookup(const char *name) +{ + struct clk_core *root_clk; + struct clk_core *ret; + + if (!name) + return NULL; + + /* search the 'proper' clk tree first */ + hlist_for_each_entry(root_clk, &clk_root_list, child_node) { + ret = __clk_lookup_subtree(name, root_clk); + if (ret) + return ret; + } + + /* if not found, then search the orphan tree */ + hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) { + ret = __clk_lookup_subtree(name, root_clk); + if (ret) + return ret; + } + + return NULL; +} + +static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core, u8 index) { - if (!clk || index >= clk->num_parents) + if (!core || index >= core->num_parents) return NULL; - else if (!clk->parents) - return clk_core_lookup(clk->parent_names[index]); - else if (!clk->parents[index]) - return clk->parents[index] = - clk_core_lookup(clk->parent_names[index]); + else if (!core->parents) + return clk_core_lookup(core->parent_names[index]); + else if (!core->parents[index]) + return core->parents[index] = + clk_core_lookup(core->parent_names[index]); else - return clk->parents[index]; + return core->parents[index]; } struct clk *clk_get_parent_by_index(struct clk *clk, u8 index) @@ -640,21 +366,21 @@ unsigned int __clk_get_enable_count(struct clk *clk) return !clk ? 
0 : clk->core->enable_count; } -static unsigned long clk_core_get_rate_nolock(struct clk_core *clk) +static unsigned long clk_core_get_rate_nolock(struct clk_core *core) { unsigned long ret; - if (!clk) { + if (!core) { ret = 0; goto out; } - ret = clk->rate; + ret = core->rate; - if (clk->flags & CLK_IS_ROOT) + if (core->flags & CLK_IS_ROOT) goto out; - if (!clk->parent) + if (!core->parent) ret = 0; out: @@ -670,12 +396,12 @@ unsigned long __clk_get_rate(struct clk *clk) } EXPORT_SYMBOL_GPL(__clk_get_rate); -static unsigned long __clk_get_accuracy(struct clk_core *clk) +static unsigned long __clk_get_accuracy(struct clk_core *core) { - if (!clk) + if (!core) return 0; - return clk->accuracy; + return core->accuracy; } unsigned long __clk_get_flags(struct clk *clk) @@ -684,27 +410,6 @@ unsigned long __clk_get_flags(struct clk *clk) } EXPORT_SYMBOL_GPL(__clk_get_flags); -static bool clk_core_is_prepared(struct clk_core *clk) -{ - int ret; - - if (!clk) - return false; - - /* - * .is_prepared is optional for clocks that can prepare - * fall back to software usage counter if it is missing - */ - if (!clk->ops->is_prepared) { - ret = clk->prepare_count ? 1 : 0; - goto out; - } - - ret = clk->ops->is_prepared(clk->hw); -out: - return !!ret; -} - bool __clk_is_prepared(struct clk *clk) { if (!clk) @@ -713,27 +418,6 @@ bool __clk_is_prepared(struct clk *clk) return clk_core_is_prepared(clk->core); } -static bool clk_core_is_enabled(struct clk_core *clk) -{ - int ret; - - if (!clk) - return false; - - /* - * .is_enabled is only mandatory for clocks that gate - * fall back to software usage counter if .is_enabled is missing - */ - if (!clk->ops->is_enabled) { - ret = clk->enable_count ? 1 : 0; - goto out; - } - - ret = clk->ops->is_enabled(clk->hw); -out: - return !!ret; -} - bool __clk_is_enabled(struct clk *clk) { if (!clk) @@ -743,49 +427,6 @@ bool __clk_is_enabled(struct clk *clk) } EXPORT_SYMBOL_GPL(__clk_is_enabled); -static struct clk_core *__clk_lookup_subtree(const char *name, - struct clk_core *clk) -{ - struct clk_core *child; - struct clk_core *ret; - - if (!strcmp(clk->name, name)) - return clk; - - hlist_for_each_entry(child, &clk->children, child_node) { - ret = __clk_lookup_subtree(name, child); - if (ret) - return ret; - } - - return NULL; -} - -static struct clk_core *clk_core_lookup(const char *name) -{ - struct clk_core *root_clk; - struct clk_core *ret; - - if (!name) - return NULL; - - /* search the 'proper' clk tree first */ - hlist_for_each_entry(root_clk, &clk_root_list, child_node) { - ret = __clk_lookup_subtree(name, root_clk); - if (ret) - return ret; - } - - /* if not found, then search the orphan tree */ - hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) { - ret = __clk_lookup_subtree(name, root_clk); - if (ret) - return ret; - } - - return NULL; -} - static bool mux_is_better_rate(unsigned long rate, unsigned long now, unsigned long best, unsigned long flags) { @@ -853,7 +494,7 @@ struct clk *__clk_lookup(const char *name) return !core ? 
NULL : core->hw->clk; } -static void clk_core_get_boundaries(struct clk_core *clk, +static void clk_core_get_boundaries(struct clk_core *core, unsigned long *min_rate, unsigned long *max_rate) { @@ -862,10 +503,10 @@ static void clk_core_get_boundaries(struct clk_core *clk, *min_rate = 0; *max_rate = ULONG_MAX; - hlist_for_each_entry(clk_user, &clk->clks, clks_node) + hlist_for_each_entry(clk_user, &core->clks, clks_node) *min_rate = max(*min_rate, clk_user->min_rate); - hlist_for_each_entry(clk_user, &clk->clks, clks_node) + hlist_for_each_entry(clk_user, &core->clks, clks_node) *max_rate = min(*max_rate, clk_user->max_rate); } @@ -901,26 +542,28 @@ EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest); /*** clk api ***/ -static void clk_core_unprepare(struct clk_core *clk) +static void clk_core_unprepare(struct clk_core *core) { - if (!clk) + lockdep_assert_held(&prepare_lock); + + if (!core) return; - if (WARN_ON(clk->prepare_count == 0)) + if (WARN_ON(core->prepare_count == 0)) return; - if (--clk->prepare_count > 0) + if (--core->prepare_count > 0) return; - WARN_ON(clk->enable_count > 0); + WARN_ON(core->enable_count > 0); - trace_clk_unprepare(clk); + trace_clk_unprepare(core); - if (clk->ops->unprepare) - clk->ops->unprepare(clk->hw); + if (core->ops->unprepare) + core->ops->unprepare(core->hw); - trace_clk_unprepare_complete(clk); - clk_core_unprepare(clk->parent); + trace_clk_unprepare_complete(core); + clk_core_unprepare(core->parent); } /** @@ -945,32 +588,34 @@ void clk_unprepare(struct clk *clk) } EXPORT_SYMBOL_GPL(clk_unprepare); -static int clk_core_prepare(struct clk_core *clk) +static int clk_core_prepare(struct clk_core *core) { int ret = 0; - if (!clk) + lockdep_assert_held(&prepare_lock); + + if (!core) return 0; - if (clk->prepare_count == 0) { - ret = clk_core_prepare(clk->parent); + if (core->prepare_count == 0) { + ret = clk_core_prepare(core->parent); if (ret) return ret; - trace_clk_prepare(clk); + trace_clk_prepare(core); - if (clk->ops->prepare) - ret = clk->ops->prepare(clk->hw); + if (core->ops->prepare) + ret = core->ops->prepare(core->hw); - trace_clk_prepare_complete(clk); + trace_clk_prepare_complete(core); if (ret) { - clk_core_unprepare(clk->parent); + clk_core_unprepare(core->parent); return ret; } } - clk->prepare_count++; + core->prepare_count++; return 0; } @@ -1002,33 +647,27 @@ int clk_prepare(struct clk *clk) } EXPORT_SYMBOL_GPL(clk_prepare); -static void clk_core_disable(struct clk_core *clk) +static void clk_core_disable(struct clk_core *core) { - if (!clk) + lockdep_assert_held(&enable_lock); + + if (!core) return; - if (WARN_ON(clk->enable_count == 0)) + if (WARN_ON(core->enable_count == 0)) return; - if (--clk->enable_count > 0) + if (--core->enable_count > 0) return; - trace_clk_disable(clk); + trace_clk_disable(core); - if (clk->ops->disable) - clk->ops->disable(clk->hw); + if (core->ops->disable) + core->ops->disable(core->hw); - trace_clk_disable_complete(clk); + trace_clk_disable_complete(core); - clk_core_disable(clk->parent); -} - -static void __clk_disable(struct clk *clk) -{ - if (!clk) - return; - - clk_core_disable(clk->core); + clk_core_disable(core->parent); } /** @@ -1051,52 +690,46 @@ void clk_disable(struct clk *clk) return; flags = clk_enable_lock(); - __clk_disable(clk); + clk_core_disable(clk->core); clk_enable_unlock(flags); } EXPORT_SYMBOL_GPL(clk_disable); -static int clk_core_enable(struct clk_core *clk) +static int clk_core_enable(struct clk_core *core) { int ret = 0; - if (!clk) + lockdep_assert_held(&enable_lock); + + 
if (!core) return 0; - if (WARN_ON(clk->prepare_count == 0)) + if (WARN_ON(core->prepare_count == 0)) return -ESHUTDOWN; - if (clk->enable_count == 0) { - ret = clk_core_enable(clk->parent); + if (core->enable_count == 0) { + ret = clk_core_enable(core->parent); if (ret) return ret; - trace_clk_enable(clk); + trace_clk_enable(core); - if (clk->ops->enable) - ret = clk->ops->enable(clk->hw); + if (core->ops->enable) + ret = core->ops->enable(core->hw); - trace_clk_enable_complete(clk); + trace_clk_enable_complete(core); if (ret) { - clk_core_disable(clk->parent); + clk_core_disable(core->parent); return ret; } } - clk->enable_count++; + core->enable_count++; return 0; } -static int __clk_enable(struct clk *clk) -{ - if (!clk) - return 0; - - return clk_core_enable(clk->core); -} - /** * clk_enable - ungate a clock * @clk: the clk being ungated @@ -1115,15 +748,18 @@ int clk_enable(struct clk *clk) unsigned long flags; int ret; + if (!clk) + return 0; + flags = clk_enable_lock(); - ret = __clk_enable(clk); + ret = clk_core_enable(clk->core); clk_enable_unlock(flags); return ret; } EXPORT_SYMBOL_GPL(clk_enable); -static unsigned long clk_core_round_rate_nolock(struct clk_core *clk, +static unsigned long clk_core_round_rate_nolock(struct clk_core *core, unsigned long rate, unsigned long min_rate, unsigned long max_rate) @@ -1134,25 +770,25 @@ static unsigned long clk_core_round_rate_nolock(struct clk_core *clk, lockdep_assert_held(&prepare_lock); - if (!clk) + if (!core) return 0; - parent = clk->parent; + parent = core->parent; if (parent) parent_rate = parent->rate; - if (clk->ops->determine_rate) { + if (core->ops->determine_rate) { parent_hw = parent ? parent->hw : NULL; - return clk->ops->determine_rate(clk->hw, rate, + return core->ops->determine_rate(core->hw, rate, min_rate, max_rate, &parent_rate, &parent_hw); - } else if (clk->ops->round_rate) - return clk->ops->round_rate(clk->hw, rate, &parent_rate); - else if (clk->flags & CLK_SET_RATE_PARENT) - return clk_core_round_rate_nolock(clk->parent, rate, min_rate, + } else if (core->ops->round_rate) + return core->ops->round_rate(core->hw, rate, &parent_rate); + else if (core->flags & CLK_SET_RATE_PARENT) + return clk_core_round_rate_nolock(core->parent, rate, min_rate, max_rate); else - return clk->rate; + return core->rate; } /** @@ -1162,8 +798,7 @@ static unsigned long clk_core_round_rate_nolock(struct clk_core *clk, * @min_rate: returned rate must be greater than this rate * @max_rate: returned rate must be less than this rate * - * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate and - * .determine_rate. + * Useful for clk_ops such as .set_rate and .determine_rate. */ unsigned long __clk_determine_rate(struct clk_hw *hw, unsigned long rate, @@ -1182,7 +817,7 @@ EXPORT_SYMBOL_GPL(__clk_determine_rate); * @clk: round the rate of this clock * @rate: the rate which is to be rounded * - * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate + * Useful for clk_ops such as .set_rate */ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate) { @@ -1224,7 +859,7 @@ EXPORT_SYMBOL_GPL(clk_round_rate); /** * __clk_notify - call clk notifier chain - * @clk: struct clk * that is changing rate + * @core: clk that is changing rate * @msg: clk notifier type (see include/linux/clk.h) * @old_rate: old clk rate * @new_rate: new clk rate @@ -1236,7 +871,7 @@ EXPORT_SYMBOL_GPL(clk_round_rate); * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if * a driver returns that. 
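clk_core_enable and clk_enable above run under the enable spinlock, so clk_enable()/clk_disable() must stay atomic while clk_prepare()/clk_unprepare() may sleep; most consumers simply use the combined helpers. A minimal consumer sketch, with the device handle and connection id as placeholders:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_start(struct device *dev)
{
        struct clk *clk;
        int ret;

        clk = devm_clk_get(dev, "bus");         /* placeholder con_id */
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        /* clk_prepare() may sleep, clk_enable() may not; this does both */
        ret = clk_prepare_enable(clk);
        if (ret)
                return ret;

        /* ... talk to the hardware ... */

        clk_disable_unprepare(clk);
        return 0;
}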
*/ -static int __clk_notify(struct clk_core *clk, unsigned long msg, +static int __clk_notify(struct clk_core *core, unsigned long msg, unsigned long old_rate, unsigned long new_rate) { struct clk_notifier *cn; @@ -1247,7 +882,7 @@ static int __clk_notify(struct clk_core *clk, unsigned long msg, cnd.new_rate = new_rate; list_for_each_entry(cn, &clk_notifier_list, node) { - if (cn->clk->core == clk) { + if (cn->clk->core == core) { cnd.clk = cn->clk; ret = srcu_notifier_call_chain(&cn->notifier_head, msg, &cnd); @@ -1259,44 +894,42 @@ static int __clk_notify(struct clk_core *clk, unsigned long msg, /** * __clk_recalc_accuracies - * @clk: first clk in the subtree + * @core: first clk in the subtree * * Walks the subtree of clks starting with clk and recalculates accuracies as * it goes. Note that if a clk does not implement the .recalc_accuracy - * callback then it is assumed that the clock will take on the accuracy of it's + * callback then it is assumed that the clock will take on the accuracy of its * parent. - * - * Caller must hold prepare_lock. */ -static void __clk_recalc_accuracies(struct clk_core *clk) +static void __clk_recalc_accuracies(struct clk_core *core) { unsigned long parent_accuracy = 0; struct clk_core *child; lockdep_assert_held(&prepare_lock); - if (clk->parent) - parent_accuracy = clk->parent->accuracy; + if (core->parent) + parent_accuracy = core->parent->accuracy; - if (clk->ops->recalc_accuracy) - clk->accuracy = clk->ops->recalc_accuracy(clk->hw, + if (core->ops->recalc_accuracy) + core->accuracy = core->ops->recalc_accuracy(core->hw, parent_accuracy); else - clk->accuracy = parent_accuracy; + core->accuracy = parent_accuracy; - hlist_for_each_entry(child, &clk->children, child_node) + hlist_for_each_entry(child, &core->children, child_node) __clk_recalc_accuracies(child); } -static long clk_core_get_accuracy(struct clk_core *clk) +static long clk_core_get_accuracy(struct clk_core *core) { unsigned long accuracy; clk_prepare_lock(); - if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE)) - __clk_recalc_accuracies(clk); + if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE)) + __clk_recalc_accuracies(core); - accuracy = __clk_get_accuracy(clk); + accuracy = __clk_get_accuracy(core); clk_prepare_unlock(); return accuracy; @@ -1320,17 +953,17 @@ long clk_get_accuracy(struct clk *clk) } EXPORT_SYMBOL_GPL(clk_get_accuracy); -static unsigned long clk_recalc(struct clk_core *clk, +static unsigned long clk_recalc(struct clk_core *core, unsigned long parent_rate) { - if (clk->ops->recalc_rate) - return clk->ops->recalc_rate(clk->hw, parent_rate); + if (core->ops->recalc_rate) + return core->ops->recalc_rate(core->hw, parent_rate); return parent_rate; } /** * __clk_recalc_rates - * @clk: first clk in the subtree + * @core: first clk in the subtree * @msg: notification type (see include/linux/clk.h) * * Walks the subtree of clks starting with clk and recalculates rates as it @@ -1339,10 +972,8 @@ static unsigned long clk_recalc(struct clk_core *clk, * * clk_recalc_rates also propagates the POST_RATE_CHANGE notification, * if necessary. - * - * Caller must hold prepare_lock. 
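__clk_notify above is what delivers the PRE_RATE_CHANGE/POST_RATE_CHANGE/ABORT_RATE_CHANGE events to subscribers; consumers subscribe with clk_notifier_register(). A minimal sketch of a notifier callback, where what it does with each event is purely illustrative:

#include <linux/clk.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_clk_notify(struct notifier_block *nb, unsigned long event,
                              void *data)
{
        struct clk_notifier_data *ndata = data;

        switch (event) {
        case PRE_RATE_CHANGE:
                /* e.g. bump a supply before the rate goes up */
                pr_debug("clk rate %lu -> %lu\n",
                         ndata->old_rate, ndata->new_rate);
                break;
        case POST_RATE_CHANGE:
        case ABORT_RATE_CHANGE:
                /* commit or undo whatever PRE_RATE_CHANGE set up */
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block example_clk_nb = {
        .notifier_call = example_clk_notify,
};

/* with a struct clk *clk in hand: clk_notifier_register(clk, &example_clk_nb); */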
*/ -static void __clk_recalc_rates(struct clk_core *clk, unsigned long msg) +static void __clk_recalc_rates(struct clk_core *core, unsigned long msg) { unsigned long old_rate; unsigned long parent_rate = 0; @@ -1350,34 +981,34 @@ static void __clk_recalc_rates(struct clk_core *clk, unsigned long msg) lockdep_assert_held(&prepare_lock); - old_rate = clk->rate; + old_rate = core->rate; - if (clk->parent) - parent_rate = clk->parent->rate; + if (core->parent) + parent_rate = core->parent->rate; - clk->rate = clk_recalc(clk, parent_rate); + core->rate = clk_recalc(core, parent_rate); /* * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE * & ABORT_RATE_CHANGE notifiers */ - if (clk->notifier_count && msg) - __clk_notify(clk, msg, old_rate, clk->rate); + if (core->notifier_count && msg) + __clk_notify(core, msg, old_rate, core->rate); - hlist_for_each_entry(child, &clk->children, child_node) + hlist_for_each_entry(child, &core->children, child_node) __clk_recalc_rates(child, msg); } -static unsigned long clk_core_get_rate(struct clk_core *clk) +static unsigned long clk_core_get_rate(struct clk_core *core) { unsigned long rate; clk_prepare_lock(); - if (clk && (clk->flags & CLK_GET_RATE_NOCACHE)) - __clk_recalc_rates(clk, 0); + if (core && (core->flags & CLK_GET_RATE_NOCACHE)) + __clk_recalc_rates(core, 0); - rate = clk_core_get_rate_nolock(clk); + rate = clk_core_get_rate_nolock(core); clk_prepare_unlock(); return rate; @@ -1400,15 +1031,15 @@ unsigned long clk_get_rate(struct clk *clk) } EXPORT_SYMBOL_GPL(clk_get_rate); -static int clk_fetch_parent_index(struct clk_core *clk, +static int clk_fetch_parent_index(struct clk_core *core, struct clk_core *parent) { int i; - if (!clk->parents) { - clk->parents = kcalloc(clk->num_parents, + if (!core->parents) { + core->parents = kcalloc(core->num_parents, sizeof(struct clk *), GFP_KERNEL); - if (!clk->parents) + if (!core->parents) return -ENOMEM; } @@ -1417,15 +1048,15 @@ static int clk_fetch_parent_index(struct clk_core *clk, * or if not yet cached, use string name comparison and cache * them now to avoid future calls to clk_core_lookup. 
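clk_recalc, just above, defers to .recalc_rate when the provider has one and otherwise inherits the parent rate. For a divider that callback is typically a register read plus a division; a minimal sketch with an invented register layout:

#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/kernel.h>

struct example_div {
        struct clk_hw hw;
        void __iomem *reg;      /* low 4 bits hold divisor - 1 (invented layout) */
};

#define to_example_div(_hw) container_of(_hw, struct example_div, hw)

static unsigned long example_div_recalc_rate(struct clk_hw *hw,
                                             unsigned long parent_rate)
{
        u32 val = readl_relaxed(to_example_div(hw)->reg) & 0xf;

        return parent_rate / (val + 1);
}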
*/ - for (i = 0; i < clk->num_parents; i++) { - if (clk->parents[i] == parent) + for (i = 0; i < core->num_parents; i++) { + if (core->parents[i] == parent) return i; - if (clk->parents[i]) + if (core->parents[i]) continue; - if (!strcmp(clk->parent_names[i], parent->name)) { - clk->parents[i] = clk_core_lookup(parent->name); + if (!strcmp(core->parent_names[i], parent->name)) { + core->parents[i] = clk_core_lookup(parent->name); return i; } } @@ -1433,28 +1064,28 @@ static int clk_fetch_parent_index(struct clk_core *clk, return -EINVAL; } -static void clk_reparent(struct clk_core *clk, struct clk_core *new_parent) +static void clk_reparent(struct clk_core *core, struct clk_core *new_parent) { - hlist_del(&clk->child_node); + hlist_del(&core->child_node); if (new_parent) { /* avoid duplicate POST_RATE_CHANGE notifications */ - if (new_parent->new_child == clk) + if (new_parent->new_child == core) new_parent->new_child = NULL; - hlist_add_head(&clk->child_node, &new_parent->children); + hlist_add_head(&core->child_node, &new_parent->children); } else { - hlist_add_head(&clk->child_node, &clk_orphan_list); + hlist_add_head(&core->child_node, &clk_orphan_list); } - clk->parent = new_parent; + core->parent = new_parent; } -static struct clk_core *__clk_set_parent_before(struct clk_core *clk, +static struct clk_core *__clk_set_parent_before(struct clk_core *core, struct clk_core *parent) { unsigned long flags; - struct clk_core *old_parent = clk->parent; + struct clk_core *old_parent = core->parent; /* * Migrate prepare state between parents and prevent race with @@ -1473,17 +1104,17 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *clk, * * See also: Comment for clk_set_parent() below. */ - if (clk->prepare_count) { + if (core->prepare_count) { clk_core_prepare(parent); flags = clk_enable_lock(); clk_core_enable(parent); - clk_core_enable(clk); + clk_core_enable(core); clk_enable_unlock(flags); } /* update the clk tree topology */ flags = clk_enable_lock(); - clk_reparent(clk, parent); + clk_reparent(core, parent); clk_enable_unlock(flags); return old_parent; @@ -1508,31 +1139,31 @@ static void __clk_set_parent_after(struct clk_core *core, } } -static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent, +static int __clk_set_parent(struct clk_core *core, struct clk_core *parent, u8 p_index) { unsigned long flags; int ret = 0; struct clk_core *old_parent; - old_parent = __clk_set_parent_before(clk, parent); + old_parent = __clk_set_parent_before(core, parent); - trace_clk_set_parent(clk, parent); + trace_clk_set_parent(core, parent); /* change clock input source */ - if (parent && clk->ops->set_parent) - ret = clk->ops->set_parent(clk->hw, p_index); + if (parent && core->ops->set_parent) + ret = core->ops->set_parent(core->hw, p_index); - trace_clk_set_parent_complete(clk, parent); + trace_clk_set_parent_complete(core, parent); if (ret) { flags = clk_enable_lock(); - clk_reparent(clk, old_parent); + clk_reparent(core, old_parent); clk_enable_unlock(flags); - if (clk->prepare_count) { + if (core->prepare_count) { flags = clk_enable_lock(); - clk_core_disable(clk); + clk_core_disable(core); clk_core_disable(parent); clk_enable_unlock(flags); clk_core_unprepare(parent); @@ -1540,14 +1171,14 @@ static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent, return ret; } - __clk_set_parent_after(clk, parent, old_parent); + __clk_set_parent_after(core, parent, old_parent); return 0; } /** * __clk_speculate_rates - * @clk: first clk in the subtree + * 
@core: first clk in the subtree * @parent_rate: the "future" rate of clk's parent * * Walks the subtree of clks starting with clk, speculating rates as it @@ -1558,10 +1189,8 @@ static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent, * subtree have subscribed to the notifications. Note that if a clk does not * implement the .recalc_rate callback then it is assumed that the clock will * take on the rate of its parent. - * - * Caller must hold prepare_lock. */ -static int __clk_speculate_rates(struct clk_core *clk, +static int __clk_speculate_rates(struct clk_core *core, unsigned long parent_rate) { struct clk_core *child; @@ -1570,19 +1199,19 @@ static int __clk_speculate_rates(struct clk_core *clk, lockdep_assert_held(&prepare_lock); - new_rate = clk_recalc(clk, parent_rate); + new_rate = clk_recalc(core, parent_rate); /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */ - if (clk->notifier_count) - ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate); + if (core->notifier_count) + ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate); if (ret & NOTIFY_STOP_MASK) { pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n", - __func__, clk->name, ret); + __func__, core->name, ret); goto out; } - hlist_for_each_entry(child, &clk->children, child_node) { + hlist_for_each_entry(child, &core->children, child_node) { ret = __clk_speculate_rates(child, new_rate); if (ret & NOTIFY_STOP_MASK) break; @@ -1592,20 +1221,20 @@ out: return ret; } -static void clk_calc_subtree(struct clk_core *clk, unsigned long new_rate, +static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate, struct clk_core *new_parent, u8 p_index) { struct clk_core *child; - clk->new_rate = new_rate; - clk->new_parent = new_parent; - clk->new_parent_index = p_index; + core->new_rate = new_rate; + core->new_parent = new_parent; + core->new_parent_index = p_index; /* include clk in new parent's PRE_RATE_CHANGE notifications */ - clk->new_child = NULL; - if (new_parent && new_parent != clk->parent) - new_parent->new_child = clk; + core->new_child = NULL; + if (new_parent && new_parent != core->parent) + new_parent->new_child = core; - hlist_for_each_entry(child, &clk->children, child_node) { + hlist_for_each_entry(child, &core->children, child_node) { child->new_rate = clk_recalc(child, new_rate); clk_calc_subtree(child, child->new_rate, NULL, 0); } @@ -1615,10 +1244,10 @@ static void clk_calc_subtree(struct clk_core *clk, unsigned long new_rate, * calculate the new rates returning the topmost clock that has to be * changed. */ -static struct clk_core *clk_calc_new_rates(struct clk_core *clk, +static struct clk_core *clk_calc_new_rates(struct clk_core *core, unsigned long rate) { - struct clk_core *top = clk; + struct clk_core *top = core; struct clk_core *old_parent, *parent; struct clk_hw *parent_hw; unsigned long best_parent_rate = 0; @@ -1629,20 +1258,20 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *clk, long ret; /* sanity */ - if (IS_ERR_OR_NULL(clk)) + if (IS_ERR_OR_NULL(core)) return NULL; /* save parent rate, if it exists */ - parent = old_parent = clk->parent; + parent = old_parent = core->parent; if (parent) best_parent_rate = parent->rate; - clk_core_get_boundaries(clk, &min_rate, &max_rate); + clk_core_get_boundaries(core, &min_rate, &max_rate); /* find the closest rate and parent clk/rate */ - if (clk->ops->determine_rate) { + if (core->ops->determine_rate) { parent_hw = parent ? 
parent->hw : NULL; - ret = clk->ops->determine_rate(clk->hw, rate, + ret = core->ops->determine_rate(core->hw, rate, min_rate, max_rate, &best_parent_rate, @@ -1652,8 +1281,8 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *clk, new_rate = ret; parent = parent_hw ? parent_hw->core : NULL; - } else if (clk->ops->round_rate) { - ret = clk->ops->round_rate(clk->hw, rate, + } else if (core->ops->round_rate) { + ret = core->ops->round_rate(core->hw, rate, &best_parent_rate); if (ret < 0) return NULL; @@ -1661,9 +1290,9 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *clk, new_rate = ret; if (new_rate < min_rate || new_rate > max_rate) return NULL; - } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) { + } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) { /* pass-through clock without adjustable parent */ - clk->new_rate = clk->rate; + core->new_rate = core->rate; return NULL; } else { /* pass-through clock with adjustable parent */ @@ -1674,28 +1303,28 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *clk, /* some clocks must be gated to change parent */ if (parent != old_parent && - (clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) { + (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { pr_debug("%s: %s not gated but wants to reparent\n", - __func__, clk->name); + __func__, core->name); return NULL; } /* try finding the new parent index */ - if (parent && clk->num_parents > 1) { - p_index = clk_fetch_parent_index(clk, parent); + if (parent && core->num_parents > 1) { + p_index = clk_fetch_parent_index(core, parent); if (p_index < 0) { pr_debug("%s: clk %s can not be parent of clk %s\n", - __func__, parent->name, clk->name); + __func__, parent->name, core->name); return NULL; } } - if ((clk->flags & CLK_SET_RATE_PARENT) && parent && + if ((core->flags & CLK_SET_RATE_PARENT) && parent && best_parent_rate != parent->rate) top = clk_calc_new_rates(parent, best_parent_rate); out: - clk_calc_subtree(clk, new_rate, parent, p_index); + clk_calc_subtree(core, new_rate, parent, p_index); return top; } @@ -1705,33 +1334,33 @@ out: * so that in case of an error we can walk down the whole tree again and * abort the change. 
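The CLK_SET_RATE_PARENT branch in clk_calc_new_rates above lets a clock with no rate control of its own forward clk_set_rate() requests to its parent. A minimal sketch of registering such a pass-through clock; the names are placeholders:

#include <linux/clk-provider.h>

/* 1:1 fixed factor whose only job is to forward rate requests to its parent */
static struct clk *example_register_passthrough(void)
{
        return clk_register_fixed_factor(NULL, "periph_pass", "pll_out",
                                         CLK_SET_RATE_PARENT, 1, 1);
}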
*/ -static struct clk_core *clk_propagate_rate_change(struct clk_core *clk, +static struct clk_core *clk_propagate_rate_change(struct clk_core *core, unsigned long event) { struct clk_core *child, *tmp_clk, *fail_clk = NULL; int ret = NOTIFY_DONE; - if (clk->rate == clk->new_rate) + if (core->rate == core->new_rate) return NULL; - if (clk->notifier_count) { - ret = __clk_notify(clk, event, clk->rate, clk->new_rate); + if (core->notifier_count) { + ret = __clk_notify(core, event, core->rate, core->new_rate); if (ret & NOTIFY_STOP_MASK) - fail_clk = clk; + fail_clk = core; } - hlist_for_each_entry(child, &clk->children, child_node) { + hlist_for_each_entry(child, &core->children, child_node) { /* Skip children who will be reparented to another clock */ - if (child->new_parent && child->new_parent != clk) + if (child->new_parent && child->new_parent != core) continue; tmp_clk = clk_propagate_rate_change(child, event); if (tmp_clk) fail_clk = tmp_clk; } - /* handle the new child who might not be in clk->children yet */ - if (clk->new_child) { - tmp_clk = clk_propagate_rate_change(clk->new_child, event); + /* handle the new child who might not be in core->children yet */ + if (core->new_child) { + tmp_clk = clk_propagate_rate_change(core->new_child, event); if (tmp_clk) fail_clk = tmp_clk; } @@ -1743,7 +1372,7 @@ static struct clk_core *clk_propagate_rate_change(struct clk_core *clk, * walk down a subtree and set the new rates notifying the rate * change on the way */ -static void clk_change_rate(struct clk_core *clk) +static void clk_change_rate(struct clk_core *core) { struct clk_core *child; struct hlist_node *tmp; @@ -1752,77 +1381,80 @@ static void clk_change_rate(struct clk_core *clk) bool skip_set_rate = false; struct clk_core *old_parent; - old_rate = clk->rate; + old_rate = core->rate; - if (clk->new_parent) - best_parent_rate = clk->new_parent->rate; - else if (clk->parent) - best_parent_rate = clk->parent->rate; + if (core->new_parent) + best_parent_rate = core->new_parent->rate; + else if (core->parent) + best_parent_rate = core->parent->rate; - if (clk->new_parent && clk->new_parent != clk->parent) { - old_parent = __clk_set_parent_before(clk, clk->new_parent); - trace_clk_set_parent(clk, clk->new_parent); + if (core->new_parent && core->new_parent != core->parent) { + old_parent = __clk_set_parent_before(core, core->new_parent); + trace_clk_set_parent(core, core->new_parent); - if (clk->ops->set_rate_and_parent) { + if (core->ops->set_rate_and_parent) { skip_set_rate = true; - clk->ops->set_rate_and_parent(clk->hw, clk->new_rate, + core->ops->set_rate_and_parent(core->hw, core->new_rate, best_parent_rate, - clk->new_parent_index); - } else if (clk->ops->set_parent) { - clk->ops->set_parent(clk->hw, clk->new_parent_index); + core->new_parent_index); + } else if (core->ops->set_parent) { + core->ops->set_parent(core->hw, core->new_parent_index); } - trace_clk_set_parent_complete(clk, clk->new_parent); - __clk_set_parent_after(clk, clk->new_parent, old_parent); + trace_clk_set_parent_complete(core, core->new_parent); + __clk_set_parent_after(core, core->new_parent, old_parent); } - trace_clk_set_rate(clk, clk->new_rate); + trace_clk_set_rate(core, core->new_rate); - if (!skip_set_rate && clk->ops->set_rate) - clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate); + if (!skip_set_rate && core->ops->set_rate) + core->ops->set_rate(core->hw, core->new_rate, best_parent_rate); - trace_clk_set_rate_complete(clk, clk->new_rate); + trace_clk_set_rate_complete(core, core->new_rate); - 
clk->rate = clk_recalc(clk, best_parent_rate); + core->rate = clk_recalc(core, best_parent_rate); - if (clk->notifier_count && old_rate != clk->rate) - __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate); + if (core->notifier_count && old_rate != core->rate) + __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate); + + if (core->flags & CLK_RECALC_NEW_RATES) + (void)clk_calc_new_rates(core, core->new_rate); /* * Use safe iteration, as change_rate can actually swap parents * for certain clock types. */ - hlist_for_each_entry_safe(child, tmp, &clk->children, child_node) { + hlist_for_each_entry_safe(child, tmp, &core->children, child_node) { /* Skip children who will be reparented to another clock */ - if (child->new_parent && child->new_parent != clk) + if (child->new_parent && child->new_parent != core) continue; clk_change_rate(child); } - /* handle the new child who might not be in clk->children yet */ - if (clk->new_child) - clk_change_rate(clk->new_child); + /* handle the new child who might not be in core->children yet */ + if (core->new_child) + clk_change_rate(core->new_child); } -static int clk_core_set_rate_nolock(struct clk_core *clk, +static int clk_core_set_rate_nolock(struct clk_core *core, unsigned long req_rate) { struct clk_core *top, *fail_clk; unsigned long rate = req_rate; int ret = 0; - if (!clk) + if (!core) return 0; /* bail early if nothing to do */ - if (rate == clk_core_get_rate_nolock(clk)) + if (rate == clk_core_get_rate_nolock(core)) return 0; - if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) + if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count) return -EBUSY; /* calculate new rates and get the topmost changed clock */ - top = clk_calc_new_rates(clk, rate); + top = clk_calc_new_rates(core, rate); if (!top) return -EINVAL; @@ -1838,7 +1470,7 @@ static int clk_core_set_rate_nolock(struct clk_core *clk, /* change the rates */ clk_change_rate(top); - clk->req_rate = req_rate; + core->req_rate = req_rate; return ret; } @@ -1977,55 +1609,63 @@ EXPORT_SYMBOL_GPL(clk_get_parent); * .parents array exists, and if so use it to avoid an expensive tree * traversal. If .parents does not exist then walk the tree. */ -static struct clk_core *__clk_init_parent(struct clk_core *clk) +static struct clk_core *__clk_init_parent(struct clk_core *core) { struct clk_core *ret = NULL; u8 index; /* handle the trivial cases */ - if (!clk->num_parents) + if (!core->num_parents) goto out; - if (clk->num_parents == 1) { - if (IS_ERR_OR_NULL(clk->parent)) - clk->parent = clk_core_lookup(clk->parent_names[0]); - ret = clk->parent; + if (core->num_parents == 1) { + if (IS_ERR_OR_NULL(core->parent)) + core->parent = clk_core_lookup(core->parent_names[0]); + ret = core->parent; goto out; } - if (!clk->ops->get_parent) { - WARN(!clk->ops->get_parent, + if (!core->ops->get_parent) { + WARN(!core->ops->get_parent, "%s: multi-parent clocks must implement .get_parent\n", __func__); goto out; }; /* - * Do our best to cache parent clocks in clk->parents. This prevents - * unnecessary and expensive lookups. We don't set clk->parent here; + * Do our best to cache parent clocks in core->parents. This prevents + * unnecessary and expensive lookups. We don't set core->parent here; * that is done by the calling function. 
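clk_core_set_rate_nolock above refuses with -EBUSY when a CLK_SET_RATE_GATE clock is still prepared, so such clocks are reclocked before clk_prepare_enable(); clk_round_rate() lets the consumer ask what it would actually get without committing to it. A minimal consumer sketch with an invented target rate:

#include <linux/clk.h>
#include <linux/errno.h>

static int example_reclock(struct clk *clk, unsigned long want)
{
        long rounded;
        int ret;

        /* Ask what the tree would really deliver, without changing anything */
        rounded = clk_round_rate(clk, want);
        if (rounded <= 0)
                return rounded ? : -EINVAL;

        /* For CLK_SET_RATE_GATE clocks this must happen while unprepared */
        ret = clk_set_rate(clk, rounded);
        if (ret)
                return ret;

        return clk_prepare_enable(clk);
}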
*/ - index = clk->ops->get_parent(clk->hw); + index = core->ops->get_parent(core->hw); - if (!clk->parents) - clk->parents = - kcalloc(clk->num_parents, sizeof(struct clk *), + if (!core->parents) + core->parents = + kcalloc(core->num_parents, sizeof(struct clk *), GFP_KERNEL); - ret = clk_core_get_parent_by_index(clk, index); + ret = clk_core_get_parent_by_index(core, index); out: return ret; } -static void clk_core_reparent(struct clk_core *clk, +static void clk_core_reparent(struct clk_core *core, struct clk_core *new_parent) { - clk_reparent(clk, new_parent); - __clk_recalc_accuracies(clk); - __clk_recalc_rates(clk, POST_RATE_CHANGE); + clk_reparent(core, new_parent); + __clk_recalc_accuracies(core); + __clk_recalc_rates(core, POST_RATE_CHANGE); +} + +void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent) +{ + if (!hw) + return; + + clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core); } /** @@ -2062,61 +1702,61 @@ bool clk_has_parent(struct clk *clk, struct clk *parent) } EXPORT_SYMBOL_GPL(clk_has_parent); -static int clk_core_set_parent(struct clk_core *clk, struct clk_core *parent) +static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent) { int ret = 0; int p_index = 0; unsigned long p_rate = 0; - if (!clk) + if (!core) return 0; /* prevent racing with updates to the clock topology */ clk_prepare_lock(); - if (clk->parent == parent) + if (core->parent == parent) goto out; /* verify ops for for multi-parent clks */ - if ((clk->num_parents > 1) && (!clk->ops->set_parent)) { + if ((core->num_parents > 1) && (!core->ops->set_parent)) { ret = -ENOSYS; goto out; } /* check that we are allowed to re-parent if the clock is in use */ - if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count) { + if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) { ret = -EBUSY; goto out; } /* try finding the new parent index */ if (parent) { - p_index = clk_fetch_parent_index(clk, parent); + p_index = clk_fetch_parent_index(core, parent); p_rate = parent->rate; if (p_index < 0) { pr_debug("%s: clk %s can not be parent of clk %s\n", - __func__, parent->name, clk->name); + __func__, parent->name, core->name); ret = p_index; goto out; } } /* propagate PRE_RATE_CHANGE notifications */ - ret = __clk_speculate_rates(clk, p_rate); + ret = __clk_speculate_rates(core, p_rate); /* abort if a driver objects */ if (ret & NOTIFY_STOP_MASK) goto out; /* do the re-parent */ - ret = __clk_set_parent(clk, parent, p_index); + ret = __clk_set_parent(core, parent, p_index); /* propagate rate an accuracy recalculation accordingly */ if (ret) { - __clk_recalc_rates(clk, ABORT_RATE_CHANGE); + __clk_recalc_rates(core, ABORT_RATE_CHANGE); } else { - __clk_recalc_rates(clk, POST_RATE_CHANGE); - __clk_recalc_accuracies(clk); + __clk_recalc_rates(core, POST_RATE_CHANGE); + __clk_recalc_accuracies(core); } out: @@ -2201,21 +1841,16 @@ int clk_set_phase(struct clk *clk, int degrees) } EXPORT_SYMBOL_GPL(clk_set_phase); -static int clk_core_get_phase(struct clk_core *clk) +static int clk_core_get_phase(struct clk_core *core) { - int ret = 0; - - if (!clk) - goto out; + int ret; clk_prepare_lock(); - ret = clk->phase; + ret = core->phase; clk_prepare_unlock(); -out: return ret; } -EXPORT_SYMBOL_GPL(clk_get_phase); /** * clk_get_phase - return the phase shift of a clock signal @@ -2231,6 +1866,7 @@ int clk_get_phase(struct clk *clk) return clk_core_get_phase(clk->core); } +EXPORT_SYMBOL_GPL(clk_get_phase); /** * clk_is_match - check if two clk's point to the same hardware 
clock @@ -2258,6 +1894,337 @@ bool clk_is_match(const struct clk *p, const struct clk *q) } EXPORT_SYMBOL_GPL(clk_is_match); +/*** debugfs support ***/ + +#ifdef CONFIG_DEBUG_FS +#include <linux/debugfs.h> + +static struct dentry *rootdir; +static int inited = 0; +static DEFINE_MUTEX(clk_debug_lock); +static HLIST_HEAD(clk_debug_list); + +static struct hlist_head *all_lists[] = { + &clk_root_list, + &clk_orphan_list, + NULL, +}; + +static struct hlist_head *orphan_list[] = { + &clk_orphan_list, + NULL, +}; + +static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, + int level) +{ + if (!c) + return; + + seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n", + level * 3 + 1, "", + 30 - level * 3, c->name, + c->enable_count, c->prepare_count, clk_core_get_rate(c), + clk_core_get_accuracy(c), clk_core_get_phase(c)); +} + +static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, + int level) +{ + struct clk_core *child; + + if (!c) + return; + + clk_summary_show_one(s, c, level); + + hlist_for_each_entry(child, &c->children, child_node) + clk_summary_show_subtree(s, child, level + 1); +} + +static int clk_summary_show(struct seq_file *s, void *data) +{ + struct clk_core *c; + struct hlist_head **lists = (struct hlist_head **)s->private; + + seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n"); + seq_puts(s, "----------------------------------------------------------------------------------------\n"); + + clk_prepare_lock(); + + for (; *lists; lists++) + hlist_for_each_entry(c, *lists, child_node) + clk_summary_show_subtree(s, c, 0); + + clk_prepare_unlock(); + + return 0; +} + + +static int clk_summary_open(struct inode *inode, struct file *file) +{ + return single_open(file, clk_summary_show, inode->i_private); +} + +static const struct file_operations clk_summary_fops = { + .open = clk_summary_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) +{ + if (!c) + return; + + /* This should be JSON format, i.e. 
elements separated with a comma */ + seq_printf(s, "\"%s\": { ", c->name); + seq_printf(s, "\"enable_count\": %d,", c->enable_count); + seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); + seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); + seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); + seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); +} + +static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) +{ + struct clk_core *child; + + if (!c) + return; + + clk_dump_one(s, c, level); + + hlist_for_each_entry(child, &c->children, child_node) { + seq_printf(s, ","); + clk_dump_subtree(s, child, level + 1); + } + + seq_printf(s, "}"); +} + +static int clk_dump(struct seq_file *s, void *data) +{ + struct clk_core *c; + bool first_node = true; + struct hlist_head **lists = (struct hlist_head **)s->private; + + seq_printf(s, "{"); + + clk_prepare_lock(); + + for (; *lists; lists++) { + hlist_for_each_entry(c, *lists, child_node) { + if (!first_node) + seq_puts(s, ","); + first_node = false; + clk_dump_subtree(s, c, 0); + } + } + + clk_prepare_unlock(); + + seq_puts(s, "}\n"); + return 0; +} + + +static int clk_dump_open(struct inode *inode, struct file *file) +{ + return single_open(file, clk_dump, inode->i_private); +} + +static const struct file_operations clk_dump_fops = { + .open = clk_dump_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) +{ + struct dentry *d; + int ret = -ENOMEM; + + if (!core || !pdentry) { + ret = -EINVAL; + goto out; + } + + d = debugfs_create_dir(core->name, pdentry); + if (!d) + goto out; + + core->dentry = d; + + d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry, + (u32 *)&core->rate); + if (!d) + goto err_out; + + d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry, + (u32 *)&core->accuracy); + if (!d) + goto err_out; + + d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry, + (u32 *)&core->phase); + if (!d) + goto err_out; + + d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry, + (u32 *)&core->flags); + if (!d) + goto err_out; + + d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry, + (u32 *)&core->prepare_count); + if (!d) + goto err_out; + + d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry, + (u32 *)&core->enable_count); + if (!d) + goto err_out; + + d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry, + (u32 *)&core->notifier_count); + if (!d) + goto err_out; + + if (core->ops->debug_init) { + ret = core->ops->debug_init(core->hw, core->dentry); + if (ret) + goto err_out; + } + + ret = 0; + goto out; + +err_out: + debugfs_remove_recursive(core->dentry); + core->dentry = NULL; +out: + return ret; +} + +/** + * clk_debug_register - add a clk node to the debugfs clk directory + * @core: the clk being added to the debugfs clk directory + * + * Dynamically adds a clk to the debugfs clk directory if debugfs has been + * initialized. Otherwise it bails out early since the debugfs clk directory + * will be created lazily by clk_debug_init as part of a late_initcall. 
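clk_debug_create_one above calls the provider's optional .debug_init hook after creating the per-clock directory, and clk_debugfs_add_file() (re-added just below) is the helper for hanging extra entries off it. A minimal sketch; the file name and its file_operations are placeholders assumed to exist elsewhere:

#include <linux/clk-provider.h>
#include <linux/debugfs.h>

extern const struct file_operations example_status_fops;       /* hypothetical */

static int example_debug_init(struct clk_hw *hw, struct dentry *dentry)
{
        /* extra, purely informational entry under this clock's directory */
        clk_debugfs_add_file(hw, "hw_status", S_IRUGO, hw, &example_status_fops);

        return 0;
}

/* wired up as .debug_init in the provider's clk_ops */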
+ */ +static int clk_debug_register(struct clk_core *core) +{ + int ret = 0; + + mutex_lock(&clk_debug_lock); + hlist_add_head(&core->debug_node, &clk_debug_list); + + if (!inited) + goto unlock; + + ret = clk_debug_create_one(core, rootdir); +unlock: + mutex_unlock(&clk_debug_lock); + + return ret; +} + + /** + * clk_debug_unregister - remove a clk node from the debugfs clk directory + * @core: the clk being removed from the debugfs clk directory + * + * Dynamically removes a clk and all its child nodes from the + * debugfs clk directory if clk->dentry points to debugfs created by + * clk_debug_register in __clk_init. + */ +static void clk_debug_unregister(struct clk_core *core) +{ + mutex_lock(&clk_debug_lock); + hlist_del_init(&core->debug_node); + debugfs_remove_recursive(core->dentry); + core->dentry = NULL; + mutex_unlock(&clk_debug_lock); +} + +struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode, + void *data, const struct file_operations *fops) +{ + struct dentry *d = NULL; + + if (hw->core->dentry) + d = debugfs_create_file(name, mode, hw->core->dentry, data, + fops); + + return d; +} +EXPORT_SYMBOL_GPL(clk_debugfs_add_file); + +/** + * clk_debug_init - lazily populate the debugfs clk directory + * + * clks are often initialized very early during boot before memory can be + * dynamically allocated and well before debugfs is setup. This function + * populates the debugfs clk directory once at boot-time when we know that + * debugfs is setup. It should only be called once at boot-time, all other clks + * added dynamically will be done so with clk_debug_register. + */ +static int __init clk_debug_init(void) +{ + struct clk_core *core; + struct dentry *d; + + rootdir = debugfs_create_dir("clk", NULL); + + if (!rootdir) + return -ENOMEM; + + d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists, + &clk_summary_fops); + if (!d) + return -ENOMEM; + + d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists, + &clk_dump_fops); + if (!d) + return -ENOMEM; + + d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir, + &orphan_list, &clk_summary_fops); + if (!d) + return -ENOMEM; + + d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir, + &orphan_list, &clk_dump_fops); + if (!d) + return -ENOMEM; + + mutex_lock(&clk_debug_lock); + hlist_for_each_entry(core, &clk_debug_list, debug_node) + clk_debug_create_one(core, rootdir); + + inited = 1; + mutex_unlock(&clk_debug_lock); + + return 0; +} +late_initcall(clk_debug_init); +#else +static inline int clk_debug_register(struct clk_core *core) { return 0; } +static inline void clk_debug_reparent(struct clk_core *core, + struct clk_core *new_parent) +{ +} +static inline void clk_debug_unregister(struct clk_core *core) +{ +} +#endif + /** * __clk_init - initialize the data structures in a struct clk * @dev: device initializing this clk, placeholder for now @@ -2271,67 +2238,67 @@ static int __clk_init(struct device *dev, struct clk *clk_user) int i, ret = 0; struct clk_core *orphan; struct hlist_node *tmp2; - struct clk_core *clk; + struct clk_core *core; unsigned long rate; if (!clk_user) return -EINVAL; - clk = clk_user->core; + core = clk_user->core; clk_prepare_lock(); /* check to see if a clock with this name is already registered */ - if (clk_core_lookup(clk->name)) { + if (clk_core_lookup(core->name)) { pr_debug("%s: clk %s already initialized\n", - __func__, clk->name); + __func__, core->name); ret = -EEXIST; goto out; } /* check that clk_ops are sane. 
See Documentation/clk.txt */ - if (clk->ops->set_rate && - !((clk->ops->round_rate || clk->ops->determine_rate) && - clk->ops->recalc_rate)) { + if (core->ops->set_rate && + !((core->ops->round_rate || core->ops->determine_rate) && + core->ops->recalc_rate)) { pr_warning("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n", - __func__, clk->name); + __func__, core->name); ret = -EINVAL; goto out; } - if (clk->ops->set_parent && !clk->ops->get_parent) { + if (core->ops->set_parent && !core->ops->get_parent) { pr_warning("%s: %s must implement .get_parent & .set_parent\n", - __func__, clk->name); + __func__, core->name); ret = -EINVAL; goto out; } - if (clk->ops->set_rate_and_parent && - !(clk->ops->set_parent && clk->ops->set_rate)) { + if (core->ops->set_rate_and_parent && + !(core->ops->set_parent && core->ops->set_rate)) { pr_warn("%s: %s must implement .set_parent & .set_rate\n", - __func__, clk->name); + __func__, core->name); ret = -EINVAL; goto out; } /* throw a WARN if any entries in parent_names are NULL */ - for (i = 0; i < clk->num_parents; i++) - WARN(!clk->parent_names[i], + for (i = 0; i < core->num_parents; i++) + WARN(!core->parent_names[i], "%s: invalid NULL in %s's .parent_names\n", - __func__, clk->name); + __func__, core->name); /* * Allocate an array of struct clk *'s to avoid unnecessary string * look-ups of clk's possible parents. This can fail for clocks passed - * in to clk_init during early boot; thus any access to clk->parents[] + * in to clk_init during early boot; thus any access to core->parents[] * must always check for a NULL pointer and try to populate it if * necessary. * - * If clk->parents is not NULL we skip this entire block. This allows - * for clock drivers to statically initialize clk->parents. + * If core->parents is not NULL we skip this entire block. This allows + * for clock drivers to statically initialize core->parents. */ - if (clk->num_parents > 1 && !clk->parents) { - clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *), + if (core->num_parents > 1 && !core->parents) { + core->parents = kcalloc(core->num_parents, sizeof(struct clk *), GFP_KERNEL); /* * clk_core_lookup returns NULL for parents that have not been @@ -2339,16 +2306,16 @@ static int __clk_init(struct device *dev, struct clk *clk_user) * for a NULL pointer. We can always perform lazy lookups for * missing parents later on. */ - if (clk->parents) - for (i = 0; i < clk->num_parents; i++) - clk->parents[i] = - clk_core_lookup(clk->parent_names[i]); + if (core->parents) + for (i = 0; i < core->num_parents; i++) + core->parents[i] = + clk_core_lookup(core->parent_names[i]); } - clk->parent = __clk_init_parent(clk); + core->parent = __clk_init_parent(core); /* - * Populate clk->parent if parent has already been __clk_init'd. If + * Populate core->parent if parent has already been __clk_init'd. If * parent has not yet been __clk_init'd then place clk in the orphan * list. If clk has set the CLK_IS_ROOT flag then place it in the root * clk list. @@ -2357,13 +2324,13 @@ static int __clk_init(struct device *dev, struct clk *clk_user) * clocks and re-parent any that are children of the clock currently * being clk_init'd. 
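/*
 * Editorial sketch, not part of the patch: the checks above accept
 * .set_rate only together with .recalc_rate and .round_rate (or
 * .determine_rate), and .set_parent only together with .get_parent.
 * A minimal fixed divide-by-two clock that satisfies them could look
 * like this; all "foo_*" names are hypothetical.
 */
#include <linux/clk-provider.h>

static unsigned long foo_div2_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	return parent_rate / 2;
}

static long foo_div2_round_rate(struct clk_hw *hw, unsigned long rate,
				unsigned long *parent_rate)
{
	return *parent_rate / 2;
}

static int foo_div2_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	/* nothing to program: the divider is fixed at /2 in hardware */
	return 0;
}

static const struct clk_ops foo_div2_ops = {
	.recalc_rate	= foo_div2_recalc_rate,
	.round_rate	= foo_div2_round_rate,
	.set_rate	= foo_div2_set_rate,
};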
*/ - if (clk->parent) - hlist_add_head(&clk->child_node, - &clk->parent->children); - else if (clk->flags & CLK_IS_ROOT) - hlist_add_head(&clk->child_node, &clk_root_list); + if (core->parent) + hlist_add_head(&core->child_node, + &core->parent->children); + else if (core->flags & CLK_IS_ROOT) + hlist_add_head(&core->child_node, &clk_root_list); else - hlist_add_head(&clk->child_node, &clk_orphan_list); + hlist_add_head(&core->child_node, &clk_orphan_list); /* * Set clk's accuracy. The preferred method is to use @@ -2372,23 +2339,23 @@ static int __clk_init(struct device *dev, struct clk *clk_user) * parent (or is orphaned) then accuracy is set to zero (perfect * clock). */ - if (clk->ops->recalc_accuracy) - clk->accuracy = clk->ops->recalc_accuracy(clk->hw, - __clk_get_accuracy(clk->parent)); - else if (clk->parent) - clk->accuracy = clk->parent->accuracy; + if (core->ops->recalc_accuracy) + core->accuracy = core->ops->recalc_accuracy(core->hw, + __clk_get_accuracy(core->parent)); + else if (core->parent) + core->accuracy = core->parent->accuracy; else - clk->accuracy = 0; + core->accuracy = 0; /* * Set clk's phase. * Since a phase is by definition relative to its parent, just * query the current clock phase, or just assume it's in phase. */ - if (clk->ops->get_phase) - clk->phase = clk->ops->get_phase(clk->hw); + if (core->ops->get_phase) + core->phase = core->ops->get_phase(core->hw); else - clk->phase = 0; + core->phase = 0; /* * Set clk's rate. The preferred method is to use .recalc_rate. For @@ -2396,14 +2363,14 @@ static int __clk_init(struct device *dev, struct clk *clk_user) * parent's rate. If a clock doesn't have a parent (or is orphaned) * then rate is set to zero. */ - if (clk->ops->recalc_rate) - rate = clk->ops->recalc_rate(clk->hw, - clk_core_get_rate_nolock(clk->parent)); - else if (clk->parent) - rate = clk->parent->rate; + if (core->ops->recalc_rate) + rate = core->ops->recalc_rate(core->hw, + clk_core_get_rate_nolock(core->parent)); + else if (core->parent) + rate = core->parent->rate; else rate = 0; - clk->rate = clk->req_rate = rate; + core->rate = core->req_rate = rate; /* * walk the list of orphan clocks and reparent any that are children of @@ -2412,14 +2379,14 @@ static int __clk_init(struct device *dev, struct clk *clk_user) hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) { if (orphan->num_parents && orphan->ops->get_parent) { i = orphan->ops->get_parent(orphan->hw); - if (!strcmp(clk->name, orphan->parent_names[i])) - clk_core_reparent(orphan, clk); + if (!strcmp(core->name, orphan->parent_names[i])) + clk_core_reparent(orphan, core); continue; } for (i = 0; i < orphan->num_parents; i++) - if (!strcmp(clk->name, orphan->parent_names[i])) { - clk_core_reparent(orphan, clk); + if (!strcmp(core->name, orphan->parent_names[i])) { + clk_core_reparent(orphan, core); break; } } @@ -2432,15 +2399,15 @@ static int __clk_init(struct device *dev, struct clk *clk_user) * Please consider other ways of solving initialization problems before * using this callback, as its use is discouraged. */ - if (clk->ops->init) - clk->ops->init(clk->hw); + if (core->ops->init) + core->ops->init(core->hw); - kref_init(&clk->ref); + kref_init(&core->ref); out: clk_prepare_unlock(); if (!ret) - clk_debug_register(clk); + clk_debug_register(core); return ret; } @@ -2486,63 +2453,58 @@ void __clk_free_clk(struct clk *clk) * * clk_register is the primary interface for populating the clock tree with new * clock nodes. 
It returns a pointer to the newly allocated struct clk which - * cannot be dereferenced by driver code but may be used in conjuction with the + * cannot be dereferenced by driver code but may be used in conjunction with the * rest of the clock API. In the event of an error clk_register will return an * error code; drivers must test for an error code after calling clk_register. */ struct clk *clk_register(struct device *dev, struct clk_hw *hw) { int i, ret; - struct clk_core *clk; + struct clk_core *core; - clk = kzalloc(sizeof(*clk), GFP_KERNEL); - if (!clk) { - pr_err("%s: could not allocate clk\n", __func__); + core = kzalloc(sizeof(*core), GFP_KERNEL); + if (!core) { ret = -ENOMEM; goto fail_out; } - clk->name = kstrdup_const(hw->init->name, GFP_KERNEL); - if (!clk->name) { - pr_err("%s: could not allocate clk->name\n", __func__); + core->name = kstrdup_const(hw->init->name, GFP_KERNEL); + if (!core->name) { ret = -ENOMEM; goto fail_name; } - clk->ops = hw->init->ops; + core->ops = hw->init->ops; if (dev && dev->driver) - clk->owner = dev->driver->owner; - clk->hw = hw; - clk->flags = hw->init->flags; - clk->num_parents = hw->init->num_parents; - hw->core = clk; + core->owner = dev->driver->owner; + core->hw = hw; + core->flags = hw->init->flags; + core->num_parents = hw->init->num_parents; + hw->core = core; /* allocate local copy in case parent_names is __initdata */ - clk->parent_names = kcalloc(clk->num_parents, sizeof(char *), + core->parent_names = kcalloc(core->num_parents, sizeof(char *), GFP_KERNEL); - if (!clk->parent_names) { - pr_err("%s: could not allocate clk->parent_names\n", __func__); + if (!core->parent_names) { ret = -ENOMEM; goto fail_parent_names; } /* copy each string name in case parent_names is __initdata */ - for (i = 0; i < clk->num_parents; i++) { - clk->parent_names[i] = kstrdup_const(hw->init->parent_names[i], + for (i = 0; i < core->num_parents; i++) { + core->parent_names[i] = kstrdup_const(hw->init->parent_names[i], GFP_KERNEL); - if (!clk->parent_names[i]) { - pr_err("%s: could not copy parent_names\n", __func__); + if (!core->parent_names[i]) { ret = -ENOMEM; goto fail_parent_names_copy; } } - INIT_HLIST_HEAD(&clk->clks); + INIT_HLIST_HEAD(&core->clks); hw->clk = __clk_create_clk(hw, NULL, NULL); if (IS_ERR(hw->clk)) { - pr_err("%s: could not allocate per-user clk\n", __func__); ret = PTR_ERR(hw->clk); goto fail_parent_names_copy; } @@ -2556,35 +2518,32 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw) fail_parent_names_copy: while (--i >= 0) - kfree_const(clk->parent_names[i]); - kfree(clk->parent_names); + kfree_const(core->parent_names[i]); + kfree(core->parent_names); fail_parent_names: - kfree_const(clk->name); + kfree_const(core->name); fail_name: - kfree(clk); + kfree(core); fail_out: return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(clk_register); -/* - * Free memory allocated for a clock. - * Caller must hold prepare_lock. - */ +/* Free memory allocated for a clock. 
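/*
 * Editorial sketch, not part of the patch: a provider hands clk_register()
 * a clk_hw whose ->init points at a filled-in clk_init_data.  As the code
 * above shows, the name and parent names are copied, so the init data may
 * live on the stack.  All "foo_*" names are hypothetical; foo_div2_ops is
 * assumed to be a valid struct clk_ops defined elsewhere.
 */
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>

extern const struct clk_ops foo_div2_ops;

static const char *foo_div2_parents[] = { "osc24m" };

static struct clk *foo_div2_register(struct device *dev)
{
	struct clk_init_data init = {
		.name		= "foo_div2",
		.ops		= &foo_div2_ops,
		.parent_names	= foo_div2_parents,
		.num_parents	= ARRAY_SIZE(foo_div2_parents),
	};
	struct clk_hw *hw;
	struct clk *clk;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		return ERR_PTR(-ENOMEM);

	hw->init = &init;

	clk = clk_register(dev, hw);
	if (IS_ERR(clk))
		kfree(hw);

	return clk;
}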
*/ static void __clk_release(struct kref *ref) { - struct clk_core *clk = container_of(ref, struct clk_core, ref); - int i = clk->num_parents; + struct clk_core *core = container_of(ref, struct clk_core, ref); + int i = core->num_parents; lockdep_assert_held(&prepare_lock); - kfree(clk->parents); + kfree(core->parents); while (--i >= 0) - kfree_const(clk->parent_names[i]); + kfree_const(core->parent_names[i]); - kfree(clk->parent_names); - kfree_const(clk->name); - kfree(clk); + kfree(core->parent_names); + kfree_const(core->name); + kfree(core); } /* @@ -3068,6 +3027,27 @@ const char *of_clk_get_parent_name(struct device_node *np, int index) } EXPORT_SYMBOL_GPL(of_clk_get_parent_name); +/** + * of_clk_parent_fill() - Fill @parents with names of @np's parents and return + * number of parents + * @np: Device node pointer associated with clock provider + * @parents: pointer to char array that hold the parents' names + * @size: size of the @parents array + * + * Return: number of parents for the clock node. + */ +int of_clk_parent_fill(struct device_node *np, const char **parents, + unsigned int size) +{ + unsigned int i = 0; + + while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL) + i++; + + return i; +} +EXPORT_SYMBOL_GPL(of_clk_parent_fill); + struct clock_provider { of_clk_init_cb_t clk_init_cb; struct device_node *np; diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig new file mode 100644 index 000000000000..b4165ba75d9f --- /dev/null +++ b/drivers/clk/hisilicon/Kconfig @@ -0,0 +1,6 @@ +config COMMON_CLK_HI6220 + bool "Hi6220 Clock Driver" + depends on ARCH_HISI || COMPILE_TEST + default ARCH_HISI + help + Build the Hisilicon Hi6220 clock driver based on the common clock framework. diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile index 038c02f4d0e7..48f0116a032a 100644 --- a/drivers/clk/hisilicon/Makefile +++ b/drivers/clk/hisilicon/Makefile @@ -2,8 +2,9 @@ # Hisilicon Clock specific Makefile # -obj-y += clk.o clkgate-separated.o +obj-y += clk.o clkgate-separated.o clkdivider-hi6220.o obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o obj-$(CONFIG_ARCH_HIX5HD2) += clk-hix5hd2.o +obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c index 472dd2cb10b3..715d34a5ef9b 100644 --- a/drivers/clk/hisilicon/clk-hi3620.c +++ b/drivers/clk/hisilicon/clk-hi3620.c @@ -38,44 +38,44 @@ #include "clk.h" /* clock parent list */ -static const char *timer0_mux_p[] __initdata = { "osc32k", "timerclk01", }; -static const char *timer1_mux_p[] __initdata = { "osc32k", "timerclk01", }; -static const char *timer2_mux_p[] __initdata = { "osc32k", "timerclk23", }; -static const char *timer3_mux_p[] __initdata = { "osc32k", "timerclk23", }; -static const char *timer4_mux_p[] __initdata = { "osc32k", "timerclk45", }; -static const char *timer5_mux_p[] __initdata = { "osc32k", "timerclk45", }; -static const char *timer6_mux_p[] __initdata = { "osc32k", "timerclk67", }; -static const char *timer7_mux_p[] __initdata = { "osc32k", "timerclk67", }; -static const char *timer8_mux_p[] __initdata = { "osc32k", "timerclk89", }; -static const char *timer9_mux_p[] __initdata = { "osc32k", "timerclk89", }; -static const char *uart0_mux_p[] __initdata = { "osc26m", "pclk", }; -static const char *uart1_mux_p[] __initdata = { "osc26m", "pclk", }; -static const char *uart2_mux_p[] __initdata = { "osc26m", "pclk", }; -static const char *uart3_mux_p[] 
__initdata = { "osc26m", "pclk", }; -static const char *uart4_mux_p[] __initdata = { "osc26m", "pclk", }; -static const char *spi0_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", }; -static const char *spi1_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", }; -static const char *spi2_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", }; +static const char *const timer0_mux_p[] __initconst = { "osc32k", "timerclk01", }; +static const char *const timer1_mux_p[] __initconst = { "osc32k", "timerclk01", }; +static const char *const timer2_mux_p[] __initconst = { "osc32k", "timerclk23", }; +static const char *const timer3_mux_p[] __initconst = { "osc32k", "timerclk23", }; +static const char *const timer4_mux_p[] __initconst = { "osc32k", "timerclk45", }; +static const char *const timer5_mux_p[] __initconst = { "osc32k", "timerclk45", }; +static const char *const timer6_mux_p[] __initconst = { "osc32k", "timerclk67", }; +static const char *const timer7_mux_p[] __initconst = { "osc32k", "timerclk67", }; +static const char *const timer8_mux_p[] __initconst = { "osc32k", "timerclk89", }; +static const char *const timer9_mux_p[] __initconst = { "osc32k", "timerclk89", }; +static const char *const uart0_mux_p[] __initconst = { "osc26m", "pclk", }; +static const char *const uart1_mux_p[] __initconst = { "osc26m", "pclk", }; +static const char *const uart2_mux_p[] __initconst = { "osc26m", "pclk", }; +static const char *const uart3_mux_p[] __initconst = { "osc26m", "pclk", }; +static const char *const uart4_mux_p[] __initconst = { "osc26m", "pclk", }; +static const char *const spi0_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", }; +static const char *const spi1_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", }; +static const char *const spi2_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", }; /* share axi parent */ -static const char *saxi_mux_p[] __initdata = { "armpll3", "armpll2", }; -static const char *pwm0_mux_p[] __initdata = { "osc32k", "osc26m", }; -static const char *pwm1_mux_p[] __initdata = { "osc32k", "osc26m", }; -static const char *sd_mux_p[] __initdata = { "armpll2", "armpll3", }; -static const char *mmc1_mux_p[] __initdata = { "armpll2", "armpll3", }; -static const char *mmc1_mux2_p[] __initdata = { "osc26m", "mmc1_div", }; -static const char *g2d_mux_p[] __initdata = { "armpll2", "armpll3", }; -static const char *venc_mux_p[] __initdata = { "armpll2", "armpll3", }; -static const char *vdec_mux_p[] __initdata = { "armpll2", "armpll3", }; -static const char *vpp_mux_p[] __initdata = { "armpll2", "armpll3", }; -static const char *edc0_mux_p[] __initdata = { "armpll2", "armpll3", }; -static const char *ldi0_mux_p[] __initdata = { "armpll2", "armpll4", +static const char *const saxi_mux_p[] __initconst = { "armpll3", "armpll2", }; +static const char *const pwm0_mux_p[] __initconst = { "osc32k", "osc26m", }; +static const char *const pwm1_mux_p[] __initconst = { "osc32k", "osc26m", }; +static const char *const sd_mux_p[] __initconst = { "armpll2", "armpll3", }; +static const char *const mmc1_mux_p[] __initconst = { "armpll2", "armpll3", }; +static const char *const mmc1_mux2_p[] __initconst = { "osc26m", "mmc1_div", }; +static const char *const g2d_mux_p[] __initconst = { "armpll2", "armpll3", }; +static const char *const venc_mux_p[] __initconst = { "armpll2", "armpll3", }; +static const char *const vdec_mux_p[] __initconst = { "armpll2", "armpll3", }; +static const char *const vpp_mux_p[] __initconst = { "armpll2", "armpll3", }; +static const char *const edc0_mux_p[] __initconst = { 
"armpll2", "armpll3", }; +static const char *const ldi0_mux_p[] __initconst = { "armpll2", "armpll4", "armpll3", "armpll5", }; -static const char *edc1_mux_p[] __initdata = { "armpll2", "armpll3", }; -static const char *ldi1_mux_p[] __initdata = { "armpll2", "armpll4", +static const char *const edc1_mux_p[] __initconst = { "armpll2", "armpll3", }; +static const char *const ldi1_mux_p[] __initconst = { "armpll2", "armpll4", "armpll3", "armpll5", }; -static const char *rclk_hsic_p[] __initdata = { "armpll3", "armpll2", }; -static const char *mmc2_mux_p[] __initdata = { "armpll2", "armpll3", }; -static const char *mmc3_mux_p[] __initdata = { "armpll2", "armpll3", }; +static const char *const rclk_hsic_p[] __initconst = { "armpll3", "armpll2", }; +static const char *const mmc2_mux_p[] __initconst = { "armpll2", "armpll3", }; +static const char *const mmc3_mux_p[] __initconst = { "armpll2", "armpll3", }; /* fixed rate clocks */ diff --git a/drivers/clk/hisilicon/clk-hi6220.c b/drivers/clk/hisilicon/clk-hi6220.c new file mode 100644 index 000000000000..4563343b6420 --- /dev/null +++ b/drivers/clk/hisilicon/clk-hi6220.c @@ -0,0 +1,284 @@ +/* + * Hisilicon Hi6220 clock driver + * + * Copyright (c) 2015 Hisilicon Limited. + * + * Author: Bintian Wang <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/clk-provider.h> +#include <linux/clkdev.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/slab.h> + +#include <dt-bindings/clock/hi6220-clock.h> + +#include "clk.h" + + +/* clocks in AO (always on) controller */ +static struct hisi_fixed_rate_clock hi6220_fixed_rate_clks[] __initdata = { + { HI6220_REF32K, "ref32k", NULL, CLK_IS_ROOT, 32764, }, + { HI6220_CLK_TCXO, "clk_tcxo", NULL, CLK_IS_ROOT, 19200000, }, + { HI6220_MMC1_PAD, "mmc1_pad", NULL, CLK_IS_ROOT, 100000000, }, + { HI6220_MMC2_PAD, "mmc2_pad", NULL, CLK_IS_ROOT, 100000000, }, + { HI6220_MMC0_PAD, "mmc0_pad", NULL, CLK_IS_ROOT, 200000000, }, + { HI6220_PLL_BBP, "bbppll0", NULL, CLK_IS_ROOT, 245760000, }, + { HI6220_PLL_GPU, "gpupll", NULL, CLK_IS_ROOT, 1000000000,}, + { HI6220_PLL1_DDR, "ddrpll1", NULL, CLK_IS_ROOT, 1066000000,}, + { HI6220_PLL_SYS, "syspll", NULL, CLK_IS_ROOT, 1200000000,}, + { HI6220_PLL_SYS_MEDIA, "media_syspll", NULL, CLK_IS_ROOT, 1200000000,}, + { HI6220_DDR_SRC, "ddr_sel_src", NULL, CLK_IS_ROOT, 1200000000,}, + { HI6220_PLL_MEDIA, "media_pll", NULL, CLK_IS_ROOT, 1440000000,}, + { HI6220_PLL_DDR, "ddrpll0", NULL, CLK_IS_ROOT, 1600000000,}, +}; + +static struct hisi_fixed_factor_clock hi6220_fixed_factor_clks[] __initdata = { + { HI6220_300M, "clk_300m", "syspll", 1, 4, 0, }, + { HI6220_150M, "clk_150m", "clk_300m", 1, 2, 0, }, + { HI6220_PICOPHY_SRC, "picophy_src", "clk_150m", 1, 4, 0, }, + { HI6220_MMC0_SRC_SEL, "mmc0srcsel", "mmc0_sel", 1, 8, 0, }, + { HI6220_MMC1_SRC_SEL, "mmc1srcsel", "mmc1_sel", 1, 8, 0, }, + { HI6220_MMC2_SRC_SEL, "mmc2srcsel", "mmc2_sel", 1, 8, 0, }, + { HI6220_VPU_CODEC, "vpucodec", "codec_jpeg_aclk", 1, 2, 0, }, + { HI6220_MMC0_SMP, "mmc0_sample", "mmc0_sel", 1, 8, 0, }, + { HI6220_MMC1_SMP, "mmc1_sample", "mmc1_sel", 1, 8, 0, }, + { HI6220_MMC2_SMP, "mmc2_sample", "mmc2_sel", 1, 8, 0, }, +}; + +static struct hisi_gate_clock hi6220_separated_gate_clks_ao[] __initdata = { + { HI6220_WDT0_PCLK, "wdt0_pclk", 
"clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 12, 0, }, + { HI6220_WDT1_PCLK, "wdt1_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 13, 0, }, + { HI6220_WDT2_PCLK, "wdt2_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 14, 0, }, + { HI6220_TIMER0_PCLK, "timer0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 15, 0, }, + { HI6220_TIMER1_PCLK, "timer1_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 16, 0, }, + { HI6220_TIMER2_PCLK, "timer2_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 17, 0, }, + { HI6220_TIMER3_PCLK, "timer3_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 18, 0, }, + { HI6220_TIMER4_PCLK, "timer4_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 19, 0, }, + { HI6220_TIMER5_PCLK, "timer5_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 20, 0, }, + { HI6220_TIMER6_PCLK, "timer6_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 21, 0, }, + { HI6220_TIMER7_PCLK, "timer7_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 22, 0, }, + { HI6220_TIMER8_PCLK, "timer8_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 23, 0, }, + { HI6220_UART0_PCLK, "uart0_pclk", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x630, 24, 0, }, +}; + +static void __init hi6220_clk_ao_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data_ao; + + clk_data_ao = hisi_clk_init(np, HI6220_AO_NR_CLKS); + if (!clk_data_ao) + return; + + hisi_clk_register_fixed_rate(hi6220_fixed_rate_clks, + ARRAY_SIZE(hi6220_fixed_rate_clks), clk_data_ao); + + hisi_clk_register_fixed_factor(hi6220_fixed_factor_clks, + ARRAY_SIZE(hi6220_fixed_factor_clks), clk_data_ao); + + hisi_clk_register_gate_sep(hi6220_separated_gate_clks_ao, + ARRAY_SIZE(hi6220_separated_gate_clks_ao), clk_data_ao); +} +CLK_OF_DECLARE(hi6220_clk_ao, "hisilicon,hi6220-aoctrl", hi6220_clk_ao_init); + + +/* clocks in sysctrl */ +static const char *mmc0_mux0_p[] __initdata = { "pll_ddr_gate", "syspll", }; +static const char *mmc0_mux1_p[] __initdata = { "mmc0_mux0", "pll_media_gate", }; +static const char *mmc0_src_p[] __initdata = { "mmc0srcsel", "mmc0_div", }; +static const char *mmc1_mux0_p[] __initdata = { "pll_ddr_gate", "syspll", }; +static const char *mmc1_mux1_p[] __initdata = { "mmc1_mux0", "pll_media_gate", }; +static const char *mmc1_src_p[] __initdata = { "mmc1srcsel", "mmc1_div", }; +static const char *mmc2_mux0_p[] __initdata = { "pll_ddr_gate", "syspll", }; +static const char *mmc2_mux1_p[] __initdata = { "mmc2_mux0", "pll_media_gate", }; +static const char *mmc2_src_p[] __initdata = { "mmc2srcsel", "mmc2_div", }; +static const char *mmc0_sample_in[] __initdata = { "mmc0_sample", "mmc0_pad", }; +static const char *mmc1_sample_in[] __initdata = { "mmc1_sample", "mmc1_pad", }; +static const char *mmc2_sample_in[] __initdata = { "mmc2_sample", "mmc2_pad", }; +static const char *uart1_src[] __initdata = { "clk_tcxo", "clk_150m", }; +static const char *uart2_src[] __initdata = { "clk_tcxo", "clk_150m", }; +static const char *uart3_src[] __initdata = { "clk_tcxo", "clk_150m", }; +static const char *uart4_src[] __initdata = { "clk_tcxo", "clk_150m", }; +static const char *hifi_src[] __initdata = { "syspll", "pll_media_gate", }; + +static struct hisi_gate_clock hi6220_separated_gate_clks_sys[] __initdata = { + { HI6220_MMC0_CLK, "mmc0_clk", "mmc0_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 0, 0, }, + { HI6220_MMC0_CIUCLK, "mmc0_ciuclk", 
"mmc0_smp_in", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 0, 0, }, + { HI6220_MMC1_CLK, "mmc1_clk", "mmc1_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 1, 0, }, + { HI6220_MMC1_CIUCLK, "mmc1_ciuclk", "mmc1_smp_in", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 1, 0, }, + { HI6220_MMC2_CLK, "mmc2_clk", "mmc2_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 2, 0, }, + { HI6220_MMC2_CIUCLK, "mmc2_ciuclk", "mmc2_smp_in", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 2, 0, }, + { HI6220_USBOTG_HCLK, "usbotg_hclk", "clk_bus", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 4, 0, }, + { HI6220_CLK_PICOPHY, "clk_picophy", "cs_dapb", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x200, 5, 0, }, + { HI6220_HIFI, "hifi_clk", "hifi_div", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x210, 0, 0, }, + { HI6220_DACODEC_PCLK, "dacodec_pclk", "clk_bus", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x210, 5, 0, }, + { HI6220_EDMAC_ACLK, "edmac_aclk", "clk_bus", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x220, 2, 0, }, + { HI6220_CS_ATB, "cs_atb", "cs_atb_div", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 0, 0, }, + { HI6220_I2C0_CLK, "i2c0_clk", "clk_150m", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 1, 0, }, + { HI6220_I2C1_CLK, "i2c1_clk", "clk_150m", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 2, 0, }, + { HI6220_I2C2_CLK, "i2c2_clk", "clk_150m", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 3, 0, }, + { HI6220_I2C3_CLK, "i2c3_clk", "clk_150m", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 4, 0, }, + { HI6220_UART1_PCLK, "uart1_pclk", "uart1_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 5, 0, }, + { HI6220_UART2_PCLK, "uart2_pclk", "uart2_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 6, 0, }, + { HI6220_UART3_PCLK, "uart3_pclk", "uart3_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 7, 0, }, + { HI6220_UART4_PCLK, "uart4_pclk", "uart4_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 8, 0, }, + { HI6220_SPI_CLK, "spi_clk", "clk_150m", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 9, 0, }, + { HI6220_TSENSOR_CLK, "tsensor_clk", "clk_bus", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x230, 12, 0, }, + { HI6220_MMU_CLK, "mmu_clk", "ddrc_axi1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x240, 11, 0, }, + { HI6220_HIFI_SEL, "hifi_sel", "hifi_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 0, 0, }, + { HI6220_MMC0_SYSPLL, "mmc0_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 1, 0, }, + { HI6220_MMC1_SYSPLL, "mmc1_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 2, 0, }, + { HI6220_MMC2_SYSPLL, "mmc2_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 3, 0, }, + { HI6220_MMC0_SEL, "mmc0_sel", "mmc0_mux1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 6, 0, }, + { HI6220_MMC1_SEL, "mmc1_sel", "mmc1_mux1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 7, 0, }, + { HI6220_BBPPLL_SEL, "bbppll_sel", "pll0_bbp_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 9, 0, }, + { HI6220_MEDIA_PLL_SRC, "media_pll_src", "pll_media_gate", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 10, 0, }, + { HI6220_MMC2_SEL, "mmc2_sel", "mmc2_mux1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 11, 0, }, + { HI6220_CS_ATB_SYSPLL, "cs_atb_syspll", "syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x270, 12, 0, }, +}; + +static struct hisi_mux_clock hi6220_mux_clks_sys[] __initdata = { + { HI6220_MMC0_SRC, "mmc0_src", mmc0_src_p, ARRAY_SIZE(mmc0_src_p), CLK_SET_RATE_PARENT, 0x4, 0, 1, 0, }, + { HI6220_MMC0_SMP_IN, "mmc0_smp_in", mmc0_sample_in, ARRAY_SIZE(mmc0_sample_in), 
CLK_SET_RATE_PARENT, 0x4, 0, 1, 0, }, + { HI6220_MMC1_SRC, "mmc1_src", mmc1_src_p, ARRAY_SIZE(mmc1_src_p), CLK_SET_RATE_PARENT, 0x4, 2, 1, 0, }, + { HI6220_MMC1_SMP_IN, "mmc1_smp_in", mmc1_sample_in, ARRAY_SIZE(mmc1_sample_in), CLK_SET_RATE_PARENT, 0x4, 2, 1, 0, }, + { HI6220_MMC2_SRC, "mmc2_src", mmc2_src_p, ARRAY_SIZE(mmc2_src_p), CLK_SET_RATE_PARENT, 0x4, 4, 1, 0, }, + { HI6220_MMC2_SMP_IN, "mmc2_smp_in", mmc2_sample_in, ARRAY_SIZE(mmc2_sample_in), CLK_SET_RATE_PARENT, 0x4, 4, 1, 0, }, + { HI6220_HIFI_SRC, "hifi_src", hifi_src, ARRAY_SIZE(hifi_src), CLK_SET_RATE_PARENT, 0x400, 0, 1, CLK_MUX_HIWORD_MASK,}, + { HI6220_UART1_SRC, "uart1_src", uart1_src, ARRAY_SIZE(uart1_src), CLK_SET_RATE_PARENT, 0x400, 1, 1, CLK_MUX_HIWORD_MASK,}, + { HI6220_UART2_SRC, "uart2_src", uart2_src, ARRAY_SIZE(uart2_src), CLK_SET_RATE_PARENT, 0x400, 2, 1, CLK_MUX_HIWORD_MASK,}, + { HI6220_UART3_SRC, "uart3_src", uart3_src, ARRAY_SIZE(uart3_src), CLK_SET_RATE_PARENT, 0x400, 3, 1, CLK_MUX_HIWORD_MASK,}, + { HI6220_UART4_SRC, "uart4_src", uart4_src, ARRAY_SIZE(uart4_src), CLK_SET_RATE_PARENT, 0x400, 4, 1, CLK_MUX_HIWORD_MASK,}, + { HI6220_MMC0_MUX0, "mmc0_mux0", mmc0_mux0_p, ARRAY_SIZE(mmc0_mux0_p), CLK_SET_RATE_PARENT, 0x400, 5, 1, CLK_MUX_HIWORD_MASK,}, + { HI6220_MMC1_MUX0, "mmc1_mux0", mmc1_mux0_p, ARRAY_SIZE(mmc1_mux0_p), CLK_SET_RATE_PARENT, 0x400, 11, 1, CLK_MUX_HIWORD_MASK,}, + { HI6220_MMC2_MUX0, "mmc2_mux0", mmc2_mux0_p, ARRAY_SIZE(mmc2_mux0_p), CLK_SET_RATE_PARENT, 0x400, 12, 1, CLK_MUX_HIWORD_MASK,}, + { HI6220_MMC0_MUX1, "mmc0_mux1", mmc0_mux1_p, ARRAY_SIZE(mmc0_mux1_p), CLK_SET_RATE_PARENT, 0x400, 13, 1, CLK_MUX_HIWORD_MASK,}, + { HI6220_MMC1_MUX1, "mmc1_mux1", mmc1_mux1_p, ARRAY_SIZE(mmc1_mux1_p), CLK_SET_RATE_PARENT, 0x400, 14, 1, CLK_MUX_HIWORD_MASK,}, + { HI6220_MMC2_MUX1, "mmc2_mux1", mmc2_mux1_p, ARRAY_SIZE(mmc2_mux1_p), CLK_SET_RATE_PARENT, 0x400, 15, 1, CLK_MUX_HIWORD_MASK,}, +}; + +static struct hi6220_divider_clock hi6220_div_clks_sys[] __initdata = { + { HI6220_CLK_BUS, "clk_bus", "clk_300m", CLK_SET_RATE_PARENT, 0x490, 0, 4, 7, }, + { HI6220_MMC0_DIV, "mmc0_div", "mmc0_syspll", CLK_SET_RATE_PARENT, 0x494, 0, 6, 7, }, + { HI6220_MMC1_DIV, "mmc1_div", "mmc1_syspll", CLK_SET_RATE_PARENT, 0x498, 0, 6, 7, }, + { HI6220_MMC2_DIV, "mmc2_div", "mmc2_syspll", CLK_SET_RATE_PARENT, 0x49c, 0, 6, 7, }, + { HI6220_HIFI_DIV, "hifi_div", "hifi_sel", CLK_SET_RATE_PARENT, 0x4a0, 0, 4, 7, }, + { HI6220_BBPPLL0_DIV, "bbppll0_div", "bbppll_sel", CLK_SET_RATE_PARENT, 0x4a0, 8, 6, 15,}, + { HI6220_CS_DAPB, "cs_dapb", "picophy_src", CLK_SET_RATE_PARENT, 0x4a0, 24, 2, 31,}, + { HI6220_CS_ATB_DIV, "cs_atb_div", "cs_atb_syspll", CLK_SET_RATE_PARENT, 0x4a4, 0, 4, 7, }, +}; + +static void __init hi6220_clk_sys_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data; + + clk_data = hisi_clk_init(np, HI6220_SYS_NR_CLKS); + if (!clk_data) + return; + + hisi_clk_register_gate_sep(hi6220_separated_gate_clks_sys, + ARRAY_SIZE(hi6220_separated_gate_clks_sys), clk_data); + + hisi_clk_register_mux(hi6220_mux_clks_sys, + ARRAY_SIZE(hi6220_mux_clks_sys), clk_data); + + hi6220_clk_register_divider(hi6220_div_clks_sys, + ARRAY_SIZE(hi6220_div_clks_sys), clk_data); +} +CLK_OF_DECLARE(hi6220_clk_sys, "hisilicon,hi6220-sysctrl", hi6220_clk_sys_init); + + +/* clocks in media controller */ +static const char *clk_1000_1200_src[] __initdata = { "pll_gpu_gate", "media_syspll_src", }; +static const char *clk_1440_1200_src[] __initdata = { "media_syspll_src", "media_pll_src", }; +static const char *clk_1000_1440_src[] __initdata 
= { "pll_gpu_gate", "media_pll_src", }; + +static struct hisi_gate_clock hi6220_separated_gate_clks_media[] __initdata = { + { HI6220_DSI_PCLK, "dsi_pclk", "vpucodec", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 0, 0, }, + { HI6220_G3D_PCLK, "g3d_pclk", "vpucodec", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 1, 0, }, + { HI6220_ACLK_CODEC_VPU, "aclk_codec_vpu", "ade_core_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 3, 0, }, + { HI6220_ISP_SCLK, "isp_sclk", "isp_sclk_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 5, 0, }, + { HI6220_ADE_CORE, "ade_core", "ade_core_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 6, 0, }, + { HI6220_MED_MMU, "media_mmu", "mmu_clk", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 8, 0, }, + { HI6220_CFG_CSI4PHY, "cfg_csi4phy", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 9, 0, }, + { HI6220_CFG_CSI2PHY, "cfg_csi2phy", "clk_tcxo", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 10, 0, }, + { HI6220_ISP_SCLK_GATE, "isp_sclk_gate", "media_pll_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 11, 0, }, + { HI6220_ISP_SCLK_GATE1, "isp_sclk_gate1", "media_pll_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 12, 0, }, + { HI6220_ADE_CORE_GATE, "ade_core_gate", "media_pll_src", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 14, 0, }, + { HI6220_CODEC_VPU_GATE, "codec_vpu_gate", "clk_1000_1440", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 15, 0, }, + { HI6220_MED_SYSPLL, "media_syspll_src", "media_syspll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x520, 17, 0, }, +}; + +static struct hisi_mux_clock hi6220_mux_clks_media[] __initdata = { + { HI6220_1440_1200, "clk_1440_1200", clk_1440_1200_src, ARRAY_SIZE(clk_1440_1200_src), CLK_SET_RATE_PARENT, 0x51c, 0, 1, 0, }, + { HI6220_1000_1200, "clk_1000_1200", clk_1000_1200_src, ARRAY_SIZE(clk_1000_1200_src), CLK_SET_RATE_PARENT, 0x51c, 1, 1, 0, }, + { HI6220_1000_1440, "clk_1000_1440", clk_1000_1440_src, ARRAY_SIZE(clk_1000_1440_src), CLK_SET_RATE_PARENT, 0x51c, 6, 1, 0, }, +}; + +static struct hi6220_divider_clock hi6220_div_clks_media[] __initdata = { + { HI6220_CODEC_JPEG, "codec_jpeg_aclk", "media_pll_src", CLK_SET_RATE_PARENT, 0xcbc, 0, 4, 23, }, + { HI6220_ISP_SCLK_SRC, "isp_sclk_src", "isp_sclk_gate", CLK_SET_RATE_PARENT, 0xcbc, 8, 4, 15, }, + { HI6220_ISP_SCLK1, "isp_sclk1", "isp_sclk_gate1", CLK_SET_RATE_PARENT, 0xcbc, 24, 4, 31, }, + { HI6220_ADE_CORE_SRC, "ade_core_src", "ade_core_gate", CLK_SET_RATE_PARENT, 0xcc0, 16, 3, 23, }, + { HI6220_ADE_PIX_SRC, "ade_pix_src", "clk_1440_1200", CLK_SET_RATE_PARENT, 0xcc0, 24, 6, 31, }, + { HI6220_G3D_CLK, "g3d_clk", "clk_1000_1200", CLK_SET_RATE_PARENT, 0xcc4, 8, 4, 15, }, + { HI6220_CODEC_VPU_SRC, "codec_vpu_src", "codec_vpu_gate", CLK_SET_RATE_PARENT, 0xcc4, 24, 6, 31, }, +}; + +static void __init hi6220_clk_media_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data; + + clk_data = hisi_clk_init(np, HI6220_MEDIA_NR_CLKS); + if (!clk_data) + return; + + hisi_clk_register_gate_sep(hi6220_separated_gate_clks_media, + ARRAY_SIZE(hi6220_separated_gate_clks_media), clk_data); + + hisi_clk_register_mux(hi6220_mux_clks_media, + ARRAY_SIZE(hi6220_mux_clks_media), clk_data); + + hi6220_clk_register_divider(hi6220_div_clks_media, + ARRAY_SIZE(hi6220_div_clks_media), clk_data); +} +CLK_OF_DECLARE(hi6220_clk_media, "hisilicon,hi6220-mediactrl", hi6220_clk_media_init); + + +/* clocks in pmctrl */ +static struct hisi_gate_clock hi6220_gate_clks_power[] __initdata = { + { HI6220_PLL_GPU_GATE, "pll_gpu_gate", "gpupll", 
CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x8, 0, 0, }, + { HI6220_PLL1_DDR_GATE, "pll1_ddr_gate", "ddrpll1", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x10, 0, 0, }, + { HI6220_PLL_DDR_GATE, "pll_ddr_gate", "ddrpll0", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x18, 0, 0, }, + { HI6220_PLL_MEDIA_GATE, "pll_media_gate", "media_pll", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x38, 0, 0, }, + { HI6220_PLL0_BBP_GATE, "pll0_bbp_gate", "bbppll0", CLK_SET_RATE_PARENT|CLK_IGNORE_UNUSED, 0x48, 0, 0, }, +}; + +static struct hi6220_divider_clock hi6220_div_clks_power[] __initdata = { + { HI6220_DDRC_SRC, "ddrc_src", "ddr_sel_src", CLK_SET_RATE_PARENT, 0x5a8, 0, 4, 0, }, + { HI6220_DDRC_AXI1, "ddrc_axi1", "ddrc_src", CLK_SET_RATE_PARENT, 0x5a8, 8, 2, 0, }, +}; + +static void __init hi6220_clk_power_init(struct device_node *np) +{ + struct hisi_clock_data *clk_data; + + clk_data = hisi_clk_init(np, HI6220_POWER_NR_CLKS); + if (!clk_data) + return; + + hisi_clk_register_gate(hi6220_gate_clks_power, + ARRAY_SIZE(hi6220_gate_clks_power), clk_data); + + hi6220_clk_register_divider(hi6220_div_clks_power, + ARRAY_SIZE(hi6220_div_clks_power), clk_data); +} +CLK_OF_DECLARE(hi6220_clk_power, "hisilicon,hi6220-pmctrl", hi6220_clk_power_init); diff --git a/drivers/clk/hisilicon/clk-hix5hd2.c b/drivers/clk/hisilicon/clk-hix5hd2.c index f1d239435826..0aaf29da8491 100644 --- a/drivers/clk/hisilicon/clk-hix5hd2.c +++ b/drivers/clk/hisilicon/clk-hix5hd2.c @@ -46,15 +46,15 @@ static struct hisi_fixed_rate_clock hix5hd2_fixed_rate_clks[] __initdata = { { HIX5HD2_FIXED_83M, "83m", NULL, CLK_IS_ROOT, 83333333, }, }; -static const char *sfc_mux_p[] __initdata = { +static const char *const sfc_mux_p[] __initconst = { "24m", "150m", "200m", "100m", "75m", }; static u32 sfc_mux_table[] = {0, 4, 5, 6, 7}; -static const char *sdio_mux_p[] __initdata = { +static const char *const sdio_mux_p[] __initconst = { "75m", "100m", "50m", "15m", }; static u32 sdio_mux_table[] = {0, 1, 2, 3}; -static const char *fephy_mux_p[] __initdata = { "25m", "125m"}; +static const char *const fephy_mux_p[] __initconst = { "25m", "125m"}; static u32 fephy_mux_table[] = {0, 1}; @@ -252,8 +252,9 @@ static struct clk_ops clk_complex_ops = { .disable = clk_complex_disable, }; -void __init hix5hd2_clk_register_complex(struct hix5hd2_complex_clock *clks, - int nums, struct hisi_clock_data *data) +static void __init +hix5hd2_clk_register_complex(struct hix5hd2_complex_clock *clks, int nums, + struct hisi_clock_data *data) { void __iomem *base = data->base; int i; diff --git a/drivers/clk/hisilicon/clk.c b/drivers/clk/hisilicon/clk.c index a078e84f7b05..c90a89739b03 100644 --- a/drivers/clk/hisilicon/clk.c +++ b/drivers/clk/hisilicon/clk.c @@ -232,3 +232,32 @@ void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *clks, data->clk_data.clks[clks[i].id] = clk; } } + +void __init hi6220_clk_register_divider(struct hi6220_divider_clock *clks, + int nums, struct hisi_clock_data *data) +{ + struct clk *clk; + void __iomem *base = data->base; + int i; + + for (i = 0; i < nums; i++) { + clk = hi6220_register_clkdiv(NULL, clks[i].name, + clks[i].parent_name, + clks[i].flags, + base + clks[i].offset, + clks[i].shift, + clks[i].width, + clks[i].mask_bit, + &hisi_clk_lock); + if (IS_ERR(clk)) { + pr_err("%s: failed to register clock %s\n", + __func__, clks[i].name); + continue; + } + + if (clks[i].alias) + clk_register_clkdev(clk, clks[i].alias, NULL); + + data->clk_data.clks[clks[i].id] = clk; + } +} diff --git a/drivers/clk/hisilicon/clk.h b/drivers/clk/hisilicon/clk.h 
index 31083ffc0650..b56fbc1c5f27 100644 --- a/drivers/clk/hisilicon/clk.h +++ b/drivers/clk/hisilicon/clk.h @@ -55,7 +55,7 @@ struct hisi_fixed_factor_clock { struct hisi_mux_clock { unsigned int id; const char *name; - const char **parent_names; + const char *const *parent_names; u8 num_parents; unsigned long flags; unsigned long offset; @@ -79,6 +79,18 @@ struct hisi_divider_clock { const char *alias; }; +struct hi6220_divider_clock { + unsigned int id; + const char *name; + const char *parent_name; + unsigned long flags; + unsigned long offset; + u8 shift; + u8 width; + u32 mask_bit; + const char *alias; +}; + struct hisi_gate_clock { unsigned int id; const char *name; @@ -94,18 +106,23 @@ struct clk *hisi_register_clkgate_sep(struct device *, const char *, const char *, unsigned long, void __iomem *, u8, u8, spinlock_t *); +struct clk *hi6220_register_clkdiv(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, void __iomem *reg, + u8 shift, u8 width, u32 mask_bit, spinlock_t *lock); -struct hisi_clock_data __init *hisi_clk_init(struct device_node *, int); -void __init hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *, - int, struct hisi_clock_data *); -void __init hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *, - int, struct hisi_clock_data *); -void __init hisi_clk_register_mux(struct hisi_mux_clock *, int, +struct hisi_clock_data *hisi_clk_init(struct device_node *, int); +void hisi_clk_register_fixed_rate(struct hisi_fixed_rate_clock *, + int, struct hisi_clock_data *); +void hisi_clk_register_fixed_factor(struct hisi_fixed_factor_clock *, + int, struct hisi_clock_data *); +void hisi_clk_register_mux(struct hisi_mux_clock *, int, struct hisi_clock_data *); -void __init hisi_clk_register_divider(struct hisi_divider_clock *, +void hisi_clk_register_divider(struct hisi_divider_clock *, + int, struct hisi_clock_data *); +void hisi_clk_register_gate(struct hisi_gate_clock *, + int, struct hisi_clock_data *); +void hisi_clk_register_gate_sep(struct hisi_gate_clock *, + int, struct hisi_clock_data *); +void hi6220_clk_register_divider(struct hi6220_divider_clock *, int, struct hisi_clock_data *); -void __init hisi_clk_register_gate(struct hisi_gate_clock *, - int, struct hisi_clock_data *); -void __init hisi_clk_register_gate_sep(struct hisi_gate_clock *, - int, struct hisi_clock_data *); #endif /* __HISI_CLK_H */ diff --git a/drivers/clk/hisilicon/clkdivider-hi6220.c b/drivers/clk/hisilicon/clkdivider-hi6220.c new file mode 100644 index 000000000000..113eee8ed23a --- /dev/null +++ b/drivers/clk/hisilicon/clkdivider-hi6220.c @@ -0,0 +1,156 @@ +/* + * Hisilicon hi6220 SoC divider clock driver + * + * Copyright (c) 2015 Hisilicon Limited. + * + * Author: Bintian Wang <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
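/*
 * Editorial sketch, not part of the patch: one invented table entry for
 * the hi6220_divider_clock structure declared above.  It describes a
 * 4-bit divider field at bits [3:0] of register offset 0x4b0, with bit 7
 * as the mask_bit that the divider implementation (which follows) sets
 * whenever a new rate is written.  The id 0, "foo_div" and "foo_sel" are
 * made up for illustration.
 */
#include <linux/clk-provider.h>
#include <linux/init.h>

#include "clk.h"

static struct hi6220_divider_clock foo_div_clks[] __initdata = {
	/* id, name, parent, flags, offset, shift, width, mask_bit */
	{ 0, "foo_div", "foo_sel", CLK_SET_RATE_PARENT, 0x4b0, 0, 4, 7, },
};

/*
 * Registered from the SoC clock init code with something like:
 *	hi6220_clk_register_divider(foo_div_clks,
 *				    ARRAY_SIZE(foo_div_clks), clk_data);
 */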
+ * + */ + +#include <linux/kernel.h> +#include <linux/clk-provider.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/err.h> +#include <linux/spinlock.h> + +#define div_mask(width) ((1 << (width)) - 1) + +/** + * struct hi6220_clk_divider - divider clock for hi6220 + * + * @hw: handle between common and hardware-specific interfaces + * @reg: register containing divider + * @shift: shift to the divider bit field + * @width: width of the divider bit field + * @mask: mask for setting divider rate + * @table: the div table that the divider supports + * @lock: register lock + */ +struct hi6220_clk_divider { + struct clk_hw hw; + void __iomem *reg; + u8 shift; + u8 width; + u32 mask; + const struct clk_div_table *table; + spinlock_t *lock; +}; + +#define to_hi6220_clk_divider(_hw) \ + container_of(_hw, struct hi6220_clk_divider, hw) + +static unsigned long hi6220_clkdiv_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + unsigned int val; + struct hi6220_clk_divider *dclk = to_hi6220_clk_divider(hw); + + val = readl_relaxed(dclk->reg) >> dclk->shift; + val &= div_mask(dclk->width); + + return divider_recalc_rate(hw, parent_rate, val, dclk->table, + CLK_DIVIDER_ROUND_CLOSEST); +} + +static long hi6220_clkdiv_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct hi6220_clk_divider *dclk = to_hi6220_clk_divider(hw); + + return divider_round_rate(hw, rate, prate, dclk->table, + dclk->width, CLK_DIVIDER_ROUND_CLOSEST); +} + +static int hi6220_clkdiv_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + int value; + unsigned long flags = 0; + u32 data; + struct hi6220_clk_divider *dclk = to_hi6220_clk_divider(hw); + + value = divider_get_val(rate, parent_rate, dclk->table, + dclk->width, CLK_DIVIDER_ROUND_CLOSEST); + + if (dclk->lock) + spin_lock_irqsave(dclk->lock, flags); + + data = readl_relaxed(dclk->reg); + data &= ~(div_mask(dclk->width) << dclk->shift); + data |= value << dclk->shift; + data |= dclk->mask; + + writel_relaxed(data, dclk->reg); + + if (dclk->lock) + spin_unlock_irqrestore(dclk->lock, flags); + + return 0; +} + +static const struct clk_ops hi6220_clkdiv_ops = { + .recalc_rate = hi6220_clkdiv_recalc_rate, + .round_rate = hi6220_clkdiv_round_rate, + .set_rate = hi6220_clkdiv_set_rate, +}; + +struct clk *hi6220_register_clkdiv(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, void __iomem *reg, + u8 shift, u8 width, u32 mask_bit, spinlock_t *lock) +{ + struct hi6220_clk_divider *div; + struct clk *clk; + struct clk_init_data init; + struct clk_div_table *table; + u32 max_div, min_div; + int i; + + /* allocate the divider */ + div = kzalloc(sizeof(*div), GFP_KERNEL); + if (!div) + return ERR_PTR(-ENOMEM); + + /* Init the divider table */ + max_div = div_mask(width) + 1; + min_div = 1; + + table = kcalloc(max_div + 1, sizeof(*table), GFP_KERNEL); + if (!table) { + kfree(div); + return ERR_PTR(-ENOMEM); + } + + for (i = 0; i < max_div; i++) { + table[i].div = min_div + i; + table[i].val = table[i].div - 1; + } + + init.name = name; + init.ops = &hi6220_clkdiv_ops; + init.flags = flags; + init.parent_names = parent_name ? &parent_name : NULL; + init.num_parents = parent_name ? 1 : 0; + + /* struct hi6220_clk_divider assignments */ + div->reg = reg; + div->shift = shift; + div->width = width; + div->mask = mask_bit ? 
BIT(mask_bit) : 0; + div->lock = lock; + div->hw.init = &init; + div->table = table; + + /* register the clock */ + clk = clk_register(dev, &div->hw); + if (IS_ERR(clk)) { + kfree(table); + kfree(div); + } + + return clk; +} diff --git a/drivers/clk/keystone/pll.c b/drivers/clk/keystone/pll.c index 0dd8a4b12747..4a375ead70e9 100644 --- a/drivers/clk/keystone/pll.c +++ b/drivers/clk/keystone/pll.c @@ -37,7 +37,8 @@ * Main PLL or any other PLLs in the device such as ARM PLL, DDR PLL * or PA PLL available on keystone2. These PLLs are controlled by * this register. Main PLL is controlled by a PLL controller. - * @pllm: PLL register map address + * @pllm: PLL register map address for multiplier bits + * @pllod: PLL register map address for post divider bits * @pll_ctl0: PLL controller map address * @pllm_lower_mask: multiplier lower mask * @pllm_upper_mask: multiplier upper mask @@ -53,6 +54,7 @@ struct clk_pll_data { u32 phy_pllm; u32 phy_pll_ctl0; void __iomem *pllm; + void __iomem *pllod; void __iomem *pll_ctl0; u32 pllm_lower_mask; u32 pllm_upper_mask; @@ -102,7 +104,11 @@ static unsigned long clk_pllclk_recalc(struct clk_hw *hw, /* read post divider from od bits*/ postdiv = ((val & pll_data->clkod_mask) >> pll_data->clkod_shift) + 1; - else + else if (pll_data->pllod) { + postdiv = readl(pll_data->pllod); + postdiv = ((postdiv & pll_data->clkod_mask) >> + pll_data->clkod_shift) + 1; + } else postdiv = pll_data->postdiv; rate /= (prediv + 1); @@ -172,12 +178,21 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl) /* assume the PLL has output divider register bits */ pll_data->clkod_mask = CLKOD_MASK; pll_data->clkod_shift = CLKOD_SHIFT; + + /* + * Check if there is an post-divider register. If not + * assume od bits are part of control register. + */ + i = of_property_match_string(node, "reg-names", + "post-divider"); + pll_data->pllod = of_iomap(node, i); } i = of_property_match_string(node, "reg-names", "control"); pll_data->pll_ctl0 = of_iomap(node, i); if (!pll_data->pll_ctl0) { pr_err("%s: ioremap failed\n", __func__); + iounmap(pll_data->pllod); goto out; } @@ -193,6 +208,7 @@ static void __init _of_pll_clk_init(struct device_node *node, bool pllctrl) pll_data->pllm = of_iomap(node, i); if (!pll_data->pllm) { iounmap(pll_data->pll_ctl0); + iounmap(pll_data->pllod); goto out; } } diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile new file mode 100644 index 000000000000..8e4b2a4635b9 --- /dev/null +++ b/drivers/clk/mediatek/Makefile @@ -0,0 +1,4 @@ +obj-y += clk-mtk.o clk-pll.o clk-gate.o +obj-$(CONFIG_RESET_CONTROLLER) += reset.o +obj-y += clk-mt8135.o +obj-y += clk-mt8173.o diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c new file mode 100644 index 000000000000..57020368a693 --- /dev/null +++ b/drivers/clk/mediatek/clk-gate.c @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: James Liao <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/of.h> +#include <linux/of_address.h> + +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/clkdev.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +static int mtk_cg_bit_is_cleared(struct clk_hw *hw) +{ + struct mtk_clk_gate *cg = to_clk_gate(hw); + u32 val; + + regmap_read(cg->regmap, cg->sta_ofs, &val); + + val &= BIT(cg->bit); + + return val == 0; +} + +static int mtk_cg_bit_is_set(struct clk_hw *hw) +{ + struct mtk_clk_gate *cg = to_clk_gate(hw); + u32 val; + + regmap_read(cg->regmap, cg->sta_ofs, &val); + + val &= BIT(cg->bit); + + return val != 0; +} + +static void mtk_cg_set_bit(struct clk_hw *hw) +{ + struct mtk_clk_gate *cg = to_clk_gate(hw); + + regmap_write(cg->regmap, cg->set_ofs, BIT(cg->bit)); +} + +static void mtk_cg_clr_bit(struct clk_hw *hw) +{ + struct mtk_clk_gate *cg = to_clk_gate(hw); + + regmap_write(cg->regmap, cg->clr_ofs, BIT(cg->bit)); +} + +static int mtk_cg_enable(struct clk_hw *hw) +{ + mtk_cg_clr_bit(hw); + + return 0; +} + +static void mtk_cg_disable(struct clk_hw *hw) +{ + mtk_cg_set_bit(hw); +} + +static int mtk_cg_enable_inv(struct clk_hw *hw) +{ + mtk_cg_set_bit(hw); + + return 0; +} + +static void mtk_cg_disable_inv(struct clk_hw *hw) +{ + mtk_cg_clr_bit(hw); +} + +const struct clk_ops mtk_clk_gate_ops_setclr = { + .is_enabled = mtk_cg_bit_is_cleared, + .enable = mtk_cg_enable, + .disable = mtk_cg_disable, +}; + +const struct clk_ops mtk_clk_gate_ops_setclr_inv = { + .is_enabled = mtk_cg_bit_is_set, + .enable = mtk_cg_enable_inv, + .disable = mtk_cg_disable_inv, +}; + +struct clk *mtk_clk_register_gate( + const char *name, + const char *parent_name, + struct regmap *regmap, + int set_ofs, + int clr_ofs, + int sta_ofs, + u8 bit, + const struct clk_ops *ops) +{ + struct mtk_clk_gate *cg; + struct clk *clk; + struct clk_init_data init = {}; + + cg = kzalloc(sizeof(*cg), GFP_KERNEL); + if (!cg) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.flags = CLK_SET_RATE_PARENT; + init.parent_names = parent_name ? &parent_name : NULL; + init.num_parents = parent_name ? 1 : 0; + init.ops = ops; + + cg->regmap = regmap; + cg->set_ofs = set_ofs; + cg->clr_ofs = clr_ofs; + cg->sta_ofs = sta_ofs; + cg->bit = bit; + + cg->hw.init = &init; + + clk = clk_register(NULL, &cg->hw); + if (IS_ERR(clk)) + kfree(cg); + + return clk; +} diff --git a/drivers/clk/mediatek/clk-gate.h b/drivers/clk/mediatek/clk-gate.h new file mode 100644 index 000000000000..6b6780b1e9c5 --- /dev/null +++ b/drivers/clk/mediatek/clk-gate.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: James Liao <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
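/*
 * Editorial sketch, not part of the patch: the gates above are driven
 * through a set/clear/status register triplet behind a syscon regmap.
 * A hypothetical caller (the 0x80/0x84/0x88 offsets, bit 3 and the
 * "foo_clk" name are invented) would register one gate like this:
 */
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/regmap.h>

#include "clk-gate.h"

static struct clk *foo_register_one_gate(struct device_node *node)
{
	struct regmap *regmap;

	regmap = syscon_node_to_regmap(node);
	if (IS_ERR(regmap))
		return ERR_CAST(regmap);

	/* writing BIT(3) to 0x84 (clr) ungates the clock, 0x80 (set) gates it */
	return mtk_clk_register_gate("foo_clk", "clk26m", regmap,
				     0x80, 0x84, 0x88, 3,
				     &mtk_clk_gate_ops_setclr);
}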
+ */ + +#ifndef __DRV_CLK_GATE_H +#define __DRV_CLK_GATE_H + +#include <linux/regmap.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> + +struct mtk_clk_gate { + struct clk_hw hw; + struct regmap *regmap; + int set_ofs; + int clr_ofs; + int sta_ofs; + u8 bit; +}; + +static inline struct mtk_clk_gate *to_clk_gate(struct clk_hw *hw) +{ + return container_of(hw, struct mtk_clk_gate, hw); +} + +extern const struct clk_ops mtk_clk_gate_ops_setclr; +extern const struct clk_ops mtk_clk_gate_ops_setclr_inv; + +struct clk *mtk_clk_register_gate( + const char *name, + const char *parent_name, + struct regmap *regmap, + int set_ofs, + int clr_ofs, + int sta_ofs, + u8 bit, + const struct clk_ops *ops); + +#endif /* __DRV_CLK_GATE_H */ diff --git a/drivers/clk/mediatek/clk-mt8135.c b/drivers/clk/mediatek/clk-mt8135.c new file mode 100644 index 000000000000..08b4b849b491 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8135.c @@ -0,0 +1,644 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: James Liao <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/slab.h> +#include <linux/mfd/syscon.h> +#include <dt-bindings/clock/mt8135-clk.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +static DEFINE_SPINLOCK(mt8135_clk_lock); + +static const struct mtk_fixed_factor root_clk_alias[] __initconst = { + FACTOR(CLK_TOP_DSI0_LNTC_DSICLK, "dsi0_lntc_dsiclk", "clk_null", 1, 1), + FACTOR(CLK_TOP_HDMITX_CLKDIG_CTS, "hdmitx_clkdig_cts", "clk_null", 1, 1), + FACTOR(CLK_TOP_CLKPH_MCK, "clkph_mck", "clk_null", 1, 1), + FACTOR(CLK_TOP_CPUM_TCK_IN, "cpum_tck_in", "clk_null", 1, 1), +}; + +static const struct mtk_fixed_factor top_divs[] __initconst = { + FACTOR(CLK_TOP_MAINPLL_806M, "mainpll_806m", "mainpll", 1, 2), + FACTOR(CLK_TOP_MAINPLL_537P3M, "mainpll_537p3m", "mainpll", 1, 3), + FACTOR(CLK_TOP_MAINPLL_322P4M, "mainpll_322p4m", "mainpll", 1, 5), + FACTOR(CLK_TOP_MAINPLL_230P3M, "mainpll_230p3m", "mainpll", 1, 7), + + FACTOR(CLK_TOP_UNIVPLL_624M, "univpll_624m", "univpll", 1, 2), + FACTOR(CLK_TOP_UNIVPLL_416M, "univpll_416m", "univpll", 1, 3), + FACTOR(CLK_TOP_UNIVPLL_249P6M, "univpll_249p6m", "univpll", 1, 5), + FACTOR(CLK_TOP_UNIVPLL_178P3M, "univpll_178p3m", "univpll", 1, 7), + FACTOR(CLK_TOP_UNIVPLL_48M, "univpll_48m", "univpll", 1, 26), + + FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2), + FACTOR(CLK_TOP_MMPLL_D3, "mmpll_d3", "mmpll", 1, 3), + FACTOR(CLK_TOP_MMPLL_D5, "mmpll_d5", "mmpll", 1, 5), + FACTOR(CLK_TOP_MMPLL_D7, "mmpll_d7", "mmpll", 1, 7), + FACTOR(CLK_TOP_MMPLL_D4, "mmpll_d4", "mmpll_d2", 1, 2), + FACTOR(CLK_TOP_MMPLL_D6, "mmpll_d6", "mmpll_d3", 1, 2), + + FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "mainpll_806m", 1, 1), + FACTOR(CLK_TOP_SYSPLL_D4, "syspll_d4", "mainpll_806m", 1, 2), + FACTOR(CLK_TOP_SYSPLL_D6, "syspll_d6", "mainpll_806m", 1, 3), + FACTOR(CLK_TOP_SYSPLL_D8, "syspll_d8", "mainpll_806m", 1, 4), + FACTOR(CLK_TOP_SYSPLL_D10, "syspll_d10", "mainpll_806m", 1, 5), + FACTOR(CLK_TOP_SYSPLL_D12, "syspll_d12", "mainpll_806m", 1, 6), + FACTOR(CLK_TOP_SYSPLL_D16, "syspll_d16", "mainpll_806m", 1, 8), + 
FACTOR(CLK_TOP_SYSPLL_D24, "syspll_d24", "mainpll_806m", 1, 12), + + FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "mainpll_537p3m", 1, 1), + + FACTOR(CLK_TOP_SYSPLL_D2P5, "syspll_d2p5", "mainpll_322p4m", 2, 1), + FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "mainpll_322p4m", 1, 1), + + FACTOR(CLK_TOP_SYSPLL_D3P5, "syspll_d3p5", "mainpll_230p3m", 2, 1), + + FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univpll_624m", 1, 2), + FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univpll_624m", 1, 4), + FACTOR(CLK_TOP_UNIVPLL1_D6, "univpll1_d6", "univpll_624m", 1, 6), + FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univpll_624m", 1, 8), + FACTOR(CLK_TOP_UNIVPLL1_D10, "univpll1_d10", "univpll_624m", 1, 10), + + FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univpll_416m", 1, 2), + FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univpll_416m", 1, 4), + FACTOR(CLK_TOP_UNIVPLL2_D6, "univpll2_d6", "univpll_416m", 1, 6), + FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univpll_416m", 1, 8), + + FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll_416m", 1, 1), + FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll_249p6m", 1, 1), + FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll_178p3m", 1, 1), + FACTOR(CLK_TOP_UNIVPLL_D10, "univpll_d10", "univpll_249p6m", 1, 2), + FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univpll_48m", 1, 1), + + FACTOR(CLK_TOP_APLL, "apll_ck", "audpll", 1, 1), + FACTOR(CLK_TOP_APLL_D4, "apll_d4", "audpll", 1, 4), + FACTOR(CLK_TOP_APLL_D8, "apll_d8", "audpll", 1, 8), + FACTOR(CLK_TOP_APLL_D16, "apll_d16", "audpll", 1, 16), + FACTOR(CLK_TOP_APLL_D24, "apll_d24", "audpll", 1, 24), + + FACTOR(CLK_TOP_LVDSPLL_D2, "lvdspll_d2", "lvdspll", 1, 2), + FACTOR(CLK_TOP_LVDSPLL_D4, "lvdspll_d4", "lvdspll", 1, 4), + FACTOR(CLK_TOP_LVDSPLL_D8, "lvdspll_d8", "lvdspll", 1, 8), + + FACTOR(CLK_TOP_LVDSTX_CLKDIG_CT, "lvdstx_clkdig_cts", "lvdspll", 1, 1), + FACTOR(CLK_TOP_VPLL_DPIX, "vpll_dpix_ck", "lvdspll", 1, 1), + + FACTOR(CLK_TOP_TVHDMI_H, "tvhdmi_h_ck", "tvdpll", 1, 1), + + FACTOR(CLK_TOP_HDMITX_CLKDIG_D2, "hdmitx_clkdig_d2", "hdmitx_clkdig_cts", 1, 2), + FACTOR(CLK_TOP_HDMITX_CLKDIG_D3, "hdmitx_clkdig_d3", "hdmitx_clkdig_cts", 1, 3), + + FACTOR(CLK_TOP_TVHDMI_D2, "tvhdmi_d2", "tvhdmi_h_ck", 1, 2), + FACTOR(CLK_TOP_TVHDMI_D4, "tvhdmi_d4", "tvhdmi_h_ck", 1, 4), + + FACTOR(CLK_TOP_MEMPLL_MCK_D4, "mempll_mck_d4", "clkph_mck", 1, 4), +}; + +static const char * const axi_parents[] __initconst = { + "clk26m", + "syspll_d3", + "syspll_d4", + "syspll_d6", + "univpll_d5", + "univpll2_d2", + "syspll_d3p5" +}; + +static const char * const smi_parents[] __initconst = { + "clk26m", + "clkph_mck", + "syspll_d2p5", + "syspll_d3", + "syspll_d8", + "univpll_d5", + "univpll1_d2", + "univpll1_d6", + "mmpll_d3", + "mmpll_d4", + "mmpll_d5", + "mmpll_d6", + "mmpll_d7", + "vdecpll", + "lvdspll" +}; + +static const char * const mfg_parents[] __initconst = { + "clk26m", + "univpll1_d4", + "syspll_d2", + "syspll_d2p5", + "syspll_d3", + "univpll_d5", + "univpll1_d2", + "mmpll_d2", + "mmpll_d3", + "mmpll_d4", + "mmpll_d5", + "mmpll_d6", + "mmpll_d7" +}; + +static const char * const irda_parents[] __initconst = { + "clk26m", + "univpll2_d8", + "univpll1_d6" +}; + +static const char * const cam_parents[] __initconst = { + "clk26m", + "syspll_d3", + "syspll_d3p5", + "syspll_d4", + "univpll_d5", + "univpll2_d2", + "univpll_d7", + "univpll1_d4" +}; + +static const char * const aud_intbus_parents[] __initconst = { + "clk26m", + "syspll_d6", + "univpll_d10" +}; + +static const char * const jpg_parents[] __initconst = { + "clk26m", + "syspll_d5", + "syspll_d4", + 
"syspll_d3", + "univpll_d7", + "univpll2_d2", + "univpll_d5" +}; + +static const char * const disp_parents[] __initconst = { + "clk26m", + "syspll_d3p5", + "syspll_d3", + "univpll2_d2", + "univpll_d5", + "univpll1_d2", + "lvdspll", + "vdecpll" +}; + +static const char * const msdc30_parents[] __initconst = { + "clk26m", + "syspll_d6", + "syspll_d5", + "univpll1_d4", + "univpll2_d4", + "msdcpll" +}; + +static const char * const usb20_parents[] __initconst = { + "clk26m", + "univpll2_d6", + "univpll1_d10" +}; + +static const char * const venc_parents[] __initconst = { + "clk26m", + "syspll_d3", + "syspll_d8", + "univpll_d5", + "univpll1_d6", + "mmpll_d4", + "mmpll_d5", + "mmpll_d6" +}; + +static const char * const spi_parents[] __initconst = { + "clk26m", + "syspll_d6", + "syspll_d8", + "syspll_d10", + "univpll1_d6", + "univpll1_d8" +}; + +static const char * const uart_parents[] __initconst = { + "clk26m", + "univpll2_d8" +}; + +static const char * const mem_parents[] __initconst = { + "clk26m", + "clkph_mck" +}; + +static const char * const camtg_parents[] __initconst = { + "clk26m", + "univpll_d26", + "univpll1_d6", + "syspll_d16", + "syspll_d8" +}; + +static const char * const audio_parents[] __initconst = { + "clk26m", + "syspll_d24" +}; + +static const char * const fix_parents[] __initconst = { + "rtc32k", + "clk26m", + "univpll_d5", + "univpll_d7", + "univpll1_d2", + "univpll1_d4", + "univpll1_d6", + "univpll1_d8" +}; + +static const char * const vdec_parents[] __initconst = { + "clk26m", + "vdecpll", + "clkph_mck", + "syspll_d2p5", + "syspll_d3", + "syspll_d3p5", + "syspll_d4", + "syspll_d5", + "syspll_d6", + "syspll_d8", + "univpll1_d2", + "univpll2_d2", + "univpll_d7", + "univpll_d10", + "univpll2_d4", + "lvdspll" +}; + +static const char * const ddrphycfg_parents[] __initconst = { + "clk26m", + "axi_sel", + "syspll_d12" +}; + +static const char * const dpilvds_parents[] __initconst = { + "clk26m", + "lvdspll", + "lvdspll_d2", + "lvdspll_d4", + "lvdspll_d8" +}; + +static const char * const pmicspi_parents[] __initconst = { + "clk26m", + "univpll2_d6", + "syspll_d8", + "syspll_d10", + "univpll1_d10", + "mempll_mck_d4", + "univpll_d26", + "syspll_d24" +}; + +static const char * const smi_mfg_as_parents[] __initconst = { + "clk26m", + "smi_sel", + "mfg_sel", + "mem_sel" +}; + +static const char * const gcpu_parents[] __initconst = { + "clk26m", + "syspll_d4", + "univpll_d7", + "syspll_d5", + "syspll_d6" +}; + +static const char * const dpi1_parents[] __initconst = { + "clk26m", + "tvhdmi_h_ck", + "tvhdmi_d2", + "tvhdmi_d4" +}; + +static const char * const cci_parents[] __initconst = { + "clk26m", + "mainpll_537p3m", + "univpll_d3", + "syspll_d2p5", + "syspll_d3", + "syspll_d5" +}; + +static const char * const apll_parents[] __initconst = { + "clk26m", + "apll_ck", + "apll_d4", + "apll_d8", + "apll_d16", + "apll_d24" +}; + +static const char * const hdmipll_parents[] __initconst = { + "clk26m", + "hdmitx_clkdig_cts", + "hdmitx_clkdig_d2", + "hdmitx_clkdig_d3" +}; + +static const struct mtk_composite top_muxes[] __initconst = { + /* CLK_CFG_0 */ + MUX_GATE(CLK_TOP_AXI_SEL, "axi_sel", axi_parents, + 0x0140, 0, 3, INVALID_MUX_GATE_BIT), + MUX_GATE(CLK_TOP_SMI_SEL, "smi_sel", smi_parents, 0x0140, 8, 4, 15), + MUX_GATE(CLK_TOP_MFG_SEL, "mfg_sel", mfg_parents, 0x0140, 16, 4, 23), + MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel", irda_parents, 0x0140, 24, 2, 31), + /* CLK_CFG_1 */ + MUX_GATE(CLK_TOP_CAM_SEL, "cam_sel", cam_parents, 0x0144, 0, 3, 7), + MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", 
aud_intbus_parents, + 0x0144, 8, 2, 15), + MUX_GATE(CLK_TOP_JPG_SEL, "jpg_sel", jpg_parents, 0x0144, 16, 3, 23), + MUX_GATE(CLK_TOP_DISP_SEL, "disp_sel", disp_parents, 0x0144, 24, 3, 31), + /* CLK_CFG_2 */ + MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_parents, 0x0148, 0, 3, 7), + MUX_GATE(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel", msdc30_parents, 0x0148, 8, 3, 15), + MUX_GATE(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel", msdc30_parents, 0x0148, 16, 3, 23), + MUX_GATE(CLK_TOP_MSDC30_4_SEL, "msdc30_4_sel", msdc30_parents, 0x0148, 24, 3, 31), + /* CLK_CFG_3 */ + MUX_GATE(CLK_TOP_USB20_SEL, "usb20_sel", usb20_parents, 0x014c, 0, 2, 7), + /* CLK_CFG_4 */ + MUX_GATE(CLK_TOP_VENC_SEL, "venc_sel", venc_parents, 0x0150, 8, 3, 15), + MUX_GATE(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, 0x0150, 16, 3, 23), + MUX_GATE(CLK_TOP_UART_SEL, "uart_sel", uart_parents, 0x0150, 24, 2, 31), + /* CLK_CFG_6 */ + MUX_GATE(CLK_TOP_MEM_SEL, "mem_sel", mem_parents, 0x0158, 0, 2, 7), + MUX_GATE(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_parents, 0x0158, 8, 3, 15), + MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents, 0x0158, 24, 2, 31), + /* CLK_CFG_7 */ + MUX_GATE(CLK_TOP_FIX_SEL, "fix_sel", fix_parents, 0x015c, 0, 3, 7), + MUX_GATE(CLK_TOP_VDEC_SEL, "vdec_sel", vdec_parents, 0x015c, 8, 4, 15), + MUX_GATE(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents, + 0x015c, 16, 2, 23), + MUX_GATE(CLK_TOP_DPILVDS_SEL, "dpilvds_sel", dpilvds_parents, 0x015c, 24, 3, 31), + /* CLK_CFG_8 */ + MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents, 0x0164, 0, 3, 7), + MUX_GATE(CLK_TOP_MSDC30_0_SEL, "msdc30_0_sel", msdc30_parents, 0x0164, 8, 3, 15), + MUX_GATE(CLK_TOP_SMI_MFG_AS_SEL, "smi_mfg_as_sel", smi_mfg_as_parents, + 0x0164, 16, 2, 23), + MUX_GATE(CLK_TOP_GCPU_SEL, "gcpu_sel", gcpu_parents, 0x0164, 24, 3, 31), + /* CLK_CFG_9 */ + MUX_GATE(CLK_TOP_DPI1_SEL, "dpi1_sel", dpi1_parents, 0x0168, 0, 2, 7), + MUX_GATE(CLK_TOP_CCI_SEL, "cci_sel", cci_parents, 0x0168, 8, 3, 15), + MUX_GATE(CLK_TOP_APLL_SEL, "apll_sel", apll_parents, 0x0168, 16, 3, 23), + MUX_GATE(CLK_TOP_HDMIPLL_SEL, "hdmipll_sel", hdmipll_parents, 0x0168, 24, 2, 31), +}; + +static const struct mtk_gate_regs infra_cg_regs = { + .set_ofs = 0x0040, + .clr_ofs = 0x0044, + .sta_ofs = 0x0048, +}; + +#define GATE_ICG(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &infra_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +static const struct mtk_gate infra_clks[] __initconst = { + GATE_ICG(CLK_INFRA_PMIC_WRAP, "pmic_wrap_ck", "axi_sel", 23), + GATE_ICG(CLK_INFRA_PMICSPI, "pmicspi_ck", "pmicspi_sel", 22), + GATE_ICG(CLK_INFRA_CCIF1_AP_CTRL, "ccif1_ap_ctrl", "axi_sel", 21), + GATE_ICG(CLK_INFRA_CCIF0_AP_CTRL, "ccif0_ap_ctrl", "axi_sel", 20), + GATE_ICG(CLK_INFRA_KP, "kp_ck", "axi_sel", 16), + GATE_ICG(CLK_INFRA_CPUM, "cpum_ck", "cpum_tck_in", 15), + GATE_ICG(CLK_INFRA_M4U, "m4u_ck", "mem_sel", 8), + GATE_ICG(CLK_INFRA_MFGAXI, "mfgaxi_ck", "axi_sel", 7), + GATE_ICG(CLK_INFRA_DEVAPC, "devapc_ck", "axi_sel", 6), + GATE_ICG(CLK_INFRA_AUDIO, "audio_ck", "aud_intbus_sel", 5), + GATE_ICG(CLK_INFRA_MFG_BUS, "mfg_bus_ck", "axi_sel", 2), + GATE_ICG(CLK_INFRA_SMI, "smi_ck", "smi_sel", 1), + GATE_ICG(CLK_INFRA_DBGCLK, "dbgclk_ck", "axi_sel", 0), +}; + +static const struct mtk_gate_regs peri0_cg_regs = { + .set_ofs = 0x0008, + .clr_ofs = 0x0010, + .sta_ofs = 0x0018, +}; + +static const struct mtk_gate_regs peri1_cg_regs = { + .set_ofs = 0x000c, + .clr_ofs = 0x0014, + .sta_ofs = 0x001c, +}; + +#define 
GATE_PERI0(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &peri0_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +#define GATE_PERI1(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &peri1_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +static const struct mtk_gate peri_gates[] __initconst = { + /* PERI0 */ + GATE_PERI0(CLK_PERI_I2C5, "i2c5_ck", "axi_sel", 31), + GATE_PERI0(CLK_PERI_I2C4, "i2c4_ck", "axi_sel", 30), + GATE_PERI0(CLK_PERI_I2C3, "i2c3_ck", "axi_sel", 29), + GATE_PERI0(CLK_PERI_I2C2, "i2c2_ck", "axi_sel", 28), + GATE_PERI0(CLK_PERI_I2C1, "i2c1_ck", "axi_sel", 27), + GATE_PERI0(CLK_PERI_I2C0, "i2c0_ck", "axi_sel", 26), + GATE_PERI0(CLK_PERI_UART3, "uart3_ck", "axi_sel", 25), + GATE_PERI0(CLK_PERI_UART2, "uart2_ck", "axi_sel", 24), + GATE_PERI0(CLK_PERI_UART1, "uart1_ck", "axi_sel", 23), + GATE_PERI0(CLK_PERI_UART0, "uart0_ck", "axi_sel", 22), + GATE_PERI0(CLK_PERI_IRDA, "irda_ck", "irda_sel", 21), + GATE_PERI0(CLK_PERI_NLI, "nli_ck", "axi_sel", 20), + GATE_PERI0(CLK_PERI_MD_HIF, "md_hif_ck", "axi_sel", 19), + GATE_PERI0(CLK_PERI_AP_HIF, "ap_hif_ck", "axi_sel", 18), + GATE_PERI0(CLK_PERI_MSDC30_3, "msdc30_3_ck", "msdc30_4_sel", 17), + GATE_PERI0(CLK_PERI_MSDC30_2, "msdc30_2_ck", "msdc30_3_sel", 16), + GATE_PERI0(CLK_PERI_MSDC30_1, "msdc30_1_ck", "msdc30_2_sel", 15), + GATE_PERI0(CLK_PERI_MSDC20_2, "msdc20_2_ck", "msdc30_1_sel", 14), + GATE_PERI0(CLK_PERI_MSDC20_1, "msdc20_1_ck", "msdc30_0_sel", 13), + GATE_PERI0(CLK_PERI_AP_DMA, "ap_dma_ck", "axi_sel", 12), + GATE_PERI0(CLK_PERI_USB1, "usb1_ck", "usb20_sel", 11), + GATE_PERI0(CLK_PERI_USB0, "usb0_ck", "usb20_sel", 10), + GATE_PERI0(CLK_PERI_PWM, "pwm_ck", "axi_sel", 9), + GATE_PERI0(CLK_PERI_PWM7, "pwm7_ck", "axi_sel", 8), + GATE_PERI0(CLK_PERI_PWM6, "pwm6_ck", "axi_sel", 7), + GATE_PERI0(CLK_PERI_PWM5, "pwm5_ck", "axi_sel", 6), + GATE_PERI0(CLK_PERI_PWM4, "pwm4_ck", "axi_sel", 5), + GATE_PERI0(CLK_PERI_PWM3, "pwm3_ck", "axi_sel", 4), + GATE_PERI0(CLK_PERI_PWM2, "pwm2_ck", "axi_sel", 3), + GATE_PERI0(CLK_PERI_PWM1, "pwm1_ck", "axi_sel", 2), + GATE_PERI0(CLK_PERI_THERM, "therm_ck", "axi_sel", 1), + GATE_PERI0(CLK_PERI_NFI, "nfi_ck", "axi_sel", 0), + /* PERI1 */ + GATE_PERI1(CLK_PERI_USBSLV, "usbslv_ck", "axi_sel", 8), + GATE_PERI1(CLK_PERI_USB1_MCU, "usb1_mcu_ck", "axi_sel", 7), + GATE_PERI1(CLK_PERI_USB0_MCU, "usb0_mcu_ck", "axi_sel", 6), + GATE_PERI1(CLK_PERI_GCPU, "gcpu_ck", "gcpu_sel", 5), + GATE_PERI1(CLK_PERI_FHCTL, "fhctl_ck", "clk26m", 4), + GATE_PERI1(CLK_PERI_SPI1, "spi1_ck", "spi_sel", 3), + GATE_PERI1(CLK_PERI_AUXADC, "auxadc_ck", "clk26m", 2), + GATE_PERI1(CLK_PERI_PERI_PWRAP, "peri_pwrap_ck", "axi_sel", 1), + GATE_PERI1(CLK_PERI_I2C6, "i2c6_ck", "axi_sel", 0), +}; + +static const char * const uart_ck_sel_parents[] __initconst = { + "clk26m", + "uart_sel", +}; + +static const struct mtk_composite peri_clks[] __initconst = { + MUX(CLK_PERI_UART0_SEL, "uart0_ck_sel", uart_ck_sel_parents, 0x40c, 0, 1), + MUX(CLK_PERI_UART1_SEL, "uart1_ck_sel", uart_ck_sel_parents, 0x40c, 1, 1), + MUX(CLK_PERI_UART2_SEL, "uart2_ck_sel", uart_ck_sel_parents, 0x40c, 2, 1), + MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1), +}; + +static void __init mtk_topckgen_init(struct device_node *node) +{ + struct clk_onecell_data *clk_data; + void __iomem *base; + int r; + + base = of_iomap(node, 0); + if (!base) { + pr_err("%s(): ioremap failed\n", 
__func__); + return; + } + + clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); + + mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data); + mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data); + mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base, + &mt8135_clk_lock, clk_data); + + clk_prepare_enable(clk_data->clks[CLK_TOP_CCI_SEL]); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); +} +CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8135-topckgen", mtk_topckgen_init); + +static void __init mtk_infrasys_init(struct device_node *node) +{ + struct clk_onecell_data *clk_data; + int r; + + clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK); + + mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), + clk_data); + + clk_prepare_enable(clk_data->clks[CLK_INFRA_M4U]); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); + + mtk_register_reset_controller(node, 2, 0x30); +} +CLK_OF_DECLARE(mtk_infrasys, "mediatek,mt8135-infracfg", mtk_infrasys_init); + +static void __init mtk_pericfg_init(struct device_node *node) +{ + struct clk_onecell_data *clk_data; + int r; + void __iomem *base; + + base = of_iomap(node, 0); + if (!base) { + pr_err("%s(): ioremap failed\n", __func__); + return; + } + + clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK); + + mtk_clk_register_gates(node, peri_gates, ARRAY_SIZE(peri_gates), + clk_data); + mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base, + &mt8135_clk_lock, clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); + + mtk_register_reset_controller(node, 2, 0); +} +CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8135-pericfg", mtk_pericfg_init); + +#define MT8135_PLL_FMAX (2000 * MHZ) +#define CON0_MT8135_RST_BAR BIT(27) + +#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift) { \ + .id = _id, \ + .name = _name, \ + .reg = _reg, \ + .pwr_reg = _pwr_reg, \ + .en_mask = _en_mask, \ + .flags = _flags, \ + .rst_bar_mask = CON0_MT8135_RST_BAR, \ + .fmax = MT8135_PLL_FMAX, \ + .pcwbits = _pcwbits, \ + .pd_reg = _pd_reg, \ + .pd_shift = _pd_shift, \ + .tuner_reg = _tuner_reg, \ + .pcw_reg = _pcw_reg, \ + .pcw_shift = _pcw_shift, \ + } + +static const struct mtk_pll_data plls[] = { + PLL(CLK_APMIXED_ARMPLL1, "armpll1", 0x200, 0x218, 0x80000001, 0, 21, 0x204, 24, 0x0, 0x204, 0), + PLL(CLK_APMIXED_ARMPLL2, "armpll2", 0x2cc, 0x2e4, 0x80000001, 0, 21, 0x2d0, 24, 0x0, 0x2d0, 0), + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x21c, 0x234, 0xf0000001, HAVE_RST_BAR, 21, 0x21c, 6, 0x0, 0x220, 0), + PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x238, 0x250, 0xf3000001, HAVE_RST_BAR, 7, 0x238, 6, 0x0, 0x238, 9), + PLL(CLK_APMIXED_MMPLL, "mmpll", 0x254, 0x26c, 0xf0000001, HAVE_RST_BAR, 21, 0x254, 6, 0x0, 0x258, 0), + PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x278, 0x290, 0x80000001, 0, 21, 0x278, 6, 0x0, 0x27c, 0), + PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x294, 0x2ac, 0x80000001, 0, 31, 0x294, 6, 0x0, 0x298, 0), + PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2b0, 0x2c8, 0x80000001, 0, 21, 0x2b0, 6, 0x0, 0x2b4, 0), + PLL(CLK_APMIXED_AUDPLL, "audpll", 0x2e8, 0x300, 0x80000001, 0, 31, 0x2e8, 6, 0x2f8, 0x2ec, 0), + PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x304, 0x31c, 0x80000001, 0, 21, 0x2b0, 6, 
0x0, 0x308, 0), +}; + +static void __init mtk_apmixedsys_init(struct device_node *node) +{ + struct clk_onecell_data *clk_data; + + clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); + if (!clk_data) + return; + + mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); +} +CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8135-apmixedsys", + mtk_apmixedsys_init); diff --git a/drivers/clk/mediatek/clk-mt8173.c b/drivers/clk/mediatek/clk-mt8173.c new file mode 100644 index 000000000000..4b9e04cdf7e8 --- /dev/null +++ b/drivers/clk/mediatek/clk-mt8173.c @@ -0,0 +1,830 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: James Liao <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/slab.h> +#include <linux/mfd/syscon.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +#include <dt-bindings/clock/mt8173-clk.h> + +static DEFINE_SPINLOCK(mt8173_clk_lock); + +static const struct mtk_fixed_factor root_clk_alias[] __initconst = { + FACTOR(CLK_TOP_CLKPH_MCK_O, "clkph_mck_o", "clk_null", 1, 1), + FACTOR(CLK_TOP_DPI, "dpi_ck", "clk_null", 1, 1), + FACTOR(CLK_TOP_USB_SYSPLL_125M, "usb_syspll_125m", "clk_null", 1, 1), + FACTOR(CLK_TOP_HDMITX_DIG_CTS, "hdmitx_dig_cts", "clk_null", 1, 1), +}; + +static const struct mtk_fixed_factor top_divs[] __initconst = { + FACTOR(CLK_TOP_ARMCA7PLL_754M, "armca7pll_754m", "armca7pll", 1, 2), + FACTOR(CLK_TOP_ARMCA7PLL_502M, "armca7pll_502m", "armca7pll", 1, 3), + + FACTOR(CLK_TOP_MAIN_H546M, "main_h546m", "mainpll", 1, 2), + FACTOR(CLK_TOP_MAIN_H364M, "main_h364m", "mainpll", 1, 3), + FACTOR(CLK_TOP_MAIN_H218P4M, "main_h218p4m", "mainpll", 1, 5), + FACTOR(CLK_TOP_MAIN_H156M, "main_h156m", "mainpll", 1, 7), + + FACTOR(CLK_TOP_TVDPLL_445P5M, "tvdpll_445p5m", "tvdpll", 1, 4), + FACTOR(CLK_TOP_TVDPLL_594M, "tvdpll_594m", "tvdpll", 1, 3), + + FACTOR(CLK_TOP_UNIV_624M, "univ_624m", "univpll", 1, 2), + FACTOR(CLK_TOP_UNIV_416M, "univ_416m", "univpll", 1, 3), + FACTOR(CLK_TOP_UNIV_249P6M, "univ_249p6m", "univpll", 1, 5), + FACTOR(CLK_TOP_UNIV_178P3M, "univ_178p3m", "univpll", 1, 7), + FACTOR(CLK_TOP_UNIV_48M, "univ_48m", "univpll", 1, 26), + + FACTOR(CLK_TOP_CLKRTC_EXT, "clkrtc_ext", "clk32k", 1, 1), + FACTOR(CLK_TOP_CLKRTC_INT, "clkrtc_int", "clk26m", 1, 793), + FACTOR(CLK_TOP_FPC, "fpc_ck", "clk26m", 1, 1), + + FACTOR(CLK_TOP_HDMITXPLL_D2, "hdmitxpll_d2", "hdmitx_dig_cts", 1, 2), + FACTOR(CLK_TOP_HDMITXPLL_D3, "hdmitxpll_d3", "hdmitx_dig_cts", 1, 3), + + FACTOR(CLK_TOP_ARMCA7PLL_D2, "armca7pll_d2", "armca7pll_754m", 1, 1), + FACTOR(CLK_TOP_ARMCA7PLL_D3, "armca7pll_d3", "armca7pll_502m", 1, 1), + + FACTOR(CLK_TOP_APLL1, "apll1_ck", "apll1", 1, 1), + FACTOR(CLK_TOP_APLL2, "apll2_ck", "apll2", 1, 1), + + FACTOR(CLK_TOP_DMPLL, "dmpll_ck", "clkph_mck_o", 1, 1), + FACTOR(CLK_TOP_DMPLL_D2, "dmpll_d2", "clkph_mck_o", 1, 2), + FACTOR(CLK_TOP_DMPLL_D4, "dmpll_d4", "clkph_mck_o", 1, 4), + FACTOR(CLK_TOP_DMPLL_D8, "dmpll_d8", "clkph_mck_o", 1, 8), + FACTOR(CLK_TOP_DMPLL_D16, "dmpll_d16", "clkph_mck_o", 1, 16), + + FACTOR(CLK_TOP_LVDSPLL_D2, "lvdspll_d2", "lvdspll", 1, 2), + 
FACTOR(CLK_TOP_LVDSPLL_D4, "lvdspll_d4", "lvdspll", 1, 4), + FACTOR(CLK_TOP_LVDSPLL_D8, "lvdspll_d8", "lvdspll", 1, 8), + + FACTOR(CLK_TOP_MMPLL, "mmpll_ck", "mmpll", 1, 1), + FACTOR(CLK_TOP_MMPLL_D2, "mmpll_d2", "mmpll", 1, 2), + + FACTOR(CLK_TOP_MSDCPLL, "msdcpll_ck", "msdcpll", 1, 1), + FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2), + FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, 4), + FACTOR(CLK_TOP_MSDCPLL2, "msdcpll2_ck", "msdcpll2", 1, 1), + FACTOR(CLK_TOP_MSDCPLL2_D2, "msdcpll2_d2", "msdcpll2", 1, 2), + FACTOR(CLK_TOP_MSDCPLL2_D4, "msdcpll2_d4", "msdcpll2", 1, 4), + + FACTOR(CLK_TOP_SYSPLL_D2, "syspll_d2", "main_h546m", 1, 1), + FACTOR(CLK_TOP_SYSPLL1_D2, "syspll1_d2", "main_h546m", 1, 2), + FACTOR(CLK_TOP_SYSPLL1_D4, "syspll1_d4", "main_h546m", 1, 4), + FACTOR(CLK_TOP_SYSPLL1_D8, "syspll1_d8", "main_h546m", 1, 8), + FACTOR(CLK_TOP_SYSPLL1_D16, "syspll1_d16", "main_h546m", 1, 16), + FACTOR(CLK_TOP_SYSPLL_D3, "syspll_d3", "main_h364m", 1, 1), + FACTOR(CLK_TOP_SYSPLL2_D2, "syspll2_d2", "main_h364m", 1, 2), + FACTOR(CLK_TOP_SYSPLL2_D4, "syspll2_d4", "main_h364m", 1, 4), + FACTOR(CLK_TOP_SYSPLL_D5, "syspll_d5", "main_h218p4m", 1, 1), + FACTOR(CLK_TOP_SYSPLL3_D2, "syspll3_d2", "main_h218p4m", 1, 2), + FACTOR(CLK_TOP_SYSPLL3_D4, "syspll3_d4", "main_h218p4m", 1, 4), + FACTOR(CLK_TOP_SYSPLL_D7, "syspll_d7", "main_h156m", 1, 1), + FACTOR(CLK_TOP_SYSPLL4_D2, "syspll4_d2", "main_h156m", 1, 2), + FACTOR(CLK_TOP_SYSPLL4_D4, "syspll4_d4", "main_h156m", 1, 4), + + FACTOR(CLK_TOP_TVDPLL, "tvdpll_ck", "tvdpll_594m", 1, 1), + FACTOR(CLK_TOP_TVDPLL_D2, "tvdpll_d2", "tvdpll_594m", 1, 2), + FACTOR(CLK_TOP_TVDPLL_D4, "tvdpll_d4", "tvdpll_594m", 1, 4), + FACTOR(CLK_TOP_TVDPLL_D8, "tvdpll_d8", "tvdpll_594m", 1, 8), + FACTOR(CLK_TOP_TVDPLL_D16, "tvdpll_d16", "tvdpll_594m", 1, 16), + + FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univ_624m", 1, 1), + FACTOR(CLK_TOP_UNIVPLL1_D2, "univpll1_d2", "univ_624m", 1, 2), + FACTOR(CLK_TOP_UNIVPLL1_D4, "univpll1_d4", "univ_624m", 1, 4), + FACTOR(CLK_TOP_UNIVPLL1_D8, "univpll1_d8", "univ_624m", 1, 8), + FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univ_416m", 1, 1), + FACTOR(CLK_TOP_UNIVPLL2_D2, "univpll2_d2", "univ_416m", 1, 2), + FACTOR(CLK_TOP_UNIVPLL2_D4, "univpll2_d4", "univ_416m", 1, 4), + FACTOR(CLK_TOP_UNIVPLL2_D8, "univpll2_d8", "univ_416m", 1, 8), + FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univ_249p6m", 1, 1), + FACTOR(CLK_TOP_UNIVPLL3_D2, "univpll3_d2", "univ_249p6m", 1, 2), + FACTOR(CLK_TOP_UNIVPLL3_D4, "univpll3_d4", "univ_249p6m", 1, 4), + FACTOR(CLK_TOP_UNIVPLL3_D8, "univpll3_d8", "univ_249p6m", 1, 8), + FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univ_178p3m", 1, 1), + FACTOR(CLK_TOP_UNIVPLL_D26, "univpll_d26", "univ_48m", 1, 1), + FACTOR(CLK_TOP_UNIVPLL_D52, "univpll_d52", "univ_48m", 1, 2), + + FACTOR(CLK_TOP_VCODECPLL, "vcodecpll_ck", "vcodecpll", 1, 3), + FACTOR(CLK_TOP_VCODECPLL_370P5, "vcodecpll_370p5", "vcodecpll", 1, 4), + + FACTOR(CLK_TOP_VENCPLL, "vencpll_ck", "vencpll", 1, 1), + FACTOR(CLK_TOP_VENCPLL_D2, "vencpll_d2", "vencpll", 1, 2), + FACTOR(CLK_TOP_VENCPLL_D4, "vencpll_d4", "vencpll", 1, 4), +}; + +static const char * const axi_parents[] __initconst = { + "clk26m", + "syspll1_d2", + "syspll_d5", + "syspll1_d4", + "univpll_d5", + "univpll2_d2", + "dmpll_d2", + "dmpll_d4" +}; + +static const char * const mem_parents[] __initconst = { + "clk26m", + "dmpll_ck" +}; + +static const char * const ddrphycfg_parents[] __initconst = { + "clk26m", + "syspll1_d8" +}; + +static const char * const mm_parents[] __initconst = { + "clk26m", 
+ "vencpll_d2", + "main_h364m", + "syspll1_d2", + "syspll_d5", + "syspll1_d4", + "univpll1_d2", + "univpll2_d2", + "dmpll_d2" +}; + +static const char * const pwm_parents[] __initconst = { + "clk26m", + "univpll2_d4", + "univpll3_d2", + "univpll1_d4" +}; + +static const char * const vdec_parents[] __initconst = { + "clk26m", + "vcodecpll_ck", + "tvdpll_445p5m", + "univpll_d3", + "vencpll_d2", + "syspll_d3", + "univpll1_d2", + "mmpll_d2", + "dmpll_d2", + "dmpll_d4" +}; + +static const char * const venc_parents[] __initconst = { + "clk26m", + "vcodecpll_ck", + "tvdpll_445p5m", + "univpll_d3", + "vencpll_d2", + "syspll_d3", + "univpll1_d2", + "univpll2_d2", + "dmpll_d2", + "dmpll_d4" +}; + +static const char * const mfg_parents[] __initconst = { + "clk26m", + "mmpll_ck", + "dmpll_ck", + "clk26m", + "clk26m", + "clk26m", + "clk26m", + "clk26m", + "clk26m", + "syspll_d3", + "syspll1_d2", + "syspll_d5", + "univpll_d3", + "univpll1_d2", + "univpll_d5", + "univpll2_d2" +}; + +static const char * const camtg_parents[] __initconst = { + "clk26m", + "univpll_d26", + "univpll2_d2", + "syspll3_d2", + "syspll3_d4", + "univpll1_d4" +}; + +static const char * const uart_parents[] __initconst = { + "clk26m", + "univpll2_d8" +}; + +static const char * const spi_parents[] __initconst = { + "clk26m", + "syspll3_d2", + "syspll1_d4", + "syspll4_d2", + "univpll3_d2", + "univpll2_d4", + "univpll1_d8" +}; + +static const char * const usb20_parents[] __initconst = { + "clk26m", + "univpll1_d8", + "univpll3_d4" +}; + +static const char * const usb30_parents[] __initconst = { + "clk26m", + "univpll3_d2", + "usb_syspll_125m", + "univpll2_d4" +}; + +static const char * const msdc50_0_h_parents[] __initconst = { + "clk26m", + "syspll1_d2", + "syspll2_d2", + "syspll4_d2", + "univpll_d5", + "univpll1_d4" +}; + +static const char * const msdc50_0_parents[] __initconst = { + "clk26m", + "msdcpll_ck", + "msdcpll_d2", + "univpll1_d4", + "syspll2_d2", + "syspll_d7", + "msdcpll_d4", + "vencpll_d4", + "tvdpll_ck", + "univpll_d2", + "univpll1_d2", + "mmpll_ck", + "msdcpll2_ck", + "msdcpll2_d2", + "msdcpll2_d4" +}; + +static const char * const msdc30_1_parents[] __initconst = { + "clk26m", + "univpll2_d2", + "msdcpll_d4", + "univpll1_d4", + "syspll2_d2", + "syspll_d7", + "univpll_d7", + "vencpll_d4" +}; + +static const char * const msdc30_2_parents[] __initconst = { + "clk26m", + "univpll2_d2", + "msdcpll_d4", + "univpll1_d4", + "syspll2_d2", + "syspll_d7", + "univpll_d7", + "vencpll_d2" +}; + +static const char * const msdc30_3_parents[] __initconst = { + "clk26m", + "msdcpll2_ck", + "msdcpll2_d2", + "univpll2_d2", + "msdcpll2_d4", + "msdcpll_d4", + "univpll1_d4", + "syspll2_d2", + "syspll_d7", + "univpll_d7", + "vencpll_d4", + "msdcpll_ck", + "msdcpll_d2", + "msdcpll_d4" +}; + +static const char * const audio_parents[] __initconst = { + "clk26m", + "syspll3_d4", + "syspll4_d4", + "syspll1_d16" +}; + +static const char * const aud_intbus_parents[] __initconst = { + "clk26m", + "syspll1_d4", + "syspll4_d2", + "univpll3_d2", + "univpll2_d8", + "dmpll_d4", + "dmpll_d8" +}; + +static const char * const pmicspi_parents[] __initconst = { + "clk26m", + "syspll1_d8", + "syspll3_d4", + "syspll1_d16", + "univpll3_d4", + "univpll_d26", + "dmpll_d8", + "dmpll_d16" +}; + +static const char * const scp_parents[] __initconst = { + "clk26m", + "syspll1_d2", + "univpll_d5", + "syspll_d5", + "dmpll_d2", + "dmpll_d4" +}; + +static const char * const atb_parents[] __initconst = { + "clk26m", + "syspll1_d2", + "univpll_d5", + "dmpll_d2" +}; + +static 
const char * const venc_lt_parents[] __initconst = { + "clk26m", + "univpll_d3", + "vcodecpll_ck", + "tvdpll_445p5m", + "vencpll_d2", + "syspll_d3", + "univpll1_d2", + "univpll2_d2", + "syspll1_d2", + "univpll_d5", + "vcodecpll_370p5", + "dmpll_ck" +}; + +static const char * const dpi0_parents[] __initconst = { + "clk26m", + "tvdpll_d2", + "tvdpll_d4", + "clk26m", + "clk26m", + "tvdpll_d8", + "tvdpll_d16" +}; + +static const char * const irda_parents[] __initconst = { + "clk26m", + "univpll2_d4", + "syspll2_d4" +}; + +static const char * const cci400_parents[] __initconst = { + "clk26m", + "vencpll_ck", + "armca7pll_754m", + "armca7pll_502m", + "univpll_d2", + "syspll_d2", + "msdcpll_ck", + "dmpll_ck" +}; + +static const char * const aud_1_parents[] __initconst = { + "clk26m", + "apll1_ck", + "univpll2_d4", + "univpll2_d8" +}; + +static const char * const aud_2_parents[] __initconst = { + "clk26m", + "apll2_ck", + "univpll2_d4", + "univpll2_d8" +}; + +static const char * const mem_mfg_in_parents[] __initconst = { + "clk26m", + "mmpll_ck", + "dmpll_ck", + "clk26m" +}; + +static const char * const axi_mfg_in_parents[] __initconst = { + "clk26m", + "axi_sel", + "dmpll_d2" +}; + +static const char * const scam_parents[] __initconst = { + "clk26m", + "syspll3_d2", + "univpll2_d4", + "dmpll_d4" +}; + +static const char * const spinfi_ifr_parents[] __initconst = { + "clk26m", + "univpll2_d8", + "univpll3_d4", + "syspll4_d2", + "univpll2_d4", + "univpll3_d2", + "syspll1_d4", + "univpll1_d4" +}; + +static const char * const hdmi_parents[] __initconst = { + "clk26m", + "hdmitx_dig_cts", + "hdmitxpll_d2", + "hdmitxpll_d3" +}; + +static const char * const dpilvds_parents[] __initconst = { + "clk26m", + "lvdspll", + "lvdspll_d2", + "lvdspll_d4", + "lvdspll_d8", + "fpc_ck" +}; + +static const char * const msdc50_2_h_parents[] __initconst = { + "clk26m", + "syspll1_d2", + "syspll2_d2", + "syspll4_d2", + "univpll_d5", + "univpll1_d4" +}; + +static const char * const hdcp_parents[] __initconst = { + "clk26m", + "syspll4_d2", + "syspll3_d4", + "univpll2_d4" +}; + +static const char * const hdcp_24m_parents[] __initconst = { + "clk26m", + "univpll_d26", + "univpll_d52", + "univpll2_d8" +}; + +static const char * const rtc_parents[] __initconst = { + "clkrtc_int", + "clkrtc_ext", + "clk26m", + "univpll3_d8" +}; + +static const char * const i2s0_m_ck_parents[] __initconst = { + "apll1_div1", + "apll2_div1" +}; + +static const char * const i2s1_m_ck_parents[] __initconst = { + "apll1_div2", + "apll2_div2" +}; + +static const char * const i2s2_m_ck_parents[] __initconst = { + "apll1_div3", + "apll2_div3" +}; + +static const char * const i2s3_m_ck_parents[] __initconst = { + "apll1_div4", + "apll2_div4" +}; + +static const char * const i2s3_b_ck_parents[] __initconst = { + "apll1_div5", + "apll2_div5" +}; + +static const struct mtk_composite top_muxes[] __initconst = { + /* CLK_CFG_0 */ + MUX(CLK_TOP_AXI_SEL, "axi_sel", axi_parents, 0x0040, 0, 3), + MUX(CLK_TOP_MEM_SEL, "mem_sel", mem_parents, 0x0040, 8, 1), + MUX_GATE(CLK_TOP_DDRPHYCFG_SEL, "ddrphycfg_sel", ddrphycfg_parents, 0x0040, 16, 1, 23), + MUX_GATE(CLK_TOP_MM_SEL, "mm_sel", mm_parents, 0x0040, 24, 4, 31), + /* CLK_CFG_1 */ + MUX_GATE(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents, 0x0050, 0, 2, 7), + MUX_GATE(CLK_TOP_VDEC_SEL, "vdec_sel", vdec_parents, 0x0050, 8, 4, 15), + MUX_GATE(CLK_TOP_VENC_SEL, "venc_sel", venc_parents, 0x0050, 16, 4, 23), + MUX_GATE(CLK_TOP_MFG_SEL, "mfg_sel", mfg_parents, 0x0050, 24, 4, 31), + /* CLK_CFG_2 */ + 
MUX_GATE(CLK_TOP_CAMTG_SEL, "camtg_sel", camtg_parents, 0x0060, 0, 3, 7), + MUX_GATE(CLK_TOP_UART_SEL, "uart_sel", uart_parents, 0x0060, 8, 1, 15), + MUX_GATE(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, 0x0060, 16, 3, 23), + MUX_GATE(CLK_TOP_USB20_SEL, "usb20_sel", usb20_parents, 0x0060, 24, 2, 31), + /* CLK_CFG_3 */ + MUX_GATE(CLK_TOP_USB30_SEL, "usb30_sel", usb30_parents, 0x0070, 0, 2, 7), + MUX_GATE(CLK_TOP_MSDC50_0_H_SEL, "msdc50_0_h_sel", msdc50_0_h_parents, 0x0070, 8, 3, 15), + MUX_GATE(CLK_TOP_MSDC50_0_SEL, "msdc50_0_sel", msdc50_0_parents, 0x0070, 16, 4, 23), + MUX_GATE(CLK_TOP_MSDC30_1_SEL, "msdc30_1_sel", msdc30_1_parents, 0x0070, 24, 3, 31), + /* CLK_CFG_4 */ + MUX_GATE(CLK_TOP_MSDC30_2_SEL, "msdc30_2_sel", msdc30_2_parents, 0x0080, 0, 3, 7), + MUX_GATE(CLK_TOP_MSDC30_3_SEL, "msdc30_3_sel", msdc30_3_parents, 0x0080, 8, 4, 15), + MUX_GATE(CLK_TOP_AUDIO_SEL, "audio_sel", audio_parents, 0x0080, 16, 2, 23), + MUX_GATE(CLK_TOP_AUD_INTBUS_SEL, "aud_intbus_sel", aud_intbus_parents, 0x0080, 24, 3, 31), + /* CLK_CFG_5 */ + MUX_GATE(CLK_TOP_PMICSPI_SEL, "pmicspi_sel", pmicspi_parents, 0x0090, 0, 3, 7 /* 7:5 */), + MUX_GATE(CLK_TOP_SCP_SEL, "scp_sel", scp_parents, 0x0090, 8, 3, 15), + MUX_GATE(CLK_TOP_ATB_SEL, "atb_sel", atb_parents, 0x0090, 16, 2, 23), + MUX_GATE(CLK_TOP_VENC_LT_SEL, "venclt_sel", venc_lt_parents, 0x0090, 24, 4, 31), + /* CLK_CFG_6 */ + MUX_GATE(CLK_TOP_DPI0_SEL, "dpi0_sel", dpi0_parents, 0x00a0, 0, 3, 7), + MUX_GATE(CLK_TOP_IRDA_SEL, "irda_sel", irda_parents, 0x00a0, 8, 2, 15), + MUX_GATE(CLK_TOP_CCI400_SEL, "cci400_sel", cci400_parents, 0x00a0, 16, 3, 23), + MUX_GATE(CLK_TOP_AUD_1_SEL, "aud_1_sel", aud_1_parents, 0x00a0, 24, 2, 31), + /* CLK_CFG_7 */ + MUX_GATE(CLK_TOP_AUD_2_SEL, "aud_2_sel", aud_2_parents, 0x00b0, 0, 2, 7), + MUX_GATE(CLK_TOP_MEM_MFG_IN_SEL, "mem_mfg_in_sel", mem_mfg_in_parents, 0x00b0, 8, 2, 15), + MUX_GATE(CLK_TOP_AXI_MFG_IN_SEL, "axi_mfg_in_sel", axi_mfg_in_parents, 0x00b0, 16, 2, 23), + MUX_GATE(CLK_TOP_SCAM_SEL, "scam_sel", scam_parents, 0x00b0, 24, 2, 31), + /* CLK_CFG_12 */ + MUX_GATE(CLK_TOP_SPINFI_IFR_SEL, "spinfi_ifr_sel", spinfi_ifr_parents, 0x00c0, 0, 3, 7), + MUX_GATE(CLK_TOP_HDMI_SEL, "hdmi_sel", hdmi_parents, 0x00c0, 8, 2, 15), + MUX_GATE(CLK_TOP_DPILVDS_SEL, "dpilvds_sel", dpilvds_parents, 0x00c0, 24, 3, 31), + /* CLK_CFG_13 */ + MUX_GATE(CLK_TOP_MSDC50_2_H_SEL, "msdc50_2_h_sel", msdc50_2_h_parents, 0x00d0, 0, 3, 7), + MUX_GATE(CLK_TOP_HDCP_SEL, "hdcp_sel", hdcp_parents, 0x00d0, 8, 2, 15), + MUX_GATE(CLK_TOP_HDCP_24M_SEL, "hdcp_24m_sel", hdcp_24m_parents, 0x00d0, 16, 2, 23), + MUX(CLK_TOP_RTC_SEL, "rtc_sel", rtc_parents, 0x00d0, 24, 2), + + DIV_GATE(CLK_TOP_APLL1_DIV0, "apll1_div0", "aud_1_sel", 0x12c, 8, 0x120, 4, 24), + DIV_GATE(CLK_TOP_APLL1_DIV1, "apll1_div1", "aud_1_sel", 0x12c, 9, 0x124, 8, 0), + DIV_GATE(CLK_TOP_APLL1_DIV2, "apll1_div2", "aud_1_sel", 0x12c, 10, 0x124, 8, 8), + DIV_GATE(CLK_TOP_APLL1_DIV3, "apll1_div3", "aud_1_sel", 0x12c, 11, 0x124, 8, 16), + DIV_GATE(CLK_TOP_APLL1_DIV4, "apll1_div4", "aud_1_sel", 0x12c, 12, 0x124, 8, 24), + DIV_GATE(CLK_TOP_APLL1_DIV5, "apll1_div5", "apll1_div4", 0x12c, 13, 0x12c, 4, 0), + + DIV_GATE(CLK_TOP_APLL2_DIV0, "apll2_div0", "aud_2_sel", 0x12c, 16, 0x120, 4, 28), + DIV_GATE(CLK_TOP_APLL2_DIV1, "apll2_div1", "aud_2_sel", 0x12c, 17, 0x128, 8, 0), + DIV_GATE(CLK_TOP_APLL2_DIV2, "apll2_div2", "aud_2_sel", 0x12c, 18, 0x128, 8, 8), + DIV_GATE(CLK_TOP_APLL2_DIV3, "apll2_div3", "aud_2_sel", 0x12c, 19, 0x128, 8, 16), + DIV_GATE(CLK_TOP_APLL2_DIV4, "apll2_div4", "aud_2_sel", 0x12c, 20, 0x128, 8, 
24), + DIV_GATE(CLK_TOP_APLL2_DIV5, "apll2_div5", "apll2_div4", 0x12c, 21, 0x12c, 4, 4), + + MUX(CLK_TOP_I2S0_M_SEL, "i2s0_m_ck_sel", i2s0_m_ck_parents, 0x120, 4, 1), + MUX(CLK_TOP_I2S1_M_SEL, "i2s1_m_ck_sel", i2s1_m_ck_parents, 0x120, 5, 1), + MUX(CLK_TOP_I2S2_M_SEL, "i2s2_m_ck_sel", i2s2_m_ck_parents, 0x120, 6, 1), + MUX(CLK_TOP_I2S3_M_SEL, "i2s3_m_ck_sel", i2s3_m_ck_parents, 0x120, 7, 1), + MUX(CLK_TOP_I2S3_B_SEL, "i2s3_b_ck_sel", i2s3_b_ck_parents, 0x120, 8, 1), +}; + +static const struct mtk_gate_regs infra_cg_regs = { + .set_ofs = 0x0040, + .clr_ofs = 0x0044, + .sta_ofs = 0x0048, +}; + +#define GATE_ICG(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &infra_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +static const struct mtk_gate infra_clks[] __initconst = { + GATE_ICG(CLK_INFRA_DBGCLK, "infra_dbgclk", "axi_sel", 0), + GATE_ICG(CLK_INFRA_SMI, "infra_smi", "mm_sel", 1), + GATE_ICG(CLK_INFRA_AUDIO, "infra_audio", "aud_intbus_sel", 5), + GATE_ICG(CLK_INFRA_GCE, "infra_gce", "axi_sel", 6), + GATE_ICG(CLK_INFRA_L2C_SRAM, "infra_l2c_sram", "axi_sel", 7), + GATE_ICG(CLK_INFRA_M4U, "infra_m4u", "mem_sel", 8), + GATE_ICG(CLK_INFRA_CPUM, "infra_cpum", "clk_null", 15), + GATE_ICG(CLK_INFRA_KP, "infra_kp", "axi_sel", 16), + GATE_ICG(CLK_INFRA_CEC, "infra_cec", "clk26m", 18), + GATE_ICG(CLK_INFRA_PMICSPI, "infra_pmicspi", "pmicspi_sel", 22), + GATE_ICG(CLK_INFRA_PMICWRAP, "infra_pmicwrap", "axi_sel", 23), +}; + +static const struct mtk_gate_regs peri0_cg_regs = { + .set_ofs = 0x0008, + .clr_ofs = 0x0010, + .sta_ofs = 0x0018, +}; + +static const struct mtk_gate_regs peri1_cg_regs = { + .set_ofs = 0x000c, + .clr_ofs = 0x0014, + .sta_ofs = 0x001c, +}; + +#define GATE_PERI0(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &peri0_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +#define GATE_PERI1(_id, _name, _parent, _shift) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .regs = &peri1_cg_regs, \ + .shift = _shift, \ + .ops = &mtk_clk_gate_ops_setclr, \ + } + +static const struct mtk_gate peri_gates[] __initconst = { + /* PERI0 */ + GATE_PERI0(CLK_PERI_NFI, "peri_nfi", "axi_sel", 0), + GATE_PERI0(CLK_PERI_THERM, "peri_therm", "axi_sel", 1), + GATE_PERI0(CLK_PERI_PWM1, "peri_pwm1", "axi_sel", 2), + GATE_PERI0(CLK_PERI_PWM2, "peri_pwm2", "axi_sel", 3), + GATE_PERI0(CLK_PERI_PWM3, "peri_pwm3", "axi_sel", 4), + GATE_PERI0(CLK_PERI_PWM4, "peri_pwm4", "axi_sel", 5), + GATE_PERI0(CLK_PERI_PWM5, "peri_pwm5", "axi_sel", 6), + GATE_PERI0(CLK_PERI_PWM6, "peri_pwm6", "axi_sel", 7), + GATE_PERI0(CLK_PERI_PWM7, "peri_pwm7", "axi_sel", 8), + GATE_PERI0(CLK_PERI_PWM, "peri_pwm", "axi_sel", 9), + GATE_PERI0(CLK_PERI_USB0, "peri_usb0", "usb20_sel", 10), + GATE_PERI0(CLK_PERI_USB1, "peri_usb1", "usb20_sel", 11), + GATE_PERI0(CLK_PERI_AP_DMA, "peri_ap_dma", "axi_sel", 12), + GATE_PERI0(CLK_PERI_MSDC30_0, "peri_msdc30_0", "msdc50_0_sel", 13), + GATE_PERI0(CLK_PERI_MSDC30_1, "peri_msdc30_1", "msdc30_1_sel", 14), + GATE_PERI0(CLK_PERI_MSDC30_2, "peri_msdc30_2", "msdc30_2_sel", 15), + GATE_PERI0(CLK_PERI_MSDC30_3, "peri_msdc30_3", "msdc30_3_sel", 16), + GATE_PERI0(CLK_PERI_NLI_ARB, "peri_nli_arb", "axi_sel", 17), + GATE_PERI0(CLK_PERI_IRDA, "peri_irda", "irda_sel", 18), + GATE_PERI0(CLK_PERI_UART0, "peri_uart0", "axi_sel", 19), + GATE_PERI0(CLK_PERI_UART1, "peri_uart1", "axi_sel", 20), + GATE_PERI0(CLK_PERI_UART2, "peri_uart2", 
"axi_sel", 21), + GATE_PERI0(CLK_PERI_UART3, "peri_uart3", "axi_sel", 22), + GATE_PERI0(CLK_PERI_I2C0, "peri_i2c0", "axi_sel", 23), + GATE_PERI0(CLK_PERI_I2C1, "peri_i2c1", "axi_sel", 24), + GATE_PERI0(CLK_PERI_I2C2, "peri_i2c2", "axi_sel", 25), + GATE_PERI0(CLK_PERI_I2C3, "peri_i2c3", "axi_sel", 26), + GATE_PERI0(CLK_PERI_I2C4, "peri_i2c4", "axi_sel", 27), + GATE_PERI0(CLK_PERI_AUXADC, "peri_auxadc", "clk26m", 28), + GATE_PERI0(CLK_PERI_SPI0, "peri_spi0", "spi_sel", 29), + GATE_PERI0(CLK_PERI_I2C5, "peri_i2c5", "axi_sel", 30), + GATE_PERI0(CLK_PERI_NFIECC, "peri_nfiecc", "axi_sel", 31), + /* PERI1 */ + GATE_PERI1(CLK_PERI_SPI, "peri_spi", "spi_sel", 0), + GATE_PERI1(CLK_PERI_IRRX, "peri_irrx", "spi_sel", 1), + GATE_PERI1(CLK_PERI_I2C6, "peri_i2c6", "axi_sel", 2), +}; + +static const char * const uart_ck_sel_parents[] __initconst = { + "clk26m", + "uart_sel", +}; + +static const struct mtk_composite peri_clks[] __initconst = { + MUX(CLK_PERI_UART0_SEL, "uart0_ck_sel", uart_ck_sel_parents, 0x40c, 0, 1), + MUX(CLK_PERI_UART1_SEL, "uart1_ck_sel", uart_ck_sel_parents, 0x40c, 1, 1), + MUX(CLK_PERI_UART2_SEL, "uart2_ck_sel", uart_ck_sel_parents, 0x40c, 2, 1), + MUX(CLK_PERI_UART3_SEL, "uart3_ck_sel", uart_ck_sel_parents, 0x40c, 3, 1), +}; + +static void __init mtk_topckgen_init(struct device_node *node) +{ + struct clk_onecell_data *clk_data; + void __iomem *base; + int r; + + base = of_iomap(node, 0); + if (!base) { + pr_err("%s(): ioremap failed\n", __func__); + return; + } + + clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK); + + mtk_clk_register_factors(root_clk_alias, ARRAY_SIZE(root_clk_alias), clk_data); + mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data); + mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base, + &mt8173_clk_lock, clk_data); + + clk_prepare_enable(clk_data->clks[CLK_TOP_CCI400_SEL]); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); +} +CLK_OF_DECLARE(mtk_topckgen, "mediatek,mt8173-topckgen", mtk_topckgen_init); + +static void __init mtk_infrasys_init(struct device_node *node) +{ + struct clk_onecell_data *clk_data; + int r; + + clk_data = mtk_alloc_clk_data(CLK_INFRA_NR_CLK); + + mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks), + clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); + + mtk_register_reset_controller(node, 2, 0x30); +} +CLK_OF_DECLARE(mtk_infrasys, "mediatek,mt8173-infracfg", mtk_infrasys_init); + +static void __init mtk_pericfg_init(struct device_node *node) +{ + struct clk_onecell_data *clk_data; + int r; + void __iomem *base; + + base = of_iomap(node, 0); + if (!base) { + pr_err("%s(): ioremap failed\n", __func__); + return; + } + + clk_data = mtk_alloc_clk_data(CLK_PERI_NR_CLK); + + mtk_clk_register_gates(node, peri_gates, ARRAY_SIZE(peri_gates), + clk_data); + mtk_clk_register_composites(peri_clks, ARRAY_SIZE(peri_clks), base, + &mt8173_clk_lock, clk_data); + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); + + mtk_register_reset_controller(node, 2, 0); +} +CLK_OF_DECLARE(mtk_pericfg, "mediatek,mt8173-pericfg", mtk_pericfg_init); + +#define MT8173_PLL_FMAX (3000UL * MHZ) + +#define CON0_MT8173_RST_BAR BIT(24) + +#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, _pd_reg, _pd_shift, \ 
+ _tuner_reg, _pcw_reg, _pcw_shift) { \ + .id = _id, \ + .name = _name, \ + .reg = _reg, \ + .pwr_reg = _pwr_reg, \ + .en_mask = _en_mask, \ + .flags = _flags, \ + .rst_bar_mask = CON0_MT8173_RST_BAR, \ + .fmax = MT8173_PLL_FMAX, \ + .pcwbits = _pcwbits, \ + .pd_reg = _pd_reg, \ + .pd_shift = _pd_shift, \ + .tuner_reg = _tuner_reg, \ + .pcw_reg = _pcw_reg, \ + .pcw_shift = _pcw_shift, \ + } + +static const struct mtk_pll_data plls[] = { + PLL(CLK_APMIXED_ARMCA15PLL, "armca15pll", 0x200, 0x20c, 0x00000001, 0, 21, 0x204, 24, 0x0, 0x204, 0), + PLL(CLK_APMIXED_ARMCA7PLL, "armca7pll", 0x210, 0x21c, 0x00000001, 0, 21, 0x214, 24, 0x0, 0x214, 0), + PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x220, 0x22c, 0xf0000101, HAVE_RST_BAR, 21, 0x220, 4, 0x0, 0x224, 0), + PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x230, 0x23c, 0xfe000001, HAVE_RST_BAR, 7, 0x230, 4, 0x0, 0x234, 14), + PLL(CLK_APMIXED_MMPLL, "mmpll", 0x240, 0x24c, 0x00000001, 0, 21, 0x244, 24, 0x0, 0x244, 0), + PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x250, 0x25c, 0x00000001, 0, 21, 0x250, 4, 0x0, 0x254, 0), + PLL(CLK_APMIXED_VENCPLL, "vencpll", 0x260, 0x26c, 0x00000001, 0, 21, 0x260, 4, 0x0, 0x264, 0), + PLL(CLK_APMIXED_TVDPLL, "tvdpll", 0x270, 0x27c, 0x00000001, 0, 21, 0x270, 4, 0x0, 0x274, 0), + PLL(CLK_APMIXED_MPLL, "mpll", 0x280, 0x28c, 0x00000001, 0, 21, 0x280, 4, 0x0, 0x284, 0), + PLL(CLK_APMIXED_VCODECPLL, "vcodecpll", 0x290, 0x29c, 0x00000001, 0, 21, 0x290, 4, 0x0, 0x294, 0), + PLL(CLK_APMIXED_APLL1, "apll1", 0x2a0, 0x2b0, 0x00000001, 0, 31, 0x2a0, 4, 0x2a4, 0x2a4, 0), + PLL(CLK_APMIXED_APLL2, "apll2", 0x2b4, 0x2c4, 0x00000001, 0, 31, 0x2b4, 4, 0x2b8, 0x2b8, 0), + PLL(CLK_APMIXED_LVDSPLL, "lvdspll", 0x2d0, 0x2dc, 0x00000001, 0, 21, 0x2d0, 4, 0x0, 0x2d4, 0), + PLL(CLK_APMIXED_MSDCPLL2, "msdcpll2", 0x2f0, 0x2fc, 0x00000001, 0, 21, 0x2f0, 4, 0x0, 0x2f4, 0), +}; + +static void __init mtk_apmixedsys_init(struct device_node *node) +{ + struct clk_onecell_data *clk_data; + + clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK); + if (!clk_data) + return; + + mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data); + + clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMCA15PLL]); +} +CLK_OF_DECLARE(mtk_apmixedsys, "mediatek,mt8173-apmixedsys", + mtk_apmixedsys_init); diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c new file mode 100644 index 000000000000..18444aea63c9 --- /dev/null +++ b/drivers/clk/mediatek/clk-mtk.c @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: James Liao <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/clkdev.h> +#include <linux/mfd/syscon.h> + +#include "clk-mtk.h" +#include "clk-gate.h" + +struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num) +{ + int i; + struct clk_onecell_data *clk_data; + + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + return NULL; + + clk_data->clks = kcalloc(clk_num, sizeof(*clk_data->clks), GFP_KERNEL); + if (!clk_data->clks) + goto err_out; + + clk_data->clk_num = clk_num; + + for (i = 0; i < clk_num; i++) + clk_data->clks[i] = ERR_PTR(-ENOENT); + + return clk_data; +err_out: + kfree(clk_data); + + return NULL; +} + +void mtk_clk_register_factors(const struct mtk_fixed_factor *clks, int num, + struct clk_onecell_data *clk_data) +{ + int i; + struct clk *clk; + + for (i = 0; i < num; i++) { + const struct mtk_fixed_factor *ff = &clks[i]; + + clk = clk_register_fixed_factor(NULL, ff->name, ff->parent_name, + CLK_SET_RATE_PARENT, ff->mult, ff->div); + + if (IS_ERR(clk)) { + pr_err("Failed to register clk %s: %ld\n", + ff->name, PTR_ERR(clk)); + continue; + } + + if (clk_data) + clk_data->clks[ff->id] = clk; + } +} + +int mtk_clk_register_gates(struct device_node *node, const struct mtk_gate *clks, + int num, struct clk_onecell_data *clk_data) +{ + int i; + struct clk *clk; + struct regmap *regmap; + + if (!clk_data) + return -ENOMEM; + + regmap = syscon_node_to_regmap(node); + if (IS_ERR(regmap)) { + pr_err("Cannot find regmap for %s: %ld\n", node->full_name, + PTR_ERR(regmap)); + return PTR_ERR(regmap); + } + + for (i = 0; i < num; i++) { + const struct mtk_gate *gate = &clks[i]; + + clk = mtk_clk_register_gate(gate->name, gate->parent_name, + regmap, + gate->regs->set_ofs, + gate->regs->clr_ofs, + gate->regs->sta_ofs, + gate->shift, gate->ops); + + if (IS_ERR(clk)) { + pr_err("Failed to register clk %s: %ld\n", + gate->name, PTR_ERR(clk)); + continue; + } + + clk_data->clks[gate->id] = clk; + } + + return 0; +} + +struct clk *mtk_clk_register_composite(const struct mtk_composite *mc, + void __iomem *base, spinlock_t *lock) +{ + struct clk *clk; + struct clk_mux *mux = NULL; + struct clk_gate *gate = NULL; + struct clk_divider *div = NULL; + struct clk_hw *mux_hw = NULL, *gate_hw = NULL, *div_hw = NULL; + const struct clk_ops *mux_ops = NULL, *gate_ops = NULL, *div_ops = NULL; + const char * const *parent_names; + const char *parent; + int num_parents; + int ret; + + if (mc->mux_shift >= 0) { + mux = kzalloc(sizeof(*mux), GFP_KERNEL); + if (!mux) + return ERR_PTR(-ENOMEM); + + mux->reg = base + mc->mux_reg; + mux->mask = BIT(mc->mux_width) - 1; + mux->shift = mc->mux_shift; + mux->lock = lock; + + mux_hw = &mux->hw; + mux_ops = &clk_mux_ops; + + parent_names = mc->parent_names; + num_parents = mc->num_parents; + } else { + parent = mc->parent; + parent_names = &parent; + num_parents = 1; + } + + if (mc->gate_shift >= 0) { + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) { + ret = -ENOMEM; + goto err_out; + } + + gate->reg = base + mc->gate_reg; + gate->bit_idx = mc->gate_shift; + gate->flags = CLK_GATE_SET_TO_DISABLE; + gate->lock = lock; + + gate_hw = &gate->hw; + gate_ops = &clk_gate_ops; + } + + if (mc->divider_shift >= 0) { + div = kzalloc(sizeof(*div), GFP_KERNEL); + if (!div) { + ret = -ENOMEM; + goto err_out; + } + + div->reg = base + mc->divider_reg; + div->shift = mc->divider_shift; + div->width = mc->divider_width; + div->lock = lock; + 
+ div_hw = &div->hw; + div_ops = &clk_divider_ops; + } + + clk = clk_register_composite(NULL, mc->name, parent_names, num_parents, + mux_hw, mux_ops, + div_hw, div_ops, + gate_hw, gate_ops, + mc->flags); + + if (IS_ERR(clk)) { + kfree(gate); + kfree(mux); + } + + return clk; +err_out: + kfree(mux); + + return ERR_PTR(ret); +} + +void mtk_clk_register_composites(const struct mtk_composite *mcs, + int num, void __iomem *base, spinlock_t *lock, + struct clk_onecell_data *clk_data) +{ + struct clk *clk; + int i; + + for (i = 0; i < num; i++) { + const struct mtk_composite *mc = &mcs[i]; + + clk = mtk_clk_register_composite(mc, base, lock); + + if (IS_ERR(clk)) { + pr_err("Failed to register clk %s: %ld\n", + mc->name, PTR_ERR(clk)); + continue; + } + + if (clk_data) + clk_data->clks[mc->id] = clk; + } +} diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h new file mode 100644 index 000000000000..9dda9d8ad10b --- /dev/null +++ b/drivers/clk/mediatek/clk-mtk.h @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: James Liao <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __DRV_CLK_MTK_H +#define __DRV_CLK_MTK_H + +#include <linux/regmap.h> +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> + +#define MAX_MUX_GATE_BIT 31 +#define INVALID_MUX_GATE_BIT (MAX_MUX_GATE_BIT + 1) + +#define MHZ (1000 * 1000) + +struct mtk_fixed_factor { + int id; + const char *name; + const char *parent_name; + int mult; + int div; +}; + +#define FACTOR(_id, _name, _parent, _mult, _div) { \ + .id = _id, \ + .name = _name, \ + .parent_name = _parent, \ + .mult = _mult, \ + .div = _div, \ + } + +extern void mtk_clk_register_factors(const struct mtk_fixed_factor *clks, + int num, struct clk_onecell_data *clk_data); + +struct mtk_composite { + int id; + const char *name; + const char * const *parent_names; + const char *parent; + unsigned flags; + + uint32_t mux_reg; + uint32_t divider_reg; + uint32_t gate_reg; + + signed char mux_shift; + signed char mux_width; + signed char gate_shift; + + signed char divider_shift; + signed char divider_width; + + signed char num_parents; +}; + +#define MUX_GATE(_id, _name, _parents, _reg, _shift, _width, _gate) { \ + .id = _id, \ + .name = _name, \ + .mux_reg = _reg, \ + .mux_shift = _shift, \ + .mux_width = _width, \ + .gate_reg = _reg, \ + .gate_shift = _gate, \ + .divider_shift = -1, \ + .parent_names = _parents, \ + .num_parents = ARRAY_SIZE(_parents), \ + .flags = CLK_SET_RATE_PARENT, \ + } + +#define MUX(_id, _name, _parents, _reg, _shift, _width) { \ + .id = _id, \ + .name = _name, \ + .mux_reg = _reg, \ + .mux_shift = _shift, \ + .mux_width = _width, \ + .gate_shift = -1, \ + .divider_shift = -1, \ + .parent_names = _parents, \ + .num_parents = ARRAY_SIZE(_parents), \ + .flags = CLK_SET_RATE_PARENT, \ + } + +#define DIV_GATE(_id, _name, _parent, _gate_reg, _gate_shift, _div_reg, _div_width, _div_shift) { \ + .id = _id, \ + .parent = _parent, \ + .name = _name, \ + .divider_reg = _div_reg, \ + .divider_shift = _div_shift, \ + .divider_width = _div_width, \ + .gate_reg = 
_gate_reg, \ + .gate_shift = _gate_shift, \ + .mux_shift = -1, \ + .flags = 0, \ + } + +struct clk *mtk_clk_register_composite(const struct mtk_composite *mc, + void __iomem *base, spinlock_t *lock); + +void mtk_clk_register_composites(const struct mtk_composite *mcs, + int num, void __iomem *base, spinlock_t *lock, + struct clk_onecell_data *clk_data); + +struct mtk_gate_regs { + u32 sta_ofs; + u32 clr_ofs; + u32 set_ofs; +}; + +struct mtk_gate { + int id; + const char *name; + const char *parent_name; + const struct mtk_gate_regs *regs; + int shift; + const struct clk_ops *ops; +}; + +int mtk_clk_register_gates(struct device_node *node, const struct mtk_gate *clks, + int num, struct clk_onecell_data *clk_data); + +struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num); + +#define HAVE_RST_BAR BIT(0) + +struct mtk_pll_data { + int id; + const char *name; + uint32_t reg; + uint32_t pwr_reg; + uint32_t en_mask; + uint32_t pd_reg; + uint32_t tuner_reg; + int pd_shift; + unsigned int flags; + const struct clk_ops *ops; + u32 rst_bar_mask; + unsigned long fmax; + int pcwbits; + uint32_t pcw_reg; + int pcw_shift; +}; + +void __init mtk_clk_register_plls(struct device_node *node, + const struct mtk_pll_data *plls, int num_plls, + struct clk_onecell_data *clk_data); + +#ifdef CONFIG_RESET_CONTROLLER +void mtk_register_reset_controller(struct device_node *np, + unsigned int num_regs, int regofs); +#else +static inline void mtk_register_reset_controller(struct device_node *np, + unsigned int num_regs, int regofs) +{ +} +#endif + +#endif /* __DRV_CLK_MTK_H */ diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c new file mode 100644 index 000000000000..44409e98c52f --- /dev/null +++ b/drivers/clk/mediatek/clk-pll.c @@ -0,0 +1,332 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: James Liao <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/clkdev.h> +#include <linux/delay.h> + +#include "clk-mtk.h" + +#define REG_CON0 0 +#define REG_CON1 4 + +#define CON0_BASE_EN BIT(0) +#define CON0_PWR_ON BIT(0) +#define CON0_ISO_EN BIT(1) +#define CON0_PCW_CHG BIT(31) + +#define AUDPLL_TUNER_EN BIT(31) + +#define POSTDIV_MASK 0x7 +#define INTEGER_BITS 7 + +/* + * MediaTek PLLs are configured through their pcw value. The pcw value describes + * a divider in the PLL feedback loop which consists of 7 bits for the integer + * part and the remaining bits (if present) for the fractional part. Also they + * have a 3 bit power-of-two post divider. 
+ */ + +struct mtk_clk_pll { + struct clk_hw hw; + void __iomem *base_addr; + void __iomem *pd_addr; + void __iomem *pwr_addr; + void __iomem *tuner_addr; + void __iomem *pcw_addr; + const struct mtk_pll_data *data; +}; + +static inline struct mtk_clk_pll *to_mtk_clk_pll(struct clk_hw *hw) +{ + return container_of(hw, struct mtk_clk_pll, hw); +} + +static int mtk_pll_is_prepared(struct clk_hw *hw) +{ + struct mtk_clk_pll *pll = to_mtk_clk_pll(hw); + + return (readl(pll->base_addr + REG_CON0) & CON0_BASE_EN) != 0; +} + +static unsigned long __mtk_pll_recalc_rate(struct mtk_clk_pll *pll, u32 fin, + u32 pcw, int postdiv) +{ + int pcwbits = pll->data->pcwbits; + int pcwfbits; + u64 vco; + u8 c = 0; + + /* The fractional part of the PLL divider. */ + pcwfbits = pcwbits > INTEGER_BITS ? pcwbits - INTEGER_BITS : 0; + + vco = (u64)fin * pcw; + + if (pcwfbits && (vco & GENMASK(pcwfbits - 1, 0))) + c = 1; + + vco >>= pcwfbits; + + if (c) + vco++; + + return ((unsigned long)vco + postdiv - 1) / postdiv; +} + +static void mtk_pll_set_rate_regs(struct mtk_clk_pll *pll, u32 pcw, + int postdiv) +{ + u32 con1, pd, val; + int pll_en; + + /* set postdiv */ + pd = readl(pll->pd_addr); + pd &= ~(POSTDIV_MASK << pll->data->pd_shift); + pd |= (ffs(postdiv) - 1) << pll->data->pd_shift; + writel(pd, pll->pd_addr); + + pll_en = readl(pll->base_addr + REG_CON0) & CON0_BASE_EN; + + /* set pcw */ + val = readl(pll->pcw_addr); + + val &= ~GENMASK(pll->data->pcw_shift + pll->data->pcwbits - 1, + pll->data->pcw_shift); + val |= pcw << pll->data->pcw_shift; + writel(val, pll->pcw_addr); + + con1 = readl(pll->base_addr + REG_CON1); + + if (pll_en) + con1 |= CON0_PCW_CHG; + + writel(con1, pll->base_addr + REG_CON1); + if (pll->tuner_addr) + writel(con1 + 1, pll->tuner_addr); + + if (pll_en) + udelay(20); +} + +/* + * mtk_pll_calc_values - calculate good values for a given input frequency. 
+ * @pll: The pll + * @pcw: The pcw value (output) + * @postdiv: The post divider (output) + * @freq: The desired target frequency + * @fin: The input frequency + * + */ +static void mtk_pll_calc_values(struct mtk_clk_pll *pll, u32 *pcw, u32 *postdiv, + u32 freq, u32 fin) +{ + unsigned long fmin = 1000 * MHZ; + u64 _pcw; + u32 val; + + if (freq > pll->data->fmax) + freq = pll->data->fmax; + + for (val = 0; val < 4; val++) { + *postdiv = 1 << val; + if (freq * *postdiv >= fmin) + break; + } + + /* _pcw = freq * postdiv / fin * 2^pcwfbits */ + _pcw = ((u64)freq << val) << (pll->data->pcwbits - INTEGER_BITS); + do_div(_pcw, fin); + + *pcw = (u32)_pcw; +} + +static int mtk_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct mtk_clk_pll *pll = to_mtk_clk_pll(hw); + u32 pcw = 0; + u32 postdiv; + + mtk_pll_calc_values(pll, &pcw, &postdiv, rate, parent_rate); + mtk_pll_set_rate_regs(pll, pcw, postdiv); + + return 0; +} + +static unsigned long mtk_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct mtk_clk_pll *pll = to_mtk_clk_pll(hw); + u32 postdiv; + u32 pcw; + + postdiv = (readl(pll->pd_addr) >> pll->data->pd_shift) & POSTDIV_MASK; + postdiv = 1 << postdiv; + + pcw = readl(pll->pcw_addr) >> pll->data->pcw_shift; + pcw &= GENMASK(pll->data->pcwbits - 1, 0); + + return __mtk_pll_recalc_rate(pll, parent_rate, pcw, postdiv); +} + +static long mtk_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct mtk_clk_pll *pll = to_mtk_clk_pll(hw); + u32 pcw = 0; + int postdiv; + + mtk_pll_calc_values(pll, &pcw, &postdiv, rate, *prate); + + return __mtk_pll_recalc_rate(pll, *prate, pcw, postdiv); +} + +static int mtk_pll_prepare(struct clk_hw *hw) +{ + struct mtk_clk_pll *pll = to_mtk_clk_pll(hw); + u32 r; + + r = readl(pll->pwr_addr) | CON0_PWR_ON; + writel(r, pll->pwr_addr); + udelay(1); + + r = readl(pll->pwr_addr) & ~CON0_ISO_EN; + writel(r, pll->pwr_addr); + udelay(1); + + r = readl(pll->base_addr + REG_CON0); + r |= pll->data->en_mask; + writel(r, pll->base_addr + REG_CON0); + + if (pll->tuner_addr) { + r = readl(pll->tuner_addr) | AUDPLL_TUNER_EN; + writel(r, pll->tuner_addr); + } + + udelay(20); + + if (pll->data->flags & HAVE_RST_BAR) { + r = readl(pll->base_addr + REG_CON0); + r |= pll->data->rst_bar_mask; + writel(r, pll->base_addr + REG_CON0); + } + + return 0; +} + +static void mtk_pll_unprepare(struct clk_hw *hw) +{ + struct mtk_clk_pll *pll = to_mtk_clk_pll(hw); + u32 r; + + if (pll->data->flags & HAVE_RST_BAR) { + r = readl(pll->base_addr + REG_CON0); + r &= ~pll->data->rst_bar_mask; + writel(r, pll->base_addr + REG_CON0); + } + + if (pll->tuner_addr) { + r = readl(pll->tuner_addr) & ~AUDPLL_TUNER_EN; + writel(r, pll->tuner_addr); + } + + r = readl(pll->base_addr + REG_CON0); + r &= ~CON0_BASE_EN; + writel(r, pll->base_addr + REG_CON0); + + r = readl(pll->pwr_addr) | CON0_ISO_EN; + writel(r, pll->pwr_addr); + + r = readl(pll->pwr_addr) & ~CON0_PWR_ON; + writel(r, pll->pwr_addr); +} + +static const struct clk_ops mtk_pll_ops = { + .is_prepared = mtk_pll_is_prepared, + .prepare = mtk_pll_prepare, + .unprepare = mtk_pll_unprepare, + .recalc_rate = mtk_pll_recalc_rate, + .round_rate = mtk_pll_round_rate, + .set_rate = mtk_pll_set_rate, +}; + +static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data, + void __iomem *base) +{ + struct mtk_clk_pll *pll; + struct clk_init_data init = {}; + struct clk *clk; + const char *parent_name = "clk26m"; + + pll = kzalloc(sizeof(*pll), GFP_KERNEL); 
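+ /*
+  * The remainder of this helper resolves the per-PLL register offsets from
+  * mtk_pll_data into iomem addresses and registers the clock against the
+  * fixed "clk26m" parent named above.
+  */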
+ if (!pll) + return ERR_PTR(-ENOMEM); + + pll->base_addr = base + data->reg; + pll->pwr_addr = base + data->pwr_reg; + pll->pd_addr = base + data->pd_reg; + pll->pcw_addr = base + data->pcw_reg; + if (data->tuner_reg) + pll->tuner_addr = base + data->tuner_reg; + pll->hw.init = &init; + pll->data = data; + + init.name = data->name; + init.ops = &mtk_pll_ops; + init.parent_names = &parent_name; + init.num_parents = 1; + + clk = clk_register(NULL, &pll->hw); + + if (IS_ERR(clk)) + kfree(pll); + + return clk; +} + +void __init mtk_clk_register_plls(struct device_node *node, + const struct mtk_pll_data *plls, int num_plls, struct clk_onecell_data *clk_data) +{ + void __iomem *base; + int r, i; + struct clk *clk; + + base = of_iomap(node, 0); + if (!base) { + pr_err("%s(): ioremap failed\n", __func__); + return; + } + + for (i = 0; i < num_plls; i++) { + const struct mtk_pll_data *pll = &plls[i]; + + clk = mtk_clk_register_pll(pll, base); + + if (IS_ERR(clk)) { + pr_err("Failed to register clk %s: %ld\n", + pll->name, PTR_ERR(clk)); + continue; + } + + clk_data->clks[pll->id] = clk; + } + + r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); + if (r) + pr_err("%s(): could not register clock provider: %d\n", + __func__, r); +} diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c new file mode 100644 index 000000000000..9e9fe4b19ac4 --- /dev/null +++ b/drivers/clk/mediatek/reset.c @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/reset-controller.h> +#include <linux/slab.h> + +#include "clk-mtk.h" + +struct mtk_reset { + struct regmap *regmap; + int regofs; + struct reset_controller_dev rcdev; +}; + +static int mtk_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev); + + return regmap_update_bits(data->regmap, data->regofs + ((id / 32) << 2), + BIT(id % 32), ~0); +} + +static int mtk_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct mtk_reset *data = container_of(rcdev, struct mtk_reset, rcdev); + + return regmap_update_bits(data->regmap, data->regofs + ((id / 32) << 2), + BIT(id % 32), 0); +} + +static int mtk_reset(struct reset_controller_dev *rcdev, + unsigned long id) +{ + int ret; + + ret = mtk_reset_assert(rcdev, id); + if (ret) + return ret; + + return mtk_reset_deassert(rcdev, id); +} + +static struct reset_control_ops mtk_reset_ops = { + .assert = mtk_reset_assert, + .deassert = mtk_reset_deassert, + .reset = mtk_reset, +}; + +void mtk_register_reset_controller(struct device_node *np, + unsigned int num_regs, int regofs) +{ + struct mtk_reset *data; + int ret; + struct regmap *regmap; + + regmap = syscon_node_to_regmap(np); + if (IS_ERR(regmap)) { + pr_err("Cannot find regmap for %s: %ld\n", np->full_name, + PTR_ERR(regmap)); + return; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return; + + data->regmap = regmap; + data->regofs = regofs; + data->rcdev.owner = THIS_MODULE; + data->rcdev.nr_resets = num_regs * 32; + data->rcdev.ops = &mtk_reset_ops; + data->rcdev.of_node = np; + + ret = reset_controller_register(&data->rcdev); + if (ret) { + pr_err("could not register reset controller: %d\n", ret); + kfree(data); + return; + } +} diff --git a/drivers/clk/meson/Makefile b/drivers/clk/meson/Makefile new file mode 100644 index 000000000000..6d45531df9ab --- /dev/null +++ b/drivers/clk/meson/Makefile @@ -0,0 +1,6 @@ +# +# Makefile for Meson specific clk +# + +obj-y += clkc.o clk-pll.o clk-cpu.o +obj-y += meson8b-clkc.o diff --git a/drivers/clk/meson/clk-cpu.c b/drivers/clk/meson/clk-cpu.c new file mode 100644 index 000000000000..71ad493b94df --- /dev/null +++ b/drivers/clk/meson/clk-cpu.c @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2015 Endless Mobile, Inc. + * Author: Carlo Caione <[email protected]> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +/* + * CPU clock path: + * + * +-[/N]-----|3| + * MUX2 +--[/3]-+----------|2| MUX1 + * [sys_pll]---|1| |--[/2]------------|1|-|1| + * | |---+------------------|0| | |----- [a5_clk] + * +--|0| | | + * [xtal]---+-------------------------------|0| + * + * + * + */ + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/slab.h> +#include <linux/clk-provider.h> + +#define MESON_CPU_CLK_CNTL1 0x00 +#define MESON_CPU_CLK_CNTL 0x40 + +#define MESON_CPU_CLK_MUX1 BIT(7) +#define MESON_CPU_CLK_MUX2 BIT(0) + +#define MESON_N_WIDTH 9 +#define MESON_N_SHIFT 20 +#define MESON_SEL_WIDTH 2 +#define MESON_SEL_SHIFT 2 + +#include "clkc.h" + +struct meson_clk_cpu { + struct notifier_block clk_nb; + const struct clk_div_table *div_table; + struct clk_hw hw; + void __iomem *base; + u16 reg_off; +}; +#define to_meson_clk_cpu_hw(_hw) container_of(_hw, struct meson_clk_cpu, hw) +#define to_meson_clk_cpu_nb(_nb) container_of(_nb, struct meson_clk_cpu, clk_nb) + +static long meson_clk_cpu_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_hw(hw); + + return divider_round_rate(hw, rate, prate, clk_cpu->div_table, + MESON_N_WIDTH, CLK_DIVIDER_ROUND_CLOSEST); +} + +static int meson_clk_cpu_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_hw(hw); + unsigned int div, sel, N = 0; + u32 reg; + + div = DIV_ROUND_UP(parent_rate, rate); + + if (div <= 3) { + sel = div - 1; + } else { + sel = 3; + N = div / 2; + } + + reg = readl(clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL1); + reg = PARM_SET(MESON_N_WIDTH, MESON_N_SHIFT, reg, N); + writel(reg, clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL1); + + reg = readl(clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL); + reg = PARM_SET(MESON_SEL_WIDTH, MESON_SEL_SHIFT, reg, sel); + writel(reg, clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL); + + return 0; +} + +static unsigned long meson_clk_cpu_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_hw(hw); + unsigned int N, sel; + unsigned int div = 1; + u32 reg; + + reg = readl(clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL1); + N = PARM_GET(MESON_N_WIDTH, MESON_N_SHIFT, reg); + + reg = readl(clk_cpu->base + clk_cpu->reg_off + MESON_CPU_CLK_CNTL); + sel = PARM_GET(MESON_SEL_WIDTH, MESON_SEL_SHIFT, reg); + + if (sel < 3) + div = sel + 1; + else + div = 2 * N; + + return parent_rate / div; +} + +static int meson_clk_cpu_pre_rate_change(struct meson_clk_cpu *clk_cpu, + struct clk_notifier_data *ndata) +{ + u32 cpu_clk_cntl; + + /* switch MUX1 to xtal */ + cpu_clk_cntl = readl(clk_cpu->base + clk_cpu->reg_off + + MESON_CPU_CLK_CNTL); + cpu_clk_cntl &= ~MESON_CPU_CLK_MUX1; + writel(cpu_clk_cntl, clk_cpu->base + clk_cpu->reg_off + + MESON_CPU_CLK_CNTL); + udelay(100); + + /* switch MUX2 to sys-pll */ + cpu_clk_cntl |= MESON_CPU_CLK_MUX2; + writel(cpu_clk_cntl, clk_cpu->base + clk_cpu->reg_off + + MESON_CPU_CLK_CNTL); + + return 0; +} + +static int meson_clk_cpu_post_rate_change(struct meson_clk_cpu *clk_cpu, + struct clk_notifier_data *ndata) +{ + u32 cpu_clk_cntl; + + /* switch MUX1 to divisors' output */ + cpu_clk_cntl = readl(clk_cpu->base + clk_cpu->reg_off + + MESON_CPU_CLK_CNTL); + cpu_clk_cntl |= MESON_CPU_CLK_MUX1; + writel(cpu_clk_cntl, clk_cpu->base + clk_cpu->reg_off + + 
MESON_CPU_CLK_CNTL); + udelay(100); + + return 0; +} + +/* + * This clock notifier is called when the frequency of the of the parent + * PLL clock is to be changed. We use the xtal input as temporary parent + * while the PLL frequency is stabilized. + */ +static int meson_clk_cpu_notifier_cb(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct clk_notifier_data *ndata = data; + struct meson_clk_cpu *clk_cpu = to_meson_clk_cpu_nb(nb); + int ret = 0; + + if (event == PRE_RATE_CHANGE) + ret = meson_clk_cpu_pre_rate_change(clk_cpu, ndata); + else if (event == POST_RATE_CHANGE) + ret = meson_clk_cpu_post_rate_change(clk_cpu, ndata); + + return notifier_from_errno(ret); +} + +static const struct clk_ops meson_clk_cpu_ops = { + .recalc_rate = meson_clk_cpu_recalc_rate, + .round_rate = meson_clk_cpu_round_rate, + .set_rate = meson_clk_cpu_set_rate, +}; + +struct clk *meson_clk_register_cpu(const struct clk_conf *clk_conf, + void __iomem *reg_base, + spinlock_t *lock) +{ + struct clk *clk; + struct clk *pclk; + struct meson_clk_cpu *clk_cpu; + struct clk_init_data init; + int ret; + + clk_cpu = kzalloc(sizeof(*clk_cpu), GFP_KERNEL); + if (!clk_cpu) + return ERR_PTR(-ENOMEM); + + clk_cpu->base = reg_base; + clk_cpu->reg_off = clk_conf->reg_off; + clk_cpu->div_table = clk_conf->conf.div_table; + clk_cpu->clk_nb.notifier_call = meson_clk_cpu_notifier_cb; + + init.name = clk_conf->clk_name; + init.ops = &meson_clk_cpu_ops; + init.flags = clk_conf->flags | CLK_GET_RATE_NOCACHE; + init.flags |= CLK_SET_RATE_PARENT; + init.parent_names = clk_conf->clks_parent; + init.num_parents = 1; + + clk_cpu->hw.init = &init; + + pclk = __clk_lookup(clk_conf->clks_parent[0]); + if (!pclk) { + pr_err("%s: could not lookup parent clock %s\n", + __func__, clk_conf->clks_parent[0]); + ret = -EINVAL; + goto free_clk; + } + + ret = clk_notifier_register(pclk, &clk_cpu->clk_nb); + if (ret) { + pr_err("%s: failed to register clock notifier for %s\n", + __func__, clk_conf->clk_name); + goto free_clk; + } + + clk = clk_register(NULL, &clk_cpu->hw); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); + goto unregister_clk_nb; + } + + return clk; + +unregister_clk_nb: + clk_notifier_unregister(pclk, &clk_cpu->clk_nb); +free_clk: + kfree(clk_cpu); + + return ERR_PTR(ret); +} + diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c new file mode 100644 index 000000000000..664edf0708ea --- /dev/null +++ b/drivers/clk/meson/clk-pll.c @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2015 Endless Mobile, Inc. + * Author: Carlo Caione <[email protected]> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +/* + * In the most basic form, a Meson PLL is composed as follows: + * + * PLL + * +------------------------------+ + * | | + * in -----[ /N ]---[ *M ]---[ >>OD ]----->> out + * | ^ ^ | + * +------------------------------+ + * | | + * FREF VCO + * + * out = (in * M / N) >> OD + */ + +#include <linux/clk-provider.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/slab.h> +#include <linux/string.h> + +#include "clkc.h" + +#define MESON_PLL_RESET BIT(29) +#define MESON_PLL_LOCK BIT(31) + +struct meson_clk_pll { + struct clk_hw hw; + void __iomem *base; + struct pll_conf *conf; + unsigned int rate_count; + spinlock_t *lock; +}; +#define to_meson_clk_pll(_hw) container_of(_hw, struct meson_clk_pll, hw) + +static unsigned long meson_clk_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct meson_clk_pll *pll = to_meson_clk_pll(hw); + struct parm *p; + unsigned long parent_rate_mhz = parent_rate / 1000000; + unsigned long rate_mhz; + u16 n, m, od; + u32 reg; + + p = &pll->conf->n; + reg = readl(pll->base + p->reg_off); + n = PARM_GET(p->width, p->shift, reg); + + p = &pll->conf->m; + reg = readl(pll->base + p->reg_off); + m = PARM_GET(p->width, p->shift, reg); + + p = &pll->conf->od; + reg = readl(pll->base + p->reg_off); + od = PARM_GET(p->width, p->shift, reg); + + rate_mhz = (parent_rate_mhz * m / n) >> od; + + return rate_mhz * 1000000; +} + +static long meson_clk_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct meson_clk_pll *pll = to_meson_clk_pll(hw); + const struct pll_rate_table *rate_table = pll->conf->rate_table; + int i; + + for (i = 0; i < pll->rate_count; i++) { + if (rate <= rate_table[i].rate) + return rate_table[i].rate; + } + + /* else return the smallest value */ + return rate_table[0].rate; +} + +static const struct pll_rate_table *meson_clk_get_pll_settings(struct meson_clk_pll *pll, + unsigned long rate) +{ + const struct pll_rate_table *rate_table = pll->conf->rate_table; + int i; + + for (i = 0; i < pll->rate_count; i++) { + if (rate == rate_table[i].rate) + return &rate_table[i]; + } + return NULL; +} + +static int meson_clk_pll_wait_lock(struct meson_clk_pll *pll, + struct parm *p_n) +{ + int delay = 24000000; + u32 reg; + + while (delay > 0) { + reg = readl(pll->base + p_n->reg_off); + + if (reg & MESON_PLL_LOCK) + return 0; + delay--; + } + return -ETIMEDOUT; +} + +static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct meson_clk_pll *pll = to_meson_clk_pll(hw); + struct parm *p; + const struct pll_rate_table *rate_set; + unsigned long old_rate; + int ret = 0; + u32 reg; + + if (parent_rate == 0 || rate == 0) + return -EINVAL; + + old_rate = rate; + + rate_set = meson_clk_get_pll_settings(pll, rate); + if (!rate_set) + return -EINVAL; + + /* PLL reset */ + p = &pll->conf->n; + reg = readl(pll->base + p->reg_off); + writel(reg | MESON_PLL_RESET, pll->base + p->reg_off); + + reg = PARM_SET(p->width, p->shift, reg, rate_set->n); + writel(reg, pll->base + p->reg_off); + + p = &pll->conf->m; + reg = readl(pll->base + p->reg_off); + reg = PARM_SET(p->width, p->shift, reg, rate_set->m); + writel(reg, pll->base + p->reg_off); + + p = &pll->conf->od; + reg = readl(pll->base + p->reg_off); + reg = PARM_SET(p->width, p->shift, reg, rate_set->od); + writel(reg, pll->base + p->reg_off); + + p = &pll->conf->n; + ret = meson_clk_pll_wait_lock(pll, p); + 
if (ret) { + pr_warn("%s: pll did not lock, trying to restore old rate %lu\n", + __func__, old_rate); + meson_clk_pll_set_rate(hw, old_rate, parent_rate); + } + + return ret; +} + +static const struct clk_ops meson_clk_pll_ops = { + .recalc_rate = meson_clk_pll_recalc_rate, + .round_rate = meson_clk_pll_round_rate, + .set_rate = meson_clk_pll_set_rate, +}; + +static const struct clk_ops meson_clk_pll_ro_ops = { + .recalc_rate = meson_clk_pll_recalc_rate, +}; + +struct clk *meson_clk_register_pll(const struct clk_conf *clk_conf, + void __iomem *reg_base, + spinlock_t *lock) +{ + struct clk *clk; + struct meson_clk_pll *clk_pll; + struct clk_init_data init; + + clk_pll = kzalloc(sizeof(*clk_pll), GFP_KERNEL); + if (!clk_pll) + return ERR_PTR(-ENOMEM); + + clk_pll->base = reg_base + clk_conf->reg_off; + clk_pll->lock = lock; + clk_pll->conf = clk_conf->conf.pll; + + init.name = clk_conf->clk_name; + init.flags = clk_conf->flags | CLK_GET_RATE_NOCACHE; + + init.parent_names = &clk_conf->clks_parent[0]; + init.num_parents = 1; + init.ops = &meson_clk_pll_ro_ops; + + /* If no rate_table is specified we assume the PLL is read-only */ + if (clk_pll->conf->rate_table) { + int len; + + for (len = 0; clk_pll->conf->rate_table[len].rate != 0; ) + len++; + + clk_pll->rate_count = len; + init.ops = &meson_clk_pll_ops; + } + + clk_pll->hw.init = &init; + + clk = clk_register(NULL, &clk_pll->hw); + if (IS_ERR(clk)) + kfree(clk_pll); + + return clk; +} diff --git a/drivers/clk/meson/clkc.c b/drivers/clk/meson/clkc.c new file mode 100644 index 000000000000..b8c511c5e7a7 --- /dev/null +++ b/drivers/clk/meson/clkc.c @@ -0,0 +1,250 @@ +/* + * Copyright (c) 2015 Endless Mobile, Inc. + * Author: Carlo Caione <[email protected]> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/slab.h> + +#include "clkc.h" + +static DEFINE_SPINLOCK(clk_lock); + +static struct clk **clks; +static struct clk_onecell_data clk_data; + +struct clk ** __init meson_clk_init(struct device_node *np, + unsigned long nr_clks) +{ + clks = kcalloc(nr_clks, sizeof(*clks), GFP_KERNEL); + if (!clks) + return ERR_PTR(-ENOMEM); + + clk_data.clks = clks; + clk_data.clk_num = nr_clks; + of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); + + return clks; +} + +static void meson_clk_add_lookup(struct clk *clk, unsigned int id) +{ + if (clks && id) + clks[id] = clk; +} + +static struct clk * __init +meson_clk_register_composite(const struct clk_conf *clk_conf, + void __iomem *clk_base) +{ + struct clk *clk; + struct clk_mux *mux = NULL; + struct clk_divider *div = NULL; + struct clk_gate *gate = NULL; + const struct clk_ops *mux_ops = NULL; + const struct composite_conf *composite_conf; + + composite_conf = clk_conf->conf.composite; + + if (clk_conf->num_parents > 1) { + mux = kzalloc(sizeof(*mux), GFP_KERNEL); + if (!mux) + return ERR_PTR(-ENOMEM); + + mux->reg = clk_base + clk_conf->reg_off + + composite_conf->mux_parm.reg_off; + mux->shift = composite_conf->mux_parm.shift; + mux->mask = BIT(composite_conf->mux_parm.width) - 1; + mux->flags = composite_conf->mux_flags; + mux->lock = &clk_lock; + mux->table = composite_conf->mux_table; + mux_ops = (composite_conf->mux_flags & CLK_MUX_READ_ONLY) ? + &clk_mux_ro_ops : &clk_mux_ops; + } + + if (MESON_PARM_APPLICABLE(&composite_conf->div_parm)) { + div = kzalloc(sizeof(*div), GFP_KERNEL); + if (!div) { + clk = ERR_PTR(-ENOMEM); + goto error; + } + + div->reg = clk_base + clk_conf->reg_off + + composite_conf->div_parm.reg_off; + div->shift = composite_conf->div_parm.shift; + div->width = composite_conf->div_parm.width; + div->lock = &clk_lock; + div->flags = composite_conf->div_flags; + div->table = composite_conf->div_table; + } + + if (MESON_PARM_APPLICABLE(&composite_conf->gate_parm)) { + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) { + clk = ERR_PTR(-ENOMEM); + goto error; + } + + gate->reg = clk_base + clk_conf->reg_off + + composite_conf->div_parm.reg_off; + gate->bit_idx = composite_conf->gate_parm.shift; + gate->flags = composite_conf->gate_flags; + gate->lock = &clk_lock; + } + + clk = clk_register_composite(NULL, clk_conf->clk_name, + clk_conf->clks_parent, + clk_conf->num_parents, + mux ? &mux->hw : NULL, mux_ops, + div ? &div->hw : NULL, &clk_divider_ops, + gate ? 
&gate->hw : NULL, &clk_gate_ops, + clk_conf->flags); + if (IS_ERR(clk)) + goto error; + + return clk; + +error: + kfree(gate); + kfree(div); + kfree(mux); + + return clk; +} + +static struct clk * __init +meson_clk_register_fixed_factor(const struct clk_conf *clk_conf, + void __iomem *clk_base) +{ + struct clk *clk; + const struct fixed_fact_conf *fixed_fact_conf; + const struct parm *p; + unsigned int mult, div; + u32 reg; + + fixed_fact_conf = &clk_conf->conf.fixed_fact; + + mult = clk_conf->conf.fixed_fact.mult; + div = clk_conf->conf.fixed_fact.div; + + if (!mult) { + mult = 1; + p = &fixed_fact_conf->mult_parm; + if (MESON_PARM_APPLICABLE(p)) { + reg = readl(clk_base + clk_conf->reg_off + p->reg_off); + mult = PARM_GET(p->width, p->shift, reg); + } + } + + if (!div) { + div = 1; + p = &fixed_fact_conf->div_parm; + if (MESON_PARM_APPLICABLE(p)) { + reg = readl(clk_base + clk_conf->reg_off + p->reg_off); + mult = PARM_GET(p->width, p->shift, reg); + } + } + + clk = clk_register_fixed_factor(NULL, + clk_conf->clk_name, + clk_conf->clks_parent[0], + clk_conf->flags, + mult, div); + + return clk; +} + +static struct clk * __init +meson_clk_register_fixed_rate(const struct clk_conf *clk_conf, + void __iomem *clk_base) +{ + struct clk *clk; + const struct fixed_rate_conf *fixed_rate_conf; + const struct parm *r; + unsigned long rate; + u32 reg; + + fixed_rate_conf = &clk_conf->conf.fixed_rate; + rate = fixed_rate_conf->rate; + + if (!rate) { + r = &fixed_rate_conf->rate_parm; + reg = readl(clk_base + clk_conf->reg_off + r->reg_off); + rate = PARM_GET(r->width, r->shift, reg); + } + + rate *= 1000000; + + clk = clk_register_fixed_rate(NULL, + clk_conf->clk_name, + clk_conf->num_parents + ? clk_conf->clks_parent[0] : NULL, + clk_conf->flags, rate); + + return clk; +} + +void __init meson_clk_register_clks(const struct clk_conf *clk_confs, + size_t nr_confs, + void __iomem *clk_base) +{ + unsigned int i; + struct clk *clk = NULL; + + for (i = 0; i < nr_confs; i++) { + const struct clk_conf *clk_conf = &clk_confs[i]; + + switch (clk_conf->clk_type) { + case CLK_FIXED_RATE: + clk = meson_clk_register_fixed_rate(clk_conf, + clk_base); + break; + case CLK_FIXED_FACTOR: + clk = meson_clk_register_fixed_factor(clk_conf, + clk_base); + break; + case CLK_COMPOSITE: + clk = meson_clk_register_composite(clk_conf, + clk_base); + break; + case CLK_CPU: + clk = meson_clk_register_cpu(clk_conf, clk_base, + &clk_lock); + break; + case CLK_PLL: + clk = meson_clk_register_pll(clk_conf, clk_base, + &clk_lock); + break; + default: + clk = NULL; + } + + if (!clk) { + pr_err("%s: unknown clock type %d\n", __func__, + clk_conf->clk_type); + continue; + } + + if (IS_ERR(clk)) { + pr_warn("%s: Unable to create %s clock\n", __func__, + clk_conf->clk_name); + continue; + } + + meson_clk_add_lookup(clk, clk_conf->clk_id); + } +} diff --git a/drivers/clk/meson/clkc.h b/drivers/clk/meson/clkc.h new file mode 100644 index 000000000000..609ae92cc13f --- /dev/null +++ b/drivers/clk/meson/clkc.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2015 Endless Mobile, Inc. + * Author: Carlo Caione <[email protected]> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __CLKC_H +#define __CLKC_H + +#define PMASK(width) GENMASK(width - 1, 0) +#define SETPMASK(width, shift) GENMASK(shift + width - 1, shift) +#define CLRPMASK(width, shift) (~SETPMASK(width, shift)) + +#define PARM_GET(width, shift, reg) \ + (((reg) & SETPMASK(width, shift)) >> (shift)) +#define PARM_SET(width, shift, reg, val) \ + (((reg) & CLRPMASK(width, shift)) | (val << (shift))) + +#define MESON_PARM_APPLICABLE(p) (!!((p)->width)) + +struct parm { + u16 reg_off; + u8 shift; + u8 width; +}; +#define PARM(_r, _s, _w) \ + { \ + .reg_off = (_r), \ + .shift = (_s), \ + .width = (_w), \ + } \ + +struct pll_rate_table { + unsigned long rate; + u16 m; + u16 n; + u16 od; +}; +#define PLL_RATE(_r, _m, _n, _od) \ + { \ + .rate = (_r), \ + .m = (_m), \ + .n = (_n), \ + .od = (_od), \ + } \ + +struct pll_conf { + const struct pll_rate_table *rate_table; + struct parm m; + struct parm n; + struct parm od; +}; + +struct fixed_fact_conf { + unsigned int div; + unsigned int mult; + struct parm div_parm; + struct parm mult_parm; +}; + +struct fixed_rate_conf { + unsigned long rate; + struct parm rate_parm; +}; + +struct composite_conf { + struct parm mux_parm; + struct parm div_parm; + struct parm gate_parm; + struct clk_div_table *div_table; + u32 *mux_table; + u8 mux_flags; + u8 div_flags; + u8 gate_flags; +}; + +#define PNAME(x) static const char *x[] + +enum clk_type { + CLK_FIXED_FACTOR, + CLK_FIXED_RATE, + CLK_COMPOSITE, + CLK_CPU, + CLK_PLL, +}; + +struct clk_conf { + u16 reg_off; + enum clk_type clk_type; + unsigned int clk_id; + const char *clk_name; + const char **clks_parent; + int num_parents; + unsigned long flags; + union { + struct fixed_fact_conf fixed_fact; + struct fixed_rate_conf fixed_rate; + const struct composite_conf *composite; + struct pll_conf *pll; + const struct clk_div_table *div_table; + } conf; +}; + +#define FIXED_RATE_P(_ro, _ci, _cn, _f, _c) \ + { \ + .reg_off = (_ro), \ + .clk_type = CLK_FIXED_RATE, \ + .clk_id = (_ci), \ + .clk_name = (_cn), \ + .flags = (_f), \ + .conf.fixed_rate.rate_parm = _c, \ + } \ + +#define FIXED_RATE(_ci, _cn, _f, _r) \ + { \ + .clk_type = CLK_FIXED_RATE, \ + .clk_id = (_ci), \ + .clk_name = (_cn), \ + .flags = (_f), \ + .conf.fixed_rate.rate = (_r), \ + } \ + +#define PLL(_ro, _ci, _cn, _cp, _f, _c) \ + { \ + .reg_off = (_ro), \ + .clk_type = CLK_PLL, \ + .clk_id = (_ci), \ + .clk_name = (_cn), \ + .clks_parent = (_cp), \ + .num_parents = ARRAY_SIZE(_cp), \ + .flags = (_f), \ + .conf.pll = (_c), \ + } \ + +#define FIXED_FACTOR_DIV(_ci, _cn, _cp, _f, _d) \ + { \ + .clk_type = CLK_FIXED_FACTOR, \ + .clk_id = (_ci), \ + .clk_name = (_cn), \ + .clks_parent = (_cp), \ + .num_parents = ARRAY_SIZE(_cp), \ + .conf.fixed_fact.div = (_d), \ + } \ + +#define CPU(_ro, _ci, _cn, _cp, _dt) \ + { \ + .reg_off = (_ro), \ + .clk_type = CLK_CPU, \ + .clk_id = (_ci), \ + .clk_name = (_cn), \ + .clks_parent = (_cp), \ + .num_parents = ARRAY_SIZE(_cp), \ + .conf.div_table = (_dt), \ + } \ + +#define COMPOSITE(_ro, _ci, _cn, _cp, _f, _c) \ + { \ + .reg_off = (_ro), \ + .clk_type = CLK_COMPOSITE, \ + .clk_id = (_ci), \ + .clk_name = (_cn), \ + .clks_parent = (_cp), \ + .num_parents = ARRAY_SIZE(_cp), \ + .flags = (_f), \ + .conf.composite = (_c), \ + } \ + +struct clk **meson_clk_init(struct device_node *np, unsigned long nr_clks); +void 
meson_clk_register_clks(const struct clk_conf *clk_confs, + unsigned int nr_confs, void __iomem *clk_base); +struct clk *meson_clk_register_cpu(const struct clk_conf *clk_conf, + void __iomem *reg_base, spinlock_t *lock); +struct clk *meson_clk_register_pll(const struct clk_conf *clk_conf, + void __iomem *reg_base, spinlock_t *lock); + +#endif /* __CLKC_H */ diff --git a/drivers/clk/meson/meson8b-clkc.c b/drivers/clk/meson/meson8b-clkc.c new file mode 100644 index 000000000000..61f6d55c4ac7 --- /dev/null +++ b/drivers/clk/meson/meson8b-clkc.c @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2015 Endless Mobile, Inc. + * Author: Carlo Caione <[email protected]> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/clk-provider.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/slab.h> +#include <dt-bindings/clock/meson8b-clkc.h> + +#include "clkc.h" + +#define MESON8B_REG_CTL0_ADDR 0x0000 +#define MESON8B_REG_SYS_CPU_CNTL1 0x015c +#define MESON8B_REG_HHI_MPEG 0x0174 +#define MESON8B_REG_MALI 0x01b0 +#define MESON8B_REG_PLL_FIXED 0x0280 +#define MESON8B_REG_PLL_SYS 0x0300 +#define MESON8B_REG_PLL_VID 0x0320 + +static const struct pll_rate_table sys_pll_rate_table[] = { + PLL_RATE(312000000, 52, 1, 2), + PLL_RATE(336000000, 56, 1, 2), + PLL_RATE(360000000, 60, 1, 2), + PLL_RATE(384000000, 64, 1, 2), + PLL_RATE(408000000, 68, 1, 2), + PLL_RATE(432000000, 72, 1, 2), + PLL_RATE(456000000, 76, 1, 2), + PLL_RATE(480000000, 80, 1, 2), + PLL_RATE(504000000, 84, 1, 2), + PLL_RATE(528000000, 88, 1, 2), + PLL_RATE(552000000, 92, 1, 2), + PLL_RATE(576000000, 96, 1, 2), + PLL_RATE(600000000, 50, 1, 1), + PLL_RATE(624000000, 52, 1, 1), + PLL_RATE(648000000, 54, 1, 1), + PLL_RATE(672000000, 56, 1, 1), + PLL_RATE(696000000, 58, 1, 1), + PLL_RATE(720000000, 60, 1, 1), + PLL_RATE(744000000, 62, 1, 1), + PLL_RATE(768000000, 64, 1, 1), + PLL_RATE(792000000, 66, 1, 1), + PLL_RATE(816000000, 68, 1, 1), + PLL_RATE(840000000, 70, 1, 1), + PLL_RATE(864000000, 72, 1, 1), + PLL_RATE(888000000, 74, 1, 1), + PLL_RATE(912000000, 76, 1, 1), + PLL_RATE(936000000, 78, 1, 1), + PLL_RATE(960000000, 80, 1, 1), + PLL_RATE(984000000, 82, 1, 1), + PLL_RATE(1008000000, 84, 1, 1), + PLL_RATE(1032000000, 86, 1, 1), + PLL_RATE(1056000000, 88, 1, 1), + PLL_RATE(1080000000, 90, 1, 1), + PLL_RATE(1104000000, 92, 1, 1), + PLL_RATE(1128000000, 94, 1, 1), + PLL_RATE(1152000000, 96, 1, 1), + PLL_RATE(1176000000, 98, 1, 1), + PLL_RATE(1200000000, 50, 1, 0), + PLL_RATE(1224000000, 51, 1, 0), + PLL_RATE(1248000000, 52, 1, 0), + PLL_RATE(1272000000, 53, 1, 0), + PLL_RATE(1296000000, 54, 1, 0), + PLL_RATE(1320000000, 55, 1, 0), + PLL_RATE(1344000000, 56, 1, 0), + PLL_RATE(1368000000, 57, 1, 0), + PLL_RATE(1392000000, 58, 1, 0), + PLL_RATE(1416000000, 59, 1, 0), + PLL_RATE(1440000000, 60, 1, 0), + PLL_RATE(1464000000, 61, 1, 0), + PLL_RATE(1488000000, 62, 1, 0), + PLL_RATE(1512000000, 63, 1, 0), + PLL_RATE(1536000000, 64, 1, 0), + { /* 
sentinel */ }, +}; + +static const struct clk_div_table cpu_div_table[] = { + { .val = 1, .div = 1 }, + { .val = 2, .div = 2 }, + { .val = 3, .div = 3 }, + { .val = 2, .div = 4 }, + { .val = 3, .div = 6 }, + { .val = 4, .div = 8 }, + { .val = 5, .div = 10 }, + { .val = 6, .div = 12 }, + { .val = 7, .div = 14 }, + { .val = 8, .div = 16 }, + { /* sentinel */ }, +}; + +PNAME(p_xtal) = { "xtal" }; +PNAME(p_fclk_div) = { "fixed_pll" }; +PNAME(p_cpu_clk) = { "sys_pll" }; +PNAME(p_clk81) = { "fclk_div3", "fclk_div4", "fclk_div5" }; +PNAME(p_mali) = { "fclk_div3", "fclk_div4", "fclk_div5", + "fclk_div7", "zero" }; + +static u32 mux_table_clk81[] = { 6, 5, 7 }; +static u32 mux_table_mali[] = { 6, 5, 7, 4, 0 }; + +static struct pll_conf pll_confs = { + .m = PARM(0x00, 0, 9), + .n = PARM(0x00, 9, 5), + .od = PARM(0x00, 16, 2), +}; + +static struct pll_conf sys_pll_conf = { + .m = PARM(0x00, 0, 9), + .n = PARM(0x00, 9, 5), + .od = PARM(0x00, 16, 2), + .rate_table = sys_pll_rate_table, +}; + +static const struct composite_conf clk81_conf __initconst = { + .mux_table = mux_table_clk81, + .mux_flags = CLK_MUX_READ_ONLY, + .mux_parm = PARM(0x00, 12, 3), + .div_parm = PARM(0x00, 0, 7), + .gate_parm = PARM(0x00, 7, 1), +}; + +static const struct composite_conf mali_conf __initconst = { + .mux_table = mux_table_mali, + .mux_parm = PARM(0x00, 9, 3), + .div_parm = PARM(0x00, 0, 7), + .gate_parm = PARM(0x00, 8, 1), +}; + +static const struct clk_conf meson8b_xtal_conf __initconst = + FIXED_RATE_P(MESON8B_REG_CTL0_ADDR, CLKID_XTAL, "xtal", + CLK_IS_ROOT, PARM(0x00, 4, 7)); + +static const struct clk_conf meson8b_clk_confs[] __initconst = { + FIXED_RATE(CLKID_ZERO, "zero", CLK_IS_ROOT, 0), + PLL(MESON8B_REG_PLL_FIXED, CLKID_PLL_FIXED, "fixed_pll", + p_xtal, 0, &pll_confs), + PLL(MESON8B_REG_PLL_VID, CLKID_PLL_VID, "vid_pll", + p_xtal, 0, &pll_confs), + PLL(MESON8B_REG_PLL_SYS, CLKID_PLL_SYS, "sys_pll", + p_xtal, 0, &sys_pll_conf), + FIXED_FACTOR_DIV(CLKID_FCLK_DIV2, "fclk_div2", p_fclk_div, 0, 2), + FIXED_FACTOR_DIV(CLKID_FCLK_DIV3, "fclk_div3", p_fclk_div, 0, 3), + FIXED_FACTOR_DIV(CLKID_FCLK_DIV4, "fclk_div4", p_fclk_div, 0, 4), + FIXED_FACTOR_DIV(CLKID_FCLK_DIV5, "fclk_div5", p_fclk_div, 0, 5), + FIXED_FACTOR_DIV(CLKID_FCLK_DIV7, "fclk_div7", p_fclk_div, 0, 7), + CPU(MESON8B_REG_SYS_CPU_CNTL1, CLKID_CPUCLK, "a5_clk", p_cpu_clk, + cpu_div_table), + COMPOSITE(MESON8B_REG_HHI_MPEG, CLKID_CLK81, "clk81", p_clk81, + CLK_SET_RATE_NO_REPARENT | CLK_IGNORE_UNUSED, &clk81_conf), + COMPOSITE(MESON8B_REG_MALI, CLKID_MALI, "mali", p_mali, + CLK_IGNORE_UNUSED, &mali_conf), +}; + +static void __init meson8b_clkc_init(struct device_node *np) +{ + void __iomem *clk_base; + + if (!meson_clk_init(np, CLK_NR_CLKS)) + return; + + /* XTAL */ + clk_base = of_iomap(np, 0); + if (!clk_base) { + pr_err("%s: Unable to map xtal base\n", __func__); + return; + } + + meson_clk_register_clks(&meson8b_xtal_conf, 1, clk_base); + iounmap(clk_base); + + /* Generic clocks and PLLs */ + clk_base = of_iomap(np, 1); + if (!clk_base) { + pr_err("%s: Unable to map clk base\n", __func__); + return; + } + + meson_clk_register_clks(meson8b_clk_confs, + ARRAY_SIZE(meson8b_clk_confs), + clk_base); +} +CLK_OF_DECLARE(meson8b_clock, "amlogic,meson8b-clkc", meson8b_clkc_init); diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile index 3caaf7cc169c..9d4bc41e4239 100644 --- a/drivers/clk/mmp/Makefile +++ b/drivers/clk/mmp/Makefile @@ -12,3 +12,5 @@ obj-$(CONFIG_MACH_MMP2_DT) += clk-of-mmp2.o obj-$(CONFIG_CPU_PXA168) += clk-pxa168.o 
obj-$(CONFIG_CPU_PXA910) += clk-pxa910.o obj-$(CONFIG_CPU_MMP2) += clk-mmp2.o + +obj-y += clk-of-pxa1928.o diff --git a/drivers/clk/mmp/clk-apbc.c b/drivers/clk/mmp/clk-apbc.c index d14120eaa71f..09d41c717c52 100644 --- a/drivers/clk/mmp/clk-apbc.c +++ b/drivers/clk/mmp/clk-apbc.c @@ -115,7 +115,7 @@ static void clk_apbc_unprepare(struct clk_hw *hw) spin_unlock_irqrestore(apbc->lock, flags); } -struct clk_ops clk_apbc_ops = { +static struct clk_ops clk_apbc_ops = { .prepare = clk_apbc_prepare, .unprepare = clk_apbc_unprepare, }; diff --git a/drivers/clk/mmp/clk-apmu.c b/drivers/clk/mmp/clk-apmu.c index abe182b2377f..cdcf2d7f321e 100644 --- a/drivers/clk/mmp/clk-apmu.c +++ b/drivers/clk/mmp/clk-apmu.c @@ -61,7 +61,7 @@ static void clk_apmu_disable(struct clk_hw *hw) spin_unlock_irqrestore(apmu->lock, flags); } -struct clk_ops clk_apmu_ops = { +static struct clk_ops clk_apmu_ops = { .enable = clk_apmu_enable, .disable = clk_apmu_disable, }; diff --git a/drivers/clk/mmp/clk-mmp2.c b/drivers/clk/mmp/clk-mmp2.c index 5c90a4230fa3..09d2832fbd78 100644 --- a/drivers/clk/mmp/clk-mmp2.c +++ b/drivers/clk/mmp/clk-mmp2.c @@ -63,10 +63,8 @@ static struct mmp_clk_factor_masks uart_factor_masks = { }; static struct mmp_clk_factor_tbl uart_factor_tbl[] = { - {.num = 14634, .den = 2165}, /*14.745MHZ */ + {.num = 8125, .den = 1536}, /*14.745MHZ */ {.num = 3521, .den = 689}, /*19.23MHZ */ - {.num = 9679, .den = 5728}, /*58.9824MHZ */ - {.num = 15850, .den = 9451}, /*59.429MHZ */ }; static const char *uart_parent[] = {"uart_pll", "vctcxo"}; diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index 2cbc2b43ae52..251533d87c65 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -30,6 +30,7 @@ #define APBC_TWSI4 0x7c #define APBC_TWSI5 0x80 #define APBC_KPC 0x18 +#define APBC_TIMER 0x24 #define APBC_UART0 0x2c #define APBC_UART1 0x30 #define APBC_UART2 0x34 @@ -98,10 +99,8 @@ static struct mmp_clk_factor_masks uart_factor_masks = { }; static struct mmp_clk_factor_tbl uart_factor_tbl[] = { - {.num = 14634, .den = 2165}, /*14.745MHZ */ + {.num = 8125, .den = 1536}, /*14.745MHZ */ {.num = 3521, .den = 689}, /*19.23MHZ */ - {.num = 9679, .den = 5728}, /*58.9824MHZ */ - {.num = 15850, .den = 9451}, /*59.429MHZ */ }; static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit) @@ -134,6 +133,9 @@ static DEFINE_SPINLOCK(ssp2_lock); static DEFINE_SPINLOCK(ssp3_lock); static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"}; +static DEFINE_SPINLOCK(timer_lock); +static const char *timer_parent_names[] = {"clk32", "vctcxo_2", "vctcxo_4", "vctcxo"}; + static DEFINE_SPINLOCK(reset_lock); static struct mmp_param_mux_clk apbc_mux_clks[] = { @@ -145,6 +147,7 @@ static struct mmp_param_mux_clk apbc_mux_clks[] = { {0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock}, {0, "ssp2_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP2, 4, 3, 0, &ssp2_lock}, {0, "ssp3_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP3, 4, 3, 0, &ssp3_lock}, + {0, "timer_mux", timer_parent_names, ARRAY_SIZE(timer_parent_names), CLK_SET_RATE_PARENT, APBC_TIMER, 4, 3, 0, &timer_lock}, }; static struct mmp_param_gate_clk apbc_gate_clks[] = { @@ -170,6 +173,7 @@ static struct mmp_param_gate_clk apbc_gate_clks[] = { {MMP2_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x7, 0x3, 0x0, 0, &ssp1_lock}, {MMP2_CLK_SSP2, "ssp2_clk", 
"ssp2_mux", CLK_SET_RATE_PARENT, APBC_SSP2, 0x7, 0x3, 0x0, 0, &ssp2_lock}, {MMP2_CLK_SSP3, "ssp3_clk", "ssp3_mux", CLK_SET_RATE_PARENT, APBC_SSP3, 0x7, 0x3, 0x0, 0, &ssp3_lock}, + {MMP2_CLK_TIMER, "timer_clk", "timer_mux", CLK_SET_RATE_PARENT, APBC_TIMER, 0x7, 0x3, 0x0, 0, &timer_lock}, }; static void mmp2_apb_periph_clk_init(struct mmp2_clk_unit *pxa_unit) diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c index 5b1810dc4bd2..64eaf4141c69 100644 --- a/drivers/clk/mmp/clk-of-pxa168.c +++ b/drivers/clk/mmp/clk-of-pxa168.c @@ -32,6 +32,7 @@ #define APBC_PWM1 0x10 #define APBC_PWM2 0x14 #define APBC_PWM3 0x18 +#define APBC_TIMER 0x34 #define APBC_SSP0 0x81c #define APBC_SSP1 0x820 #define APBC_SSP2 0x84c @@ -58,6 +59,7 @@ static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = { {PXA168_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768}, {PXA168_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000}, {PXA168_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 624000000}, + {PXA168_CLK_USB_PLL, "usb_pll", NULL, CLK_IS_ROOT, 480000000}, }; static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = { @@ -70,6 +72,7 @@ static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = { {PXA168_CLK_PLL1_24, "pll1_24", "pll1_12", 1, 2, 0}, {PXA168_CLK_PLL1_48, "pll1_48", "pll1_24", 1, 2, 0}, {PXA168_CLK_PLL1_96, "pll1_96", "pll1_48", 1, 2, 0}, + {PXA168_CLK_PLL1_192, "pll1_192", "pll1_96", 1, 2, 0}, {PXA168_CLK_PLL1_13, "pll1_13", "pll1", 1, 13, 0}, {PXA168_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 2, 3, 0}, {PXA168_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 2, 3, 0}, @@ -119,6 +122,9 @@ static DEFINE_SPINLOCK(ssp3_lock); static DEFINE_SPINLOCK(ssp4_lock); static const char *ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"}; +static DEFINE_SPINLOCK(timer_lock); +static const char *timer_parent_names[] = {"pll1_48", "clk32", "pll1_96", "pll1_192"}; + static DEFINE_SPINLOCK(reset_lock); static struct mmp_param_mux_clk apbc_mux_clks[] = { @@ -130,6 +136,7 @@ static struct mmp_param_mux_clk apbc_mux_clks[] = { {0, "ssp2_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP2, 4, 3, 0, &ssp2_lock}, {0, "ssp3_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP3, 4, 3, 0, &ssp3_lock}, {0, "ssp4_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP4, 4, 3, 0, &ssp4_lock}, + {0, "timer_mux", timer_parent_names, ARRAY_SIZE(timer_parent_names), CLK_SET_RATE_PARENT, APBC_TIMER, 4, 3, 0, &timer_lock}, }; static struct mmp_param_gate_clk apbc_gate_clks[] = { @@ -151,6 +158,7 @@ static struct mmp_param_gate_clk apbc_gate_clks[] = { {PXA168_CLK_SSP2, "ssp2_clk", "ssp2_mux", CLK_SET_RATE_PARENT, APBC_SSP2, 0x3, 0x3, 0x0, 0, &ssp2_lock}, {PXA168_CLK_SSP3, "ssp3_clk", "ssp3_mux", CLK_SET_RATE_PARENT, APBC_SSP3, 0x3, 0x3, 0x0, 0, &ssp3_lock}, {PXA168_CLK_SSP4, "ssp4_clk", "ssp4_mux", CLK_SET_RATE_PARENT, APBC_SSP4, 0x3, 0x3, 0x0, 0, &ssp4_lock}, + {PXA168_CLK_TIMER, "timer_clk", "timer_mux", CLK_SET_RATE_PARENT, APBC_TIMER, 0x3, 0x3, 0x0, 0, &timer_lock}, }; static void pxa168_apb_periph_clk_init(struct pxa168_clk_unit *pxa_unit) diff --git a/drivers/clk/mmp/clk-of-pxa1928.c b/drivers/clk/mmp/clk-of-pxa1928.c new file mode 100644 index 000000000000..433a5ae1eae0 --- /dev/null +++ b/drivers/clk/mmp/clk-of-pxa1928.c @@ -0,0 +1,265 @@ +/* + * pxa1928 clock framework source file + * + * Copyright (C) 2015 Linaro, Ltd. 
+ * Rob Herring <[email protected]> + * + * Based on drivers/clk/mmp/clk-of-mmp2.c: + * Copyright (C) 2012 Marvell + * Chao Xie <[email protected]> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/of_address.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include <dt-bindings/clock/marvell,pxa1928.h> + +#include "clk.h" +#include "reset.h" + +#define MPMU_UART_PLL 0x14 + +struct pxa1928_clk_unit { + struct mmp_clk_unit unit; + void __iomem *mpmu_base; + void __iomem *apmu_base; + void __iomem *apbc_base; + void __iomem *apbcp_base; +}; + +static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = { + {0, "clk32", NULL, CLK_IS_ROOT, 32768}, + {0, "vctcxo", NULL, CLK_IS_ROOT, 26000000}, + {0, "pll1_624", NULL, CLK_IS_ROOT, 624000000}, + {0, "pll5p", NULL, CLK_IS_ROOT, 832000000}, + {0, "pll5", NULL, CLK_IS_ROOT, 1248000000}, + {0, "usb_pll", NULL, CLK_IS_ROOT, 480000000}, +}; + +static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = { + {0, "pll1_d2", "pll1_624", 1, 2, 0}, + {0, "pll1_d9", "pll1_624", 1, 9, 0}, + {0, "pll1_d12", "pll1_624", 1, 12, 0}, + {0, "pll1_d16", "pll1_624", 1, 16, 0}, + {0, "pll1_d20", "pll1_624", 1, 20, 0}, + {0, "pll1_416", "pll1_624", 2, 3, 0}, + {0, "vctcxo_d2", "vctcxo", 1, 2, 0}, + {0, "vctcxo_d4", "vctcxo", 1, 4, 0}, +}; + +static struct mmp_clk_factor_masks uart_factor_masks = { + .factor = 2, + .num_mask = 0x1fff, + .den_mask = 0x1fff, + .num_shift = 16, + .den_shift = 0, +}; + +static struct mmp_clk_factor_tbl uart_factor_tbl[] = { + {.num = 832, .den = 234}, /*58.5MHZ */ + {.num = 1, .den = 1}, /*26MHZ */ +}; + +static void pxa1928_pll_init(struct pxa1928_clk_unit *pxa_unit) +{ + struct clk *clk; + struct mmp_clk_unit *unit = &pxa_unit->unit; + + mmp_register_fixed_rate_clks(unit, fixed_rate_clks, + ARRAY_SIZE(fixed_rate_clks)); + + mmp_register_fixed_factor_clks(unit, fixed_factor_clks, + ARRAY_SIZE(fixed_factor_clks)); + + clk = mmp_clk_register_factor("uart_pll", "pll1_416", + CLK_SET_RATE_PARENT, + pxa_unit->mpmu_base + MPMU_UART_PLL, + &uart_factor_masks, uart_factor_tbl, + ARRAY_SIZE(uart_factor_tbl), NULL); +} + +static DEFINE_SPINLOCK(uart0_lock); +static DEFINE_SPINLOCK(uart1_lock); +static DEFINE_SPINLOCK(uart2_lock); +static DEFINE_SPINLOCK(uart3_lock); +static const char *uart_parent_names[] = {"uart_pll", "vctcxo"}; + +static DEFINE_SPINLOCK(ssp0_lock); +static DEFINE_SPINLOCK(ssp1_lock); +static const char *ssp_parent_names[] = {"vctcxo_d4", "vctcxo_d2", "vctcxo", "pll1_d12"}; + +static DEFINE_SPINLOCK(reset_lock); + +static struct mmp_param_mux_clk apbc_mux_clks[] = { + {0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, PXA1928_CLK_UART0 * 4, 4, 3, 0, &uart0_lock}, + {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, PXA1928_CLK_UART1 * 4, 4, 3, 0, &uart1_lock}, + {0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, PXA1928_CLK_UART2 * 4, 4, 3, 0, &uart2_lock}, + {0, "uart3_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, PXA1928_CLK_UART3 * 4, 4, 3, 0, &uart3_lock}, + {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, PXA1928_CLK_SSP0 * 4, 4, 3, 0, &ssp0_lock}, + {0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), 
CLK_SET_RATE_PARENT, PXA1928_CLK_SSP1 * 4, 4, 3, 0, &ssp1_lock}, +}; + +static struct mmp_param_gate_clk apbc_gate_clks[] = { + {PXA1928_CLK_TWSI0, "twsi0_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_TWSI0 * 4, 0x3, 0x3, 0x0, 0, &reset_lock}, + {PXA1928_CLK_TWSI1, "twsi1_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_TWSI1 * 4, 0x3, 0x3, 0x0, 0, &reset_lock}, + {PXA1928_CLK_TWSI2, "twsi2_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_TWSI2 * 4, 0x3, 0x3, 0x0, 0, &reset_lock}, + {PXA1928_CLK_TWSI3, "twsi3_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_TWSI3 * 4, 0x3, 0x3, 0x0, 0, &reset_lock}, + {PXA1928_CLK_TWSI4, "twsi4_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_TWSI4 * 4, 0x3, 0x3, 0x0, 0, &reset_lock}, + {PXA1928_CLK_TWSI5, "twsi5_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_TWSI5 * 4, 0x3, 0x3, 0x0, 0, &reset_lock}, + {PXA1928_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_GPIO * 4, 0x3, 0x3, 0x0, 0, &reset_lock}, + {PXA1928_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, PXA1928_CLK_KPC * 4, 0x3, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL}, + {PXA1928_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, PXA1928_CLK_RTC * 4, 0x83, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL}, + {PXA1928_CLK_PWM0, "pwm0_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_PWM0 * 4, 0x3, 0x3, 0x0, 0, &reset_lock}, + {PXA1928_CLK_PWM1, "pwm1_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_PWM1 * 4, 0x3, 0x3, 0x0, 0, &reset_lock}, + {PXA1928_CLK_PWM2, "pwm2_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_PWM2 * 4, 0x3, 0x3, 0x0, 0, &reset_lock}, + {PXA1928_CLK_PWM3, "pwm3_clk", "vctcxo", CLK_SET_RATE_PARENT, PXA1928_CLK_PWM3 * 4, 0x3, 0x3, 0x0, 0, &reset_lock}, + /* The gate clocks has mux parent. */ + {PXA1928_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, PXA1928_CLK_UART0 * 4, 0x3, 0x3, 0x0, 0, &uart0_lock}, + {PXA1928_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, PXA1928_CLK_UART1 * 4, 0x3, 0x3, 0x0, 0, &uart1_lock}, + {PXA1928_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, PXA1928_CLK_UART2 * 4, 0x3, 0x3, 0x0, 0, &uart2_lock}, + {PXA1928_CLK_UART3, "uart3_clk", "uart3_mux", CLK_SET_RATE_PARENT, PXA1928_CLK_UART3 * 4, 0x3, 0x3, 0x0, 0, &uart3_lock}, + {PXA1928_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, PXA1928_CLK_SSP0 * 4, 0x3, 0x3, 0x0, 0, &ssp0_lock}, + {PXA1928_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, PXA1928_CLK_SSP1 * 4, 0x3, 0x3, 0x0, 0, &ssp1_lock}, +}; + +static void pxa1928_apb_periph_clk_init(struct pxa1928_clk_unit *pxa_unit) +{ + struct mmp_clk_unit *unit = &pxa_unit->unit; + + mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->apbc_base, + ARRAY_SIZE(apbc_mux_clks)); + + mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base, + ARRAY_SIZE(apbc_gate_clks)); +} + +static DEFINE_SPINLOCK(sdh0_lock); +static DEFINE_SPINLOCK(sdh1_lock); +static DEFINE_SPINLOCK(sdh2_lock); +static DEFINE_SPINLOCK(sdh3_lock); +static DEFINE_SPINLOCK(sdh4_lock); +static const char *sdh_parent_names[] = {"pll1_624", "pll5p", "pll5", "pll1_416"}; + +static DEFINE_SPINLOCK(usb_lock); + +static struct mmp_param_mux_clk apmu_mux_clks[] = { + {0, "sdh_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, PXA1928_CLK_SDH0 * 4, 8, 2, 0, &sdh0_lock}, +}; + +static struct mmp_param_div_clk apmu_div_clks[] = { + {0, "sdh_div", "sdh_mux", 0, PXA1928_CLK_SDH0 * 4, 10, 4, CLK_DIVIDER_ONE_BASED, &sdh0_lock}, +}; + +static struct mmp_param_gate_clk apmu_gate_clks[] = { + {PXA1928_CLK_USB, "usb_clk", 
"usb_pll", 0, PXA1928_CLK_USB * 4, 0x9, 0x9, 0x0, 0, &usb_lock}, + {PXA1928_CLK_HSIC, "hsic_clk", "usb_pll", 0, PXA1928_CLK_HSIC * 4, 0x9, 0x9, 0x0, 0, &usb_lock}, + /* The gate clocks has mux parent. */ + {PXA1928_CLK_SDH0, "sdh0_clk", "sdh_div", CLK_SET_RATE_PARENT, PXA1928_CLK_SDH0 * 4, 0x1b, 0x1b, 0x0, 0, &sdh0_lock}, + {PXA1928_CLK_SDH1, "sdh1_clk", "sdh_div", CLK_SET_RATE_PARENT, PXA1928_CLK_SDH1 * 4, 0x1b, 0x1b, 0x0, 0, &sdh1_lock}, + {PXA1928_CLK_SDH2, "sdh2_clk", "sdh_div", CLK_SET_RATE_PARENT, PXA1928_CLK_SDH2 * 4, 0x1b, 0x1b, 0x0, 0, &sdh2_lock}, + {PXA1928_CLK_SDH3, "sdh3_clk", "sdh_div", CLK_SET_RATE_PARENT, PXA1928_CLK_SDH3 * 4, 0x1b, 0x1b, 0x0, 0, &sdh3_lock}, + {PXA1928_CLK_SDH4, "sdh4_clk", "sdh_div", CLK_SET_RATE_PARENT, PXA1928_CLK_SDH4 * 4, 0x1b, 0x1b, 0x0, 0, &sdh4_lock}, +}; + +static void pxa1928_axi_periph_clk_init(struct pxa1928_clk_unit *pxa_unit) +{ + struct mmp_clk_unit *unit = &pxa_unit->unit; + + mmp_register_mux_clks(unit, apmu_mux_clks, pxa_unit->apmu_base, + ARRAY_SIZE(apmu_mux_clks)); + + mmp_register_div_clks(unit, apmu_div_clks, pxa_unit->apmu_base, + ARRAY_SIZE(apmu_div_clks)); + + mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base, + ARRAY_SIZE(apmu_gate_clks)); +} + +static void pxa1928_clk_reset_init(struct device_node *np, + struct pxa1928_clk_unit *pxa_unit) +{ + struct mmp_clk_reset_cell *cells; + int i, base, nr_resets; + + nr_resets = ARRAY_SIZE(apbc_gate_clks); + cells = kcalloc(nr_resets, sizeof(*cells), GFP_KERNEL); + if (!cells) + return; + + base = 0; + for (i = 0; i < nr_resets; i++) { + cells[base + i].clk_id = apbc_gate_clks[i].id; + cells[base + i].reg = + pxa_unit->apbc_base + apbc_gate_clks[i].offset; + cells[base + i].flags = 0; + cells[base + i].lock = apbc_gate_clks[i].lock; + cells[base + i].bits = 0x4; + } + + mmp_clk_reset_register(np, cells, nr_resets); +} + +static void __init pxa1928_mpmu_clk_init(struct device_node *np) +{ + struct pxa1928_clk_unit *pxa_unit; + + pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL); + if (!pxa_unit) + return; + + pxa_unit->mpmu_base = of_iomap(np, 0); + if (!pxa_unit->mpmu_base) { + pr_err("failed to map mpmu registers\n"); + return; + } + + pxa1928_pll_init(pxa_unit); +} +CLK_OF_DECLARE(pxa1928_mpmu_clk, "marvell,pxa1928-mpmu", pxa1928_mpmu_clk_init); + +static void __init pxa1928_apmu_clk_init(struct device_node *np) +{ + struct pxa1928_clk_unit *pxa_unit; + + pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL); + if (!pxa_unit) + return; + + pxa_unit->apmu_base = of_iomap(np, 0); + if (!pxa_unit->apmu_base) { + pr_err("failed to map apmu registers\n"); + return; + } + + mmp_clk_init(np, &pxa_unit->unit, PXA1928_APMU_NR_CLKS); + + pxa1928_axi_periph_clk_init(pxa_unit); +} +CLK_OF_DECLARE(pxa1928_apmu_clk, "marvell,pxa1928-apmu", pxa1928_apmu_clk_init); + +static void __init pxa1928_apbc_clk_init(struct device_node *np) +{ + struct pxa1928_clk_unit *pxa_unit; + + pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL); + if (!pxa_unit) + return; + + pxa_unit->apbc_base = of_iomap(np, 0); + if (!pxa_unit->apbc_base) { + pr_err("failed to map apbc registers\n"); + return; + } + + mmp_clk_init(np, &pxa_unit->unit, PXA1928_APBC_NR_CLKS); + + pxa1928_apb_periph_clk_init(pxa_unit); + pxa1928_clk_reset_init(np, pxa_unit); +} +CLK_OF_DECLARE(pxa1928_apbc_clk, "marvell,pxa1928-apbc", pxa1928_apbc_clk_init); diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c index 5e3c80dad336..13d6173326a4 100644 --- a/drivers/clk/mmp/clk-of-pxa910.c +++ 
b/drivers/clk/mmp/clk-of-pxa910.c @@ -35,6 +35,8 @@ #define APBC_SSP0 0x1c #define APBC_SSP1 0x20 #define APBC_SSP2 0x4c +#define APBC_TIMER0 0x30 +#define APBC_TIMER1 0x44 #define APBCP_TWSI1 0x28 #define APBCP_UART2 0x1c #define APMU_SDH0 0x54 @@ -57,6 +59,7 @@ static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = { {PXA910_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768}, {PXA910_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000}, {PXA910_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 624000000}, + {PXA910_CLK_USB_PLL, "usb_pll", NULL, CLK_IS_ROOT, 480000000}, }; static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = { @@ -69,6 +72,7 @@ static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = { {PXA910_CLK_PLL1_24, "pll1_24", "pll1_12", 1, 2, 0}, {PXA910_CLK_PLL1_48, "pll1_48", "pll1_24", 1, 2, 0}, {PXA910_CLK_PLL1_96, "pll1_96", "pll1_48", 1, 2, 0}, + {PXA910_CLK_PLL1_192, "pll1_192", "pll1_96", 1, 2, 0}, {PXA910_CLK_PLL1_13, "pll1_13", "pll1", 1, 13, 0}, {PXA910_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 2, 3, 0}, {PXA910_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 2, 3, 0}, @@ -115,6 +119,10 @@ static DEFINE_SPINLOCK(ssp0_lock); static DEFINE_SPINLOCK(ssp1_lock); static const char *ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"}; +static DEFINE_SPINLOCK(timer0_lock); +static DEFINE_SPINLOCK(timer1_lock); +static const char *timer_parent_names[] = {"pll1_48", "clk32", "pll1_96"}; + static DEFINE_SPINLOCK(reset_lock); static struct mmp_param_mux_clk apbc_mux_clks[] = { @@ -122,6 +130,8 @@ static struct mmp_param_mux_clk apbc_mux_clks[] = { {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock}, {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP0, 4, 3, 0, &ssp0_lock}, {0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock}, + {0, "timer0_mux", timer_parent_names, ARRAY_SIZE(timer_parent_names), CLK_SET_RATE_PARENT, APBC_TIMER0, 4, 3, 0, &timer0_lock}, + {0, "timer1_mux", timer_parent_names, ARRAY_SIZE(timer_parent_names), CLK_SET_RATE_PARENT, APBC_TIMER1, 4, 3, 0, &timer1_lock}, }; static struct mmp_param_mux_clk apbcp_mux_clks[] = { @@ -142,6 +152,8 @@ static struct mmp_param_gate_clk apbc_gate_clks[] = { {PXA910_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x3, 0x3, 0x0, 0, &uart1_lock}, {PXA910_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, APBC_SSP0, 0x3, 0x3, 0x0, 0, &ssp0_lock}, {PXA910_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x3, 0x3, 0x0, 0, &ssp1_lock}, + {PXA910_CLK_TIMER0, "timer0_clk", "timer0_mux", CLK_SET_RATE_PARENT, APBC_TIMER0, 0x3, 0x3, 0x0, 0, &timer0_lock}, + {PXA910_CLK_TIMER1, "timer1_clk", "timer1_mux", CLK_SET_RATE_PARENT, APBC_TIMER1, 0x3, 0x3, 0x0, 0, &timer1_lock}, }; static struct mmp_param_gate_clk apbcp_gate_clks[] = { diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c index 756f0f39d6a3..2c7c1085f883 100644 --- a/drivers/clk/mvebu/armada-370.c +++ b/drivers/clk/mvebu/armada-370.c @@ -163,6 +163,7 @@ static const struct clk_gating_soc_desc a370_gating_desc[] __initconst = { { "pex1", "pex1_en", 9, 0 }, { "sata0", NULL, 15, 0 }, { "sdio", NULL, 17, 0 }, + { "crypto", NULL, 23, CLK_IGNORE_UNUSED }, { "tdm", NULL, 25, 0 }, { "ddr", NULL, 28, CLK_IGNORE_UNUSED }, { "sata1", NULL, 30, 0 }, diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c index 
22d136aa699f..32216f9b7f03 100644 --- a/drivers/clk/mxs/clk-imx23.c +++ b/drivers/clk/mxs/clk-imx23.c @@ -77,12 +77,12 @@ static void __init clk_misc_init(void) writel_relaxed(30 << BP_FRAC_IOFRAC, FRAC + SET); } -static const char *sel_pll[] __initdata = { "pll", "ref_xtal", }; -static const char *sel_cpu[] __initdata = { "ref_cpu", "ref_xtal", }; -static const char *sel_pix[] __initdata = { "ref_pix", "ref_xtal", }; -static const char *sel_io[] __initdata = { "ref_io", "ref_xtal", }; -static const char *cpu_sels[] __initdata = { "cpu_pll", "cpu_xtal", }; -static const char *emi_sels[] __initdata = { "emi_pll", "emi_xtal", }; +static const char *const sel_pll[] __initconst = { "pll", "ref_xtal", }; +static const char *const sel_cpu[] __initconst = { "ref_cpu", "ref_xtal", }; +static const char *const sel_pix[] __initconst = { "ref_pix", "ref_xtal", }; +static const char *const sel_io[] __initconst = { "ref_io", "ref_xtal", }; +static const char *const cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", }; +static const char *const emi_sels[] __initconst = { "emi_pll", "emi_xtal", }; enum imx23_clk { ref_xtal, pll, ref_cpu, ref_emi, ref_pix, ref_io, saif_sel, diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c index b1be3746ce95..a68670868baa 100644 --- a/drivers/clk/mxs/clk-imx28.c +++ b/drivers/clk/mxs/clk-imx28.c @@ -125,15 +125,15 @@ static void __init clk_misc_init(void) writel_relaxed(val, FRAC0); } -static const char *sel_cpu[] __initdata = { "ref_cpu", "ref_xtal", }; -static const char *sel_io0[] __initdata = { "ref_io0", "ref_xtal", }; -static const char *sel_io1[] __initdata = { "ref_io1", "ref_xtal", }; -static const char *sel_pix[] __initdata = { "ref_pix", "ref_xtal", }; -static const char *sel_gpmi[] __initdata = { "ref_gpmi", "ref_xtal", }; -static const char *sel_pll0[] __initdata = { "pll0", "ref_xtal", }; -static const char *cpu_sels[] __initdata = { "cpu_pll", "cpu_xtal", }; -static const char *emi_sels[] __initdata = { "emi_pll", "emi_xtal", }; -static const char *ptp_sels[] __initdata = { "ref_xtal", "pll0", }; +static const char *const sel_cpu[] __initconst = { "ref_cpu", "ref_xtal", }; +static const char *const sel_io0[] __initconst = { "ref_io0", "ref_xtal", }; +static const char *const sel_io1[] __initconst = { "ref_io1", "ref_xtal", }; +static const char *const sel_pix[] __initconst = { "ref_pix", "ref_xtal", }; +static const char *const sel_gpmi[] __initconst = { "ref_gpmi", "ref_xtal", }; +static const char *const sel_pll0[] __initconst = { "pll0", "ref_xtal", }; +static const char *const cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", }; +static const char *const emi_sels[] __initconst = { "emi_pll", "emi_xtal", }; +static const char *const ptp_sels[] __initconst = { "ref_xtal", "pll0", }; enum imx28_clk { ref_xtal, pll0, pll1, pll2, ref_cpu, ref_emi, ref_io0, ref_io1, diff --git a/drivers/clk/mxs/clk.h b/drivers/clk/mxs/clk.h index ef10ad9b5daa..f07d821dd75d 100644 --- a/drivers/clk/mxs/clk.h +++ b/drivers/clk/mxs/clk.h @@ -49,7 +49,7 @@ static inline struct clk *mxs_clk_gate(const char *name, } static inline struct clk *mxs_clk_mux(const char *name, void __iomem *reg, - u8 shift, u8 width, const char **parent_names, int num_parents) + u8 shift, u8 width, const char *const *parent_names, int num_parents) { return clk_register_mux(NULL, name, parent_names, num_parents, CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT, diff --git a/drivers/clk/nxp/Makefile b/drivers/clk/nxp/Makefile new file mode 100644 index 000000000000..7f608b0ad7b4 --- 
/dev/null +++ b/drivers/clk/nxp/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_ARCH_LPC18XX) += clk-lpc18xx-cgu.o +obj-$(CONFIG_ARCH_LPC18XX) += clk-lpc18xx-ccu.o diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c new file mode 100644 index 000000000000..eeaee97da110 --- /dev/null +++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c @@ -0,0 +1,293 @@ +/* + * Clk driver for NXP LPC18xx/LPC43xx Clock Control Unit (CCU) + * + * Copyright (C) 2015 Joachim Eastwood <[email protected]> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/slab.h> +#include <linux/string.h> + +#include <dt-bindings/clock/lpc18xx-ccu.h> + +/* Bit defines for CCU branch configuration register */ +#define LPC18XX_CCU_RUN BIT(0) +#define LPC18XX_CCU_AUTO BIT(1) +#define LPC18XX_CCU_DIV BIT(5) +#define LPC18XX_CCU_DIVSTAT BIT(27) + +/* CCU branch feature bits */ +#define CCU_BRANCH_IS_BUS BIT(0) +#define CCU_BRANCH_HAVE_DIV2 BIT(1) + +#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw) + +struct lpc18xx_branch_clk_data { + const char **name; + int num; +}; + +struct lpc18xx_clk_branch { + const char *base_name; + const char *name; + u16 offset; + u16 flags; + struct clk *clk; + struct clk_gate gate; +}; + +static struct lpc18xx_clk_branch clk_branches[] = { + {"base_apb3_clk", "apb3_bus", CLK_APB3_BUS, CCU_BRANCH_IS_BUS}, + {"base_apb3_clk", "apb3_i2c1", CLK_APB3_I2C1, 0}, + {"base_apb3_clk", "apb3_dac", CLK_APB3_DAC, 0}, + {"base_apb3_clk", "apb3_adc0", CLK_APB3_ADC0, 0}, + {"base_apb3_clk", "apb3_adc1", CLK_APB3_ADC1, 0}, + {"base_apb3_clk", "apb3_can0", CLK_APB3_CAN0, 0}, + + {"base_apb1_clk", "apb1_bus", CLK_APB1_BUS, CCU_BRANCH_IS_BUS}, + {"base_apb1_clk", "apb1_mc_pwm", CLK_APB1_MOTOCON_PWM, 0}, + {"base_apb1_clk", "apb1_i2c0", CLK_APB1_I2C0, 0}, + {"base_apb1_clk", "apb1_i2s", CLK_APB1_I2S, 0}, + {"base_apb1_clk", "apb1_can1", CLK_APB1_CAN1, 0}, + + {"base_spifi_clk", "spifi", CLK_SPIFI, 0}, + + {"base_cpu_clk", "cpu_bus", CLK_CPU_BUS, CCU_BRANCH_IS_BUS}, + {"base_cpu_clk", "cpu_spifi", CLK_CPU_SPIFI, 0}, + {"base_cpu_clk", "cpu_gpio", CLK_CPU_GPIO, 0}, + {"base_cpu_clk", "cpu_lcd", CLK_CPU_LCD, 0}, + {"base_cpu_clk", "cpu_ethernet", CLK_CPU_ETHERNET, 0}, + {"base_cpu_clk", "cpu_usb0", CLK_CPU_USB0, 0}, + {"base_cpu_clk", "cpu_emc", CLK_CPU_EMC, 0}, + {"base_cpu_clk", "cpu_sdio", CLK_CPU_SDIO, 0}, + {"base_cpu_clk", "cpu_dma", CLK_CPU_DMA, 0}, + {"base_cpu_clk", "cpu_core", CLK_CPU_CORE, 0}, + {"base_cpu_clk", "cpu_sct", CLK_CPU_SCT, 0}, + {"base_cpu_clk", "cpu_usb1", CLK_CPU_USB1, 0}, + {"base_cpu_clk", "cpu_emcdiv", CLK_CPU_EMCDIV, CCU_BRANCH_HAVE_DIV2}, + {"base_cpu_clk", "cpu_flasha", CLK_CPU_FLASHA, CCU_BRANCH_HAVE_DIV2}, + {"base_cpu_clk", "cpu_flashb", CLK_CPU_FLASHB, CCU_BRANCH_HAVE_DIV2}, + {"base_cpu_clk", "cpu_m0app", CLK_CPU_M0APP, CCU_BRANCH_HAVE_DIV2}, + {"base_cpu_clk", "cpu_adchs", CLK_CPU_ADCHS, CCU_BRANCH_HAVE_DIV2}, + {"base_cpu_clk", "cpu_eeprom", CLK_CPU_EEPROM, CCU_BRANCH_HAVE_DIV2}, + {"base_cpu_clk", "cpu_wwdt", CLK_CPU_WWDT, 0}, + {"base_cpu_clk", "cpu_uart0", CLK_CPU_UART0, 0}, + {"base_cpu_clk", "cpu_uart1", CLK_CPU_UART1, 0}, + {"base_cpu_clk", "cpu_ssp0", CLK_CPU_SSP0, 0}, + {"base_cpu_clk", "cpu_timer0", CLK_CPU_TIMER0, 0}, + {"base_cpu_clk", "cpu_timer1", 
CLK_CPU_TIMER1, 0}, + {"base_cpu_clk", "cpu_scu", CLK_CPU_SCU, 0}, + {"base_cpu_clk", "cpu_creg", CLK_CPU_CREG, 0}, + {"base_cpu_clk", "cpu_ritimer", CLK_CPU_RITIMER, 0}, + {"base_cpu_clk", "cpu_uart2", CLK_CPU_UART2, 0}, + {"base_cpu_clk", "cpu_uart3", CLK_CPU_UART3, 0}, + {"base_cpu_clk", "cpu_timer2", CLK_CPU_TIMER2, 0}, + {"base_cpu_clk", "cpu_timer3", CLK_CPU_TIMER3, 0}, + {"base_cpu_clk", "cpu_ssp1", CLK_CPU_SSP1, 0}, + {"base_cpu_clk", "cpu_qei", CLK_CPU_QEI, 0}, + + {"base_periph_clk", "periph_bus", CLK_PERIPH_BUS, CCU_BRANCH_IS_BUS}, + {"base_periph_clk", "periph_core", CLK_PERIPH_CORE, 0}, + {"base_periph_clk", "periph_sgpio", CLK_PERIPH_SGPIO, 0}, + + {"base_usb0_clk", "usb0", CLK_USB0, 0}, + {"base_usb1_clk", "usb1", CLK_USB1, 0}, + {"base_spi_clk", "spi", CLK_SPI, 0}, + {"base_adchs_clk", "adchs", CLK_ADCHS, 0}, + + {"base_audio_clk", "audio", CLK_AUDIO, 0}, + {"base_uart3_clk", "apb2_uart3", CLK_APB2_UART3, 0}, + {"base_uart2_clk", "apb2_uart2", CLK_APB2_UART2, 0}, + {"base_uart1_clk", "apb0_uart1", CLK_APB0_UART1, 0}, + {"base_uart0_clk", "apb0_uart0", CLK_APB0_UART0, 0}, + {"base_ssp1_clk", "apb2_ssp1", CLK_APB2_SSP1, 0}, + {"base_ssp0_clk", "apb0_ssp0", CLK_APB0_SSP0, 0}, + {"base_sdio_clk", "sdio", CLK_SDIO, 0}, +}; + +static struct clk *lpc18xx_ccu_branch_clk_get(struct of_phandle_args *clkspec, + void *data) +{ + struct lpc18xx_branch_clk_data *clk_data = data; + unsigned int offset = clkspec->args[0]; + int i, j; + + for (i = 0; i < ARRAY_SIZE(clk_branches); i++) { + if (clk_branches[i].offset != offset) + continue; + + for (j = 0; j < clk_data->num; j++) { + if (!strcmp(clk_branches[i].base_name, clk_data->name[j])) + return clk_branches[i].clk; + } + } + + pr_err("%s: invalid clock offset %d\n", __func__, offset); + + return ERR_PTR(-EINVAL); +} + +static int lpc18xx_ccu_gate_endisable(struct clk_hw *hw, bool enable) +{ + struct clk_gate *gate = to_clk_gate(hw); + u32 val; + + /* + * Divider field is write only, so divider stat field must + * be read so divider field can be set accordingly. + */ + val = clk_readl(gate->reg); + if (val & LPC18XX_CCU_DIVSTAT) + val |= LPC18XX_CCU_DIV; + + if (enable) { + val |= LPC18XX_CCU_RUN; + } else { + /* + * To safely disable a branch clock a squence of two separate + * writes must be used. First write should set the AUTO bit + * and the next write should clear the RUN bit. 
+ */ + val |= LPC18XX_CCU_AUTO; + clk_writel(val, gate->reg); + + val &= ~LPC18XX_CCU_RUN; + } + + clk_writel(val, gate->reg); + + return 0; +} + +static int lpc18xx_ccu_gate_enable(struct clk_hw *hw) +{ + return lpc18xx_ccu_gate_endisable(hw, true); +} + +static void lpc18xx_ccu_gate_disable(struct clk_hw *hw) +{ + lpc18xx_ccu_gate_endisable(hw, false); +} + +static int lpc18xx_ccu_gate_is_enabled(struct clk_hw *hw) +{ + struct clk_gate *gate = to_clk_gate(hw); + + return clk_readl(gate->reg) & LPC18XX_CCU_RUN; +} + +static const struct clk_ops lpc18xx_ccu_gate_ops = { + .enable = lpc18xx_ccu_gate_enable, + .disable = lpc18xx_ccu_gate_disable, + .is_enabled = lpc18xx_ccu_gate_is_enabled, +}; + +static void lpc18xx_ccu_register_branch_gate_div(struct lpc18xx_clk_branch *branch, + void __iomem *reg_base, + const char *parent) +{ + const struct clk_ops *div_ops = NULL; + struct clk_divider *div = NULL; + struct clk_hw *div_hw = NULL; + + if (branch->flags & CCU_BRANCH_HAVE_DIV2) { + div = kzalloc(sizeof(*div), GFP_KERNEL); + if (!div) + return; + + div->reg = branch->offset + reg_base; + div->flags = CLK_DIVIDER_READ_ONLY; + div->shift = 27; + div->width = 1; + + div_hw = &div->hw; + div_ops = &clk_divider_ops; + } + + branch->gate.reg = branch->offset + reg_base; + branch->gate.bit_idx = 0; + + branch->clk = clk_register_composite(NULL, branch->name, &parent, 1, + NULL, NULL, + div_hw, div_ops, + &branch->gate.hw, &lpc18xx_ccu_gate_ops, 0); + if (IS_ERR(branch->clk)) { + kfree(div); + pr_warn("%s: failed to register %s\n", __func__, branch->name); + return; + } + + /* Grab essential branch clocks for CPU and SDRAM */ + switch (branch->offset) { + case CLK_CPU_EMC: + case CLK_CPU_CORE: + case CLK_CPU_CREG: + case CLK_CPU_EMCDIV: + clk_prepare_enable(branch->clk); + } +} + +static void lpc18xx_ccu_register_branch_clks(void __iomem *reg_base, + const char *base_name) +{ + const char *parent = base_name; + int i; + + for (i = 0; i < ARRAY_SIZE(clk_branches); i++) { + if (strcmp(clk_branches[i].base_name, base_name)) + continue; + + lpc18xx_ccu_register_branch_gate_div(&clk_branches[i], reg_base, + parent); + + if (clk_branches[i].flags & CCU_BRANCH_IS_BUS) + parent = clk_branches[i].name; + } +} + +static void __init lpc18xx_ccu_init(struct device_node *np) +{ + struct lpc18xx_branch_clk_data *clk_data; + void __iomem *reg_base; + int i, ret; + + reg_base = of_iomap(np, 0); + if (!reg_base) { + pr_warn("%s: failed to map address range\n", __func__); + return; + } + + clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + return; + + clk_data->num = of_property_count_strings(np, "clock-names"); + clk_data->name = kcalloc(clk_data->num, sizeof(char *), GFP_KERNEL); + if (!clk_data->name) { + kfree(clk_data); + return; + } + + for (i = 0; i < clk_data->num; i++) { + ret = of_property_read_string_index(np, "clock-names", i, + &clk_data->name[i]); + if (ret) { + pr_warn("%s: failed to get clock name at idx %d\n", + __func__, i); + continue; + } + + lpc18xx_ccu_register_branch_clks(reg_base, clk_data->name[i]); + } + + of_clk_add_provider(np, lpc18xx_ccu_branch_clk_get, clk_data); +} +CLK_OF_DECLARE(lpc18xx_ccu, "nxp,lpc1850-ccu", lpc18xx_ccu_init); diff --git a/drivers/clk/nxp/clk-lpc18xx-cgu.c b/drivers/clk/nxp/clk-lpc18xx-cgu.c new file mode 100644 index 000000000000..81e9e1c788f4 --- /dev/null +++ b/drivers/clk/nxp/clk-lpc18xx-cgu.c @@ -0,0 +1,635 @@ +/* + * Clk driver for NXP LPC18xx/LPC43xx Clock Generation Unit (CGU) + * + * Copyright (C) 2015 Joachim Eastwood <[email 
protected]> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/of_address.h> + +#include <dt-bindings/clock/lpc18xx-cgu.h> + +/* Clock Generation Unit (CGU) registers */ +#define LPC18XX_CGU_XTAL_OSC_CTRL 0x018 +#define LPC18XX_CGU_PLL0USB_STAT 0x01c +#define LPC18XX_CGU_PLL0USB_CTRL 0x020 +#define LPC18XX_CGU_PLL0USB_MDIV 0x024 +#define LPC18XX_CGU_PLL0USB_NP_DIV 0x028 +#define LPC18XX_CGU_PLL0AUDIO_STAT 0x02c +#define LPC18XX_CGU_PLL0AUDIO_CTRL 0x030 +#define LPC18XX_CGU_PLL0AUDIO_MDIV 0x034 +#define LPC18XX_CGU_PLL0AUDIO_NP_DIV 0x038 +#define LPC18XX_CGU_PLL0AUDIO_FRAC 0x03c +#define LPC18XX_CGU_PLL1_STAT 0x040 +#define LPC18XX_CGU_PLL1_CTRL 0x044 +#define LPC18XX_PLL1_CTRL_FBSEL BIT(6) +#define LPC18XX_PLL1_CTRL_DIRECT BIT(7) +#define LPC18XX_CGU_IDIV_CTRL(n) (0x048 + (n) * sizeof(u32)) +#define LPC18XX_CGU_BASE_CLK(id) (0x05c + (id) * sizeof(u32)) +#define LPC18XX_CGU_PLL_CTRL_OFFSET 0x4 + +/* PLL0 bits common to both audio and USB PLL */ +#define LPC18XX_PLL0_STAT_LOCK BIT(0) +#define LPC18XX_PLL0_CTRL_PD BIT(0) +#define LPC18XX_PLL0_CTRL_BYPASS BIT(1) +#define LPC18XX_PLL0_CTRL_DIRECTI BIT(2) +#define LPC18XX_PLL0_CTRL_DIRECTO BIT(3) +#define LPC18XX_PLL0_CTRL_CLKEN BIT(4) +#define LPC18XX_PLL0_MDIV_MDEC_MASK 0x1ffff +#define LPC18XX_PLL0_MDIV_SELP_SHIFT 17 +#define LPC18XX_PLL0_MDIV_SELI_SHIFT 22 +#define LPC18XX_PLL0_MSEL_MAX BIT(15) + +/* Register value that gives PLL0 post/pre dividers equal to 1 */ +#define LPC18XX_PLL0_NP_DIVS_1 0x00302062 + +enum { + CLK_SRC_OSC32, + CLK_SRC_IRC, + CLK_SRC_ENET_RX_CLK, + CLK_SRC_ENET_TX_CLK, + CLK_SRC_GP_CLKIN, + CLK_SRC_RESERVED1, + CLK_SRC_OSC, + CLK_SRC_PLL0USB, + CLK_SRC_PLL0AUDIO, + CLK_SRC_PLL1, + CLK_SRC_RESERVED2, + CLK_SRC_RESERVED3, + CLK_SRC_IDIVA, + CLK_SRC_IDIVB, + CLK_SRC_IDIVC, + CLK_SRC_IDIVD, + CLK_SRC_IDIVE, + CLK_SRC_MAX +}; + +static const char *clk_src_names[CLK_SRC_MAX] = { + [CLK_SRC_OSC32] = "osc32", + [CLK_SRC_IRC] = "irc", + [CLK_SRC_ENET_RX_CLK] = "enet_rx_clk", + [CLK_SRC_ENET_TX_CLK] = "enet_tx_clk", + [CLK_SRC_GP_CLKIN] = "gp_clkin", + [CLK_SRC_OSC] = "osc", + [CLK_SRC_PLL0USB] = "pll0usb", + [CLK_SRC_PLL0AUDIO] = "pll0audio", + [CLK_SRC_PLL1] = "pll1", + [CLK_SRC_IDIVA] = "idiva", + [CLK_SRC_IDIVB] = "idivb", + [CLK_SRC_IDIVC] = "idivc", + [CLK_SRC_IDIVD] = "idivd", + [CLK_SRC_IDIVE] = "idive", +}; + +static const char *clk_base_names[BASE_CLK_MAX] = { + [BASE_SAFE_CLK] = "base_safe_clk", + [BASE_USB0_CLK] = "base_usb0_clk", + [BASE_PERIPH_CLK] = "base_periph_clk", + [BASE_USB1_CLK] = "base_usb1_clk", + [BASE_CPU_CLK] = "base_cpu_clk", + [BASE_SPIFI_CLK] = "base_spifi_clk", + [BASE_SPI_CLK] = "base_spi_clk", + [BASE_PHY_RX_CLK] = "base_phy_rx_clk", + [BASE_PHY_TX_CLK] = "base_phy_tx_clk", + [BASE_APB1_CLK] = "base_apb1_clk", + [BASE_APB3_CLK] = "base_apb3_clk", + [BASE_LCD_CLK] = "base_lcd_clk", + [BASE_ADCHS_CLK] = "base_adchs_clk", + [BASE_SDIO_CLK] = "base_sdio_clk", + [BASE_SSP0_CLK] = "base_ssp0_clk", + [BASE_SSP1_CLK] = "base_ssp1_clk", + [BASE_UART0_CLK] = "base_uart0_clk", + [BASE_UART1_CLK] = "base_uart1_clk", + [BASE_UART2_CLK] = "base_uart2_clk", + [BASE_UART3_CLK] = "base_uart3_clk", + [BASE_OUT_CLK] = "base_out_clk", + [BASE_AUDIO_CLK] = "base_audio_clk", + [BASE_CGU_OUT0_CLK] = "base_cgu_out0_clk", + 
[BASE_CGU_OUT1_CLK] = "base_cgu_out1_clk", +}; + +static u32 lpc18xx_cgu_pll0_src_ids[] = { + CLK_SRC_OSC32, CLK_SRC_IRC, CLK_SRC_ENET_RX_CLK, + CLK_SRC_ENET_TX_CLK, CLK_SRC_GP_CLKIN, CLK_SRC_OSC, + CLK_SRC_PLL1, CLK_SRC_IDIVA, CLK_SRC_IDIVB, CLK_SRC_IDIVC, + CLK_SRC_IDIVD, CLK_SRC_IDIVE, +}; + +static u32 lpc18xx_cgu_pll1_src_ids[] = { + CLK_SRC_OSC32, CLK_SRC_IRC, CLK_SRC_ENET_RX_CLK, + CLK_SRC_ENET_TX_CLK, CLK_SRC_GP_CLKIN, CLK_SRC_OSC, + CLK_SRC_PLL0USB, CLK_SRC_PLL0AUDIO, CLK_SRC_IDIVA, + CLK_SRC_IDIVB, CLK_SRC_IDIVC, CLK_SRC_IDIVD, CLK_SRC_IDIVE, +}; + +static u32 lpc18xx_cgu_idiva_src_ids[] = { + CLK_SRC_OSC32, CLK_SRC_IRC, CLK_SRC_ENET_RX_CLK, + CLK_SRC_ENET_TX_CLK, CLK_SRC_GP_CLKIN, CLK_SRC_OSC, + CLK_SRC_PLL0USB, CLK_SRC_PLL0AUDIO, CLK_SRC_PLL1 +}; + +static u32 lpc18xx_cgu_idivbcde_src_ids[] = { + CLK_SRC_OSC32, CLK_SRC_IRC, CLK_SRC_ENET_RX_CLK, + CLK_SRC_ENET_TX_CLK, CLK_SRC_GP_CLKIN, CLK_SRC_OSC, + CLK_SRC_PLL0AUDIO, CLK_SRC_PLL1, CLK_SRC_IDIVA, +}; + +static u32 lpc18xx_cgu_base_irc_src_ids[] = {CLK_SRC_IRC}; + +static u32 lpc18xx_cgu_base_usb0_src_ids[] = {CLK_SRC_PLL0USB}; + +static u32 lpc18xx_cgu_base_common_src_ids[] = { + CLK_SRC_OSC32, CLK_SRC_IRC, CLK_SRC_ENET_RX_CLK, + CLK_SRC_ENET_TX_CLK, CLK_SRC_GP_CLKIN, CLK_SRC_OSC, + CLK_SRC_PLL0AUDIO, CLK_SRC_PLL1, CLK_SRC_IDIVA, + CLK_SRC_IDIVB, CLK_SRC_IDIVC, CLK_SRC_IDIVD, CLK_SRC_IDIVE, +}; + +static u32 lpc18xx_cgu_base_all_src_ids[] = { + CLK_SRC_OSC32, CLK_SRC_IRC, CLK_SRC_ENET_RX_CLK, + CLK_SRC_ENET_TX_CLK, CLK_SRC_GP_CLKIN, CLK_SRC_OSC, + CLK_SRC_PLL0USB, CLK_SRC_PLL0AUDIO, CLK_SRC_PLL1, + CLK_SRC_IDIVA, CLK_SRC_IDIVB, CLK_SRC_IDIVC, + CLK_SRC_IDIVD, CLK_SRC_IDIVE, +}; + +struct lpc18xx_cgu_src_clk_div { + u8 clk_id; + u8 n_parents; + struct clk_divider div; + struct clk_mux mux; + struct clk_gate gate; +}; + +#define LPC1XX_CGU_SRC_CLK_DIV(_id, _width, _table) \ +{ \ + .clk_id = CLK_SRC_ ##_id, \ + .n_parents = ARRAY_SIZE(lpc18xx_cgu_ ##_table), \ + .div = { \ + .shift = 2, \ + .width = _width, \ + }, \ + .mux = { \ + .mask = 0x1f, \ + .shift = 24, \ + .table = lpc18xx_cgu_ ##_table, \ + }, \ + .gate = { \ + .bit_idx = 0, \ + .flags = CLK_GATE_SET_TO_DISABLE, \ + }, \ +} + +static struct lpc18xx_cgu_src_clk_div lpc18xx_cgu_src_clk_divs[] = { + LPC1XX_CGU_SRC_CLK_DIV(IDIVA, 2, idiva_src_ids), + LPC1XX_CGU_SRC_CLK_DIV(IDIVB, 4, idivbcde_src_ids), + LPC1XX_CGU_SRC_CLK_DIV(IDIVC, 4, idivbcde_src_ids), + LPC1XX_CGU_SRC_CLK_DIV(IDIVD, 4, idivbcde_src_ids), + LPC1XX_CGU_SRC_CLK_DIV(IDIVE, 8, idivbcde_src_ids), +}; + +struct lpc18xx_cgu_base_clk { + u8 clk_id; + u8 n_parents; + struct clk_mux mux; + struct clk_gate gate; +}; + +#define LPC1XX_CGU_BASE_CLK(_id, _table, _flags) \ +{ \ + .clk_id = BASE_ ##_id ##_CLK, \ + .n_parents = ARRAY_SIZE(lpc18xx_cgu_ ##_table), \ + .mux = { \ + .mask = 0x1f, \ + .shift = 24, \ + .table = lpc18xx_cgu_ ##_table, \ + .flags = _flags, \ + }, \ + .gate = { \ + .bit_idx = 0, \ + .flags = CLK_GATE_SET_TO_DISABLE, \ + }, \ +} + +static struct lpc18xx_cgu_base_clk lpc18xx_cgu_base_clks[] = { + LPC1XX_CGU_BASE_CLK(SAFE, base_irc_src_ids, CLK_MUX_READ_ONLY), + LPC1XX_CGU_BASE_CLK(USB0, base_usb0_src_ids, 0), + LPC1XX_CGU_BASE_CLK(PERIPH, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(USB1, base_all_src_ids, 0), + LPC1XX_CGU_BASE_CLK(CPU, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(SPIFI, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(SPI, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(PHY_RX, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(PHY_TX, base_common_src_ids, 0), + 
LPC1XX_CGU_BASE_CLK(APB1, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(APB3, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(LCD, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(ADCHS, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(SDIO, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(SSP0, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(SSP1, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(UART0, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(UART1, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(UART2, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(UART3, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(OUT, base_all_src_ids, 0), + { /* 21 reserved */ }, + { /* 22 reserved */ }, + { /* 23 reserved */ }, + { /* 24 reserved */ }, + LPC1XX_CGU_BASE_CLK(AUDIO, base_common_src_ids, 0), + LPC1XX_CGU_BASE_CLK(CGU_OUT0, base_all_src_ids, 0), + LPC1XX_CGU_BASE_CLK(CGU_OUT1, base_all_src_ids, 0), +}; + +struct lpc18xx_pll { + struct clk_hw hw; + void __iomem *reg; + spinlock_t *lock; + u8 flags; +}; + +#define to_lpc_pll(hw) container_of(hw, struct lpc18xx_pll, hw) + +struct lpc18xx_cgu_pll_clk { + u8 clk_id; + u8 n_parents; + u8 reg_offset; + struct clk_mux mux; + struct clk_gate gate; + struct lpc18xx_pll pll; + const struct clk_ops *pll_ops; +}; + +#define LPC1XX_CGU_CLK_PLL(_id, _table, _pll_ops) \ +{ \ + .clk_id = CLK_SRC_ ##_id, \ + .n_parents = ARRAY_SIZE(lpc18xx_cgu_ ##_table), \ + .reg_offset = LPC18XX_CGU_ ##_id ##_STAT, \ + .mux = { \ + .mask = 0x1f, \ + .shift = 24, \ + .table = lpc18xx_cgu_ ##_table, \ + }, \ + .gate = { \ + .bit_idx = 0, \ + .flags = CLK_GATE_SET_TO_DISABLE, \ + }, \ + .pll_ops = &lpc18xx_ ##_pll_ops, \ +} + +/* + * PLL0 uses a special register value encoding. The compute functions below + * are taken or derived from the LPC1850 user manual (section 12.6.3.3). + */ + +/* Compute PLL0 multiplier from decoded version */ +static u32 lpc18xx_pll0_mdec2msel(u32 x) +{ + int i; + + switch (x) { + case 0x18003: return 1; + case 0x10003: return 2; + default: + for (i = LPC18XX_PLL0_MSEL_MAX + 1; x != 0x4000 && i > 0; i--) + x = ((x ^ x >> 14) & 1) | (x << 1 & 0x7fff); + return i; + } +} +/* Compute PLL0 decoded multiplier from binary version */ +static u32 lpc18xx_pll0_msel2mdec(u32 msel) +{ + u32 i, x = 0x4000; + + switch (msel) { + case 0: return 0; + case 1: return 0x18003; + case 2: return 0x10003; + default: + for (i = msel; i <= LPC18XX_PLL0_MSEL_MAX; i++) + x = ((x ^ x >> 1) & 1) << 14 | (x >> 1 & 0xffff); + return x; + } +} + +/* Compute PLL0 bandwidth SELI reg from multiplier */ +static u32 lpc18xx_pll0_msel2seli(u32 msel) +{ + u32 tmp; + + if (msel > 16384) return 1; + if (msel > 8192) return 2; + if (msel > 2048) return 4; + if (msel >= 501) return 8; + if (msel >= 60) { + tmp = 1024 / (msel + 9); + return ((1024 == (tmp * (msel + 9))) == 0) ? 
tmp * 4 : (tmp + 1) * 4; + } + + return (msel & 0x3c) + 4; +} + +/* Compute PLL0 bandwidth SELP reg from multiplier */ +static u32 lpc18xx_pll0_msel2selp(u32 msel) +{ + if (msel < 60) + return (msel >> 1) + 1; + + return 31; +} + +static unsigned long lpc18xx_pll0_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct lpc18xx_pll *pll = to_lpc_pll(hw); + u32 ctrl, mdiv, msel, npdiv; + + ctrl = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_CTRL); + mdiv = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_MDIV); + npdiv = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_NP_DIV); + + if (ctrl & LPC18XX_PLL0_CTRL_BYPASS) + return parent_rate; + + if (npdiv != LPC18XX_PLL0_NP_DIVS_1) { + pr_warn("%s: pre/post dividers not supported\n", __func__); + return 0; + } + + msel = lpc18xx_pll0_mdec2msel(mdiv & LPC18XX_PLL0_MDIV_MDEC_MASK); + if (msel) + return 2 * msel * parent_rate; + + pr_warn("%s: unable to calculate rate\n", __func__); + + return 0; +} + +static long lpc18xx_pll0_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + unsigned long m; + + if (*prate < rate) { + pr_warn("%s: pll dividers not supported\n", __func__); + return -EINVAL; + } + + m = DIV_ROUND_UP_ULL(*prate, rate * 2); + if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) { + pr_warn("%s: unable to support rate %lu\n", __func__, rate); + return -EINVAL; + } + + return 2 * *prate * m; +} + +static int lpc18xx_pll0_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct lpc18xx_pll *pll = to_lpc_pll(hw); + u32 ctrl, stat, m; + int retry = 3; + + if (parent_rate < rate) { + pr_warn("%s: pll dividers not supported\n", __func__); + return -EINVAL; + } + + m = DIV_ROUND_UP_ULL(parent_rate, rate * 2); + if (m <= 0 && m > LPC18XX_PLL0_MSEL_MAX) { + pr_warn("%s: unable to support rate %lu\n", __func__, rate); + return -EINVAL; + } + + m = lpc18xx_pll0_msel2mdec(m); + m |= lpc18xx_pll0_msel2selp(m) << LPC18XX_PLL0_MDIV_SELP_SHIFT; + m |= lpc18xx_pll0_msel2seli(m) << LPC18XX_PLL0_MDIV_SELI_SHIFT; + + /* Power down PLL, disable clk output and dividers */ + ctrl = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_CTRL); + ctrl |= LPC18XX_PLL0_CTRL_PD; + ctrl &= ~(LPC18XX_PLL0_CTRL_BYPASS | LPC18XX_PLL0_CTRL_DIRECTI | + LPC18XX_PLL0_CTRL_DIRECTO | LPC18XX_PLL0_CTRL_CLKEN); + clk_writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL); + + /* Configure new PLL settings */ + clk_writel(m, pll->reg + LPC18XX_CGU_PLL0USB_MDIV); + clk_writel(LPC18XX_PLL0_NP_DIVS_1, pll->reg + LPC18XX_CGU_PLL0USB_NP_DIV); + + /* Power up PLL and wait for lock */ + ctrl &= ~LPC18XX_PLL0_CTRL_PD; + clk_writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL); + do { + udelay(10); + stat = clk_readl(pll->reg + LPC18XX_CGU_PLL0USB_STAT); + if (stat & LPC18XX_PLL0_STAT_LOCK) { + ctrl |= LPC18XX_PLL0_CTRL_CLKEN; + clk_writel(ctrl, pll->reg + LPC18XX_CGU_PLL0USB_CTRL); + + return 0; + } + } while (retry--); + + pr_warn("%s: unable to lock pll\n", __func__); + + return -EINVAL; +} + +static const struct clk_ops lpc18xx_pll0_ops = { + .recalc_rate = lpc18xx_pll0_recalc_rate, + .round_rate = lpc18xx_pll0_round_rate, + .set_rate = lpc18xx_pll0_set_rate, +}; + +static unsigned long lpc18xx_pll1_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct lpc18xx_pll *pll = to_lpc_pll(hw); + u16 msel, nsel, psel; + bool direct, fbsel; + u32 stat, ctrl; + + stat = clk_readl(pll->reg + LPC18XX_CGU_PLL1_STAT); + ctrl = clk_readl(pll->reg + LPC18XX_CGU_PLL1_CTRL); + + direct = (ctrl & LPC18XX_PLL1_CTRL_DIRECT) ? 
true : false; + fbsel = (ctrl & LPC18XX_PLL1_CTRL_FBSEL) ? true : false; + + msel = ((ctrl >> 16) & 0xff) + 1; + nsel = ((ctrl >> 12) & 0x3) + 1; + + if (direct || fbsel) + return msel * (parent_rate / nsel); + + psel = (ctrl >> 8) & 0x3; + psel = 1 << psel; + + return (msel / (2 * psel)) * (parent_rate / nsel); +} + +static const struct clk_ops lpc18xx_pll1_ops = { + .recalc_rate = lpc18xx_pll1_recalc_rate, +}; + +static struct lpc18xx_cgu_pll_clk lpc18xx_cgu_src_clk_plls[] = { + LPC1XX_CGU_CLK_PLL(PLL0USB, pll0_src_ids, pll0_ops), + LPC1XX_CGU_CLK_PLL(PLL0AUDIO, pll0_src_ids, pll0_ops), + LPC1XX_CGU_CLK_PLL(PLL1, pll1_src_ids, pll1_ops), +}; + +static void lpc18xx_fill_parent_names(const char **parent, u32 *id, int size) +{ + int i; + + for (i = 0; i < size; i++) + parent[i] = clk_src_names[id[i]]; +} + +static struct clk *lpc18xx_cgu_register_div(struct lpc18xx_cgu_src_clk_div *clk, + void __iomem *base, int n) +{ + void __iomem *reg = base + LPC18XX_CGU_IDIV_CTRL(n); + const char *name = clk_src_names[clk->clk_id]; + const char *parents[CLK_SRC_MAX]; + + clk->div.reg = reg; + clk->mux.reg = reg; + clk->gate.reg = reg; + + lpc18xx_fill_parent_names(parents, clk->mux.table, clk->n_parents); + + return clk_register_composite(NULL, name, parents, clk->n_parents, + &clk->mux.hw, &clk_mux_ops, + &clk->div.hw, &clk_divider_ops, + &clk->gate.hw, &clk_gate_ops, 0); +} + + +static struct clk *lpc18xx_register_base_clk(struct lpc18xx_cgu_base_clk *clk, + void __iomem *reg_base, int n) +{ + void __iomem *reg = reg_base + LPC18XX_CGU_BASE_CLK(n); + const char *name = clk_base_names[clk->clk_id]; + const char *parents[CLK_SRC_MAX]; + + if (clk->n_parents == 0) + return ERR_PTR(-ENOENT); + + clk->mux.reg = reg; + clk->gate.reg = reg; + + lpc18xx_fill_parent_names(parents, clk->mux.table, clk->n_parents); + + /* SAFE_CLK can not be turned off */ + if (n == BASE_SAFE_CLK) + return clk_register_composite(NULL, name, parents, clk->n_parents, + &clk->mux.hw, &clk_mux_ops, + NULL, NULL, NULL, NULL, 0); + + return clk_register_composite(NULL, name, parents, clk->n_parents, + &clk->mux.hw, &clk_mux_ops, + NULL, NULL, + &clk->gate.hw, &clk_gate_ops, 0); +} + + +static struct clk *lpc18xx_cgu_register_pll(struct lpc18xx_cgu_pll_clk *clk, + void __iomem *base) +{ + const char *name = clk_src_names[clk->clk_id]; + const char *parents[CLK_SRC_MAX]; + + clk->pll.reg = base; + clk->mux.reg = base + clk->reg_offset + LPC18XX_CGU_PLL_CTRL_OFFSET; + clk->gate.reg = base + clk->reg_offset + LPC18XX_CGU_PLL_CTRL_OFFSET; + + lpc18xx_fill_parent_names(parents, clk->mux.table, clk->n_parents); + + return clk_register_composite(NULL, name, parents, clk->n_parents, + &clk->mux.hw, &clk_mux_ops, + &clk->pll.hw, clk->pll_ops, + &clk->gate.hw, &clk_gate_ops, 0); +} + +static void __init lpc18xx_cgu_register_source_clks(struct device_node *np, + void __iomem *base) +{ + const char *parents[CLK_SRC_MAX]; + struct clk *clk; + int i; + + /* Register the internal 12 MHz RC oscillator (IRC) */ + clk = clk_register_fixed_rate(NULL, clk_src_names[CLK_SRC_IRC], + NULL, CLK_IS_ROOT, 12000000); + if (IS_ERR(clk)) + pr_warn("%s: failed to register irc clk\n", __func__); + + /* Register crystal oscillator controlller */ + parents[0] = of_clk_get_parent_name(np, 0); + clk = clk_register_gate(NULL, clk_src_names[CLK_SRC_OSC], parents[0], + 0, base + LPC18XX_CGU_XTAL_OSC_CTRL, + 0, CLK_GATE_SET_TO_DISABLE, NULL); + if (IS_ERR(clk)) + pr_warn("%s: failed to register osc clk\n", __func__); + + /* Register all PLLs */ + for (i = 0; i < 
ARRAY_SIZE(lpc18xx_cgu_src_clk_plls); i++) { + clk = lpc18xx_cgu_register_pll(&lpc18xx_cgu_src_clk_plls[i], + base); + if (IS_ERR(clk)) + pr_warn("%s: failed to register pll (%d)\n", __func__, i); + } + + /* Register all clock dividers A-E */ + for (i = 0; i < ARRAY_SIZE(lpc18xx_cgu_src_clk_divs); i++) { + clk = lpc18xx_cgu_register_div(&lpc18xx_cgu_src_clk_divs[i], + base, i); + if (IS_ERR(clk)) + pr_warn("%s: failed to register div %d\n", __func__, i); + } +} + +static struct clk *clk_base[BASE_CLK_MAX]; +static struct clk_onecell_data clk_base_data = { + .clks = clk_base, + .clk_num = BASE_CLK_MAX, +}; + +static void __init lpc18xx_cgu_register_base_clks(void __iomem *reg_base) +{ + int i; + + for (i = BASE_SAFE_CLK; i < BASE_CLK_MAX; i++) { + clk_base[i] = lpc18xx_register_base_clk(&lpc18xx_cgu_base_clks[i], + reg_base, i); + if (IS_ERR(clk_base[i]) && PTR_ERR(clk_base[i]) != -ENOENT) + pr_warn("%s: register base clk %d failed\n", __func__, i); + } +} + +static void __init lpc18xx_cgu_init(struct device_node *np) +{ + void __iomem *reg_base; + + reg_base = of_iomap(np, 0); + if (!reg_base) { + pr_warn("%s: failed to map address range\n", __func__); + return; + } + + lpc18xx_cgu_register_source_clks(np, reg_base); + lpc18xx_cgu_register_base_clks(reg_base); + + of_clk_add_provider(np, of_clk_src_onecell_get, &clk_base_data); +} +CLK_OF_DECLARE(lpc18xx_cgu, "nxp,lpc1850-cgu", lpc18xx_cgu_init); diff --git a/drivers/clk/pistachio/clk-pll.c b/drivers/clk/pistachio/clk-pll.c index de537560bf70..e17dada0dd21 100644 --- a/drivers/clk/pistachio/clk-pll.c +++ b/drivers/clk/pistachio/clk-pll.c @@ -6,9 +6,12 @@ * version 2, as published by the Free Software Foundation. */ +#define pr_fmt(fmt) "%s: " fmt, __func__ + #include <linux/clk-provider.h> #include <linux/io.h> #include <linux/kernel.h> +#include <linux/printk.h> #include <linux/slab.h> #include "clk.h" @@ -50,6 +53,18 @@ #define PLL_CTRL4 0x10 #define PLL_FRAC_CTRL4_BYPASS BIT(28) +#define MIN_PFD 9600000UL +#define MIN_VCO_LA 400000000UL +#define MAX_VCO_LA 1600000000UL +#define MIN_VCO_FRAC_INT 600000000UL +#define MAX_VCO_FRAC_INT 1600000000UL +#define MIN_VCO_FRAC_FRAC 600000000UL +#define MAX_VCO_FRAC_FRAC 2400000000UL +#define MIN_OUTPUT_LA 8000000UL +#define MAX_OUTPUT_LA 1600000000UL +#define MIN_OUTPUT_FRAC 12000000UL +#define MAX_OUTPUT_FRAC 1600000000UL + struct pistachio_clk_pll { struct clk_hw hw; void __iomem *base; @@ -67,6 +82,12 @@ static inline void pll_writel(struct pistachio_clk_pll *pll, u32 val, u32 reg) writel(val, pll->base + reg); } +static inline void pll_lock(struct pistachio_clk_pll *pll) +{ + while (!(pll_readl(pll, PLL_STATUS) & PLL_STATUS_LOCK)) + cpu_relax(); +} + static inline u32 do_div_round_closest(u64 dividend, u32 divisor) { dividend += divisor / 2; @@ -124,6 +145,8 @@ static int pll_gf40lp_frac_enable(struct clk_hw *hw) val &= ~PLL_FRAC_CTRL4_BYPASS; pll_writel(pll, val, PLL_CTRL4); + pll_lock(pll); + return 0; } @@ -149,16 +172,29 @@ static int pll_gf40lp_frac_set_rate(struct clk_hw *hw, unsigned long rate, { struct pistachio_clk_pll *pll = to_pistachio_pll(hw); struct pistachio_pll_rate_table *params; - bool was_enabled; - u32 val; + int enabled = pll_gf40lp_frac_is_enabled(hw); + u32 val, vco, old_postdiv1, old_postdiv2; + const char *name = __clk_get_name(hw->clk); + + if (rate < MIN_OUTPUT_FRAC || rate > MAX_OUTPUT_FRAC) + return -EINVAL; params = pll_get_params(pll, parent_rate, rate); - if (!params) + if (!params || !params->refdiv) return -EINVAL; - was_enabled = 
pll_gf40lp_frac_is_enabled(hw); - if (!was_enabled) - pll_gf40lp_frac_enable(hw); + vco = params->fref * params->fbdiv / params->refdiv; + if (vco < MIN_VCO_FRAC_FRAC || vco > MAX_VCO_FRAC_FRAC) + pr_warn("%s: VCO %u is out of range %lu..%lu\n", name, vco, + MIN_VCO_FRAC_FRAC, MAX_VCO_FRAC_FRAC); + + val = params->fref / params->refdiv; + if (val < MIN_PFD) + pr_warn("%s: PFD %u is too low (min %lu)\n", + name, val, MIN_PFD); + if (val > vco / 16) + pr_warn("%s: PFD %u is too high (max %u)\n", + name, val, vco / 16); val = pll_readl(pll, PLL_CTRL1); val &= ~((PLL_CTRL1_REFDIV_MASK << PLL_CTRL1_REFDIV_SHIFT) | @@ -168,6 +204,19 @@ static int pll_gf40lp_frac_set_rate(struct clk_hw *hw, unsigned long rate, pll_writel(pll, val, PLL_CTRL1); val = pll_readl(pll, PLL_CTRL2); + + old_postdiv1 = (val >> PLL_FRAC_CTRL2_POSTDIV1_SHIFT) & + PLL_FRAC_CTRL2_POSTDIV1_MASK; + old_postdiv2 = (val >> PLL_FRAC_CTRL2_POSTDIV2_SHIFT) & + PLL_FRAC_CTRL2_POSTDIV2_MASK; + if (enabled && + (params->postdiv1 != old_postdiv1 || + params->postdiv2 != old_postdiv2)) + pr_warn("%s: changing postdiv while PLL is enabled\n", name); + + if (params->postdiv2 > params->postdiv1) + pr_warn("%s: postdiv2 should not exceed postdiv1\n", name); + val &= ~((PLL_FRAC_CTRL2_FRAC_MASK << PLL_FRAC_CTRL2_FRAC_SHIFT) | (PLL_FRAC_CTRL2_POSTDIV1_MASK << PLL_FRAC_CTRL2_POSTDIV1_SHIFT) | @@ -178,11 +227,8 @@ static int pll_gf40lp_frac_set_rate(struct clk_hw *hw, unsigned long rate, (params->postdiv2 << PLL_FRAC_CTRL2_POSTDIV2_SHIFT); pll_writel(pll, val, PLL_CTRL2); - while (!(pll_readl(pll, PLL_STATUS) & PLL_STATUS_LOCK)) - cpu_relax(); - - if (!was_enabled) - pll_gf40lp_frac_disable(hw); + if (enabled) + pll_lock(pll); return 0; } @@ -241,6 +287,8 @@ static int pll_gf40lp_laint_enable(struct clk_hw *hw) val &= ~PLL_INT_CTRL2_BYPASS; pll_writel(pll, val, PLL_CTRL2); + pll_lock(pll); + return 0; } @@ -266,18 +314,44 @@ static int pll_gf40lp_laint_set_rate(struct clk_hw *hw, unsigned long rate, { struct pistachio_clk_pll *pll = to_pistachio_pll(hw); struct pistachio_pll_rate_table *params; - bool was_enabled; - u32 val; + int enabled = pll_gf40lp_laint_is_enabled(hw); + u32 val, vco, old_postdiv1, old_postdiv2; + const char *name = __clk_get_name(hw->clk); + + if (rate < MIN_OUTPUT_LA || rate > MAX_OUTPUT_LA) + return -EINVAL; params = pll_get_params(pll, parent_rate, rate); - if (!params) + if (!params || !params->refdiv) return -EINVAL; - was_enabled = pll_gf40lp_laint_is_enabled(hw); - if (!was_enabled) - pll_gf40lp_laint_enable(hw); + vco = params->fref * params->fbdiv / params->refdiv; + if (vco < MIN_VCO_LA || vco > MAX_VCO_LA) + pr_warn("%s: VCO %u is out of range %lu..%lu\n", name, vco, + MIN_VCO_LA, MAX_VCO_LA); + + val = params->fref / params->refdiv; + if (val < MIN_PFD) + pr_warn("%s: PFD %u is too low (min %lu)\n", + name, val, MIN_PFD); + if (val > vco / 16) + pr_warn("%s: PFD %u is too high (max %u)\n", + name, val, vco / 16); val = pll_readl(pll, PLL_CTRL1); + + old_postdiv1 = (val >> PLL_INT_CTRL1_POSTDIV1_SHIFT) & + PLL_INT_CTRL1_POSTDIV1_MASK; + old_postdiv2 = (val >> PLL_INT_CTRL1_POSTDIV2_SHIFT) & + PLL_INT_CTRL1_POSTDIV2_MASK; + if (enabled && + (params->postdiv1 != old_postdiv1 || + params->postdiv2 != old_postdiv2)) + pr_warn("%s: changing postdiv while PLL is enabled\n", name); + + if (params->postdiv2 > params->postdiv1) + pr_warn("%s: postdiv2 should not exceed postdiv1\n", name); + val &= ~((PLL_CTRL1_REFDIV_MASK << PLL_CTRL1_REFDIV_SHIFT) | (PLL_CTRL1_FBDIV_MASK << PLL_CTRL1_FBDIV_SHIFT) | 
(PLL_INT_CTRL1_POSTDIV1_MASK << PLL_INT_CTRL1_POSTDIV1_SHIFT) | @@ -288,11 +362,8 @@ static int pll_gf40lp_laint_set_rate(struct clk_hw *hw, unsigned long rate, (params->postdiv2 << PLL_INT_CTRL1_POSTDIV2_SHIFT); pll_writel(pll, val, PLL_CTRL1); - while (!(pll_readl(pll, PLL_STATUS) & PLL_STATUS_LOCK)) - cpu_relax(); - - if (!was_enabled) - pll_gf40lp_laint_disable(hw); + if (enabled) + pll_lock(pll); return 0; } diff --git a/drivers/clk/pxa/clk-pxa.h b/drivers/clk/pxa/clk-pxa.h index b04c5b9c0ea8..d1de805df867 100644 --- a/drivers/clk/pxa/clk-pxa.h +++ b/drivers/clk/pxa/clk-pxa.h @@ -14,7 +14,7 @@ #define _CLK_PXA_ #define PARENTS(name) \ - static const char *name ## _parents[] __initdata + static const char *const name ## _parents[] __initconst #define MUX_RO_RATE_RO_OPS(name, clk_name) \ static struct clk_hw name ## _mux_hw; \ static struct clk_hw name ## _rate_hw; \ @@ -72,7 +72,7 @@ struct desc_clk_cken { const char *name; const char *dev_id; const char *con_id; - const char **parent_names; + const char * const *parent_names; struct clk_fixed_factor lp; struct clk_fixed_factor hp; struct clk_gate gate; diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c index 8539c4fd34cc..fb7721bd37e6 100644 --- a/drivers/clk/rockchip/clk-cpu.c +++ b/drivers/clk/rockchip/clk-cpu.c @@ -231,7 +231,7 @@ static int rockchip_cpuclk_notifier_cb(struct notifier_block *nb, } struct clk *rockchip_clk_register_cpuclk(const char *name, - const char **parent_names, u8 num_parents, + const char *const *parent_names, u8 num_parents, const struct rockchip_cpuclk_reg_data *reg_data, const struct rockchip_cpuclk_rate_table *rates, int nrates, void __iomem *reg_base, spinlock_t *lock) diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c index c842e3b60f21..e9f8df324e7c 100644 --- a/drivers/clk/rockchip/clk-mmc-phase.c +++ b/drivers/clk/rockchip/clk-mmc-phase.c @@ -120,7 +120,7 @@ static const struct clk_ops rockchip_mmc_clk_ops = { }; struct clk *rockchip_clk_register_mmc(const char *name, - const char **parent_names, u8 num_parents, + const char *const *parent_names, u8 num_parents, void __iomem *reg, int shift) { struct clk_init_data init; diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c index f8d3baf275b2..76027261f7ed 100644 --- a/drivers/clk/rockchip/clk-pll.c +++ b/drivers/clk/rockchip/clk-pll.c @@ -329,10 +329,10 @@ static const struct clk_ops rockchip_rk3066_pll_clk_ops = { */ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type, - const char *name, const char **parent_names, u8 num_parents, - void __iomem *base, int con_offset, int grf_lock_offset, - int lock_shift, int mode_offset, int mode_shift, - struct rockchip_pll_rate_table *rate_table, + const char *name, const char *const *parent_names, + u8 num_parents, void __iomem *base, int con_offset, + int grf_lock_offset, int lock_shift, int mode_offset, + int mode_shift, struct rockchip_pll_rate_table *rate_table, u8 clk_pll_flags, spinlock_t *lock) { const char *pll_parents[3]; diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c index 556ce041d371..e4f9d472f1ff 100644 --- a/drivers/clk/rockchip/clk-rk3188.c +++ b/drivers/clk/rockchip/clk-rk3188.c @@ -26,7 +26,7 @@ enum rk3188_plls { apll, cpll, dpll, gpll, }; -struct rockchip_pll_rate_table rk3188_pll_rates[] = { +static struct rockchip_pll_rate_table rk3188_pll_rates[] = { RK3066_PLL_RATE(2208000000, 1, 92, 1), RK3066_PLL_RATE(2184000000, 1, 91, 1), 
RK3066_PLL_RATE(2160000000, 1, 90, 1), diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index d17eb4528a28..4f817ed9e6ee 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -27,7 +27,7 @@ enum rk3288_plls { apll, dpll, cpll, gpll, npll, }; -struct rockchip_pll_rate_table rk3288_pll_rates[] = { +static struct rockchip_pll_rate_table rk3288_pll_rates[] = { RK3066_PLL_RATE(2208000000, 1, 92, 1), RK3066_PLL_RATE(2184000000, 1, 91, 1), RK3066_PLL_RATE(2160000000, 1, 90, 1), diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c index edb5d489ae61..052b94db0ff9 100644 --- a/drivers/clk/rockchip/clk.c +++ b/drivers/clk/rockchip/clk.c @@ -39,7 +39,7 @@ * sometimes without one of those components. */ static struct clk *rockchip_clk_register_branch(const char *name, - const char **parent_names, u8 num_parents, void __iomem *base, + const char *const *parent_names, u8 num_parents, void __iomem *base, int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags, u8 div_shift, u8 div_width, u8 div_flags, struct clk_div_table *div_table, int gate_offset, @@ -103,8 +103,8 @@ static struct clk *rockchip_clk_register_branch(const char *name, } static struct clk *rockchip_clk_register_frac_branch(const char *name, - const char **parent_names, u8 num_parents, void __iomem *base, - int muxdiv_offset, u8 div_flags, + const char *const *parent_names, u8 num_parents, + void __iomem *base, int muxdiv_offset, u8 div_flags, int gate_offset, u8 gate_shift, u8 gate_flags, unsigned long flags, spinlock_t *lock) { @@ -297,7 +297,7 @@ void __init rockchip_clk_register_branches( } void __init rockchip_clk_register_armclk(unsigned int lookup_id, - const char *name, const char **parent_names, + const char *name, const char *const *parent_names, u8 num_parents, const struct rockchip_cpuclk_reg_data *reg_data, const struct rockchip_cpuclk_rate_table *rates, diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h index e63cafe893e1..6b092673048a 100644 --- a/drivers/clk/rockchip/clk.h +++ b/drivers/clk/rockchip/clk.h @@ -108,7 +108,7 @@ struct rockchip_pll_rate_table { struct rockchip_pll_clock { unsigned int id; const char *name; - const char **parent_names; + const char *const *parent_names; u8 num_parents; unsigned long flags; int con_offset; @@ -140,10 +140,10 @@ struct rockchip_pll_clock { } struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type, - const char *name, const char **parent_names, u8 num_parents, - void __iomem *base, int con_offset, int grf_lock_offset, - int lock_shift, int reg_mode, int mode_shift, - struct rockchip_pll_rate_table *rate_table, + const char *name, const char *const *parent_names, + u8 num_parents, void __iomem *base, int con_offset, + int grf_lock_offset, int lock_shift, int reg_mode, + int mode_shift, struct rockchip_pll_rate_table *rate_table, u8 clk_pll_flags, spinlock_t *lock); struct rockchip_cpuclk_clksel { @@ -173,16 +173,16 @@ struct rockchip_cpuclk_reg_data { }; struct clk *rockchip_clk_register_cpuclk(const char *name, - const char **parent_names, u8 num_parents, + const char *const *parent_names, u8 num_parents, const struct rockchip_cpuclk_reg_data *reg_data, const struct rockchip_cpuclk_rate_table *rates, int nrates, void __iomem *reg_base, spinlock_t *lock); struct clk *rockchip_clk_register_mmc(const char *name, - const char **parent_names, u8 num_parents, + const char *const *parent_names, u8 num_parents, void __iomem *reg, int shift); -#define PNAME(x) static 
const char *x[] __initdata +#define PNAME(x) static const char *const x[] __initconst enum rockchip_clk_branch_type { branch_composite, @@ -197,7 +197,7 @@ struct rockchip_clk_branch { unsigned int id; enum rockchip_clk_branch_type branch_type; const char *name; - const char **parent_names; + const char *const *parent_names; u8 num_parents; unsigned long flags; int muxdiv_offset; @@ -403,7 +403,7 @@ void rockchip_clk_register_branches(struct rockchip_clk_branch *clk_list, void rockchip_clk_register_plls(struct rockchip_pll_clock *pll_list, unsigned int nr_pll, int grf_lock_offset); void rockchip_clk_register_armclk(unsigned int lookup_id, const char *name, - const char **parent_names, u8 num_parents, + const char *const *parent_names, u8 num_parents, const struct rockchip_cpuclk_reg_data *reg_data, const struct rockchip_cpuclk_rate_table *rates, int nrates); diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile index a17683b2cf27..5f6833ea355d 100644 --- a/drivers/clk/samsung/Makefile +++ b/drivers/clk/samsung/Makefile @@ -2,7 +2,7 @@ # Samsung Clock specific Makefile # -obj-$(CONFIG_COMMON_CLK) += clk.o clk-pll.o +obj-$(CONFIG_COMMON_CLK) += clk.o clk-pll.o clk-cpu.o obj-$(CONFIG_SOC_EXYNOS3250) += clk-exynos3250.o obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4.o obj-$(CONFIG_SOC_EXYNOS4415) += clk-exynos4415.o diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c new file mode 100644 index 000000000000..3a1fe07cfe9e --- /dev/null +++ b/drivers/clk/samsung/clk-cpu.c @@ -0,0 +1,349 @@ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * Author: Thomas Abraham <[email protected]> + * + * Copyright (c) 2015 Samsung Electronics Co., Ltd. + * Bartlomiej Zolnierkiewicz <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file contains the utility function to register CPU clock for Samsung + * Exynos platforms. A CPU clock is defined as a clock supplied to a CPU or a + * group of CPUs. The CPU clock is typically derived from a hierarchy of clock + * blocks which includes mux and divider blocks. There are a number of other + * auxiliary clocks supplied to the CPU domain such as the debug blocks and AXI + * clock for CPU domain. The rates of these auxiliary clocks are related to the + * CPU clock rate and this relation is usually specified in the hardware manual + * of the SoC or supplied after the SoC characterization. + * + * The below implementation of the CPU clock allows the rate changes of the CPU + * clock and the corresponding rate changes of the auxillary clocks of the CPU + * domain. The platform clock driver provides a clock register configuration + * for each configurable rate which is then used to program the clock hardware + * registers to acheive a fast co-oridinated rate change for all the CPU domain + * clocks. + * + * On a rate change request for the CPU clock, the rate change is propagated + * upto the PLL supplying the clock to the CPU domain clock blocks. While the + * CPU domain PLL is reconfigured, the CPU domain clocks are driven using an + * alternate clock source. If required, the alternate clock source is divided + * down in order to keep the output clock rate within the previous OPP limits. 
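A standalone illustration of the "divide the alternate clock source down to stay within the previous OPP limits" rule described above; it mirrors the DIV_ROUND_UP(alt_prate, tmp_rate) - 1 computation used later in exynos_cpuclk_pre_rate_change(), but the 800/500/1000 MHz figures are hypothetical example rates, not values taken from a particular SoC:

/*
 * Sketch only (plain userspace C, not kernel code): shows how the safe
 * divider for the alternate parent is chosen while the CPU PLL relocks.
 * A register value N here means divide-by-(N + 1), as in the driver.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long alt_prate = 800000000UL;	/* hypothetical alternate parent (e.g. sclk_mpll) */
	unsigned long old_rate  = 500000000UL;	/* hypothetical current armclk rate */
	unsigned long new_rate  = 1000000000UL;	/* hypothetical target armclk rate */
	unsigned long tmp_rate, alt_div;

	/*
	 * The driver only divides down when the alternate parent is faster
	 * than the old rate, or when scaling down; both keep armclk at or
	 * below the rate the domain was already validated for.
	 */
	tmp_rate = old_rate < new_rate ? old_rate : new_rate;
	alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;

	printf("alt_div = %lu -> armclk runs at %lu Hz during the transition\n",
	       alt_div, alt_prate / (alt_div + 1));
	return 0;
}

With these sample numbers the result is alt_div = 1, i.e. divide-by-2, so the CPU domain sees 400 MHz, safely under the 500 MHz it was running at before the switch.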
+*/ + +#include <linux/errno.h> +#include "clk-cpu.h" + +#define E4210_SRC_CPU 0x0 +#define E4210_STAT_CPU 0x200 +#define E4210_DIV_CPU0 0x300 +#define E4210_DIV_CPU1 0x304 +#define E4210_DIV_STAT_CPU0 0x400 +#define E4210_DIV_STAT_CPU1 0x404 + +#define E4210_DIV0_RATIO0_MASK 0x7 +#define E4210_DIV1_HPM_MASK (0x7 << 4) +#define E4210_DIV1_COPY_MASK (0x7 << 0) +#define E4210_MUX_HPM_MASK (1 << 20) +#define E4210_DIV0_ATB_SHIFT 16 +#define E4210_DIV0_ATB_MASK (DIV_MASK << E4210_DIV0_ATB_SHIFT) + +#define MAX_DIV 8 +#define DIV_MASK 7 +#define DIV_MASK_ALL 0xffffffff +#define MUX_MASK 7 + +/* + * Helper function to wait until divider(s) have stabilized after the divider + * value has changed. + */ +static void wait_until_divider_stable(void __iomem *div_reg, unsigned long mask) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(10); + + do { + if (!(readl(div_reg) & mask)) + return; + } while (time_before(jiffies, timeout)); + + if (!(readl(div_reg) & mask)) + return; + + pr_err("%s: timeout in divider stablization\n", __func__); +} + +/* + * Helper function to wait until mux has stabilized after the mux selection + * value was changed. + */ +static void wait_until_mux_stable(void __iomem *mux_reg, u32 mux_pos, + unsigned long mux_value) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(10); + + do { + if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value) + return; + } while (time_before(jiffies, timeout)); + + if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value) + return; + + pr_err("%s: re-parenting mux timed-out\n", __func__); +} + +/* common round rate callback useable for all types of CPU clocks */ +static long exynos_cpuclk_round_rate(struct clk_hw *hw, + unsigned long drate, unsigned long *prate) +{ + struct clk *parent = __clk_get_parent(hw->clk); + *prate = __clk_round_rate(parent, drate); + return *prate; +} + +/* common recalc rate callback useable for all types of CPU clocks */ +static unsigned long exynos_cpuclk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + /* + * The CPU clock output (armclk) rate is the same as its parent + * rate. Although there exist certain dividers inside the CPU + * clock block that could be used to divide the parent clock, + * the driver does not make use of them currently, except during + * frequency transitions. + */ + return parent_rate; +} + +static const struct clk_ops exynos_cpuclk_clk_ops = { + .recalc_rate = exynos_cpuclk_recalc_rate, + .round_rate = exynos_cpuclk_round_rate, +}; + +/* + * Helper function to set the 'safe' dividers for the CPU clock. The parameters + * div and mask contain the divider value and the register bit mask of the + * dividers to be programmed. 
+ */ +static void exynos_set_safe_div(void __iomem *base, unsigned long div, + unsigned long mask) +{ + unsigned long div0; + + div0 = readl(base + E4210_DIV_CPU0); + div0 = (div0 & ~mask) | (div & mask); + writel(div0, base + E4210_DIV_CPU0); + wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, mask); +} + +/* handler for pre-rate change notification from parent clock */ +static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata, + struct exynos_cpuclk *cpuclk, void __iomem *base) +{ + const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg; + unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent); + unsigned long alt_div = 0, alt_div_mask = DIV_MASK; + unsigned long div0, div1 = 0, mux_reg; + + /* find out the divider values to use for clock data */ + while ((cfg_data->prate * 1000) != ndata->new_rate) { + if (cfg_data->prate == 0) + return -EINVAL; + cfg_data++; + } + + spin_lock(cpuclk->lock); + + /* + * For the selected PLL clock frequency, get the pre-defined divider + * values. If the clock for sclk_hpm is not sourced from apll, then + * the values for DIV_COPY and DIV_HPM dividers need not be set. + */ + div0 = cfg_data->div0; + if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) { + div1 = cfg_data->div1; + if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK) + div1 = readl(base + E4210_DIV_CPU1) & + (E4210_DIV1_HPM_MASK | E4210_DIV1_COPY_MASK); + } + + /* + * If the old parent clock speed is less than the clock speed of + * the alternate parent, then it should be ensured that at no point + * the armclk speed is more than the old_prate until the dividers are + * set. Also workaround the issue of the dividers being set to lower + * values before the parent clock speed is set to new lower speed + * (this can result in too high speed of armclk output clocks). + */ + if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) { + unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate); + + alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1; + WARN_ON(alt_div >= MAX_DIV); + + if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { + /* + * In Exynos4210, ATB clock parent is also mout_core. So + * ATB clock also needs to be mantained at safe speed. + */ + alt_div |= E4210_DIV0_ATB_MASK; + alt_div_mask |= E4210_DIV0_ATB_MASK; + } + exynos_set_safe_div(base, alt_div, alt_div_mask); + div0 |= alt_div; + } + + /* select sclk_mpll as the alternate parent */ + mux_reg = readl(base + E4210_SRC_CPU); + writel(mux_reg | (1 << 16), base + E4210_SRC_CPU); + wait_until_mux_stable(base + E4210_STAT_CPU, 16, 2); + + /* alternate parent is active now. 
set the dividers */ + writel(div0, base + E4210_DIV_CPU0); + wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL); + + if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) { + writel(div1, base + E4210_DIV_CPU1); + wait_until_divider_stable(base + E4210_DIV_STAT_CPU1, + DIV_MASK_ALL); + } + + spin_unlock(cpuclk->lock); + return 0; +} + +/* handler for post-rate change notification from parent clock */ +static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata, + struct exynos_cpuclk *cpuclk, void __iomem *base) +{ + const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg; + unsigned long div = 0, div_mask = DIV_MASK; + unsigned long mux_reg; + + /* find out the divider values to use for clock data */ + if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { + while ((cfg_data->prate * 1000) != ndata->new_rate) { + if (cfg_data->prate == 0) + return -EINVAL; + cfg_data++; + } + } + + spin_lock(cpuclk->lock); + + /* select mout_apll as the alternate parent */ + mux_reg = readl(base + E4210_SRC_CPU); + writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU); + wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1); + + if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { + div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK); + div_mask |= E4210_DIV0_ATB_MASK; + } + + exynos_set_safe_div(base, div, div_mask); + spin_unlock(cpuclk->lock); + return 0; +} + +/* + * This notifier function is called for the pre-rate and post-rate change + * notifications of the parent clock of cpuclk. + */ +static int exynos_cpuclk_notifier_cb(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct clk_notifier_data *ndata = data; + struct exynos_cpuclk *cpuclk; + void __iomem *base; + int err = 0; + + cpuclk = container_of(nb, struct exynos_cpuclk, clk_nb); + base = cpuclk->ctrl_base; + + if (event == PRE_RATE_CHANGE) + err = exynos_cpuclk_pre_rate_change(ndata, cpuclk, base); + else if (event == POST_RATE_CHANGE) + err = exynos_cpuclk_post_rate_change(ndata, cpuclk, base); + + return notifier_from_errno(err); +} + +/* helper function to register a CPU clock */ +int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx, + unsigned int lookup_id, const char *name, const char *parent, + const char *alt_parent, unsigned long offset, + const struct exynos_cpuclk_cfg_data *cfg, + unsigned long num_cfgs, unsigned long flags) +{ + struct exynos_cpuclk *cpuclk; + struct clk_init_data init; + struct clk *clk; + int ret = 0; + + cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL); + if (!cpuclk) + return -ENOMEM; + + init.name = name; + init.flags = CLK_SET_RATE_PARENT; + init.parent_names = &parent; + init.num_parents = 1; + init.ops = &exynos_cpuclk_clk_ops; + + cpuclk->hw.init = &init; + cpuclk->ctrl_base = ctx->reg_base + offset; + cpuclk->lock = &ctx->lock; + cpuclk->flags = flags; + cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb; + + cpuclk->alt_parent = __clk_lookup(alt_parent); + if (!cpuclk->alt_parent) { + pr_err("%s: could not lookup alternate parent %s\n", + __func__, alt_parent); + ret = -EINVAL; + goto free_cpuclk; + } + + clk = __clk_lookup(parent); + if (!clk) { + pr_err("%s: could not lookup parent clock %s\n", + __func__, parent); + ret = -EINVAL; + goto free_cpuclk; + } + + ret = clk_notifier_register(clk, &cpuclk->clk_nb); + if (ret) { + pr_err("%s: failed to register clock notifier for %s\n", + __func__, name); + goto free_cpuclk; + } + + cpuclk->cfg = kmemdup(cfg, sizeof(*cfg) * num_cfgs, GFP_KERNEL); + if (!cpuclk->cfg) { + pr_err("%s: 
could not allocate memory for cpuclk data\n", + __func__); + ret = -ENOMEM; + goto unregister_clk_nb; + } + + clk = clk_register(NULL, &cpuclk->hw); + if (IS_ERR(clk)) { + pr_err("%s: could not register cpuclk %s\n", __func__, name); + ret = PTR_ERR(clk); + goto free_cpuclk_data; + } + + samsung_clk_add_lookup(ctx, clk, lookup_id); + return 0; + +free_cpuclk_data: + kfree(cpuclk->cfg); +unregister_clk_nb: + clk_notifier_unregister(__clk_lookup(parent), &cpuclk->clk_nb); +free_cpuclk: + kfree(cpuclk); + return ret; +} diff --git a/drivers/clk/samsung/clk-cpu.h b/drivers/clk/samsung/clk-cpu.h new file mode 100644 index 000000000000..37874d3c3165 --- /dev/null +++ b/drivers/clk/samsung/clk-cpu.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2014 Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Common Clock Framework support for all PLL's in Samsung platforms +*/ + +#ifndef __SAMSUNG_CLK_CPU_H +#define __SAMSUNG_CLK_CPU_H + +#include "clk.h" + +/** + * struct exynos_cpuclk_data: config data to setup cpu clocks. + * @prate: frequency of the primary parent clock (in KHz). + * @div0: value to be programmed in the div_cpu0 register. + * @div1: value to be programmed in the div_cpu1 register. + * + * This structure holds the divider configuration data for dividers in the CPU + * clock domain. The parent frequency at which these divider values are valid is + * specified in @prate. The @prate is the frequency of the primary parent clock. + * For CPU clock domains that do not have a DIV1 register, the @div1 member + * value is not used. + */ +struct exynos_cpuclk_cfg_data { + unsigned long prate; + unsigned long div0; + unsigned long div1; +}; + +/** + * struct exynos_cpuclk: information about clock supplied to a CPU core. + * @hw: handle between CCF and CPU clock. + * @alt_parent: alternate parent clock to use when switching the speed + * of the primary parent clock. + * @ctrl_base: base address of the clock controller. + * @lock: cpu clock domain register access lock. + * @cfg: cpu clock rate configuration data. + * @num_cfgs: number of array elements in @cfg array. + * @clk_nb: clock notifier registered for changes in clock speed of the + * primary parent clock. + * @flags: configuration flags for the CPU clock. + * + * This structure holds information required for programming the CPU clock for + * various clock speeds. 
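One easily missed detail of the cfg table documented above is the unit mismatch: @prate is stored in kHz while clk notifier rates arrive in Hz, which is why the lookup loops in exynos_cpuclk_pre/post_rate_change() compare cfg_data->prate * 1000 against ndata->new_rate. A standalone sketch of that lookup, with made-up table entries rather than real SoC divider words:

/* Sketch only (plain userspace C, not kernel code). */
#include <stdio.h>

struct cfg { unsigned long prate_khz, div0, div1; };

static const struct cfg table[] = {
	{ 1000000, 0x1, 0x4 },	/* hypothetical divider words */
	{  800000, 0x2, 0x3 },
	{ 0 },			/* sentinel, as in the driver tables */
};

int main(void)
{
	unsigned long new_rate_hz = 800000000UL;	/* rate from the notifier, in Hz */
	const struct cfg *c = table;

	/* prate is in kHz, so scale it up before comparing against Hz. */
	while (c->prate_khz * 1000 != new_rate_hz) {
		if (c->prate_khz == 0) {
			printf("rate %lu Hz not in table\n", new_rate_hz);
			return 1;
		}
		c++;
	}
	printf("matched %lu kHz entry: div0=%#lx div1=%#lx\n",
	       c->prate_khz, c->div0, c->div1);
	return 0;
}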
+ */ +struct exynos_cpuclk { + struct clk_hw hw; + struct clk *alt_parent; + void __iomem *ctrl_base; + spinlock_t *lock; + const struct exynos_cpuclk_cfg_data *cfg; + const unsigned long num_cfgs; + struct notifier_block clk_nb; + unsigned long flags; + +/* The CPU clock registers has DIV1 configuration register */ +#define CLK_CPU_HAS_DIV1 (1 << 0) +/* When ALT parent is active, debug clocks need safe divider values */ +#define CLK_CPU_NEEDS_DEBUG_ALT_DIV (1 << 1) +}; + +extern int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx, + unsigned int lookup_id, const char *name, + const char *parent, const char *alt_parent, + unsigned long offset, + const struct exynos_cpuclk_cfg_data *cfg, + unsigned long num_cfgs, unsigned long flags); + +#endif /* __SAMSUNG_CLK_CPU_H */ diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 714d6ba782c8..cae2c048488d 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -19,6 +19,7 @@ #include <linux/syscore_ops.h> #include "clk.h" +#include "clk-cpu.h" /* Exynos4 clock controller register offsets */ #define SRC_LEFTBUS 0x4200 @@ -534,7 +535,8 @@ static struct samsung_fixed_factor_clock exynos4x12_fixed_factor_clks[] __initda /* list of mux clocks supported in all exynos4 soc's */ static struct samsung_mux_clock exynos4_mux_clks[] __initdata = { MUX_FA(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, - CLK_SET_RATE_PARENT, 0, "mout_apll"), + CLK_SET_RATE_PARENT | CLK_RECALC_NEW_RATES, 0, + "mout_apll"), MUX(CLK_MOUT_HDMI, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1), MUX(0, "mout_mfc1", sclk_evpll_p, SRC_MFC, 4, 1), MUX(0, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1), @@ -1378,6 +1380,22 @@ static void __init exynos4x12_core_down_clock(void) __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2); } +#define E4210_CPU_DIV0(apll, pclk_dbg, atb, periph, corem1, corem0) \ + (((apll) << 24) | ((pclk_dbg) << 20) | ((atb) << 16) | \ + ((periph) << 12) | ((corem1) << 8) | ((corem0) << 4)) +#define E4210_CPU_DIV1(hpm, copy) \ + (((hpm) << 4) | ((copy) << 0)) + +static const struct exynos_cpuclk_cfg_data e4210_armclk_d[] __initconst = { + { 1200000, E4210_CPU_DIV0(7, 1, 4, 3, 7, 3), E4210_CPU_DIV1(0, 5), }, + { 1000000, E4210_CPU_DIV0(7, 1, 4, 3, 7, 3), E4210_CPU_DIV1(0, 4), }, + { 800000, E4210_CPU_DIV0(7, 1, 3, 3, 7, 3), E4210_CPU_DIV1(0, 3), }, + { 500000, E4210_CPU_DIV0(7, 1, 3, 3, 7, 3), E4210_CPU_DIV1(0, 3), }, + { 400000, E4210_CPU_DIV0(7, 1, 3, 3, 7, 3), E4210_CPU_DIV1(0, 3), }, + { 200000, E4210_CPU_DIV0(0, 1, 1, 1, 3, 1), E4210_CPU_DIV1(0, 3), }, + { 0 }, +}; + /* register exynos4 clocks */ static void __init exynos4_clk_init(struct device_node *np, enum exynos4_soc soc) @@ -1455,6 +1473,10 @@ static void __init exynos4_clk_init(struct device_node *np, samsung_clk_register_fixed_factor(ctx, exynos4210_fixed_factor_clks, ARRAY_SIZE(exynos4210_fixed_factor_clks)); + exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk", + mout_core_p4210[0], mout_core_p4210[1], 0x14200, + e4210_armclk_d, ARRAY_SIZE(e4210_armclk_d), + CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1); } else { samsung_clk_register_mux(ctx, exynos4x12_mux_clks, ARRAY_SIZE(exynos4x12_mux_clks)); diff --git a/drivers/clk/samsung/clk-exynos5260.c b/drivers/clk/samsung/clk-exynos5260.c index e2e5193d1049..06f96eb7cf93 100644 --- a/drivers/clk/samsung/clk-exynos5260.c +++ b/drivers/clk/samsung/clk-exynos5260.c @@ -94,7 +94,7 @@ PNAME(mout_aud_pll_user_p) = {"fin_pll", "fout_aud_pll"}; PNAME(mout_sclk_aud_i2s_p) = 
{"mout_aud_pll_user", "ioclk_i2s_cdclk"}; PNAME(mout_sclk_aud_pcm_p) = {"mout_aud_pll_user", "ioclk_pcm_extclk"}; -struct samsung_mux_clock aud_mux_clks[] __initdata = { +static struct samsung_mux_clock aud_mux_clks[] __initdata = { MUX(AUD_MOUT_AUD_PLL_USER, "mout_aud_pll_user", mout_aud_pll_user_p, MUX_SEL_AUD, 0, 1), MUX(AUD_MOUT_SCLK_AUD_I2S, "mout_sclk_aud_i2s", mout_sclk_aud_i2s_p, @@ -103,7 +103,7 @@ struct samsung_mux_clock aud_mux_clks[] __initdata = { MUX_SEL_AUD, 8, 1), }; -struct samsung_div_clock aud_div_clks[] __initdata = { +static struct samsung_div_clock aud_div_clks[] __initdata = { DIV(AUD_DOUT_ACLK_AUD_131, "dout_aclk_aud_131", "mout_aud_pll_user", DIV_AUD0, 0, 4), @@ -115,7 +115,7 @@ struct samsung_div_clock aud_div_clks[] __initdata = { DIV_AUD1, 12, 4), }; -struct samsung_gate_clock aud_gate_clks[] __initdata = { +static struct samsung_gate_clock aud_gate_clks[] __initdata = { GATE(AUD_SCLK_I2S, "sclk_aud_i2s", "dout_sclk_aud_i2s", EN_SCLK_AUD, 0, CLK_SET_RATE_PARENT, 0), GATE(AUD_SCLK_PCM, "sclk_aud_pcm", "dout_sclk_aud_pcm", @@ -135,7 +135,7 @@ struct samsung_gate_clock aud_gate_clks[] __initdata = { static void __init exynos5260_clk_aud_init(struct device_node *np) { - struct samsung_cmu_info cmu = {0}; + struct samsung_cmu_info cmu = { NULL }; cmu.mux_clks = aud_mux_clks; cmu.nr_mux_clks = ARRAY_SIZE(aud_mux_clks); @@ -203,7 +203,7 @@ PNAME(mout_phyclk_mipi_dphy_4lmrxclk_esc0_user_p) = {"fin_pll", PNAME(mout_sclk_hdmi_spdif_p) = {"fin_pll", "ioclk_spdif_extclk", "dout_aclk_peri_aud", "phyclk_hdmi_phy_ref_cko"}; -struct samsung_mux_clock disp_mux_clks[] __initdata = { +static struct samsung_mux_clock disp_mux_clks[] __initdata = { MUX(DISP_MOUT_ACLK_DISP_333_USER, "mout_aclk_disp_333_user", mout_aclk_disp_333_user_p, MUX_SEL_DISP0, 0, 1), @@ -272,7 +272,7 @@ struct samsung_mux_clock disp_mux_clks[] __initdata = { MUX_SEL_DISP4, 4, 2), }; -struct samsung_div_clock disp_div_clks[] __initdata = { +static struct samsung_div_clock disp_div_clks[] __initdata = { DIV(DISP_DOUT_PCLK_DISP_111, "dout_pclk_disp_111", "mout_aclk_disp_222_user", DIV_DISP, 8, 4), @@ -285,7 +285,7 @@ struct samsung_div_clock disp_div_clks[] __initdata = { DIV_DISP, 16, 4), }; -struct samsung_gate_clock disp_gate_clks[] __initdata = { +static struct samsung_gate_clock disp_gate_clks[] __initdata = { GATE(DISP_MOUT_HDMI_PHY_PIXEL_USER, "sclk_hdmi_link_i_pixel", "mout_phyclk_hdmi_phy_pixel_clko_user", EN_SCLK_DISP0, 26, CLK_SET_RATE_PARENT, 0), @@ -325,7 +325,7 @@ struct samsung_gate_clock disp_gate_clks[] __initdata = { static void __init exynos5260_clk_disp_init(struct device_node *np) { - struct samsung_cmu_info cmu = {0}; + struct samsung_cmu_info cmu = { NULL }; cmu.mux_clks = disp_mux_clks; cmu.nr_mux_clks = ARRAY_SIZE(disp_mux_clks); @@ -363,13 +363,13 @@ static unsigned long egl_clk_regs[] __initdata = { PNAME(mout_egl_b_p) = {"mout_egl_pll", "dout_bus_pll"}; PNAME(mout_egl_pll_p) = {"fin_pll", "fout_egl_pll"}; -struct samsung_mux_clock egl_mux_clks[] __initdata = { +static struct samsung_mux_clock egl_mux_clks[] __initdata = { MUX(EGL_MOUT_EGL_PLL, "mout_egl_pll", mout_egl_pll_p, MUX_SEL_EGL, 4, 1), MUX(EGL_MOUT_EGL_B, "mout_egl_b", mout_egl_b_p, MUX_SEL_EGL, 16, 1), }; -struct samsung_div_clock egl_div_clks[] __initdata = { +static struct samsung_div_clock egl_div_clks[] __initdata = { DIV(EGL_DOUT_EGL1, "dout_egl1", "mout_egl_b", DIV_EGL, 0, 3), DIV(EGL_DOUT_EGL2, "dout_egl2", "dout_egl1", DIV_EGL, 4, 3), DIV(EGL_DOUT_ACLK_EGL, "dout_aclk_egl", "dout_egl2", DIV_EGL, 8, 3), @@ -389,7 
+389,7 @@ static struct samsung_pll_clock egl_pll_clks[] __initdata = { static void __init exynos5260_clk_egl_init(struct device_node *np) { - struct samsung_cmu_info cmu = {0}; + struct samsung_cmu_info cmu = { NULL }; cmu.pll_clks = egl_pll_clks; cmu.nr_pll_clks = ARRAY_SIZE(egl_pll_clks); @@ -433,7 +433,7 @@ PNAME(mout_phyclk_usbdrd30_pipe_pclk_user_p) = {"fin_pll", PNAME(mout_phyclk_usbdrd30_phyclock_user_p) = {"fin_pll", "phyclk_usbdrd30_udrd30_phyclock"}; -struct samsung_mux_clock fsys_mux_clks[] __initdata = { +static struct samsung_mux_clock fsys_mux_clks[] __initdata = { MUX(FSYS_MOUT_PHYCLK_USBDRD30_PHYCLOCK_USER, "mout_phyclk_usbdrd30_phyclock_user", mout_phyclk_usbdrd30_phyclock_user_p, @@ -456,7 +456,7 @@ struct samsung_mux_clock fsys_mux_clks[] __initdata = { MUX_SEL_FSYS1, 16, 1), }; -struct samsung_gate_clock fsys_gate_clks[] __initdata = { +static struct samsung_gate_clock fsys_gate_clks[] __initdata = { GATE(FSYS_PHYCLK_USBHOST20, "phyclk_usbhost20_phyclock", "mout_phyclk_usbdrd30_phyclock_user", EN_SCLK_FSYS, 1, 0, 0), @@ -491,7 +491,7 @@ struct samsung_gate_clock fsys_gate_clks[] __initdata = { static void __init exynos5260_clk_fsys_init(struct device_node *np) { - struct samsung_cmu_info cmu = {0}; + struct samsung_cmu_info cmu = { NULL }; cmu.mux_clks = fsys_mux_clks; cmu.nr_mux_clks = ARRAY_SIZE(fsys_mux_clks); @@ -537,18 +537,18 @@ static unsigned long g2d_clk_regs[] __initdata = { PNAME(mout_aclk_g2d_333_user_p) = {"fin_pll", "dout_aclk_g2d_333"}; -struct samsung_mux_clock g2d_mux_clks[] __initdata = { +static struct samsung_mux_clock g2d_mux_clks[] __initdata = { MUX(G2D_MOUT_ACLK_G2D_333_USER, "mout_aclk_g2d_333_user", mout_aclk_g2d_333_user_p, MUX_SEL_G2D, 0, 1), }; -struct samsung_div_clock g2d_div_clks[] __initdata = { +static struct samsung_div_clock g2d_div_clks[] __initdata = { DIV(G2D_DOUT_PCLK_G2D_83, "dout_pclk_g2d_83", "mout_aclk_g2d_333_user", DIV_G2D, 0, 3), }; -struct samsung_gate_clock g2d_gate_clks[] __initdata = { +static struct samsung_gate_clock g2d_gate_clks[] __initdata = { GATE(G2D_CLK_G2D, "clk_g2d", "mout_aclk_g2d_333_user", EN_IP_G2D, 4, 0, 0), GATE(G2D_CLK_JPEG, "clk_jpeg", "mout_aclk_g2d_333_user", @@ -580,7 +580,7 @@ struct samsung_gate_clock g2d_gate_clks[] __initdata = { static void __init exynos5260_clk_g2d_init(struct device_node *np) { - struct samsung_cmu_info cmu = {0}; + struct samsung_cmu_info cmu = { NULL }; cmu.mux_clks = g2d_mux_clks; cmu.nr_mux_clks = ARRAY_SIZE(g2d_mux_clks); @@ -617,17 +617,17 @@ static unsigned long g3d_clk_regs[] __initdata = { PNAME(mout_g3d_pll_p) = {"fin_pll", "fout_g3d_pll"}; -struct samsung_mux_clock g3d_mux_clks[] __initdata = { +static struct samsung_mux_clock g3d_mux_clks[] __initdata = { MUX(G3D_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p, MUX_SEL_G3D, 0, 1), }; -struct samsung_div_clock g3d_div_clks[] __initdata = { +static struct samsung_div_clock g3d_div_clks[] __initdata = { DIV(G3D_DOUT_PCLK_G3D, "dout_pclk_g3d", "dout_aclk_g3d", DIV_G3D, 0, 3), DIV(G3D_DOUT_ACLK_G3D, "dout_aclk_g3d", "mout_g3d_pll", DIV_G3D, 4, 3), }; -struct samsung_gate_clock g3d_gate_clks[] __initdata = { +static struct samsung_gate_clock g3d_gate_clks[] __initdata = { GATE(G3D_CLK_G3D, "clk_g3d", "dout_aclk_g3d", EN_IP_G3D, 2, 0, 0), GATE(G3D_CLK_G3D_HPM, "clk_g3d_hpm", "dout_aclk_g3d", EN_IP_G3D, 3, 0, 0), @@ -641,7 +641,7 @@ static struct samsung_pll_clock g3d_pll_clks[] __initdata = { static void __init exynos5260_clk_g3d_init(struct device_node *np) { - struct samsung_cmu_info cmu = {0}; + struct samsung_cmu_info 
cmu = { NULL }; cmu.pll_clks = g3d_pll_clks; cmu.nr_pll_clks = ARRAY_SIZE(g3d_pll_clks); @@ -694,7 +694,7 @@ PNAME(mout_aclk_m2m_400_user_p) = {"fin_pll", "dout_aclk_gscl_400"}; PNAME(mout_aclk_gscl_fimc_user_p) = {"fin_pll", "dout_aclk_gscl_400"}; PNAME(mout_aclk_csis_p) = {"dout_aclk_csis_200", "mout_aclk_gscl_fimc_user"}; -struct samsung_mux_clock gscl_mux_clks[] __initdata = { +static struct samsung_mux_clock gscl_mux_clks[] __initdata = { MUX(GSCL_MOUT_ACLK_GSCL_333_USER, "mout_aclk_gscl_333_user", mout_aclk_gscl_333_user_p, MUX_SEL_GSCL, 0, 1), @@ -708,7 +708,7 @@ struct samsung_mux_clock gscl_mux_clks[] __initdata = { MUX_SEL_GSCL, 24, 1), }; -struct samsung_div_clock gscl_div_clks[] __initdata = { +static struct samsung_div_clock gscl_div_clks[] __initdata = { DIV(GSCL_DOUT_PCLK_M2M_100, "dout_pclk_m2m_100", "mout_aclk_m2m_400_user", DIV_GSCL, 0, 3), @@ -717,7 +717,7 @@ struct samsung_div_clock gscl_div_clks[] __initdata = { DIV_GSCL, 4, 3), }; -struct samsung_gate_clock gscl_gate_clks[] __initdata = { +static struct samsung_gate_clock gscl_gate_clks[] __initdata = { GATE(GSCL_SCLK_CSIS0_WRAP, "sclk_csis0_wrap", "dout_aclk_csis_200", EN_SCLK_GSCL_FIMC, 0, CLK_SET_RATE_PARENT, 0), GATE(GSCL_SCLK_CSIS1_WRAP, "sclk_csis1_wrap", "dout_aclk_csis_200", @@ -776,7 +776,7 @@ struct samsung_gate_clock gscl_gate_clks[] __initdata = { static void __init exynos5260_clk_gscl_init(struct device_node *np) { - struct samsung_cmu_info cmu = {0}; + struct samsung_cmu_info cmu = { NULL }; cmu.mux_clks = gscl_mux_clks; cmu.nr_mux_clks = ARRAY_SIZE(gscl_mux_clks); @@ -813,14 +813,14 @@ static unsigned long isp_clk_regs[] __initdata = { PNAME(mout_isp_400_user_p) = {"fin_pll", "dout_aclk_isp1_400"}; PNAME(mout_isp_266_user_p) = {"fin_pll", "dout_aclk_isp1_266"}; -struct samsung_mux_clock isp_mux_clks[] __initdata = { +static struct samsung_mux_clock isp_mux_clks[] __initdata = { MUX(ISP_MOUT_ISP_266_USER, "mout_isp_266_user", mout_isp_266_user_p, MUX_SEL_ISP0, 0, 1), MUX(ISP_MOUT_ISP_400_USER, "mout_isp_400_user", mout_isp_400_user_p, MUX_SEL_ISP0, 4, 1), }; -struct samsung_div_clock isp_div_clks[] __initdata = { +static struct samsung_div_clock isp_div_clks[] __initdata = { DIV(ISP_DOUT_PCLK_ISP_66, "dout_pclk_isp_66", "mout_kfc", DIV_ISP, 0, 3), DIV(ISP_DOUT_PCLK_ISP_133, "dout_pclk_isp_133", "mout_kfc", @@ -832,7 +832,7 @@ struct samsung_div_clock isp_div_clks[] __initdata = { DIV(ISP_DOUT_SCLK_MPWM, "dout_sclk_mpwm", "mout_kfc", DIV_ISP, 20, 2), }; -struct samsung_gate_clock isp_gate_clks[] __initdata = { +static struct samsung_gate_clock isp_gate_clks[] __initdata = { GATE(ISP_CLK_GIC, "clk_isp_gic", "mout_aclk_isp1_266", EN_IP_ISP0, 15, 0, 0), @@ -895,7 +895,7 @@ struct samsung_gate_clock isp_gate_clks[] __initdata = { static void __init exynos5260_clk_isp_init(struct device_node *np) { - struct samsung_cmu_info cmu = {0}; + struct samsung_cmu_info cmu = { NULL }; cmu.mux_clks = isp_mux_clks; cmu.nr_mux_clks = ARRAY_SIZE(isp_mux_clks); @@ -934,13 +934,13 @@ static unsigned long kfc_clk_regs[] __initdata = { PNAME(mout_kfc_pll_p) = {"fin_pll", "fout_kfc_pll"}; PNAME(mout_kfc_p) = {"mout_kfc_pll", "dout_media_pll"}; -struct samsung_mux_clock kfc_mux_clks[] __initdata = { +static struct samsung_mux_clock kfc_mux_clks[] __initdata = { MUX(KFC_MOUT_KFC_PLL, "mout_kfc_pll", mout_kfc_pll_p, MUX_SEL_KFC0, 0, 1), MUX(KFC_MOUT_KFC, "mout_kfc", mout_kfc_p, MUX_SEL_KFC2, 0, 1), }; -struct samsung_div_clock kfc_div_clks[] __initdata = { +static struct samsung_div_clock kfc_div_clks[] __initdata = { 
DIV(KFC_DOUT_KFC1, "dout_kfc1", "mout_kfc", DIV_KFC, 0, 3), DIV(KFC_DOUT_KFC2, "dout_kfc2", "dout_kfc1", DIV_KFC, 4, 3), DIV(KFC_DOUT_KFC_ATCLK, "dout_kfc_atclk", "dout_kfc2", DIV_KFC, 8, 3), @@ -959,7 +959,7 @@ static struct samsung_pll_clock kfc_pll_clks[] __initdata = { static void __init exynos5260_clk_kfc_init(struct device_node *np) { - struct samsung_cmu_info cmu = {0}; + struct samsung_cmu_info cmu = { NULL }; cmu.pll_clks = kfc_pll_clks; cmu.nr_pll_clks = ARRAY_SIZE(kfc_pll_clks); @@ -993,18 +993,18 @@ static unsigned long mfc_clk_regs[] __initdata = { PNAME(mout_aclk_mfc_333_user_p) = {"fin_pll", "dout_aclk_mfc_333"}; -struct samsung_mux_clock mfc_mux_clks[] __initdata = { +static struct samsung_mux_clock mfc_mux_clks[] __initdata = { MUX(MFC_MOUT_ACLK_MFC_333_USER, "mout_aclk_mfc_333_user", mout_aclk_mfc_333_user_p, MUX_SEL_MFC, 0, 1), }; -struct samsung_div_clock mfc_div_clks[] __initdata = { +static struct samsung_div_clock mfc_div_clks[] __initdata = { DIV(MFC_DOUT_PCLK_MFC_83, "dout_pclk_mfc_83", "mout_aclk_mfc_333_user", DIV_MFC, 0, 3), }; -struct samsung_gate_clock mfc_gate_clks[] __initdata = { +static struct samsung_gate_clock mfc_gate_clks[] __initdata = { GATE(MFC_CLK_MFC, "clk_mfc", "mout_aclk_mfc_333_user", EN_IP_MFC, 1, 0, 0), GATE(MFC_CLK_SMMU2_MFCM0, "clk_smmu2_mfcm0", "mout_aclk_mfc_333_user", @@ -1015,7 +1015,7 @@ struct samsung_gate_clock mfc_gate_clks[] __initdata = { static void __init exynos5260_clk_mfc_init(struct device_node *np) { - struct samsung_cmu_info cmu = {0}; + struct samsung_cmu_info cmu = { NULL }; cmu.mux_clks = mfc_mux_clks; cmu.nr_mux_clks = ARRAY_SIZE(mfc_mux_clks); @@ -1078,7 +1078,7 @@ PNAME(mout_mif_drex2x_p) = {"dout_mem_pll", "dout_bus_pll"}; PNAME(mout_clkm_phy_p) = {"mout_mif_drex", "dout_media_pll"}; PNAME(mout_clk2x_phy_p) = {"mout_mif_drex2x", "dout_media_pll"}; -struct samsung_mux_clock mif_mux_clks[] __initdata = { +static struct samsung_mux_clock mif_mux_clks[] __initdata = { MUX(MIF_MOUT_MEM_PLL, "mout_mem_pll", mout_mem_pll_p, MUX_SEL_MIF, 0, 1), MUX(MIF_MOUT_BUS_PLL, "mout_bus_pll", mout_bus_pll_p, @@ -1095,7 +1095,7 @@ struct samsung_mux_clock mif_mux_clks[] __initdata = { MUX_SEL_MIF, 24, 1), }; -struct samsung_div_clock mif_div_clks[] __initdata = { +static struct samsung_div_clock mif_div_clks[] __initdata = { DIV(MIF_DOUT_MEDIA_PLL, "dout_media_pll", "mout_media_pll", DIV_MIF, 0, 3), DIV(MIF_DOUT_MEM_PLL, "dout_mem_pll", "mout_mem_pll", @@ -1114,7 +1114,7 @@ struct samsung_div_clock mif_div_clks[] __initdata = { DIV_MIF, 28, 4), }; -struct samsung_gate_clock mif_gate_clks[] __initdata = { +static struct samsung_gate_clock mif_gate_clks[] __initdata = { GATE(MIF_CLK_LPDDR3PHY_WRAP0, "clk_lpddr3phy_wrap0", "dout_clk2x_phy", EN_IP_MIF, 12, CLK_IGNORE_UNUSED, 0), GATE(MIF_CLK_LPDDR3PHY_WRAP1, "clk_lpddr3phy_wrap1", "dout_clk2x_phy", @@ -1162,7 +1162,7 @@ static struct samsung_pll_clock mif_pll_clks[] __initdata = { static void __init exynos5260_clk_mif_init(struct device_node *np) { - struct samsung_cmu_info cmu = {0}; + struct samsung_cmu_info cmu = { NULL }; cmu.pll_clks = mif_pll_clks; cmu.nr_pll_clks = ARRAY_SIZE(mif_pll_clks); @@ -1221,7 +1221,7 @@ PNAME(mout_sclk_i2scod_p) = {"ioclk_i2s_cdclk", "fin_pll", "dout_aclk_peri_aud", PNAME(mout_sclk_spdif_p) = {"ioclk_spdif_extclk", "fin_pll", "dout_aclk_peri_aud", "phyclk_hdmi_phy_ref_cko"}; -struct samsung_mux_clock peri_mux_clks[] __initdata = { +static struct samsung_mux_clock peri_mux_clks[] __initdata = { MUX(PERI_MOUT_SCLK_PCM, "mout_sclk_pcm", mout_sclk_pcm_p, 
MUX_SEL_PERI1, 4, 2), MUX(PERI_MOUT_SCLK_I2SCOD, "mout_sclk_i2scod", mout_sclk_i2scod_p, @@ -1230,12 +1230,12 @@ struct samsung_mux_clock peri_mux_clks[] __initdata = { MUX_SEL_PERI1, 20, 2), }; -struct samsung_div_clock peri_div_clks[] __initdata = { +static struct samsung_div_clock peri_div_clks[] __initdata = { DIV(PERI_DOUT_PCM, "dout_pcm", "mout_sclk_pcm", DIV_PERI, 0, 8), DIV(PERI_DOUT_I2S, "dout_i2s", "mout_sclk_i2scod", DIV_PERI, 8, 6), }; -struct samsung_gate_clock peri_gate_clks[] __initdata = { +static struct samsung_gate_clock peri_gate_clks[] __initdata = { GATE(PERI_SCLK_PCM1, "sclk_pcm1", "dout_pcm", EN_SCLK_PERI, 0, CLK_SET_RATE_PARENT, 0), GATE(PERI_SCLK_I2S, "sclk_i2s", "dout_i2s", EN_SCLK_PERI, 1, @@ -1370,7 +1370,7 @@ struct samsung_gate_clock peri_gate_clks[] __initdata = { static void __init exynos5260_clk_peri_init(struct device_node *np) { - struct samsung_cmu_info cmu = {0}; + struct samsung_cmu_info cmu = { NULL }; cmu.mux_clks = peri_mux_clks; cmu.nr_mux_clks = ARRAY_SIZE(peri_mux_clks); @@ -1432,7 +1432,7 @@ static unsigned long top_clk_regs[] __initdata = { }; /* fixed rate clocks generated inside the soc */ -struct samsung_fixed_rate_clock fixed_rate_clks[] __initdata = { +static struct samsung_fixed_rate_clock fixed_rate_clks[] __initdata = { FRATE(PHYCLK_DPTX_PHY_CH3_TXD_CLK, "phyclk_dptx_phy_ch3_txd_clk", NULL, CLK_IS_ROOT, 270000000), FRATE(PHYCLK_DPTX_PHY_CH2_TXD_CLK, "phyclk_dptx_phy_ch2_txd_clk", NULL, @@ -1519,7 +1519,7 @@ PNAME(mout_sclk_fsys_mmc1_sdclkin_b_p) = {"mout_sclk_fsys_mmc1_sdclkin_a", PNAME(mout_sclk_fsys_mmc2_sdclkin_b_p) = {"mout_sclk_fsys_mmc2_sdclkin_a", "mout_mediatop_pll_user"}; -struct samsung_mux_clock top_mux_clks[] __initdata = { +static struct samsung_mux_clock top_mux_clks[] __initdata = { MUX(TOP_MOUT_MEDIATOP_PLL_USER, "mout_mediatop_pll_user", mout_mediatop_pll_user_p, MUX_SEL_TOP_PLL0, 0, 1), @@ -1679,7 +1679,7 @@ struct samsung_mux_clock top_mux_clks[] __initdata = { MUX_SEL_TOP_GSCL, 20, 1), }; -struct samsung_div_clock top_div_clks[] __initdata = { +static struct samsung_div_clock top_div_clks[] __initdata = { DIV(TOP_DOUT_ACLK_G2D_333, "dout_aclk_g2d_333", "mout_aclk_g2d_333", DIV_TOP_G2D_MFC, 0, 3), DIV(TOP_DOUT_ACLK_MFC_333, "dout_aclk_mfc_333", "mout_aclk_mfc_333", @@ -1800,7 +1800,7 @@ struct samsung_div_clock top_div_clks[] __initdata = { }; -struct samsung_gate_clock top_gate_clks[] __initdata = { +static struct samsung_gate_clock top_gate_clks[] __initdata = { GATE(TOP_SCLK_MMC0, "sclk_fsys_mmc0_sdclkin", "dout_sclk_fsys_mmc0_sdclkin_b", EN_SCLK_TOP, 7, CLK_SET_RATE_PARENT, 0), @@ -1826,7 +1826,7 @@ static struct samsung_pll_clock top_pll_clks[] __initdata = { static void __init exynos5260_clk_top_init(struct device_node *np) { - struct samsung_cmu_info cmu = {0}; + struct samsung_cmu_info cmu = { NULL }; cmu.pll_clks = top_pll_clks; cmu.nr_pll_clks = ARRAY_SIZE(top_pll_clks); diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index bea4a173eef5..a1d731ca8f48 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -504,7 +504,7 @@ static struct samsung_fixed_factor_clock FFACTOR(0, "ff_dout_spll2", "mout_sclk_spll", 1, 2, 0), }; -struct samsung_mux_clock exynos5800_mux_clks[] __initdata = { +static struct samsung_mux_clock exynos5800_mux_clks[] __initdata = { MUX(0, "mout_aclk400_isp", mout_group3_5800_p, SRC_TOP0, 0, 3), MUX(0, "mout_aclk400_mscl", mout_group3_5800_p, SRC_TOP0, 4, 3), MUX(0, "mout_aclk400_wcore", mout_group2_5800_p, 
SRC_TOP0, 16, 3), @@ -553,7 +553,7 @@ struct samsung_mux_clock exynos5800_mux_clks[] __initdata = { MUX(0, "mout_fimd1", mout_group2_p, SRC_DISP10, 4, 3), }; -struct samsung_div_clock exynos5800_div_clks[] __initdata = { +static struct samsung_div_clock exynos5800_div_clks[] __initdata = { DIV(0, "dout_aclk400_wcore", "mout_aclk400_wcore", DIV_TOP0, 16, 3), DIV(0, "dout_aclk550_cam", "mout_aclk550_cam", @@ -569,14 +569,14 @@ struct samsung_div_clock exynos5800_div_clks[] __initdata = { DIV(0, "dout_sclk_sw", "sclk_spll", DIV_TOP9, 24, 6), }; -struct samsung_gate_clock exynos5800_gate_clks[] __initdata = { +static struct samsung_gate_clock exynos5800_gate_clks[] __initdata = { GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam", GATE_BUS_TOP, 24, 0, 0), GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler", GATE_BUS_TOP, 27, 0, 0), }; -struct samsung_mux_clock exynos5420_mux_clks[] __initdata = { +static struct samsung_mux_clock exynos5420_mux_clks[] __initdata = { MUX(0, "sclk_bpll", mout_bpll_p, TOP_SPARE2, 0, 1), MUX(0, "mout_aclk400_wcore_bpll", mout_aclk400_wcore_bpll_p, TOP_SPARE2, 4, 1), @@ -606,7 +606,7 @@ struct samsung_mux_clock exynos5420_mux_clks[] __initdata = { MUX(0, "mout_fimd1", mout_group3_p, SRC_DISP10, 4, 1), }; -struct samsung_div_clock exynos5420_div_clks[] __initdata = { +static struct samsung_div_clock exynos5420_div_clks[] __initdata = { DIV(0, "dout_aclk400_wcore", "mout_aclk400_wcore_bpll", DIV_TOP0, 16, 3), }; diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c index 9e04ae2bb4d7..39c95649d3d0 100644 --- a/drivers/clk/samsung/clk-exynos5433.c +++ b/drivers/clk/samsung/clk-exynos5433.c @@ -835,6 +835,7 @@ static unsigned long cpif_clk_regs[] __initdata = { MPHY_PLL_CON1, MPHY_PLL_FREQ_DET, MUX_SEL_CPIF0, + DIV_CPIF, ENABLE_SCLK_CPIF, }; @@ -1389,7 +1390,7 @@ static struct samsung_gate_clock mif_gate_clks[] __initdata = { /* ENABLE_ACLK_MIF2 */ GATE(CLK_ACLK_MIFND_266, "aclk_mifnd_266", "div_aclk_mif_266", - ENABLE_ACLK_MIF2, 20, 0, 0), + ENABLE_ACLK_MIF2, 20, CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK_PPMU_DREX1S3, "aclk_ppmu_drex1s3", "div_aclk_drex1", ENABLE_ACLK_MIF2, 17, CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK_PPMU_DREX1S1, "aclk_ppmu_drex1s1", "div_aclk_drex1", @@ -1832,39 +1833,39 @@ static struct samsung_gate_clock peris_gate_clks[] __initdata = { /* ENABLE_PCLK_PERIS_SECURE_TZPC */ GATE(CLK_PCLK_TZPC12, "pclk_tzpc12", "aclk_peris_66", - ENABLE_PCLK_PERIS_SECURE_TZPC, 12, 0, 0), + ENABLE_PCLK_PERIS_SECURE_TZPC, 12, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_TZPC11, "pclk_tzpc11", "aclk_peris_66", - ENABLE_PCLK_PERIS_SECURE_TZPC, 11, 0, 0), + ENABLE_PCLK_PERIS_SECURE_TZPC, 11, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_TZPC10, "pclk_tzpc10", "aclk_peris_66", - ENABLE_PCLK_PERIS_SECURE_TZPC, 10, 0, 0), + ENABLE_PCLK_PERIS_SECURE_TZPC, 10, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_TZPC9, "pclk_tzpc9", "aclk_peris_66", - ENABLE_PCLK_PERIS_SECURE_TZPC, 9, 0, 0), + ENABLE_PCLK_PERIS_SECURE_TZPC, 9, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_TZPC8, "pclk_tzpc8", "aclk_peris_66", - ENABLE_PCLK_PERIS_SECURE_TZPC, 8, 0, 0), + ENABLE_PCLK_PERIS_SECURE_TZPC, 8, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_TZPC7, "pclk_tzpc7", "aclk_peris_66", - ENABLE_PCLK_PERIS_SECURE_TZPC, 7, 0, 0), + ENABLE_PCLK_PERIS_SECURE_TZPC, 7, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_TZPC6, "pclk_tzpc6", "aclk_peris_66", - ENABLE_PCLK_PERIS_SECURE_TZPC, 6, 0, 0), + ENABLE_PCLK_PERIS_SECURE_TZPC, 6, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_TZPC5, "pclk_tzpc5", "aclk_peris_66", - 
ENABLE_PCLK_PERIS_SECURE_TZPC, 5, 0, 0), + ENABLE_PCLK_PERIS_SECURE_TZPC, 5, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_TZPC4, "pclk_tzpc4", "aclk_peris_66", - ENABLE_PCLK_PERIS_SECURE_TZPC, 4, 0, 0), + ENABLE_PCLK_PERIS_SECURE_TZPC, 4, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_TZPC3, "pclk_tzpc3", "aclk_peris_66", - ENABLE_PCLK_PERIS_SECURE_TZPC, 3, 0, 0), + ENABLE_PCLK_PERIS_SECURE_TZPC, 3, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_TZPC2, "pclk_tzpc2", "aclk_peris_66", - ENABLE_PCLK_PERIS_SECURE_TZPC, 2, 0, 0), + ENABLE_PCLK_PERIS_SECURE_TZPC, 2, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_TZPC1, "pclk_tzpc1", "aclk_peris_66", - ENABLE_PCLK_PERIS_SECURE_TZPC, 1, 0, 0), + ENABLE_PCLK_PERIS_SECURE_TZPC, 1, CLK_IGNORE_UNUSED, 0), GATE(CLK_PCLK_TZPC0, "pclk_tzpc0", "aclk_peris_66", - ENABLE_PCLK_PERIS_SECURE_TZPC, 0, 0, 0), + ENABLE_PCLK_PERIS_SECURE_TZPC, 0, CLK_IGNORE_UNUSED, 0), /* ENABLE_PCLK_PERIS_SECURE_SECKEY_APBIF */ GATE(CLK_PCLK_SECKEY_APBIF, "pclk_seckey_apbif", "aclk_peris_66", - ENABLE_PCLK_PERIS_SECURE_SECKEY_APBIF, 0, 0, 0), + ENABLE_PCLK_PERIS_SECURE_SECKEY_APBIF, 0, CLK_IGNORE_UNUSED, 0), /* ENABLE_PCLK_PERIS_SECURE_CHIPID_APBIF */ GATE(CLK_PCLK_CHIPID_APBIF, "pclk_chipid_apbif", "aclk_peris_66", - ENABLE_PCLK_PERIS_SECURE_CHIPID_APBIF, 0, 0, 0), + ENABLE_PCLK_PERIS_SECURE_CHIPID_APBIF, 0, CLK_IGNORE_UNUSED, 0), /* ENABLE_PCLK_PERIS_SECURE_TOPRTC */ GATE(CLK_PCLK_TOPRTC, "pclk_toprtc", "aclk_peris_66", @@ -1895,11 +1896,11 @@ static struct samsung_gate_clock peris_gate_clks[] __initdata = { /* ENABLE_SCLK_PERIS_SECURE_SECKEY */ GATE(CLK_SCLK_SECKEY, "sclk_seckey", "oscclk_efuse_common", - ENABLE_SCLK_PERIS_SECURE_SECKEY, 0, 0, 0), + ENABLE_SCLK_PERIS_SECURE_SECKEY, 0, CLK_IGNORE_UNUSED, 0), /* ENABLE_SCLK_PERIS_SECURE_CHIPID */ GATE(CLK_SCLK_CHIPID, "sclk_chipid", "oscclk_efuse_common", - ENABLE_SCLK_PERIS_SECURE_CHIPID, 0, 0, 0), + ENABLE_SCLK_PERIS_SECURE_CHIPID, 0, CLK_IGNORE_UNUSED, 0), /* ENABLE_SCLK_PERIS_SECURE_TOPRTC */ GATE(CLK_SCLK_TOPRTC, "sclk_toprtc", "oscclk_efuse_common", @@ -3286,10 +3287,10 @@ static struct samsung_pll_clock g3d_pll_clks[] __initdata = { static struct samsung_mux_clock g3d_mux_clks[] __initdata = { /* MUX_SEL_G3D */ - MUX(CLK_MOUT_ACLK_G3D_400, "mout_aclk_g3d_400", mout_aclk_g3d_400_p, - MUX_SEL_G3D, 8, 1), - MUX(CLK_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p, - MUX_SEL_G3D, 0, 1), + MUX_F(CLK_MOUT_ACLK_G3D_400, "mout_aclk_g3d_400", mout_aclk_g3d_400_p, + MUX_SEL_G3D, 8, 1, CLK_SET_RATE_PARENT, 0), + MUX_F(CLK_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p, + MUX_SEL_G3D, 0, 1, CLK_SET_RATE_PARENT, 0), }; static struct samsung_div_clock g3d_div_clks[] __initdata = { @@ -3298,8 +3299,8 @@ static struct samsung_div_clock g3d_div_clks[] __initdata = { 8, 2), DIV(CLK_DIV_PCLK_G3D, "div_pclk_g3d", "div_aclk_g3d", DIV_G3D, 4, 3), - DIV(CLK_DIV_ACLK_G3D, "div_aclk_g3d", "mout_aclk_g3d_400", DIV_G3D, - 0, 3), + DIV_F(CLK_DIV_ACLK_G3D, "div_aclk_g3d", "mout_aclk_g3d_400", DIV_G3D, + 0, 3, CLK_SET_RATE_PARENT, 0), }; static struct samsung_gate_clock g3d_gate_clks[] __initdata = { @@ -3309,9 +3310,9 @@ static struct samsung_gate_clock g3d_gate_clks[] __initdata = { GATE(CLK_ACLK_BTS_G3D0, "aclk_bts_g3d0", "div_aclk_g3d", ENABLE_ACLK_G3D, 6, 0, 0), GATE(CLK_ACLK_ASYNCAPBS_G3D, "aclk_asyncapbs_g3d", "div_pclk_g3d", - ENABLE_ACLK_G3D, 5, 0, 0), + ENABLE_ACLK_G3D, 5, CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK_ASYNCAPBM_G3D, "aclk_asyncapbm_g3d", "div_aclk_g3d", - ENABLE_ACLK_G3D, 4, 0, 0), + ENABLE_ACLK_G3D, 4, CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK_AHB2APB_G3DP, "aclk_ahb2apb_g3dp", 
"div_pclk_g3d", ENABLE_ACLK_G3D, 3, CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK_G3DNP_150, "aclk_g3dnp_150", "div_pclk_g3d", @@ -3319,7 +3320,7 @@ static struct samsung_gate_clock g3d_gate_clks[] __initdata = { GATE(CLK_ACLK_G3DND_600, "aclk_g3dnd_600", "div_aclk_g3d", ENABLE_ACLK_G3D, 1, CLK_IGNORE_UNUSED, 0), GATE(CLK_ACLK_G3D, "aclk_g3d", "div_aclk_g3d", - ENABLE_ACLK_G3D, 0, 0, 0), + ENABLE_ACLK_G3D, 0, CLK_SET_RATE_PARENT, 0), /* ENABLE_PCLK_G3D */ GATE(CLK_PCLK_BTS_G3D1, "pclk_bts_g3d1", "div_pclk_g3d", @@ -3582,7 +3583,7 @@ static struct samsung_pll_clock apollo_pll_clks[] __initdata = { static struct samsung_mux_clock apollo_mux_clks[] __initdata = { /* MUX_SEL_APOLLO0 */ MUX_F(CLK_MOUT_APOLLO_PLL, "mout_apollo_pll", mout_apollo_pll_p, - MUX_SEL_APOLLO0, 0, 1, 0, CLK_MUX_READ_ONLY), + MUX_SEL_APOLLO0, 0, 1, CLK_SET_RATE_PARENT, 0), /* MUX_SEL_APOLLO1 */ MUX(CLK_MOUT_BUS_PLL_APOLLO_USER, "mout_bus_pll_apollo_user", @@ -3590,7 +3591,7 @@ static struct samsung_mux_clock apollo_mux_clks[] __initdata = { /* MUX_SEL_APOLLO2 */ MUX_F(CLK_MOUT_APOLLO, "mout_apollo", mout_apollo_p, MUX_SEL_APOLLO2, - 0, 1, 0, CLK_MUX_READ_ONLY), + 0, 1, CLK_SET_RATE_PARENT, 0), }; static struct samsung_div_clock apollo_div_clks[] __initdata = { @@ -3611,11 +3612,9 @@ static struct samsung_div_clock apollo_div_clks[] __initdata = { DIV_APOLLO0, 8, 3, CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), DIV_F(CLK_DIV_APOLLO2, "div_apollo2", "div_apollo1", - DIV_APOLLO0, 4, 3, CLK_GET_RATE_NOCACHE, - CLK_DIVIDER_READ_ONLY), + DIV_APOLLO0, 4, 3, CLK_SET_RATE_PARENT, 0), DIV_F(CLK_DIV_APOLLO1, "div_apollo1", "mout_apollo", - DIV_APOLLO0, 0, 3, CLK_GET_RATE_NOCACHE, - CLK_DIVIDER_READ_ONLY), + DIV_APOLLO0, 0, 3, CLK_SET_RATE_PARENT, 0), /* DIV_APOLLO1 */ DIV_F(CLK_DIV_SCLK_HPM_APOLLO, "div_sclk_hpm_apollo", "mout_apollo", @@ -3666,7 +3665,8 @@ static struct samsung_gate_clock apollo_gate_clks[] __initdata = { GATE(CLK_SCLK_HPM_APOLLO, "sclk_hpm_apollo", "div_sclk_hpm_apollo", ENABLE_SCLK_APOLLO, 1, CLK_IGNORE_UNUSED, 0), GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo2", - ENABLE_SCLK_APOLLO, 0, CLK_IGNORE_UNUSED, 0), + ENABLE_SCLK_APOLLO, 0, + CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), }; static struct samsung_cmu_info apollo_cmu_info __initdata = { @@ -3775,7 +3775,7 @@ static struct samsung_pll_clock atlas_pll_clks[] __initdata = { static struct samsung_mux_clock atlas_mux_clks[] __initdata = { /* MUX_SEL_ATLAS0 */ MUX_F(CLK_MOUT_ATLAS_PLL, "mout_atlas_pll", mout_atlas_pll_p, - MUX_SEL_ATLAS0, 0, 1, 0, CLK_MUX_READ_ONLY), + MUX_SEL_ATLAS0, 0, 1, CLK_SET_RATE_PARENT, 0), /* MUX_SEL_ATLAS1 */ MUX(CLK_MOUT_BUS_PLL_ATLAS_USER, "mout_bus_pll_atlas_user", @@ -3783,7 +3783,7 @@ static struct samsung_mux_clock atlas_mux_clks[] __initdata = { /* MUX_SEL_ATLAS2 */ MUX_F(CLK_MOUT_ATLAS, "mout_atlas", mout_atlas_p, MUX_SEL_ATLAS2, - 0, 1, 0, CLK_MUX_READ_ONLY), + 0, 1, CLK_SET_RATE_PARENT, 0), }; static struct samsung_div_clock atlas_div_clks[] __initdata = { @@ -3804,11 +3804,9 @@ static struct samsung_div_clock atlas_div_clks[] __initdata = { DIV_ATLAS0, 8, 3, CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY), DIV_F(CLK_DIV_ATLAS2, "div_atlas2", "div_atlas1", - DIV_ATLAS0, 4, 3, CLK_GET_RATE_NOCACHE, - CLK_DIVIDER_READ_ONLY), + DIV_ATLAS0, 4, 3, CLK_SET_RATE_PARENT, 0), DIV_F(CLK_DIV_ATLAS1, "div_atlas1", "mout_atlas", - DIV_ATLAS0, 0, 3, CLK_GET_RATE_NOCACHE, - CLK_DIVIDER_READ_ONLY), + DIV_ATLAS0, 0, 3, CLK_SET_RATE_PARENT, 0), /* DIV_ATLAS1 */ DIV_F(CLK_DIV_SCLK_HPM_ATLAS, "div_sclk_hpm_atlas", "mout_atlas", @@ -3885,7 +3883,8 @@ 
static struct samsung_gate_clock atlas_gate_clks[] __initdata = { GATE(CLK_ATCLK, "atclk", "div_atclk_atlas", ENABLE_SCLK_ATLAS, 1, CLK_IGNORE_UNUSED, 0), GATE(CLK_SCLK_ATLAS, "sclk_atlas", "div_atlas2", - ENABLE_SCLK_ATLAS, 0, CLK_IGNORE_UNUSED, 0), + ENABLE_SCLK_ATLAS, 0, + CLK_IGNORE_UNUSED | CLK_SET_RATE_PARENT, 0), }; static struct samsung_cmu_info atlas_cmu_info __initdata = { diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c index 9d70e5c03804..bebc61b5fce1 100644 --- a/drivers/clk/samsung/clk-pll.c +++ b/drivers/clk/samsung/clk-pll.c @@ -1156,7 +1156,7 @@ static const struct clk_ops samsung_pll2650xx_clk_min_ops = { }; static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx, - struct samsung_pll_clock *pll_clk, + const struct samsung_pll_clock *pll_clk, void __iomem *base) { struct samsung_clk_pll *pll; @@ -1303,7 +1303,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx, } void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx, - struct samsung_pll_clock *pll_list, + const struct samsung_pll_clock *pll_list, unsigned int nr_pll, void __iomem *base) { int cnt; diff --git a/drivers/clk/samsung/clk-s3c2410-dclk.c b/drivers/clk/samsung/clk-s3c2410-dclk.c index f4f29ed6bd25..e56df5064889 100644 --- a/drivers/clk/samsung/clk-s3c2410-dclk.c +++ b/drivers/clk/samsung/clk-s3c2410-dclk.c @@ -81,13 +81,13 @@ static int s3c24xx_clkout_set_parent(struct clk_hw *hw, u8 index) return ret; } -const struct clk_ops s3c24xx_clkout_ops = { +static const struct clk_ops s3c24xx_clkout_ops = { .get_parent = s3c24xx_clkout_get_parent, .set_parent = s3c24xx_clkout_set_parent, .determine_rate = __clk_mux_determine_rate, }; -struct clk *s3c24xx_register_clkout(struct device *dev, const char *name, +static struct clk *s3c24xx_register_clkout(struct device *dev, const char *name, const char **parent_names, u8 num_parents, u8 shift, u32 mask) { @@ -404,7 +404,7 @@ static struct s3c24xx_dclk_drv_data dclk_variants[] = { }, }; -static struct platform_device_id s3c24xx_dclk_driver_ids[] = { +static const struct platform_device_id s3c24xx_dclk_driver_ids[] = { { .name = "s3c2410-dclk", .driver_data = (kernel_ulong_t)&dclk_variants[S3C2410], diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c index e668e479a697..cf7e8fa7b624 100644 --- a/drivers/clk/samsung/clk-s5pv210.c +++ b/drivers/clk/samsung/clk-s5pv210.c @@ -169,44 +169,44 @@ static inline void s5pv210_clk_sleep_init(void) { } #endif /* Mux parent lists. 
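The repeated static and const annotations in the hunks above (and the const-qualified parameters added to the clk-pll.c registration helpers) all serve one cleanup: file-local clock tables get internal linkage and read-only init placement, and the helpers accept const pointers so such tables can be passed without casts. A minimal sketch of the pattern follows; every name in it is invented.

#include <linux/init.h>
#include <linux/kernel.h>

struct demo_gate_desc {
	const char *name;
	unsigned int bit;
};

/* Only this file uses the table, so it is static, const and init-only. */
static const struct demo_gate_desc demo_gates[] __initconst = {
	{ "demo_uart", 3 },
	{ "demo_spi",  4 },
};

/* The helper takes a const pointer, so const tables pass without a cast. */
static void __init demo_register_gates(const struct demo_gate_desc *list,
				       unsigned int nr)
{
	/* hand each of the nr entries to the clock framework here */
}

static void __init demo_clk_init(void)
{
	demo_register_gates(demo_gates, ARRAY_SIZE(demo_gates));
}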
*/ -static const char *fin_pll_p[] __initdata = { +static const char *const fin_pll_p[] __initconst = { "xxti", "xusbxti" }; -static const char *mout_apll_p[] __initdata = { +static const char *const mout_apll_p[] __initconst = { "fin_pll", "fout_apll" }; -static const char *mout_mpll_p[] __initdata = { +static const char *const mout_mpll_p[] __initconst = { "fin_pll", "fout_mpll" }; -static const char *mout_epll_p[] __initdata = { +static const char *const mout_epll_p[] __initconst = { "fin_pll", "fout_epll" }; -static const char *mout_vpllsrc_p[] __initdata = { +static const char *const mout_vpllsrc_p[] __initconst = { "fin_pll", "sclk_hdmi27m" }; -static const char *mout_vpll_p[] __initdata = { +static const char *const mout_vpll_p[] __initconst = { "mout_vpllsrc", "fout_vpll" }; -static const char *mout_group1_p[] __initdata = { +static const char *const mout_group1_p[] __initconst = { "dout_a2m", "mout_mpll", "mout_epll", "mout_vpll" }; -static const char *mout_group2_p[] __initdata = { +static const char *const mout_group2_p[] __initconst = { "xxti", "xusbxti", "sclk_hdmi27m", @@ -218,7 +218,7 @@ static const char *mout_group2_p[] __initdata = { "mout_vpll", }; -static const char *mout_audio0_p[] __initdata = { +static const char *const mout_audio0_p[] __initconst = { "xxti", "pcmcdclk0", "sclk_hdmi27m", @@ -230,7 +230,7 @@ static const char *mout_audio0_p[] __initdata = { "mout_vpll", }; -static const char *mout_audio1_p[] __initdata = { +static const char *const mout_audio1_p[] __initconst = { "i2scdclk1", "pcmcdclk1", "sclk_hdmi27m", @@ -242,7 +242,7 @@ static const char *mout_audio1_p[] __initdata = { "mout_vpll", }; -static const char *mout_audio2_p[] __initdata = { +static const char *const mout_audio2_p[] __initconst = { "i2scdclk2", "pcmcdclk2", "sclk_hdmi27m", @@ -254,63 +254,63 @@ static const char *mout_audio2_p[] __initdata = { "mout_vpll", }; -static const char *mout_spdif_p[] __initdata = { +static const char *const mout_spdif_p[] __initconst = { "dout_audio0", "dout_audio1", "dout_audio3", }; -static const char *mout_group3_p[] __initdata = { +static const char *const mout_group3_p[] __initconst = { "mout_apll", "mout_mpll" }; -static const char *mout_group4_p[] __initdata = { +static const char *const mout_group4_p[] __initconst = { "mout_mpll", "dout_a2m" }; -static const char *mout_flash_p[] __initdata = { +static const char *const mout_flash_p[] __initconst = { "dout_hclkd", "dout_hclkp" }; -static const char *mout_dac_p[] __initdata = { +static const char *const mout_dac_p[] __initconst = { "mout_vpll", "sclk_hdmiphy" }; -static const char *mout_hdmi_p[] __initdata = { +static const char *const mout_hdmi_p[] __initconst = { "sclk_hdmiphy", "dout_tblk" }; -static const char *mout_mixer_p[] __initdata = { +static const char *const mout_mixer_p[] __initconst = { "mout_dac", "mout_hdmi" }; -static const char *mout_vpll_6442_p[] __initdata = { +static const char *const mout_vpll_6442_p[] __initconst = { "fin_pll", "fout_vpll" }; -static const char *mout_mixer_6442_p[] __initdata = { +static const char *const mout_mixer_6442_p[] __initconst = { "mout_vpll", "dout_mixer" }; -static const char *mout_d0sync_6442_p[] __initdata = { +static const char *const mout_d0sync_6442_p[] __initconst = { "mout_dsys", "div_apll" }; -static const char *mout_d1sync_6442_p[] __initdata = { +static const char *const mout_d1sync_6442_p[] __initconst = { "mout_psys", "div_apll" }; -static const char *mout_group2_6442_p[] __initdata = { +static const char *const mout_group2_6442_p[] 
__initconst = { "fin_pll", "none", "none", @@ -322,7 +322,7 @@ static const char *mout_group2_6442_p[] __initdata = { "mout_vpll", }; -static const char *mout_audio0_6442_p[] __initdata = { +static const char *const mout_audio0_6442_p[] __initconst = { "fin_pll", "pcmcdclk0", "none", @@ -334,7 +334,7 @@ static const char *mout_audio0_6442_p[] __initdata = { "mout_vpll", }; -static const char *mout_audio1_6442_p[] __initdata = { +static const char *const mout_audio1_6442_p[] __initconst = { "i2scdclk1", "pcmcdclk1", "none", @@ -347,7 +347,7 @@ static const char *mout_audio1_6442_p[] __initdata = { "fin_pll", }; -static const char *mout_clksel_p[] __initdata = { +static const char *const mout_clksel_p[] __initconst = { "fout_apll_clkout", "fout_mpll_clkout", "fout_epll", @@ -370,7 +370,7 @@ static const char *mout_clksel_p[] __initdata = { "div_dclk" }; -static const char *mout_clksel_6442_p[] __initdata = { +static const char *const mout_clksel_6442_p[] __initconst = { "fout_apll_clkout", "fout_mpll_clkout", "fout_epll", @@ -393,7 +393,7 @@ static const char *mout_clksel_6442_p[] __initdata = { "div_dclk" }; -static const char *mout_clkout_p[] __initdata = { +static const char *const mout_clkout_p[] __initconst = { "dout_clkout", "none", "xxti", @@ -401,20 +401,20 @@ static const char *mout_clkout_p[] __initdata = { }; /* Common fixed factor clocks. */ -static struct samsung_fixed_factor_clock ffactor_clks[] __initdata = { +static const struct samsung_fixed_factor_clock ffactor_clks[] __initconst = { FFACTOR(FOUT_APLL_CLKOUT, "fout_apll_clkout", "fout_apll", 1, 4, 0), FFACTOR(FOUT_MPLL_CLKOUT, "fout_mpll_clkout", "fout_mpll", 1, 2, 0), FFACTOR(DOUT_APLL_CLKOUT, "dout_apll_clkout", "dout_apll", 1, 4, 0), }; /* PLL input mux (fin_pll), which needs to be registered before PLLs. */ -static struct samsung_mux_clock early_mux_clks[] __initdata = { +static const struct samsung_mux_clock early_mux_clks[] __initconst = { MUX_F(FIN_PLL, "fin_pll", fin_pll_p, OM_STAT, 0, 1, CLK_MUX_READ_ONLY, 0), }; /* Common clock muxes. */ -static struct samsung_mux_clock mux_clks[] __initdata = { +static const struct samsung_mux_clock mux_clks[] __initconst = { MUX(MOUT_FLASH, "mout_flash", mout_flash_p, CLK_SRC0, 28, 1), MUX(MOUT_PSYS, "mout_psys", mout_group4_p, CLK_SRC0, 24, 1), MUX(MOUT_DSYS, "mout_dsys", mout_group4_p, CLK_SRC0, 20, 1), @@ -427,7 +427,7 @@ static struct samsung_mux_clock mux_clks[] __initdata = { }; /* S5PV210-specific clock muxes. */ -static struct samsung_mux_clock s5pv210_mux_clks[] __initdata = { +static const struct samsung_mux_clock s5pv210_mux_clks[] __initconst = { MUX(MOUT_VPLL, "mout_vpll", mout_vpll_p, CLK_SRC0, 12, 1), MUX(MOUT_VPLLSRC, "mout_vpllsrc", mout_vpllsrc_p, CLK_SRC1, 28, 1), @@ -472,7 +472,7 @@ static struct samsung_mux_clock s5pv210_mux_clks[] __initdata = { }; /* S5P6442-specific clock muxes. */ -static struct samsung_mux_clock s5p6442_mux_clks[] __initdata = { +static const struct samsung_mux_clock s5p6442_mux_clks[] __initconst = { MUX(MOUT_VPLL, "mout_vpll", mout_vpll_6442_p, CLK_SRC0, 12, 1), MUX(MOUT_FIMD, "mout_fimd", mout_group2_6442_p, CLK_SRC1, 20, 4), @@ -504,7 +504,7 @@ static struct samsung_mux_clock s5p6442_mux_clks[] __initdata = { }; /* S5PV210-specific fixed rate clocks generated inside the SoC. 
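The parent-name tables above move from const char * arrays in __initdata to const char *const arrays in __initconst: once nothing ever rewrites the pointers, the array object itself can be const, and const init data belongs in the read-only init section, since marking a const object __initdata can trigger gcc's "section type conflict" error when const and non-const objects end up sharing .init.data. A minimal illustration with an invented table name:

#include <linux/init.h>

/* Array object and elements are both const, so the table is discarded with
 * .init.rodata after boot instead of occupying writable init memory. */
static const char *const demo_parents[] __initconst = {
	"fin_pll", "fout_apll",
};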
*/ -static struct samsung_fixed_rate_clock s5pv210_frate_clks[] __initdata = { +static const struct samsung_fixed_rate_clock s5pv210_frate_clks[] __initconst = { FRATE(SCLK_HDMI27M, "sclk_hdmi27m", NULL, CLK_IS_ROOT, 27000000), FRATE(SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000), FRATE(SCLK_USBPHY0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000), @@ -512,12 +512,12 @@ static struct samsung_fixed_rate_clock s5pv210_frate_clks[] __initdata = { }; /* S5P6442-specific fixed rate clocks generated inside the SoC. */ -static struct samsung_fixed_rate_clock s5p6442_frate_clks[] __initdata = { +static const struct samsung_fixed_rate_clock s5p6442_frate_clks[] __initconst = { FRATE(SCLK_USBPHY0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 30000000), }; /* Common clock dividers. */ -static struct samsung_div_clock div_clks[] __initdata = { +static const struct samsung_div_clock div_clks[] __initconst = { DIV(DOUT_PCLKP, "dout_pclkp", "dout_hclkp", CLK_DIV0, 28, 3), DIV(DOUT_PCLKD, "dout_pclkd", "dout_hclkd", CLK_DIV0, 20, 3), DIV(DOUT_A2M, "dout_a2m", "mout_apll", CLK_DIV0, 4, 3), @@ -549,7 +549,7 @@ static struct samsung_div_clock div_clks[] __initdata = { }; /* S5PV210-specific clock dividers. */ -static struct samsung_div_clock s5pv210_div_clks[] __initdata = { +static const struct samsung_div_clock s5pv210_div_clks[] __initconst = { DIV(DOUT_HCLKP, "dout_hclkp", "mout_psys", CLK_DIV0, 24, 4), DIV(DOUT_HCLKD, "dout_hclkd", "mout_dsys", CLK_DIV0, 16, 4), DIV(DOUT_PCLKM, "dout_pclkm", "dout_hclkm", CLK_DIV0, 12, 3), @@ -578,7 +578,7 @@ static struct samsung_div_clock s5pv210_div_clks[] __initdata = { }; /* S5P6442-specific clock dividers. */ -static struct samsung_div_clock s5p6442_div_clks[] __initdata = { +static const struct samsung_div_clock s5p6442_div_clks[] __initconst = { DIV(DOUT_HCLKP, "dout_hclkp", "mout_d1sync", CLK_DIV0, 24, 4), DIV(DOUT_HCLKD, "dout_hclkd", "mout_d0sync", CLK_DIV0, 16, 4), @@ -586,7 +586,7 @@ static struct samsung_div_clock s5p6442_div_clks[] __initdata = { }; /* Common clock gates. */ -static struct samsung_gate_clock gate_clks[] __initdata = { +static const struct samsung_gate_clock gate_clks[] __initconst = { GATE(CLK_ROTATOR, "rotator", "dout_hclkd", CLK_GATE_IP0, 29, 0, 0), GATE(CLK_FIMC2, "fimc2", "dout_hclkd", CLK_GATE_IP0, 26, 0, 0), GATE(CLK_FIMC1, "fimc1", "dout_hclkd", CLK_GATE_IP0, 25, 0, 0), @@ -666,7 +666,7 @@ static struct samsung_gate_clock gate_clks[] __initdata = { }; /* S5PV210-specific clock gates. */ -static struct samsung_gate_clock s5pv210_gate_clks[] __initdata = { +static const struct samsung_gate_clock s5pv210_gate_clks[] __initconst = { GATE(CLK_CSIS, "clk_csis", "dout_hclkd", CLK_GATE_IP0, 31, 0, 0), GATE(CLK_MFC, "mfc", "dout_hclkm", CLK_GATE_IP0, 16, 0, 0), GATE(CLK_G2D, "g2d", "dout_hclkd", CLK_GATE_IP0, 12, 0, 0), @@ -728,7 +728,7 @@ static struct samsung_gate_clock s5pv210_gate_clks[] __initdata = { }; /* S5P6442-specific clock gates. */ -static struct samsung_gate_clock s5p6442_gate_clks[] __initdata = { +static const struct samsung_gate_clock s5p6442_gate_clks[] __initconst = { GATE(CLK_JPEG, "jpeg", "dout_hclkd", CLK_GATE_IP0, 28, 0, 0), GATE(CLK_MFC, "mfc", "dout_hclkd", CLK_GATE_IP0, 16, 0, 0), GATE(CLK_G2D, "g2d", "dout_hclkd", CLK_GATE_IP0, 12, 0, 0), @@ -748,14 +748,14 @@ static struct samsung_gate_clock s5p6442_gate_clks[] __initdata = { * Clock aliases for legacy clkdev look-up. * NOTE: Needed only to support legacy board files. 
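The alias table kept below exists only so that legacy, non-DT board code can keep looking these clocks up by their historical names through clkdev. A hedged consumer-side sketch; the lookup function is hypothetical, while the "armclk" name comes from the ALIAS(DOUT_APLL, NULL, "armclk") entry below.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>

static void __init demo_legacy_lookup(void)
{
	/* Resolved via clkdev against the alias registered by this driver. */
	struct clk *armclk = clk_get(NULL, "armclk");

	if (!IS_ERR(armclk))
		pr_info("armclk runs at %lu Hz\n", clk_get_rate(armclk));
}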
*/ -static struct samsung_clock_alias s5pv210_aliases[] = { +static const struct samsung_clock_alias s5pv210_aliases[] __initconst = { ALIAS(DOUT_APLL, NULL, "armclk"), ALIAS(DOUT_HCLKM, NULL, "hclk_msys"), ALIAS(MOUT_DMC0, NULL, "sclk_dmc0"), }; /* S5PV210-specific PLLs. */ -static struct samsung_pll_clock s5pv210_pll_clks[] __initdata = { +static const struct samsung_pll_clock s5pv210_pll_clks[] __initconst = { [apll] = PLL(pll_4508, FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK, APLL_CON0, NULL), [mpll] = PLL(pll_4502, FOUT_MPLL, "fout_mpll", "fin_pll", @@ -767,7 +767,7 @@ static struct samsung_pll_clock s5pv210_pll_clks[] __initdata = { }; /* S5P6442-specific PLLs. */ -static struct samsung_pll_clock s5p6442_pll_clks[] __initdata = { +static const struct samsung_pll_clock s5p6442_pll_clks[] __initconst = { [apll] = PLL(pll_4502, FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK, APLL_CON0, NULL), [mpll] = PLL(pll_4502, FOUT_MPLL, "fout_mpll", "fin_pll", diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c index 9e1f88c04fd4..0117238391d6 100644 --- a/drivers/clk/samsung/clk.c +++ b/drivers/clk/samsung/clk.c @@ -98,7 +98,7 @@ void samsung_clk_add_lookup(struct samsung_clk_provider *ctx, struct clk *clk, /* register a list of aliases */ void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx, - struct samsung_clock_alias *list, + const struct samsung_clock_alias *list, unsigned int nr_clk) { struct clk *clk; @@ -132,7 +132,8 @@ void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx, /* register a list of fixed clocks */ void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx, - struct samsung_fixed_rate_clock *list, unsigned int nr_clk) + const struct samsung_fixed_rate_clock *list, + unsigned int nr_clk) { struct clk *clk; unsigned int idx, ret; @@ -161,7 +162,7 @@ void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx, /* register a list of fixed factor clocks */ void __init samsung_clk_register_fixed_factor(struct samsung_clk_provider *ctx, - struct samsung_fixed_factor_clock *list, unsigned int nr_clk) + const struct samsung_fixed_factor_clock *list, unsigned int nr_clk) { struct clk *clk; unsigned int idx; @@ -181,7 +182,7 @@ void __init samsung_clk_register_fixed_factor(struct samsung_clk_provider *ctx, /* register a list of mux clocks */ void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx, - struct samsung_mux_clock *list, + const struct samsung_mux_clock *list, unsigned int nr_clk) { struct clk *clk; @@ -213,7 +214,7 @@ void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx, /* register a list of div clocks */ void __init samsung_clk_register_div(struct samsung_clk_provider *ctx, - struct samsung_div_clock *list, + const struct samsung_div_clock *list, unsigned int nr_clk) { struct clk *clk; @@ -252,7 +253,7 @@ void __init samsung_clk_register_div(struct samsung_clk_provider *ctx, /* register a list of gate clocks */ void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx, - struct samsung_gate_clock *list, + const struct samsung_gate_clock *list, unsigned int nr_clk) { struct clk *clk; @@ -389,7 +390,7 @@ struct samsung_clk_provider * __init samsung_cmu_register_one( ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids); if (!ctx) { - panic("%s: unable to alllocate ctx\n", __func__); + panic("%s: unable to allocate ctx\n", __func__); return ctx; } diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h index e4c75383cea7..b775fc29caa5 
100644 --- a/drivers/clk/samsung/clk.h +++ b/drivers/clk/samsung/clk.h @@ -121,7 +121,7 @@ struct samsung_mux_clock { unsigned int id; const char *dev_name; const char *name; - const char **parent_names; + const char *const *parent_names; u8 num_parents; unsigned long flags; unsigned long offset; @@ -368,28 +368,28 @@ extern void __init samsung_clk_of_register_fixed_ext( extern void samsung_clk_add_lookup(struct samsung_clk_provider *ctx, struct clk *clk, unsigned int id); -extern void samsung_clk_register_alias(struct samsung_clk_provider *ctx, - struct samsung_clock_alias *list, +extern void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx, + const struct samsung_clock_alias *list, unsigned int nr_clk); extern void __init samsung_clk_register_fixed_rate( struct samsung_clk_provider *ctx, - struct samsung_fixed_rate_clock *clk_list, + const struct samsung_fixed_rate_clock *clk_list, unsigned int nr_clk); extern void __init samsung_clk_register_fixed_factor( struct samsung_clk_provider *ctx, - struct samsung_fixed_factor_clock *list, + const struct samsung_fixed_factor_clock *list, unsigned int nr_clk); extern void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx, - struct samsung_mux_clock *clk_list, + const struct samsung_mux_clock *clk_list, unsigned int nr_clk); extern void __init samsung_clk_register_div(struct samsung_clk_provider *ctx, - struct samsung_div_clock *clk_list, + const struct samsung_div_clock *clk_list, unsigned int nr_clk); extern void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx, - struct samsung_gate_clock *clk_list, + const struct samsung_gate_clock *clk_list, unsigned int nr_clk); extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx, - struct samsung_pll_clock *pll_list, + const struct samsung_pll_clock *pll_list, unsigned int nr_clk, void __iomem *base); extern struct samsung_clk_provider __init *samsung_cmu_register_one( diff --git a/drivers/clk/shmobile/clk-emev2.c b/drivers/clk/shmobile/clk-emev2.c index 6c7c929c7765..5b60beb7d0eb 100644 --- a/drivers/clk/shmobile/clk-emev2.c +++ b/drivers/clk/shmobile/clk-emev2.c @@ -34,7 +34,7 @@ static DEFINE_SPINLOCK(lock); /* not pretty, but hey */ -void __iomem *smu_base; +static void __iomem *smu_base; static void __init emev2_smu_write(unsigned long value, int offs) { diff --git a/drivers/clk/sirf/Makefile b/drivers/clk/sirf/Makefile index 36b8e203f6e7..09b4210d9124 100644 --- a/drivers/clk/sirf/Makefile +++ b/drivers/clk/sirf/Makefile @@ -2,4 +2,4 @@ # Makefile for sirf specific clk # -obj-$(CONFIG_ARCH_SIRF) += clk-prima2.o clk-atlas6.o +obj-$(CONFIG_ARCH_SIRF) += clk-prima2.o clk-atlas6.o clk-atlas7.o diff --git a/drivers/clk/sirf/clk-atlas7.c b/drivers/clk/sirf/clk-atlas7.c new file mode 100644 index 000000000000..db8ab691dbf6 --- /dev/null +++ b/drivers/clk/sirf/clk-atlas7.c @@ -0,0 +1,1632 @@ +/* + * Clock tree for CSR SiRFAtlas7 + * + * Copyright (c) 2014 Cambridge Silicon Radio Limited, a CSR plc group company. + * + * Licensed under GPLv2 or later. 
+ */ + +#include <linux/bitops.h> +#include <linux/io.h> +#include <linux/clk-provider.h> +#include <linux/delay.h> +#include <linux/of_address.h> +#include <linux/reset-controller.h> +#include <linux/slab.h> + +#define SIRFSOC_CLKC_MEMPLL_AB_FREQ 0x0000 +#define SIRFSOC_CLKC_MEMPLL_AB_SSC 0x0004 +#define SIRFSOC_CLKC_MEMPLL_AB_CTRL0 0x0008 +#define SIRFSOC_CLKC_MEMPLL_AB_CTRL1 0x000c +#define SIRFSOC_CLKC_MEMPLL_AB_STATUS 0x0010 +#define SIRFSOC_CLKC_MEMPLL_AB_SSRAM_ADDR 0x0014 +#define SIRFSOC_CLKC_MEMPLL_AB_SSRAM_DATA 0x0018 + +#define SIRFSOC_CLKC_CPUPLL_AB_FREQ 0x001c +#define SIRFSOC_CLKC_CPUPLL_AB_SSC 0x0020 +#define SIRFSOC_CLKC_CPUPLL_AB_CTRL0 0x0024 +#define SIRFSOC_CLKC_CPUPLL_AB_CTRL1 0x0028 +#define SIRFSOC_CLKC_CPUPLL_AB_STATUS 0x002c + +#define SIRFSOC_CLKC_SYS0PLL_AB_FREQ 0x0030 +#define SIRFSOC_CLKC_SYS0PLL_AB_SSC 0x0034 +#define SIRFSOC_CLKC_SYS0PLL_AB_CTRL0 0x0038 +#define SIRFSOC_CLKC_SYS0PLL_AB_CTRL1 0x003c +#define SIRFSOC_CLKC_SYS0PLL_AB_STATUS 0x0040 + +#define SIRFSOC_CLKC_SYS1PLL_AB_FREQ 0x0044 +#define SIRFSOC_CLKC_SYS1PLL_AB_SSC 0x0048 +#define SIRFSOC_CLKC_SYS1PLL_AB_CTRL0 0x004c +#define SIRFSOC_CLKC_SYS1PLL_AB_CTRL1 0x0050 +#define SIRFSOC_CLKC_SYS1PLL_AB_STATUS 0x0054 + +#define SIRFSOC_CLKC_SYS2PLL_AB_FREQ 0x0058 +#define SIRFSOC_CLKC_SYS2PLL_AB_SSC 0x005c +#define SIRFSOC_CLKC_SYS2PLL_AB_CTRL0 0x0060 +#define SIRFSOC_CLKC_SYS2PLL_AB_CTRL1 0x0064 +#define SIRFSOC_CLKC_SYS2PLL_AB_STATUS 0x0068 + +#define SIRFSOC_CLKC_SYS3PLL_AB_FREQ 0x006c +#define SIRFSOC_CLKC_SYS3PLL_AB_SSC 0x0070 +#define SIRFSOC_CLKC_SYS3PLL_AB_CTRL0 0x0074 +#define SIRFSOC_CLKC_SYS3PLL_AB_CTRL1 0x0078 +#define SIRFSOC_CLKC_SYS3PLL_AB_STATUS 0x007c + +#define SIRFSOC_ABPLL_CTRL0_SSEN 0x00001000 +#define SIRFSOC_ABPLL_CTRL0_BYPASS 0x00000010 +#define SIRFSOC_ABPLL_CTRL0_RESET 0x00000001 + +#define SIRFSOC_CLKC_AUDIO_DTO_INC 0x0088 +#define SIRFSOC_CLKC_DISP0_DTO_INC 0x008c +#define SIRFSOC_CLKC_DISP1_DTO_INC 0x0090 + +#define SIRFSOC_CLKC_AUDIO_DTO_SRC 0x0094 +#define SIRFSOC_CLKC_AUDIO_DTO_ENA 0x0098 +#define SIRFSOC_CLKC_AUDIO_DTO_DROFF 0x009c + +#define SIRFSOC_CLKC_DISP0_DTO_SRC 0x00a0 +#define SIRFSOC_CLKC_DISP0_DTO_ENA 0x00a4 +#define SIRFSOC_CLKC_DISP0_DTO_DROFF 0x00a8 + +#define SIRFSOC_CLKC_DISP1_DTO_SRC 0x00ac +#define SIRFSOC_CLKC_DISP1_DTO_ENA 0x00b0 +#define SIRFSOC_CLKC_DISP1_DTO_DROFF 0x00b4 + +#define SIRFSOC_CLKC_I2S_CLK_SEL 0x00b8 +#define SIRFSOC_CLKC_I2S_SEL_STAT 0x00bc + +#define SIRFSOC_CLKC_USBPHY_CLKDIV_CFG 0x00c0 +#define SIRFSOC_CLKC_USBPHY_CLKDIV_ENA 0x00c4 +#define SIRFSOC_CLKC_USBPHY_CLK_SEL 0x00c8 +#define SIRFSOC_CLKC_USBPHY_CLK_SEL_STAT 0x00cc + +#define SIRFSOC_CLKC_BTSS_CLKDIV_CFG 0x00d0 +#define SIRFSOC_CLKC_BTSS_CLKDIV_ENA 0x00d4 +#define SIRFSOC_CLKC_BTSS_CLK_SEL 0x00d8 +#define SIRFSOC_CLKC_BTSS_CLK_SEL_STAT 0x00dc + +#define SIRFSOC_CLKC_RGMII_CLKDIV_CFG 0x00e0 +#define SIRFSOC_CLKC_RGMII_CLKDIV_ENA 0x00e4 +#define SIRFSOC_CLKC_RGMII_CLK_SEL 0x00e8 +#define SIRFSOC_CLKC_RGMII_CLK_SEL_STAT 0x00ec + +#define SIRFSOC_CLKC_CPU_CLKDIV_CFG 0x00f0 +#define SIRFSOC_CLKC_CPU_CLKDIV_ENA 0x00f4 +#define SIRFSOC_CLKC_CPU_CLK_SEL 0x00f8 +#define SIRFSOC_CLKC_CPU_CLK_SEL_STAT 0x00fc + +#define SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG 0x0100 +#define SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA 0x0104 +#define SIRFSOC_CLKC_SDPHY01_CLK_SEL 0x0108 +#define SIRFSOC_CLKC_SDPHY01_CLK_SEL_STAT 0x010c + +#define SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG 0x0110 +#define SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA 0x0114 +#define SIRFSOC_CLKC_SDPHY23_CLK_SEL 0x0118 +#define SIRFSOC_CLKC_SDPHY23_CLK_SEL_STAT 0x011c + 
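Each clock domain above (USBPHY, BTSS, RGMII, CPU, the SDPHY pairs, CAN and so on) gets the same four registers at consecutive word offsets: divider configuration, divider enable, mux select and mux select status. The macros below only illustrate that 16-byte layout and are not part of the driver.

/* Derive the four per-domain offsets from the first register of a block,
 * e.g. SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG (0x0110) -> 0x0110/0x0114/0x0118/0x011c. */
#define DEMO_CLKDIV_CFG(block)		((block) + 0x0)
#define DEMO_CLKDIV_ENA(block)		((block) + 0x4)
#define DEMO_CLK_SEL(block)		((block) + 0x8)
#define DEMO_CLK_SEL_STAT(block)	((block) + 0xc)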
+#define SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG 0x0120 +#define SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA 0x0124 +#define SIRFSOC_CLKC_SDPHY45_CLK_SEL 0x0128 +#define SIRFSOC_CLKC_SDPHY45_CLK_SEL_STAT 0x012c + +#define SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG 0x0130 +#define SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA 0x0134 +#define SIRFSOC_CLKC_SDPHY67_CLK_SEL 0x0138 +#define SIRFSOC_CLKC_SDPHY67_CLK_SEL_STAT 0x013c + +#define SIRFSOC_CLKC_CAN_CLKDIV_CFG 0x0140 +#define SIRFSOC_CLKC_CAN_CLKDIV_ENA 0x0144 +#define SIRFSOC_CLKC_CAN_CLK_SEL 0x0148 +#define SIRFSOC_CLKC_CAN_CLK_SEL_STAT 0x014c + +#define SIRFSOC_CLKC_DEINT_CLKDIV_CFG 0x0150 +#define SIRFSOC_CLKC_DEINT_CLKDIV_ENA 0x0154 +#define SIRFSOC_CLKC_DEINT_CLK_SEL 0x0158 +#define SIRFSOC_CLKC_DEINT_CLK_SEL_STAT 0x015c + +#define SIRFSOC_CLKC_NAND_CLKDIV_CFG 0x0160 +#define SIRFSOC_CLKC_NAND_CLKDIV_ENA 0x0164 +#define SIRFSOC_CLKC_NAND_CLK_SEL 0x0168 +#define SIRFSOC_CLKC_NAND_CLK_SEL_STAT 0x016c + +#define SIRFSOC_CLKC_DISP0_CLKDIV_CFG 0x0170 +#define SIRFSOC_CLKC_DISP0_CLKDIV_ENA 0x0174 +#define SIRFSOC_CLKC_DISP0_CLK_SEL 0x0178 +#define SIRFSOC_CLKC_DISP0_CLK_SEL_STAT 0x017c + +#define SIRFSOC_CLKC_DISP1_CLKDIV_CFG 0x0180 +#define SIRFSOC_CLKC_DISP1_CLKDIV_ENA 0x0184 +#define SIRFSOC_CLKC_DISP1_CLK_SEL 0x0188 +#define SIRFSOC_CLKC_DISP1_CLK_SEL_STAT 0x018c + +#define SIRFSOC_CLKC_GPU_CLKDIV_CFG 0x0190 +#define SIRFSOC_CLKC_GPU_CLKDIV_ENA 0x0194 +#define SIRFSOC_CLKC_GPU_CLK_SEL 0x0198 +#define SIRFSOC_CLKC_GPU_CLK_SEL_STAT 0x019c + +#define SIRFSOC_CLKC_GNSS_CLKDIV_CFG 0x01a0 +#define SIRFSOC_CLKC_GNSS_CLKDIV_ENA 0x01a4 +#define SIRFSOC_CLKC_GNSS_CLK_SEL 0x01a8 +#define SIRFSOC_CLKC_GNSS_CLK_SEL_STAT 0x01ac + +#define SIRFSOC_CLKC_SHARED_DIVIDER_CFG0 0x01b0 +#define SIRFSOC_CLKC_SHARED_DIVIDER_CFG1 0x01b4 +#define SIRFSOC_CLKC_SHARED_DIVIDER_ENA 0x01b8 + +#define SIRFSOC_CLKC_SYS_CLK_SEL 0x01bc +#define SIRFSOC_CLKC_SYS_CLK_SEL_STAT 0x01c0 +#define SIRFSOC_CLKC_IO_CLK_SEL 0x01c4 +#define SIRFSOC_CLKC_IO_CLK_SEL_STAT 0x01c8 +#define SIRFSOC_CLKC_G2D_CLK_SEL 0x01cc +#define SIRFSOC_CLKC_G2D_CLK_SEL_STAT 0x01d0 +#define SIRFSOC_CLKC_JPENC_CLK_SEL 0x01d4 +#define SIRFSOC_CLKC_JPENC_CLK_SEL_STAT 0x01d8 +#define SIRFSOC_CLKC_VDEC_CLK_SEL 0x01dc +#define SIRFSOC_CLKC_VDEC_CLK_SEL_STAT 0x01e0 +#define SIRFSOC_CLKC_GMAC_CLK_SEL 0x01e4 +#define SIRFSOC_CLKC_GMAC_CLK_SEL_STAT 0x01e8 +#define SIRFSOC_CLKC_USB_CLK_SEL 0x01ec +#define SIRFSOC_CLKC_USB_CLK_SEL_STAT 0x01f0 +#define SIRFSOC_CLKC_KAS_CLK_SEL 0x01f4 +#define SIRFSOC_CLKC_KAS_CLK_SEL_STAT 0x01f8 +#define SIRFSOC_CLKC_SEC_CLK_SEL 0x01fc +#define SIRFSOC_CLKC_SEC_CLK_SEL_STAT 0x0200 +#define SIRFSOC_CLKC_SDR_CLK_SEL 0x0204 +#define SIRFSOC_CLKC_SDR_CLK_SEL_STAT 0x0208 +#define SIRFSOC_CLKC_VIP_CLK_SEL 0x020c +#define SIRFSOC_CLKC_VIP_CLK_SEL_STAT 0x0210 +#define SIRFSOC_CLKC_NOCD_CLK_SEL 0x0214 +#define SIRFSOC_CLKC_NOCD_CLK_SEL_STAT 0x0218 +#define SIRFSOC_CLKC_NOCR_CLK_SEL 0x021c +#define SIRFSOC_CLKC_NOCR_CLK_SEL_STAT 0x0220 +#define SIRFSOC_CLKC_TPIU_CLK_SEL 0x0224 +#define SIRFSOC_CLKC_TPIU_CLK_SEL_STAT 0x0228 + +#define SIRFSOC_CLKC_ROOT_CLK_EN0_SET 0x022c +#define SIRFSOC_CLKC_ROOT_CLK_EN0_CLR 0x0230 +#define SIRFSOC_CLKC_ROOT_CLK_EN0_STAT 0x0234 +#define SIRFSOC_CLKC_ROOT_CLK_EN1_SET 0x0238 +#define SIRFSOC_CLKC_ROOT_CLK_EN1_CLR 0x023c +#define SIRFSOC_CLKC_ROOT_CLK_EN1_STAT 0x0240 + +#define SIRFSOC_CLKC_LEAF_CLK_EN0_SET 0x0244 +#define SIRFSOC_CLKC_LEAF_CLK_EN0_CLR 0x0248 +#define SIRFSOC_CLKC_LEAF_CLK_EN0_STAT 0x024c + +#define SIRFSOC_CLKC_RSTC_A7_SW_RST 0x0308 + +#define SIRFSOC_CLKC_LEAF_CLK_EN1_SET 0x04a0 
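The ROOT_CLK_EN*/LEAF_CLK_EN* registers above come in _SET/_CLR/_STAT triplets, which suggests the usual write-one-to-set / write-one-to-clear gate layout where a single write flips one enable bit without a read-modify-write cycle. A hedged sketch assuming that semantic; the helper is invented, the offsets are the driver's own.

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/types.h>

/* base is the ioremapped clock controller, as mapped by the real driver. */
static void demo_root0_gate(void __iomem *base, unsigned int bit, bool enable)
{
	writel(BIT(bit), base + (enable ? SIRFSOC_CLKC_ROOT_CLK_EN0_SET
					: SIRFSOC_CLKC_ROOT_CLK_EN0_CLR));
}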
+#define SIRFSOC_CLKC_LEAF_CLK_EN2_SET 0x04b8 +#define SIRFSOC_CLKC_LEAF_CLK_EN3_SET 0x04d0 +#define SIRFSOC_CLKC_LEAF_CLK_EN4_SET 0x04e8 +#define SIRFSOC_CLKC_LEAF_CLK_EN5_SET 0x0500 +#define SIRFSOC_CLKC_LEAF_CLK_EN6_SET 0x0518 +#define SIRFSOC_CLKC_LEAF_CLK_EN7_SET 0x0530 +#define SIRFSOC_CLKC_LEAF_CLK_EN8_SET 0x0548 + + +static void __iomem *sirfsoc_clk_vbase; +static struct clk_onecell_data clk_data; + +static const struct clk_div_table pll_div_table[] = { + { .val = 0, .div = 1 }, + { .val = 1, .div = 2 }, + { .val = 2, .div = 4 }, + { .val = 3, .div = 8 }, + { .val = 4, .div = 16 }, + { .val = 5, .div = 32 }, +}; + +struct clk_pll { + struct clk_hw hw; + u16 regofs; /* register offset */ +}; +#define to_pllclk(_hw) container_of(_hw, struct clk_pll, hw) + +struct clk_dto { + struct clk_hw hw; + u16 inc_offset; /* dto increment offset */ + u16 src_offset; /* dto src offset */ +}; +#define to_dtoclk(_hw) container_of(_hw, struct clk_dto, hw) + +struct clk_unit { + struct clk_hw hw; + u16 regofs; + u16 bit; + spinlock_t *lock; +}; +#define to_unitclk(_hw) container_of(_hw, struct clk_unit, hw) + +struct atlas7_div_init_data { + const char *div_name; + const char *parent_name; + const char *gate_name; + unsigned long flags; + u8 divider_flags; + u8 gate_flags; + u32 div_offset; + u8 shift; + u8 width; + u32 gate_offset; + u8 gate_bit; + spinlock_t *lock; +}; + +struct atlas7_mux_init_data { + const char *mux_name; + const char * const *parent_names; + u8 parent_num; + unsigned long flags; + u8 mux_flags; + u32 mux_offset; + u8 shift; + u8 width; +}; + +struct atlas7_unit_init_data { + u32 index; + const char *unit_name; + const char *parent_name; + unsigned long flags; + u32 regofs; + u8 bit; + spinlock_t *lock; +}; + +struct atlas7_reset_desc { + const char *name; + u32 clk_ofs; + u8 clk_bit; + u32 rst_ofs; + u8 rst_bit; + spinlock_t *lock; +}; + +static DEFINE_SPINLOCK(cpupll_ctrl1_lock); +static DEFINE_SPINLOCK(mempll_ctrl1_lock); +static DEFINE_SPINLOCK(sys0pll_ctrl1_lock); +static DEFINE_SPINLOCK(sys1pll_ctrl1_lock); +static DEFINE_SPINLOCK(sys2pll_ctrl1_lock); +static DEFINE_SPINLOCK(sys3pll_ctrl1_lock); +static DEFINE_SPINLOCK(usbphy_div_lock); +static DEFINE_SPINLOCK(btss_div_lock); +static DEFINE_SPINLOCK(rgmii_div_lock); +static DEFINE_SPINLOCK(cpu_div_lock); +static DEFINE_SPINLOCK(sdphy01_div_lock); +static DEFINE_SPINLOCK(sdphy23_div_lock); +static DEFINE_SPINLOCK(sdphy45_div_lock); +static DEFINE_SPINLOCK(sdphy67_div_lock); +static DEFINE_SPINLOCK(can_div_lock); +static DEFINE_SPINLOCK(deint_div_lock); +static DEFINE_SPINLOCK(nand_div_lock); +static DEFINE_SPINLOCK(disp0_div_lock); +static DEFINE_SPINLOCK(disp1_div_lock); +static DEFINE_SPINLOCK(gpu_div_lock); +static DEFINE_SPINLOCK(gnss_div_lock); +/* gate register shared */ +static DEFINE_SPINLOCK(share_div_lock); +static DEFINE_SPINLOCK(root0_gate_lock); +static DEFINE_SPINLOCK(root1_gate_lock); +static DEFINE_SPINLOCK(leaf0_gate_lock); +static DEFINE_SPINLOCK(leaf1_gate_lock); +static DEFINE_SPINLOCK(leaf2_gate_lock); +static DEFINE_SPINLOCK(leaf3_gate_lock); +static DEFINE_SPINLOCK(leaf4_gate_lock); +static DEFINE_SPINLOCK(leaf5_gate_lock); +static DEFINE_SPINLOCK(leaf6_gate_lock); +static DEFINE_SPINLOCK(leaf7_gate_lock); +static DEFINE_SPINLOCK(leaf8_gate_lock); + +static inline unsigned long clkc_readl(unsigned reg) +{ + return readl(sirfsoc_clk_vbase + reg); +} + +static inline void clkc_writel(u32 val, unsigned reg) +{ + writel(val, sirfsoc_clk_vbase + reg); +} + +/* +* ABPLL +* integer mode: Fvco = Fin * 2 * NF 
/ NR +* Spread Spectrum mode: Fvco = Fin * SSN / NR +* SSN = 2^24 / (256 * ((ssdiv >> ssdepth) << ssdepth) + (ssmod << ssdepth)) +*/ +static unsigned long pll_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + unsigned long fin = parent_rate; + struct clk_pll *clk = to_pllclk(hw); + u64 rate; + u32 regctrl0 = clkc_readl(clk->regofs + SIRFSOC_CLKC_MEMPLL_AB_CTRL0 - + SIRFSOC_CLKC_MEMPLL_AB_FREQ); + u32 regfreq = clkc_readl(clk->regofs); + u32 regssc = clkc_readl(clk->regofs + SIRFSOC_CLKC_MEMPLL_AB_SSC - + SIRFSOC_CLKC_MEMPLL_AB_FREQ); + u32 nr = (regfreq >> 16 & (BIT(3) - 1)) + 1; + u32 nf = (regfreq & (BIT(9) - 1)) + 1; + u32 ssdiv = regssc >> 8 & (BIT(12) - 1); + u32 ssdepth = regssc >> 20 & (BIT(2) - 1); + u32 ssmod = regssc & (BIT(8) - 1); + + if (regctrl0 & SIRFSOC_ABPLL_CTRL0_BYPASS) + return fin; + + if (regctrl0 & SIRFSOC_ABPLL_CTRL0_SSEN) { + rate = fin; + rate *= 1 << 24; + do_div(rate, (256 * ((ssdiv >> ssdepth) << ssdepth) + + (ssmod << ssdepth))); + } else { + rate = 2 * fin; + rate *= nf; + do_div(rate, nr); + } + return rate; +} + +static const struct clk_ops ab_pll_ops = { + .recalc_rate = pll_clk_recalc_rate, +}; + +static const char * const pll_clk_parents[] = { + "xin", +}; + +static struct clk_init_data clk_cpupll_init = { + .name = "cpupll_vco", + .ops = &ab_pll_ops, + .parent_names = pll_clk_parents, + .num_parents = ARRAY_SIZE(pll_clk_parents), +}; + +static struct clk_pll clk_cpupll = { + .regofs = SIRFSOC_CLKC_CPUPLL_AB_FREQ, + .hw = { + .init = &clk_cpupll_init, + }, +}; + +static struct clk_init_data clk_mempll_init = { + .name = "mempll_vco", + .ops = &ab_pll_ops, + .parent_names = pll_clk_parents, + .num_parents = ARRAY_SIZE(pll_clk_parents), +}; + +static struct clk_pll clk_mempll = { + .regofs = SIRFSOC_CLKC_MEMPLL_AB_FREQ, + .hw = { + .init = &clk_mempll_init, + }, +}; + +static struct clk_init_data clk_sys0pll_init = { + .name = "sys0pll_vco", + .ops = &ab_pll_ops, + .parent_names = pll_clk_parents, + .num_parents = ARRAY_SIZE(pll_clk_parents), +}; + +static struct clk_pll clk_sys0pll = { + .regofs = SIRFSOC_CLKC_SYS0PLL_AB_FREQ, + .hw = { + .init = &clk_sys0pll_init, + }, +}; + +static struct clk_init_data clk_sys1pll_init = { + .name = "sys1pll_vco", + .ops = &ab_pll_ops, + .parent_names = pll_clk_parents, + .num_parents = ARRAY_SIZE(pll_clk_parents), +}; + +static struct clk_pll clk_sys1pll = { + .regofs = SIRFSOC_CLKC_SYS1PLL_AB_FREQ, + .hw = { + .init = &clk_sys1pll_init, + }, +}; + +static struct clk_init_data clk_sys2pll_init = { + .name = "sys2pll_vco", + .ops = &ab_pll_ops, + .parent_names = pll_clk_parents, + .num_parents = ARRAY_SIZE(pll_clk_parents), +}; + +static struct clk_pll clk_sys2pll = { + .regofs = SIRFSOC_CLKC_SYS2PLL_AB_FREQ, + .hw = { + .init = &clk_sys2pll_init, + }, +}; + +static struct clk_init_data clk_sys3pll_init = { + .name = "sys3pll_vco", + .ops = &ab_pll_ops, + .parent_names = pll_clk_parents, + .num_parents = ARRAY_SIZE(pll_clk_parents), +}; + +static struct clk_pll clk_sys3pll = { + .regofs = SIRFSOC_CLKC_SYS3PLL_AB_FREQ, + .hw = { + .init = &clk_sys3pll_init, + }, +}; + +/* + * DTO in clkc, default enable double resolution mode + * double resolution mode:fout = fin * finc / 2^29 + * normal mode:fout = fin * finc / 2^28 + */ +static int dto_clk_is_enabled(struct clk_hw *hw) +{ + struct clk_dto *clk = to_dtoclk(hw); + int reg; + + reg = clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_ENA - SIRFSOC_CLKC_AUDIO_DTO_SRC; + + return !!(clkc_readl(reg) & BIT(0)); +} + +static int dto_clk_enable(struct clk_hw *hw) +{ + 
u32 val, reg; + struct clk_dto *clk = to_dtoclk(hw); + + reg = clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_ENA - SIRFSOC_CLKC_AUDIO_DTO_SRC; + + val = clkc_readl(reg) | BIT(0); + clkc_writel(val, reg); + return 0; +} + +static void dto_clk_disable(struct clk_hw *hw) +{ + u32 val, reg; + struct clk_dto *clk = to_dtoclk(hw); + + reg = clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_ENA - SIRFSOC_CLKC_AUDIO_DTO_SRC; + + val = clkc_readl(reg) & ~BIT(0); + clkc_writel(val, reg); +} + +static unsigned long dto_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + u64 rate = parent_rate; + struct clk_dto *clk = to_dtoclk(hw); + u32 finc = clkc_readl(clk->inc_offset); + u32 droff = clkc_readl(clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_DROFF - SIRFSOC_CLKC_AUDIO_DTO_SRC); + + rate *= finc; + if (droff & BIT(0)) + /* Double resolution off */ + do_div(rate, 1 << 28); + else + do_div(rate, 1 << 29); + + return rate; +} + +static long dto_clk_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + u64 dividend = rate * (1 << 29); + + do_div(dividend, *parent_rate); + dividend *= *parent_rate; + do_div(dividend, 1 << 29); + + return dividend; +} + +static int dto_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + u64 dividend = rate * (1 << 29); + struct clk_dto *clk = to_dtoclk(hw); + + do_div(dividend, parent_rate); + clkc_writel(0, clk->src_offset + SIRFSOC_CLKC_AUDIO_DTO_DROFF - SIRFSOC_CLKC_AUDIO_DTO_SRC); + clkc_writel(dividend, clk->inc_offset); + + return 0; +} + +static u8 dto_clk_get_parent(struct clk_hw *hw) +{ + struct clk_dto *clk = to_dtoclk(hw); + + return clkc_readl(clk->src_offset); +} + +/* + * dto need CLK_SET_PARENT_GATE + */ +static int dto_clk_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_dto *clk = to_dtoclk(hw); + + clkc_writel(index, clk->src_offset); + return 0; +} + +static const struct clk_ops dto_ops = { + .is_enabled = dto_clk_is_enabled, + .enable = dto_clk_enable, + .disable = dto_clk_disable, + .recalc_rate = dto_clk_recalc_rate, + .round_rate = dto_clk_round_rate, + .set_rate = dto_clk_set_rate, + .get_parent = dto_clk_get_parent, + .set_parent = dto_clk_set_parent, +}; + +/* dto parent clock as syspllvco/clk1 */ +static const char * const audiodto_clk_parents[] = { + "sys0pll_clk1", + "sys1pll_clk1", + "sys3pll_clk1", +}; + +static struct clk_init_data clk_audiodto_init = { + .name = "audio_dto", + .ops = &dto_ops, + .parent_names = audiodto_clk_parents, + .num_parents = ARRAY_SIZE(audiodto_clk_parents), +}; + +static struct clk_dto clk_audio_dto = { + .inc_offset = SIRFSOC_CLKC_AUDIO_DTO_INC, + .src_offset = SIRFSOC_CLKC_AUDIO_DTO_SRC, + .hw = { + .init = &clk_audiodto_init, + }, +}; + +static const char * const disp0dto_clk_parents[] = { + "sys0pll_clk1", + "sys1pll_clk1", + "sys3pll_clk1", +}; + +static struct clk_init_data clk_disp0dto_init = { + .name = "disp0_dto", + .ops = &dto_ops, + .parent_names = disp0dto_clk_parents, + .num_parents = ARRAY_SIZE(disp0dto_clk_parents), +}; + +static struct clk_dto clk_disp0_dto = { + .inc_offset = SIRFSOC_CLKC_DISP0_DTO_INC, + .src_offset = SIRFSOC_CLKC_DISP0_DTO_SRC, + .hw = { + .init = &clk_disp0dto_init, + }, +}; + +static const char * const disp1dto_clk_parents[] = { + "sys0pll_clk1", + "sys1pll_clk1", + "sys3pll_clk1", +}; + +static struct clk_init_data clk_disp1dto_init = { + .name = "disp1_dto", + .ops = &dto_ops, + .parent_names = disp1dto_clk_parents, + .num_parents = ARRAY_SIZE(disp1dto_clk_parents), +}; + +static struct clk_dto 
clk_disp1_dto = { + .inc_offset = SIRFSOC_CLKC_DISP1_DTO_INC, + .src_offset = SIRFSOC_CLKC_DISP1_DTO_SRC, + .hw = { + .init = &clk_disp1dto_init, + }, +}; + +static struct atlas7_div_init_data divider_list[] __initdata = { + /* div_name, parent_name, gate_name, clk_flag, divider_flag, gate_flag, div_offset, shift, wdith, gate_offset, bit_enable, lock */ + { "sys0pll_qa1", "sys0pll_fixdiv", "sys0pll_a1", 0, 0, 0, SIRFSOC_CLKC_USBPHY_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_USBPHY_CLKDIV_ENA, 0, &usbphy_div_lock }, + { "sys1pll_qa1", "sys1pll_fixdiv", "sys1pll_a1", 0, 0, 0, SIRFSOC_CLKC_USBPHY_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_USBPHY_CLKDIV_ENA, 4, &usbphy_div_lock }, + { "sys2pll_qa1", "sys2pll_fixdiv", "sys2pll_a1", 0, 0, 0, SIRFSOC_CLKC_USBPHY_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_USBPHY_CLKDIV_ENA, 8, &usbphy_div_lock }, + { "sys3pll_qa1", "sys3pll_fixdiv", "sys3pll_a1", 0, 0, 0, SIRFSOC_CLKC_USBPHY_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_USBPHY_CLKDIV_ENA, 12, &usbphy_div_lock }, + { "sys0pll_qa2", "sys0pll_fixdiv", "sys0pll_a2", 0, 0, 0, SIRFSOC_CLKC_BTSS_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_BTSS_CLKDIV_ENA, 0, &btss_div_lock }, + { "sys1pll_qa2", "sys1pll_fixdiv", "sys1pll_a2", 0, 0, 0, SIRFSOC_CLKC_BTSS_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_BTSS_CLKDIV_ENA, 4, &btss_div_lock }, + { "sys2pll_qa2", "sys2pll_fixdiv", "sys2pll_a2", 0, 0, 0, SIRFSOC_CLKC_BTSS_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_BTSS_CLKDIV_ENA, 8, &btss_div_lock }, + { "sys3pll_qa2", "sys3pll_fixdiv", "sys3pll_a2", 0, 0, 0, SIRFSOC_CLKC_BTSS_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_BTSS_CLKDIV_ENA, 12, &btss_div_lock }, + { "sys0pll_qa3", "sys0pll_fixdiv", "sys0pll_a3", 0, 0, 0, SIRFSOC_CLKC_RGMII_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_RGMII_CLKDIV_ENA, 0, &rgmii_div_lock }, + { "sys1pll_qa3", "sys1pll_fixdiv", "sys1pll_a3", 0, 0, 0, SIRFSOC_CLKC_RGMII_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_RGMII_CLKDIV_ENA, 4, &rgmii_div_lock }, + { "sys2pll_qa3", "sys2pll_fixdiv", "sys2pll_a3", 0, 0, 0, SIRFSOC_CLKC_RGMII_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_RGMII_CLKDIV_ENA, 8, &rgmii_div_lock }, + { "sys3pll_qa3", "sys3pll_fixdiv", "sys3pll_a3", 0, 0, 0, SIRFSOC_CLKC_RGMII_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_RGMII_CLKDIV_ENA, 12, &rgmii_div_lock }, + { "sys0pll_qa4", "sys0pll_fixdiv", "sys0pll_a4", 0, 0, 0, SIRFSOC_CLKC_CPU_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_CPU_CLKDIV_ENA, 0, &cpu_div_lock }, + { "sys1pll_qa4", "sys1pll_fixdiv", "sys1pll_a4", 0, 0, CLK_IGNORE_UNUSED, SIRFSOC_CLKC_CPU_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_CPU_CLKDIV_ENA, 4, &cpu_div_lock }, + { "sys0pll_qa5", "sys0pll_fixdiv", "sys0pll_a5", 0, 0, 0, SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA, 0, &sdphy01_div_lock }, + { "sys1pll_qa5", "sys1pll_fixdiv", "sys1pll_a5", 0, 0, 0, SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA, 4, &sdphy01_div_lock }, + { "sys2pll_qa5", "sys2pll_fixdiv", "sys2pll_a5", 0, 0, 0, SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA, 8, &sdphy01_div_lock }, + { "sys3pll_qa5", "sys3pll_fixdiv", "sys3pll_a5", 0, 0, 0, SIRFSOC_CLKC_SDPHY01_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_SDPHY01_CLKDIV_ENA, 12, &sdphy01_div_lock }, + { "sys0pll_qa6", "sys0pll_fixdiv", "sys0pll_a6", 0, 0, 0, SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA, 0, &sdphy23_div_lock }, + { "sys1pll_qa6", "sys1pll_fixdiv", "sys1pll_a6", 0, 0, 0, SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA, 4, &sdphy23_div_lock }, + { "sys2pll_qa6", "sys2pll_fixdiv", "sys2pll_a6", 0, 0, 0, SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA, 8, 
&sdphy23_div_lock }, + { "sys3pll_qa6", "sys3pll_fixdiv", "sys3pll_a6", 0, 0, 0, SIRFSOC_CLKC_SDPHY23_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_SDPHY23_CLKDIV_ENA, 12, &sdphy23_div_lock }, + { "sys0pll_qa7", "sys0pll_fixdiv", "sys0pll_a7", 0, 0, 0, SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA, 0, &sdphy45_div_lock }, + { "sys1pll_qa7", "sys1pll_fixdiv", "sys1pll_a7", 0, 0, 0, SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA, 4, &sdphy45_div_lock }, + { "sys2pll_qa7", "sys2pll_fixdiv", "sys2pll_a7", 0, 0, 0, SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA, 8, &sdphy45_div_lock }, + { "sys3pll_qa7", "sys3pll_fixdiv", "sys3pll_a7", 0, 0, 0, SIRFSOC_CLKC_SDPHY45_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_SDPHY45_CLKDIV_ENA, 12, &sdphy45_div_lock }, + { "sys0pll_qa8", "sys0pll_fixdiv", "sys0pll_a8", 0, 0, 0, SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA, 0, &sdphy67_div_lock }, + { "sys1pll_qa8", "sys1pll_fixdiv", "sys1pll_a8", 0, 0, 0, SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA, 4, &sdphy67_div_lock }, + { "sys2pll_qa8", "sys2pll_fixdiv", "sys2pll_a8", 0, 0, 0, SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA, 8, &sdphy67_div_lock }, + { "sys3pll_qa8", "sys3pll_fixdiv", "sys3pll_a8", 0, 0, 0, SIRFSOC_CLKC_SDPHY67_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_SDPHY67_CLKDIV_ENA, 12, &sdphy67_div_lock }, + { "sys0pll_qa9", "sys0pll_fixdiv", "sys0pll_a9", 0, 0, 0, SIRFSOC_CLKC_CAN_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_CAN_CLKDIV_ENA, 0, &can_div_lock }, + { "sys1pll_qa9", "sys1pll_fixdiv", "sys1pll_a9", 0, 0, 0, SIRFSOC_CLKC_CAN_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_CAN_CLKDIV_ENA, 4, &can_div_lock }, + { "sys2pll_qa9", "sys2pll_fixdiv", "sys2pll_a9", 0, 0, 0, SIRFSOC_CLKC_CAN_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_CAN_CLKDIV_ENA, 8, &can_div_lock }, + { "sys3pll_qa9", "sys3pll_fixdiv", "sys3pll_a9", 0, 0, 0, SIRFSOC_CLKC_CAN_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_CAN_CLKDIV_ENA, 12, &can_div_lock }, + { "sys0pll_qa10", "sys0pll_fixdiv", "sys0pll_a10", 0, 0, 0, SIRFSOC_CLKC_DEINT_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_DEINT_CLKDIV_ENA, 0, &deint_div_lock }, + { "sys1pll_qa10", "sys1pll_fixdiv", "sys1pll_a10", 0, 0, 0, SIRFSOC_CLKC_DEINT_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_DEINT_CLKDIV_ENA, 4, &deint_div_lock }, + { "sys2pll_qa10", "sys2pll_fixdiv", "sys2pll_a10", 0, 0, 0, SIRFSOC_CLKC_DEINT_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_DEINT_CLKDIV_ENA, 8, &deint_div_lock }, + { "sys3pll_qa10", "sys3pll_fixdiv", "sys3pll_a10", 0, 0, 0, SIRFSOC_CLKC_DEINT_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_DEINT_CLKDIV_ENA, 12, &deint_div_lock }, + { "sys0pll_qa11", "sys0pll_fixdiv", "sys0pll_a11", 0, 0, 0, SIRFSOC_CLKC_NAND_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_NAND_CLKDIV_ENA, 0, &nand_div_lock }, + { "sys1pll_qa11", "sys1pll_fixdiv", "sys1pll_a11", 0, 0, 0, SIRFSOC_CLKC_NAND_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_NAND_CLKDIV_ENA, 4, &nand_div_lock }, + { "sys2pll_qa11", "sys2pll_fixdiv", "sys2pll_a11", 0, 0, 0, SIRFSOC_CLKC_NAND_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_NAND_CLKDIV_ENA, 8, &nand_div_lock }, + { "sys3pll_qa11", "sys3pll_fixdiv", "sys3pll_a11", 0, 0, 0, SIRFSOC_CLKC_NAND_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_NAND_CLKDIV_ENA, 12, &nand_div_lock }, + { "sys0pll_qa12", "sys0pll_fixdiv", "sys0pll_a12", 0, 0, 0, SIRFSOC_CLKC_DISP0_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_DISP0_CLKDIV_ENA, 0, &disp0_div_lock }, + { "sys1pll_qa12", "sys1pll_fixdiv", "sys1pll_a12", 0, 0, 0, SIRFSOC_CLKC_DISP0_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_DISP0_CLKDIV_ENA, 4, &disp0_div_lock }, + { "sys2pll_qa12", 
"sys2pll_fixdiv", "sys2pll_a12", 0, 0, 0, SIRFSOC_CLKC_DISP0_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_DISP0_CLKDIV_ENA, 8, &disp0_div_lock }, + { "sys3pll_qa12", "sys3pll_fixdiv", "sys3pll_a12", 0, 0, 0, SIRFSOC_CLKC_DISP0_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_DISP0_CLKDIV_ENA, 12, &disp0_div_lock }, + { "sys0pll_qa13", "sys0pll_fixdiv", "sys0pll_a13", 0, 0, 0, SIRFSOC_CLKC_DISP1_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_DISP1_CLKDIV_ENA, 0, &disp1_div_lock }, + { "sys1pll_qa13", "sys1pll_fixdiv", "sys1pll_a13", 0, 0, 0, SIRFSOC_CLKC_DISP1_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_DISP1_CLKDIV_ENA, 4, &disp1_div_lock }, + { "sys2pll_qa13", "sys2pll_fixdiv", "sys2pll_a13", 0, 0, 0, SIRFSOC_CLKC_DISP1_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_DISP1_CLKDIV_ENA, 8, &disp1_div_lock }, + { "sys3pll_qa13", "sys3pll_fixdiv", "sys3pll_a13", 0, 0, 0, SIRFSOC_CLKC_DISP1_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_DISP1_CLKDIV_ENA, 12, &disp1_div_lock }, + { "sys0pll_qa14", "sys0pll_fixdiv", "sys0pll_a14", 0, 0, 0, SIRFSOC_CLKC_GPU_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_GPU_CLKDIV_ENA, 0, &gpu_div_lock }, + { "sys1pll_qa14", "sys1pll_fixdiv", "sys1pll_a14", 0, 0, 0, SIRFSOC_CLKC_GPU_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_GPU_CLKDIV_ENA, 4, &gpu_div_lock }, + { "sys2pll_qa14", "sys2pll_fixdiv", "sys2pll_a14", 0, 0, 0, SIRFSOC_CLKC_GPU_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_GPU_CLKDIV_ENA, 8, &gpu_div_lock }, + { "sys3pll_qa14", "sys3pll_fixdiv", "sys3pll_a14", 0, 0, 0, SIRFSOC_CLKC_GPU_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_GPU_CLKDIV_ENA, 12, &gpu_div_lock }, + { "sys0pll_qa15", "sys0pll_fixdiv", "sys0pll_a15", 0, 0, 0, SIRFSOC_CLKC_GNSS_CLKDIV_CFG, 0, 6, SIRFSOC_CLKC_GNSS_CLKDIV_ENA, 0, &gnss_div_lock }, + { "sys1pll_qa15", "sys1pll_fixdiv", "sys1pll_a15", 0, 0, 0, SIRFSOC_CLKC_GNSS_CLKDIV_CFG, 8, 6, SIRFSOC_CLKC_GNSS_CLKDIV_ENA, 4, &gnss_div_lock }, + { "sys2pll_qa15", "sys2pll_fixdiv", "sys2pll_a15", 0, 0, 0, SIRFSOC_CLKC_GNSS_CLKDIV_CFG, 16, 6, SIRFSOC_CLKC_GNSS_CLKDIV_ENA, 8, &gnss_div_lock }, + { "sys3pll_qa15", "sys3pll_fixdiv", "sys3pll_a15", 0, 0, 0, SIRFSOC_CLKC_GNSS_CLKDIV_CFG, 24, 6, SIRFSOC_CLKC_GNSS_CLKDIV_ENA, 12, &gnss_div_lock }, + { "sys1pll_qa18", "sys1pll_fixdiv", "sys1pll_a18", 0, 0, 0, SIRFSOC_CLKC_SHARED_DIVIDER_CFG0, 24, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 12, &share_div_lock }, + { "sys1pll_qa19", "sys1pll_fixdiv", "sys1pll_a19", 0, 0, CLK_IGNORE_UNUSED, SIRFSOC_CLKC_SHARED_DIVIDER_CFG0, 16, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 8, &share_div_lock }, + { "sys1pll_qa20", "sys1pll_fixdiv", "sys1pll_a20", 0, 0, 0, SIRFSOC_CLKC_SHARED_DIVIDER_CFG0, 8, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 4, &share_div_lock }, + { "sys2pll_qa20", "sys2pll_fixdiv", "sys2pll_a20", 0, 0, 0, SIRFSOC_CLKC_SHARED_DIVIDER_CFG0, 0, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 0, &share_div_lock }, + { "sys1pll_qa17", "sys1pll_fixdiv", "sys1pll_a17", 0, 0, CLK_IGNORE_UNUSED, SIRFSOC_CLKC_SHARED_DIVIDER_CFG1, 8, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 20, &share_div_lock }, + { "sys0pll_qa20", "sys0pll_fixdiv", "sys0pll_a20", 0, 0, 0, SIRFSOC_CLKC_SHARED_DIVIDER_CFG1, 0, 6, SIRFSOC_CLKC_SHARED_DIVIDER_ENA, 16, &share_div_lock }, +}; + +static const char * const i2s_clk_parents[] = { + "xin", + "xinw", + "audio_dto", + /* "pwm_i2s01" */ +}; + +static const char * const usbphy_clk_parents[] = { + "xin", + "xinw", + "sys0pll_a1", + "sys1pll_a1", + "sys2pll_a1", + "sys3pll_a1", +}; + +static const char * const btss_clk_parents[] = { + "xin", + "xinw", + "sys0pll_a2", + "sys1pll_a2", + "sys2pll_a2", + "sys3pll_a2", +}; + +static const char * const rgmii_clk_parents[] = { + "xin", + "xinw", + "sys0pll_a3", + 
"sys1pll_a3", + "sys2pll_a3", + "sys3pll_a3", +}; + +static const char * const cpu_clk_parents[] = { + "xin", + "xinw", + "sys0pll_a4", + "sys1pll_a4", + "cpupll_clk1", +}; + +static const char * const sdphy01_clk_parents[] = { + "xin", + "xinw", + "sys0pll_a5", + "sys1pll_a5", + "sys2pll_a5", + "sys3pll_a5", +}; + +static const char * const sdphy23_clk_parents[] = { + "xin", + "xinw", + "sys0pll_a6", + "sys1pll_a6", + "sys2pll_a6", + "sys3pll_a6", +}; + +static const char * const sdphy45_clk_parents[] = { + "xin", + "xinw", + "sys0pll_a7", + "sys1pll_a7", + "sys2pll_a7", + "sys3pll_a7", +}; + +static const char * const sdphy67_clk_parents[] = { + "xin", + "xinw", + "sys0pll_a8", + "sys1pll_a8", + "sys2pll_a8", + "sys3pll_a8", +}; + +static const char * const can_clk_parents[] = { + "xin", + "xinw", + "sys0pll_a9", + "sys1pll_a9", + "sys2pll_a9", + "sys3pll_a9", +}; + +static const char * const deint_clk_parents[] = { + "xin", + "xinw", + "sys0pll_a10", + "sys1pll_a10", + "sys2pll_a10", + "sys3pll_a10", +}; + +static const char * const nand_clk_parents[] = { + "xin", + "xinw", + "sys0pll_a11", + "sys1pll_a11", + "sys2pll_a11", + "sys3pll_a11", +}; + +static const char * const disp0_clk_parents[] = { + "xin", + "xinw", + "sys0pll_a12", + "sys1pll_a12", + "sys2pll_a12", + "sys3pll_a12", + "disp0_dto", +}; + +static const char * const disp1_clk_parents[] = { + "xin", + "xinw", + "sys0pll_a13", + "sys1pll_a13", + "sys2pll_a13", + "sys3pll_a13", + "disp1_dto", +}; + +static const char * const gpu_clk_parents[] = { + "xin", + "xinw", + "sys0pll_a14", + "sys1pll_a14", + "sys2pll_a14", + "sys3pll_a14", +}; + +static const char * const gnss_clk_parents[] = { + "xin", + "xinw", + "sys0pll_a15", + "sys1pll_a15", + "sys2pll_a15", + "sys3pll_a15", +}; + +static const char * const sys_clk_parents[] = { + "xin", + "xinw", + "sys2pll_a20", + "sys1pll_a20", + "sys1pll_a19", + "sys1pll_a18", + "sys0pll_a20", + "sys1pll_a17", +}; + +static const char * const io_clk_parents[] = { + "xin", + "xinw", + "sys2pll_a20", + "sys1pll_a20", + "sys1pll_a19", + "sys1pll_a18", + "sys0pll_a20", + "sys1pll_a17", +}; + +static const char * const g2d_clk_parents[] = { + "xin", + "xinw", + "sys2pll_a20", + "sys1pll_a20", + "sys1pll_a19", + "sys1pll_a18", + "sys0pll_a20", + "sys1pll_a17", +}; + +static const char * const jpenc_clk_parents[] = { + "xin", + "xinw", + "sys2pll_a20", + "sys1pll_a20", + "sys1pll_a19", + "sys1pll_a18", + "sys0pll_a20", + "sys1pll_a17", +}; + +static const char * const vdec_clk_parents[] = { + "xin", + "xinw", + "sys2pll_a20", + "sys1pll_a20", + "sys1pll_a19", + "sys1pll_a18", + "sys0pll_a20", + "sys1pll_a17", +}; + +static const char * const gmac_clk_parents[] = { + "xin", + "xinw", + "sys2pll_a20", + "sys1pll_a20", + "sys1pll_a19", + "sys1pll_a18", + "sys0pll_a20", + "sys1pll_a17", +}; + +static const char * const usb_clk_parents[] = { + "xin", + "xinw", + "sys2pll_a20", + "sys1pll_a20", + "sys1pll_a19", + "sys1pll_a18", + "sys0pll_a20", + "sys1pll_a17", +}; + +static const char * const kas_clk_parents[] = { + "xin", + "xinw", + "sys2pll_a20", + "sys1pll_a20", + "sys1pll_a19", + "sys1pll_a18", + "sys0pll_a20", + "sys1pll_a17", +}; + +static const char * const sec_clk_parents[] = { + "xin", + "xinw", + "sys2pll_a20", + "sys1pll_a20", + "sys1pll_a19", + "sys1pll_a18", + "sys0pll_a20", + "sys1pll_a17", +}; + +static const char * const sdr_clk_parents[] = { + "xin", + "xinw", + "sys2pll_a20", + "sys1pll_a20", + "sys1pll_a19", + "sys1pll_a18", + "sys0pll_a20", + "sys1pll_a17", +}; + +static const char * 
const vip_clk_parents[] = { + "xin", + "xinw", + "sys2pll_a20", + "sys1pll_a20", + "sys1pll_a19", + "sys1pll_a18", + "sys0pll_a20", + "sys1pll_a17", +}; + +static const char * const nocd_clk_parents[] = { + "xin", + "xinw", + "sys2pll_a20", + "sys1pll_a20", + "sys1pll_a19", + "sys1pll_a18", + "sys0pll_a20", + "sys1pll_a17", +}; + +static const char * const nocr_clk_parents[] = { + "xin", + "xinw", + "sys2pll_a20", + "sys1pll_a20", + "sys1pll_a19", + "sys1pll_a18", + "sys0pll_a20", + "sys1pll_a17", +}; + +static const char * const tpiu_clk_parents[] = { + "xin", + "xinw", + "sys2pll_a20", + "sys1pll_a20", + "sys1pll_a19", + "sys1pll_a18", + "sys0pll_a20", + "sys1pll_a17", +}; + +static struct atlas7_mux_init_data mux_list[] __initdata = { + /* mux_name, parent_names, parent_num, flags, mux_flags, mux_offset, shift, width */ + { "i2s_mux", i2s_clk_parents, ARRAY_SIZE(i2s_clk_parents), 0, 0, SIRFSOC_CLKC_I2S_CLK_SEL, 0, 2 }, + { "usbphy_mux", usbphy_clk_parents, ARRAY_SIZE(usbphy_clk_parents), 0, 0, SIRFSOC_CLKC_I2S_CLK_SEL, 0, 3 }, + { "btss_mux", btss_clk_parents, ARRAY_SIZE(btss_clk_parents), 0, 0, SIRFSOC_CLKC_BTSS_CLK_SEL, 0, 3 }, + { "rgmii_mux", rgmii_clk_parents, ARRAY_SIZE(rgmii_clk_parents), 0, 0, SIRFSOC_CLKC_RGMII_CLK_SEL, 0, 3 }, + { "cpu_mux", cpu_clk_parents, ARRAY_SIZE(cpu_clk_parents), 0, 0, SIRFSOC_CLKC_CPU_CLK_SEL, 0, 3 }, + { "sdphy01_mux", sdphy01_clk_parents, ARRAY_SIZE(sdphy01_clk_parents), 0, 0, SIRFSOC_CLKC_SDPHY01_CLK_SEL, 0, 3 }, + { "sdphy23_mux", sdphy23_clk_parents, ARRAY_SIZE(sdphy23_clk_parents), 0, 0, SIRFSOC_CLKC_SDPHY23_CLK_SEL, 0, 3 }, + { "sdphy45_mux", sdphy45_clk_parents, ARRAY_SIZE(sdphy45_clk_parents), 0, 0, SIRFSOC_CLKC_SDPHY45_CLK_SEL, 0, 3 }, + { "sdphy67_mux", sdphy67_clk_parents, ARRAY_SIZE(sdphy67_clk_parents), 0, 0, SIRFSOC_CLKC_SDPHY67_CLK_SEL, 0, 3 }, + { "can_mux", can_clk_parents, ARRAY_SIZE(can_clk_parents), 0, 0, SIRFSOC_CLKC_CAN_CLK_SEL, 0, 3 }, + { "deint_mux", deint_clk_parents, ARRAY_SIZE(deint_clk_parents), 0, 0, SIRFSOC_CLKC_DEINT_CLK_SEL, 0, 3 }, + { "nand_mux", nand_clk_parents, ARRAY_SIZE(nand_clk_parents), 0, 0, SIRFSOC_CLKC_NAND_CLK_SEL, 0, 3 }, + { "disp0_mux", disp0_clk_parents, ARRAY_SIZE(disp0_clk_parents), 0, 0, SIRFSOC_CLKC_DISP0_CLK_SEL, 0, 3 }, + { "disp1_mux", disp1_clk_parents, ARRAY_SIZE(disp1_clk_parents), 0, 0, SIRFSOC_CLKC_DISP1_CLK_SEL, 0, 3 }, + { "gpu_mux", gpu_clk_parents, ARRAY_SIZE(gpu_clk_parents), 0, 0, SIRFSOC_CLKC_GPU_CLK_SEL, 0, 3 }, + { "gnss_mux", gnss_clk_parents, ARRAY_SIZE(gnss_clk_parents), 0, 0, SIRFSOC_CLKC_GNSS_CLK_SEL, 0, 3 }, + { "sys_mux", sys_clk_parents, ARRAY_SIZE(sys_clk_parents), 0, 0, SIRFSOC_CLKC_SYS_CLK_SEL, 0, 3 }, + { "io_mux", io_clk_parents, ARRAY_SIZE(io_clk_parents), 0, 0, SIRFSOC_CLKC_IO_CLK_SEL, 0, 3 }, + { "g2d_mux", g2d_clk_parents, ARRAY_SIZE(g2d_clk_parents), 0, 0, SIRFSOC_CLKC_G2D_CLK_SEL, 0, 3 }, + { "jpenc_mux", jpenc_clk_parents, ARRAY_SIZE(jpenc_clk_parents), 0, 0, SIRFSOC_CLKC_JPENC_CLK_SEL, 0, 3 }, + { "vdec_mux", vdec_clk_parents, ARRAY_SIZE(vdec_clk_parents), 0, 0, SIRFSOC_CLKC_VDEC_CLK_SEL, 0, 3 }, + { "gmac_mux", gmac_clk_parents, ARRAY_SIZE(gmac_clk_parents), 0, 0, SIRFSOC_CLKC_GMAC_CLK_SEL, 0, 3 }, + { "usb_mux", usb_clk_parents, ARRAY_SIZE(usb_clk_parents), 0, 0, SIRFSOC_CLKC_USB_CLK_SEL, 0, 3 }, + { "kas_mux", kas_clk_parents, ARRAY_SIZE(kas_clk_parents), 0, 0, SIRFSOC_CLKC_KAS_CLK_SEL, 0, 3 }, + { "sec_mux", sec_clk_parents, ARRAY_SIZE(sec_clk_parents), 0, 0, SIRFSOC_CLKC_SEC_CLK_SEL, 0, 3 }, + { "sdr_mux", sdr_clk_parents, ARRAY_SIZE(sdr_clk_parents), 0, 
0, SIRFSOC_CLKC_SDR_CLK_SEL, 0, 3 }, + { "vip_mux", vip_clk_parents, ARRAY_SIZE(vip_clk_parents), 0, 0, SIRFSOC_CLKC_VIP_CLK_SEL, 0, 3 }, + { "nocd_mux", nocd_clk_parents, ARRAY_SIZE(nocd_clk_parents), 0, 0, SIRFSOC_CLKC_NOCD_CLK_SEL, 0, 3 }, + { "nocr_mux", nocr_clk_parents, ARRAY_SIZE(nocr_clk_parents), 0, 0, SIRFSOC_CLKC_NOCR_CLK_SEL, 0, 3 }, + { "tpiu_mux", tpiu_clk_parents, ARRAY_SIZE(tpiu_clk_parents), 0, 0, SIRFSOC_CLKC_TPIU_CLK_SEL, 0, 3 }, +}; + + /* new unit should add start from the tail of list */ +static struct atlas7_unit_init_data unit_list[] __initdata = { + /* unit_name, parent_name, flags, regofs, bit, lock */ + { 0, "audmscm_kas", "kas_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 0, &root0_gate_lock }, + { 1, "gnssm_gnss", "gnss_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 1, &root0_gate_lock }, + { 2, "gpum_gpu", "gpu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 2, &root0_gate_lock }, + { 3, "mediam_g2d", "g2d_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 3, &root0_gate_lock }, + { 4, "mediam_jpenc", "jpenc_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 4, &root0_gate_lock }, + { 5, "vdifm_disp0", "disp0_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 5, &root0_gate_lock }, + { 6, "vdifm_disp1", "disp1_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 6, &root0_gate_lock }, + { 7, "audmscm_i2s", "i2s_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 8, &root0_gate_lock }, + { 8, "audmscm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 11, &root0_gate_lock }, + { 9, "vdifm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 12, &root0_gate_lock }, + { 10, "gnssm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 13, &root0_gate_lock }, + { 11, "mediam_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 14, &root0_gate_lock }, + { 12, "btm_io", "io_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 17, &root0_gate_lock }, + { 13, "mediam_sdphy01", "sdphy01_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 18, &root0_gate_lock }, + { 14, "vdifm_sdphy23", "sdphy23_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 19, &root0_gate_lock }, + { 15, "vdifm_sdphy45", "sdphy45_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 20, &root0_gate_lock }, + { 16, "vdifm_sdphy67", "sdphy67_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 21, &root0_gate_lock }, + { 17, "audmscm_xin", "xin", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 22, &root0_gate_lock }, + { 18, "mediam_nand", "nand_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 27, &root0_gate_lock }, + { 19, "gnssm_sec", "sec_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 28, &root0_gate_lock }, + { 20, "cpum_cpu", "cpu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 29, &root0_gate_lock }, + { 21, "gnssm_xin", "xin", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 30, &root0_gate_lock }, + { 22, "vdifm_vip", "vip_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN0_SET, 31, &root0_gate_lock }, + { 23, "btm_btss", "btss_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 0, &root1_gate_lock }, + { 24, "mediam_usbphy", "usbphy_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 1, &root1_gate_lock }, + { 25, "rtcm_kas", "kas_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 2, &root1_gate_lock }, + { 26, "audmscm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 3, &root1_gate_lock }, + { 27, "vdifm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 4, &root1_gate_lock }, + { 28, "gnssm_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 5, &root1_gate_lock }, + { 29, "mediam_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 6, &root1_gate_lock }, + { 30, "cpum_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 8, &root1_gate_lock }, + { 31, "gpum_nocd", "nocd_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 9, &root1_gate_lock }, + { 32, 
"audmscm_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 11, &root1_gate_lock }, + { 33, "vdifm_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 12, &root1_gate_lock }, + { 34, "gnssm_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 13, &root1_gate_lock }, + { 35, "mediam_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 14, &root1_gate_lock }, + { 36, "ddrm_nocr", "nocr_mux", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 15, &root1_gate_lock }, + { 37, "cpum_tpiu", "tpiu_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 16, &root1_gate_lock }, + { 38, "gpum_nocr", "nocr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 17, &root1_gate_lock }, + { 39, "gnssm_rgmii", "rgmii_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 20, &root1_gate_lock }, + { 40, "mediam_vdec", "vdec_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 21, &root1_gate_lock }, + { 41, "gpum_sdr", "sdr_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 22, &root1_gate_lock }, + { 42, "vdifm_deint", "deint_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 23, &root1_gate_lock }, + { 43, "gnssm_can", "can_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 26, &root1_gate_lock }, + { 44, "mediam_usb", "usb_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 28, &root1_gate_lock }, + { 45, "gnssm_gmac", "gmac_mux", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 29, &root1_gate_lock }, + { 46, "cvd_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 0, &leaf1_gate_lock }, + { 47, "timer_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 1, &leaf1_gate_lock }, + { 48, "pulse_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 2, &leaf1_gate_lock }, + { 49, "tsc_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 3, &leaf1_gate_lock }, + { 50, "tsc_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 21, &leaf1_gate_lock }, + { 51, "ioctop_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 4, &leaf1_gate_lock }, + { 52, "rsc_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 5, &leaf1_gate_lock }, + { 53, "dvm_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 6, &leaf1_gate_lock }, + { 54, "lvds_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 7, &leaf1_gate_lock }, + { 55, "kas_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 8, &leaf1_gate_lock }, + { 56, "ac97_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 9, &leaf1_gate_lock }, + { 57, "usp0_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 10, &leaf1_gate_lock }, + { 58, "usp1_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 11, &leaf1_gate_lock }, + { 59, "usp2_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 12, &leaf1_gate_lock }, + { 60, "dmac2_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 13, &leaf1_gate_lock }, + { 61, "dmac3_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 14, &leaf1_gate_lock }, + { 62, "audioif_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 15, &leaf1_gate_lock }, + { 63, "i2s1_kas", "audmscm_kas", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 17, &leaf1_gate_lock }, + { 64, "thaudmscm_io", "audmscm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 22, &leaf1_gate_lock }, + { 65, "analogtest_xin", "audmscm_xin", 0, SIRFSOC_CLKC_LEAF_CLK_EN1_SET, 23, &leaf1_gate_lock }, + { 66, "sys2pci_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 0, &leaf2_gate_lock }, + { 67, "pciarb_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 1, &leaf2_gate_lock }, + { 68, "pcicopy_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 2, &leaf2_gate_lock }, + { 69, "rom_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 3, &leaf2_gate_lock }, + { 70, "sdio23_io", 
"vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 4, &leaf2_gate_lock }, + { 71, "sdio45_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 5, &leaf2_gate_lock }, + { 72, "sdio67_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 6, &leaf2_gate_lock }, + { 73, "vip1_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 7, &leaf2_gate_lock }, + { 74, "vip1_vip", "vdifm_vip", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 16, &leaf2_gate_lock }, + { 75, "sdio23_sdphy23", "vdifm_sdphy23", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 8, &leaf2_gate_lock }, + { 76, "sdio45_sdphy45", "vdifm_sdphy45", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 9, &leaf2_gate_lock }, + { 77, "sdio67_sdphy67", "vdifm_sdphy67", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 10, &leaf2_gate_lock }, + { 78, "vpp0_disp0", "vdifm_disp0", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 11, &leaf2_gate_lock }, + { 79, "lcd0_disp0", "vdifm_disp0", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 12, &leaf2_gate_lock }, + { 80, "vpp1_disp1", "vdifm_disp1", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 13, &leaf2_gate_lock }, + { 81, "lcd1_disp1", "vdifm_disp1", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 14, &leaf2_gate_lock }, + { 82, "dcu_deint", "vdifm_deint", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 15, &leaf2_gate_lock }, + { 83, "vdifm_dapa_r_nocr", "vdifm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 17, &leaf2_gate_lock }, + { 84, "gpio1_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 18, &leaf2_gate_lock }, + { 85, "thvdifm_io", "vdifm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN2_SET, 19, &leaf2_gate_lock }, + { 86, "gmac_rgmii", "gnssm_rgmii", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 0, &leaf3_gate_lock }, + { 87, "gmac_gmac", "gnssm_gmac", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 1, &leaf3_gate_lock }, + { 88, "uart1_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 2, &leaf3_gate_lock }, + { 89, "dmac0_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 3, &leaf3_gate_lock }, + { 90, "uart0_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 4, &leaf3_gate_lock }, + { 91, "uart2_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 5, &leaf3_gate_lock }, + { 92, "uart3_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 6, &leaf3_gate_lock }, + { 93, "uart4_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 7, &leaf3_gate_lock }, + { 94, "uart5_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 8, &leaf3_gate_lock }, + { 95, "spi1_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 9, &leaf3_gate_lock }, + { 96, "gnss_gnss", "gnssm_gnss", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 10, &leaf3_gate_lock }, + { 97, "canbus1_can", "gnssm_can", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 12, &leaf3_gate_lock }, + { 98, "ccsec_sec", "gnssm_sec", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 15, &leaf3_gate_lock }, + { 99, "ccpub_sec", "gnssm_sec", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 16, &leaf3_gate_lock }, + { 100, "gnssm_dapa_r_nocr", "gnssm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 13, &leaf3_gate_lock }, + { 101, "thgnssm_io", "gnssm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN3_SET, 14, &leaf3_gate_lock }, + { 102, "media_vdec", "mediam_vdec", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 0, &leaf4_gate_lock }, + { 103, "media_jpenc", "mediam_jpenc", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 1, &leaf4_gate_lock }, + { 104, "g2d_g2d", "mediam_g2d", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 2, &leaf4_gate_lock }, + { 105, "i2c0_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 3, &leaf4_gate_lock }, + { 106, "i2c1_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 4, &leaf4_gate_lock }, + { 107, "gpio0_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 5, &leaf4_gate_lock }, + { 108, "nand_io", "mediam_io", 0, 
SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 6, &leaf4_gate_lock }, + { 109, "sdio01_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 7, &leaf4_gate_lock }, + { 110, "sys2pci2_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 8, &leaf4_gate_lock }, + { 111, "sdio01_sdphy01", "mediam_sdphy01", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 9, &leaf4_gate_lock }, + { 112, "nand_nand", "mediam_nand", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 10, &leaf4_gate_lock }, + { 113, "usb0_usb", "mediam_usb", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 11, &leaf4_gate_lock }, + { 114, "usb1_usb", "mediam_usb", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 12, &leaf4_gate_lock }, + { 115, "usbphy0_usbphy", "mediam_usbphy", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 13, &leaf4_gate_lock }, + { 116, "usbphy1_usbphy", "mediam_usbphy", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 14, &leaf4_gate_lock }, + { 117, "thmediam_io", "mediam_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN4_SET, 15, &leaf4_gate_lock }, + { 118, "memc_mem", "mempll_clk1", CLK_IGNORE_UNUSED, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 0, &leaf5_gate_lock }, + { 119, "dapa_mem", "mempll_clk1", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 1, &leaf5_gate_lock }, + { 120, "nocddrm_nocr", "ddrm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 2, &leaf5_gate_lock }, + { 121, "thddrm_nocr", "ddrm_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN5_SET, 3, &leaf5_gate_lock }, + { 122, "spram1_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 0, &leaf6_gate_lock }, + { 123, "spram2_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 1, &leaf6_gate_lock }, + { 124, "coresight_cpudiv2", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 2, &leaf6_gate_lock }, + { 125, "thcpum_cpudiv4", "cpum_cpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN6_SET, 3, &leaf6_gate_lock }, + { 126, "graphic_gpu", "gpum_gpu", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 0, &leaf7_gate_lock }, + { 127, "vss_sdr", "gpum_sdr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 1, &leaf7_gate_lock }, + { 128, "thgpum_nocr", "gpum_nocr", 0, SIRFSOC_CLKC_LEAF_CLK_EN7_SET, 2, &leaf7_gate_lock }, + { 129, "a7ca_btss", "btm_btss", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 1, &leaf8_gate_lock }, + { 130, "dmac4_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 2, &leaf8_gate_lock }, + { 131, "uart6_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 3, &leaf8_gate_lock }, + { 132, "usp3_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 4, &leaf8_gate_lock }, + { 133, "a7ca_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 5, &leaf8_gate_lock }, + { 134, "noc_btm_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 6, &leaf8_gate_lock }, + { 135, "thbtm_io", "btm_io", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 7, &leaf8_gate_lock }, + { 136, "btslow", "xinw_fixdiv_btslow", 0, SIRFSOC_CLKC_ROOT_CLK_EN1_SET, 25, &root1_gate_lock }, + { 137, "a7ca_btslow", "btslow", 0, SIRFSOC_CLKC_LEAF_CLK_EN8_SET, 0, &leaf8_gate_lock }, +}; + +static struct clk *atlas7_clks[ARRAY_SIZE(unit_list)]; + +static int unit_clk_is_enabled(struct clk_hw *hw) +{ + struct clk_unit *clk = to_unitclk(hw); + u32 reg; + + reg = clk->regofs + SIRFSOC_CLKC_ROOT_CLK_EN0_STAT - SIRFSOC_CLKC_ROOT_CLK_EN0_SET; + + return !!(clkc_readl(reg) & BIT(clk->bit)); +} + +static int unit_clk_enable(struct clk_hw *hw) +{ + u32 reg; + struct clk_unit *clk = to_unitclk(hw); + unsigned long flags; + + reg = clk->regofs; + + spin_lock_irqsave(clk->lock, flags); + clkc_writel(BIT(clk->bit), reg); + spin_unlock_irqrestore(clk->lock, flags); + return 0; +} + +static void unit_clk_disable(struct clk_hw *hw) +{ + u32 reg; + struct clk_unit *clk = to_unitclk(hw); + unsigned long flags; + + reg = clk->regofs + 
SIRFSOC_CLKC_ROOT_CLK_EN0_CLR - SIRFSOC_CLKC_ROOT_CLK_EN0_SET; + + spin_lock_irqsave(clk->lock, flags); + clkc_writel(BIT(clk->bit), reg); + spin_unlock_irqrestore(clk->lock, flags); +} + +static const struct clk_ops unit_clk_ops = { + .is_enabled = unit_clk_is_enabled, + .enable = unit_clk_enable, + .disable = unit_clk_disable, +}; + +static struct clk * __init +atlas7_unit_clk_register(struct device *dev, const char *name, + const char * const parent_name, unsigned long flags, + u32 regofs, u8 bit, spinlock_t *lock) +{ + struct clk *clk; + struct clk_unit *unit; + struct clk_init_data init; + + unit = kzalloc(sizeof(*unit), GFP_KERNEL); + if (!unit) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.parent_names = &parent_name; + init.num_parents = 1; + init.ops = &unit_clk_ops; + init.flags = flags; + + unit->hw.init = &init; + unit->regofs = regofs; + unit->bit = bit; + unit->lock = lock; + + clk = clk_register(dev, &unit->hw); + if (IS_ERR(clk)) + kfree(unit); + + return clk; +} + +static struct atlas7_reset_desc atlas7_reset_unit[] = { + { "PWM", 0x0244, 0, 0x0320, 0, &leaf0_gate_lock }, /* 0-5 */ + { "THCGUM", 0x0244, 3, 0x0320, 1, &leaf0_gate_lock }, + { "CVD", 0x04A0, 0, 0x032C, 0, &leaf1_gate_lock }, + { "TIMER", 0x04A0, 1, 0x032C, 1, &leaf1_gate_lock }, + { "PULSEC", 0x04A0, 2, 0x032C, 2, &leaf1_gate_lock }, + { "TSC", 0x04A0, 3, 0x032C, 3, &leaf1_gate_lock }, + { "IOCTOP", 0x04A0, 4, 0x032C, 4, &leaf1_gate_lock }, /* 6-10 */ + { "RSC", 0x04A0, 5, 0x032C, 5, &leaf1_gate_lock }, + { "DVM", 0x04A0, 6, 0x032C, 6, &leaf1_gate_lock }, + { "LVDS", 0x04A0, 7, 0x032C, 7, &leaf1_gate_lock }, + { "KAS", 0x04A0, 8, 0x032C, 8, &leaf1_gate_lock }, + { "AC97", 0x04A0, 9, 0x032C, 9, &leaf1_gate_lock }, /* 11-15 */ + { "USP0", 0x04A0, 10, 0x032C, 10, &leaf1_gate_lock }, + { "USP1", 0x04A0, 11, 0x032C, 11, &leaf1_gate_lock }, + { "USP2", 0x04A0, 12, 0x032C, 12, &leaf1_gate_lock }, + { "DMAC2", 0x04A0, 13, 0x032C, 13, &leaf1_gate_lock }, + { "DMAC3", 0x04A0, 14, 0x032C, 14, &leaf1_gate_lock }, /* 16-20 */ + { "AUDIO", 0x04A0, 15, 0x032C, 15, &leaf1_gate_lock }, + { "I2S1", 0x04A0, 17, 0x032C, 16, &leaf1_gate_lock }, + { "PMU_AUDIO", 0x04A0, 22, 0x032C, 17, &leaf1_gate_lock }, + { "THAUDMSCM", 0x04A0, 23, 0x032C, 18, &leaf1_gate_lock }, + { "SYS2PCI", 0x04B8, 0, 0x0338, 0, &leaf2_gate_lock }, /* 21-25 */ + { "PCIARB", 0x04B8, 1, 0x0338, 1, &leaf2_gate_lock }, + { "PCICOPY", 0x04B8, 2, 0x0338, 2, &leaf2_gate_lock }, + { "ROM", 0x04B8, 3, 0x0338, 3, &leaf2_gate_lock }, + { "SDIO23", 0x04B8, 4, 0x0338, 4, &leaf2_gate_lock }, + { "SDIO45", 0x04B8, 5, 0x0338, 5, &leaf2_gate_lock }, /* 26-30 */ + { "SDIO67", 0x04B8, 6, 0x0338, 6, &leaf2_gate_lock }, + { "VIP1", 0x04B8, 7, 0x0338, 7, &leaf2_gate_lock }, + { "VPP0", 0x04B8, 11, 0x0338, 8, &leaf2_gate_lock }, + { "LCD0", 0x04B8, 12, 0x0338, 9, &leaf2_gate_lock }, + { "VPP1", 0x04B8, 13, 0x0338, 10, &leaf2_gate_lock }, /* 31-35 */ + { "LCD1", 0x04B8, 14, 0x0338, 11, &leaf2_gate_lock }, + { "DCU", 0x04B8, 15, 0x0338, 12, &leaf2_gate_lock }, + { "GPIO", 0x04B8, 18, 0x0338, 13, &leaf2_gate_lock }, + { "DAPA_VDIFM", 0x04B8, 17, 0x0338, 15, &leaf2_gate_lock }, + { "THVDIFM", 0x04B8, 19, 0x0338, 16, &leaf2_gate_lock }, /* 36-40 */ + { "RGMII", 0x04D0, 0, 0x0344, 0, &leaf3_gate_lock }, + { "GMAC", 0x04D0, 1, 0x0344, 1, &leaf3_gate_lock }, + { "UART1", 0x04D0, 2, 0x0344, 2, &leaf3_gate_lock }, + { "DMAC0", 0x04D0, 3, 0x0344, 3, &leaf3_gate_lock }, + { "UART0", 0x04D0, 4, 0x0344, 4, &leaf3_gate_lock }, /* 41-45 */ + { "UART2", 0x04D0, 5, 0x0344, 5, 
&leaf3_gate_lock }, + { "UART3", 0x04D0, 6, 0x0344, 6, &leaf3_gate_lock }, + { "UART4", 0x04D0, 7, 0x0344, 7, &leaf3_gate_lock }, + { "UART5", 0x04D0, 8, 0x0344, 8, &leaf3_gate_lock }, + { "SPI1", 0x04D0, 9, 0x0344, 9, &leaf3_gate_lock }, /* 46-50 */ + { "GNSS_SYS_M0", 0x04D0, 10, 0x0344, 10, &leaf3_gate_lock }, + { "CANBUS1", 0x04D0, 12, 0x0344, 11, &leaf3_gate_lock }, + { "CCSEC", 0x04D0, 15, 0x0344, 12, &leaf3_gate_lock }, + { "CCPUB", 0x04D0, 16, 0x0344, 13, &leaf3_gate_lock }, + { "DAPA_GNSSM", 0x04D0, 13, 0x0344, 14, &leaf3_gate_lock }, /* 51-55 */ + { "THGNSSM", 0x04D0, 14, 0x0344, 15, &leaf3_gate_lock }, + { "VDEC", 0x04E8, 0, 0x0350, 0, &leaf4_gate_lock }, + { "JPENC", 0x04E8, 1, 0x0350, 1, &leaf4_gate_lock }, + { "G2D", 0x04E8, 2, 0x0350, 2, &leaf4_gate_lock }, + { "I2C0", 0x04E8, 3, 0x0350, 3, &leaf4_gate_lock }, /* 56-60 */ + { "I2C1", 0x04E8, 4, 0x0350, 4, &leaf4_gate_lock }, + { "GPIO0", 0x04E8, 5, 0x0350, 5, &leaf4_gate_lock }, + { "NAND", 0x04E8, 6, 0x0350, 6, &leaf4_gate_lock }, + { "SDIO01", 0x04E8, 7, 0x0350, 7, &leaf4_gate_lock }, + { "SYS2PCI2", 0x04E8, 8, 0x0350, 8, &leaf4_gate_lock }, /* 61-65 */ + { "USB0", 0x04E8, 11, 0x0350, 9, &leaf4_gate_lock }, + { "USB1", 0x04E8, 12, 0x0350, 10, &leaf4_gate_lock }, + { "THMEDIAM", 0x04E8, 15, 0x0350, 11, &leaf4_gate_lock }, + { "MEMC_DDRPHY", 0x0500, 0, 0x035C, 0, &leaf5_gate_lock }, + { "MEMC_UPCTL", 0x0500, 0, 0x035C, 1, &leaf5_gate_lock }, /* 66-70 */ + { "DAPA_MEM", 0x0500, 1, 0x035C, 2, &leaf5_gate_lock }, + { "MEMC_MEMDIV", 0x0500, 0, 0x035C, 3, &leaf5_gate_lock }, + { "THDDRM", 0x0500, 3, 0x035C, 4, &leaf5_gate_lock }, + { "CORESIGHT", 0x0518, 3, 0x0368, 13, &leaf6_gate_lock }, + { "THCPUM", 0x0518, 4, 0x0368, 17, &leaf6_gate_lock }, /* 71-75 */ + { "GRAPHIC", 0x0530, 0, 0x0374, 0, &leaf7_gate_lock }, + { "VSS_SDR", 0x0530, 1, 0x0374, 1, &leaf7_gate_lock }, + { "THGPUM", 0x0530, 2, 0x0374, 2, &leaf7_gate_lock }, + { "DMAC4", 0x0548, 2, 0x0380, 1, &leaf8_gate_lock }, + { "UART6", 0x0548, 3, 0x0380, 2, &leaf8_gate_lock }, /* 76- */ + { "USP3", 0x0548, 4, 0x0380, 3, &leaf8_gate_lock }, + { "THBTM", 0x0548, 5, 0x0380, 5, &leaf8_gate_lock }, + { "A7CA", 0x0548, 1, 0x0380, 0, &leaf8_gate_lock }, + { "A7CA_APB", 0x0548, 5, 0x0380, 4, &leaf8_gate_lock }, +}; + +static int atlas7_reset_module(struct reset_controller_dev *rcdev, + unsigned long reset_idx) +{ + struct atlas7_reset_desc *reset = &atlas7_reset_unit[reset_idx]; + unsigned long flags; + + /* + * HW suggest unit reset sequence: + * assert sw reset (0) + * setting sw clk_en to if the clock was disabled before reset + * delay 16 clocks + * disable clock (sw clk_en = 0) + * de-assert reset (1) + * after this sequence, restore clock or not is decided by SW + */ + + spin_lock_irqsave(reset->lock, flags); + /* clock enable or not */ + if (clkc_readl(reset->clk_ofs + 8) & (1 << reset->clk_bit)) { + clkc_writel(1 << reset->rst_bit, reset->rst_ofs + 4); + udelay(2); + clkc_writel(1 << reset->clk_bit, reset->clk_ofs + 4); + clkc_writel(1 << reset->rst_bit, reset->rst_ofs); + /* restore clock enable */ + clkc_writel(1 << reset->clk_bit, reset->clk_ofs); + } else { + clkc_writel(1 << reset->rst_bit, reset->rst_ofs + 4); + clkc_writel(1 << reset->clk_bit, reset->clk_ofs); + udelay(2); + clkc_writel(1 << reset->clk_bit, reset->clk_ofs + 4); + clkc_writel(1 << reset->rst_bit, reset->rst_ofs); + } + spin_unlock_irqrestore(reset->lock, flags); + + return 0; +} + +static struct reset_control_ops atlas7_rst_ops = { + .reset = atlas7_reset_module, +}; + +static struct reset_controller_dev 
atlas7_rst_ctlr = { + .ops = &atlas7_rst_ops, + .owner = THIS_MODULE, + .of_reset_n_cells = 1, +}; + +static void __init atlas7_clk_init(struct device_node *np) +{ + struct clk *clk; + struct atlas7_div_init_data *div; + struct atlas7_mux_init_data *mux; + struct atlas7_unit_init_data *unit; + int i; + int ret; + + sirfsoc_clk_vbase = of_iomap(np, 0); + if (!sirfsoc_clk_vbase) + panic("unable to map clkc registers\n"); + + of_node_put(np); + + clk = clk_register(NULL, &clk_cpupll.hw); + BUG_ON(!clk); + clk = clk_register(NULL, &clk_mempll.hw); + BUG_ON(!clk); + clk = clk_register(NULL, &clk_sys0pll.hw); + BUG_ON(!clk); + clk = clk_register(NULL, &clk_sys1pll.hw); + BUG_ON(!clk); + clk = clk_register(NULL, &clk_sys2pll.hw); + BUG_ON(!clk); + clk = clk_register(NULL, &clk_sys3pll.hw); + BUG_ON(!clk); + + clk = clk_register_divider_table(NULL, "cpupll_div1", "cpupll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1, 0, 3, 0, + pll_div_table, &cpupll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_divider_table(NULL, "cpupll_div2", "cpupll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1, 4, 3, 0, + pll_div_table, &cpupll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_divider_table(NULL, "cpupll_div3", "cpupll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1, 8, 3, 0, + pll_div_table, &cpupll_ctrl1_lock); + BUG_ON(!clk); + + clk = clk_register_divider_table(NULL, "mempll_div1", "mempll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1, 0, 3, 0, + pll_div_table, &mempll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_divider_table(NULL, "mempll_div2", "mempll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1, 4, 3, 0, + pll_div_table, &mempll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_divider_table(NULL, "mempll_div3", "mempll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1, 8, 3, 0, + pll_div_table, &mempll_ctrl1_lock); + BUG_ON(!clk); + + clk = clk_register_divider_table(NULL, "sys0pll_div1", "sys0pll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1, 0, 3, 0, + pll_div_table, &sys0pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_divider_table(NULL, "sys0pll_div2", "sys0pll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1, 4, 3, 0, + pll_div_table, &sys0pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_divider_table(NULL, "sys0pll_div3", "sys0pll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1, 8, 3, 0, + pll_div_table, &sys0pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_fixed_factor(NULL, "sys0pll_fixdiv", "sys0pll_vco", + CLK_SET_RATE_PARENT, 1, 2); + + clk = clk_register_divider_table(NULL, "sys1pll_div1", "sys1pll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1, 0, 3, 0, + pll_div_table, &sys1pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_divider_table(NULL, "sys1pll_div2", "sys1pll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1, 4, 3, 0, + pll_div_table, &sys1pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_divider_table(NULL, "sys1pll_div3", "sys1pll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1, 8, 3, 0, + pll_div_table, &sys1pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_fixed_factor(NULL, "sys1pll_fixdiv", "sys1pll_vco", + CLK_SET_RATE_PARENT, 1, 2); + + clk = clk_register_divider_table(NULL, "sys2pll_div1", "sys2pll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1, 0, 3, 0, + pll_div_table, &sys2pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_divider_table(NULL, "sys2pll_div2", 
"sys2pll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1, 4, 3, 0, + pll_div_table, &sys2pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_divider_table(NULL, "sys2pll_div3", "sys2pll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1, 8, 3, 0, + pll_div_table, &sys2pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_fixed_factor(NULL, "sys2pll_fixdiv", "sys2pll_vco", + CLK_SET_RATE_PARENT, 1, 2); + + clk = clk_register_divider_table(NULL, "sys3pll_div1", "sys3pll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1, 0, 3, 0, + pll_div_table, &sys3pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_divider_table(NULL, "sys3pll_div2", "sys3pll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1, 4, 3, 0, + pll_div_table, &sys3pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_divider_table(NULL, "sys3pll_div3", "sys3pll_vco", 0, + sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1, 8, 3, 0, + pll_div_table, &sys3pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_fixed_factor(NULL, "sys3pll_fixdiv", "sys3pll_vco", + CLK_SET_RATE_PARENT, 1, 2); + + BUG_ON(!clk); + clk = clk_register_fixed_factor(NULL, "xinw_fixdiv_btslow", "xinw", + CLK_SET_RATE_PARENT, 1, 4); + + BUG_ON(!clk); + clk = clk_register_gate(NULL, "cpupll_clk1", "cpupll_div1", + CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1, + 12, 0, &cpupll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_gate(NULL, "cpupll_clk2", "cpupll_div2", + CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1, + 13, 0, &cpupll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_gate(NULL, "cpupll_clk3", "cpupll_div3", + CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_CPUPLL_AB_CTRL1, + 14, 0, &cpupll_ctrl1_lock); + BUG_ON(!clk); + + clk = clk_register_gate(NULL, "mempll_clk1", "mempll_div1", + CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, + sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1, + 12, 0, &mempll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_gate(NULL, "mempll_clk2", "mempll_div2", + CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1, + 13, 0, &mempll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_gate(NULL, "mempll_clk3", "mempll_div3", + CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_MEMPLL_AB_CTRL1, + 14, 0, &mempll_ctrl1_lock); + BUG_ON(!clk); + + clk = clk_register_gate(NULL, "sys0pll_clk1", "sys0pll_div1", + CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1, + 12, 0, &sys0pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_gate(NULL, "sys0pll_clk2", "sys0pll_div2", + CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1, + 13, 0, &sys0pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_gate(NULL, "sys0pll_clk3", "sys0pll_div3", + CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS0PLL_AB_CTRL1, + 14, 0, &sys0pll_ctrl1_lock); + BUG_ON(!clk); + + clk = clk_register_gate(NULL, "sys1pll_clk1", "sys1pll_div1", + CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1, + 12, 0, &sys1pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_gate(NULL, "sys1pll_clk2", "sys1pll_div2", + CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1, + 13, 0, &sys1pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_gate(NULL, "sys1pll_clk3", "sys1pll_div3", + CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS1PLL_AB_CTRL1, + 14, 0, &sys1pll_ctrl1_lock); + BUG_ON(!clk); + + clk = clk_register_gate(NULL, "sys2pll_clk1", "sys2pll_div1", + 
CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1, + 12, 0, &sys2pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_gate(NULL, "sys2pll_clk2", "sys2pll_div2", + CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1, + 13, 0, &sys2pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_gate(NULL, "sys2pll_clk3", "sys2pll_div3", + CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS2PLL_AB_CTRL1, + 14, 0, &sys2pll_ctrl1_lock); + BUG_ON(!clk); + + clk = clk_register_gate(NULL, "sys3pll_clk1", "sys3pll_div1", + CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1, + 12, 0, &sys3pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_gate(NULL, "sys3pll_clk2", "sys3pll_div2", + CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1, + 13, 0, &sys3pll_ctrl1_lock); + BUG_ON(!clk); + clk = clk_register_gate(NULL, "sys3pll_clk3", "sys3pll_div3", + CLK_SET_RATE_PARENT, sirfsoc_clk_vbase + SIRFSOC_CLKC_SYS3PLL_AB_CTRL1, + 14, 0, &sys3pll_ctrl1_lock); + BUG_ON(!clk); + + clk = clk_register(NULL, &clk_audio_dto.hw); + BUG_ON(!clk); + + clk = clk_register(NULL, &clk_disp0_dto.hw); + BUG_ON(!clk); + + clk = clk_register(NULL, &clk_disp1_dto.hw); + BUG_ON(!clk); + + for (i = 0; i < ARRAY_SIZE(divider_list); i++) { + div = ÷r_list[i]; + clk = clk_register_divider(NULL, div->div_name, + div->parent_name, div->divider_flags, sirfsoc_clk_vbase + div->div_offset, + div->shift, div->width, 0, div->lock); + BUG_ON(!clk); + clk = clk_register_gate(NULL, div->gate_name, div->div_name, + div->gate_flags, sirfsoc_clk_vbase + div->gate_offset, + div->gate_bit, 0, div->lock); + BUG_ON(!clk); + } + /* ignore selector status register check */ + for (i = 0; i < ARRAY_SIZE(mux_list); i++) { + mux = &mux_list[i]; + clk = clk_register_mux(NULL, mux->mux_name, mux->parent_names, + mux->parent_num, mux->flags, + sirfsoc_clk_vbase + mux->mux_offset, + mux->shift, mux->width, + mux->mux_flags, NULL); + BUG_ON(!clk); + } + + for (i = 0; i < ARRAY_SIZE(unit_list); i++) { + unit = &unit_list[i]; + atlas7_clks[i] = atlas7_unit_clk_register(NULL, unit->unit_name, unit->parent_name, + unit->flags, unit->regofs, unit->bit, unit->lock); + BUG_ON(!atlas7_clks[i]); + } + + clk_data.clks = atlas7_clks; + clk_data.clk_num = ARRAY_SIZE(unit_list); + + ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); + BUG_ON(ret); + + atlas7_rst_ctlr.of_node = np; + atlas7_rst_ctlr.nr_resets = ARRAY_SIZE(atlas7_reset_unit); + reset_controller_register(&atlas7_rst_ctlr); +} +CLK_OF_DECLARE(atlas7_clk, "sirf,atlas7-car", atlas7_clk_init); diff --git a/drivers/clk/sirf/clk-common.c b/drivers/clk/sirf/clk-common.c index 37af51c5f213..9fc285d784d3 100644 --- a/drivers/clk/sirf/clk-common.c +++ b/drivers/clk/sirf/clk-common.c @@ -10,8 +10,8 @@ #define KHZ 1000 #define MHZ (KHZ * KHZ) -static void *sirfsoc_clk_vbase; -static void *sirfsoc_rsc_vbase; +static void __iomem *sirfsoc_clk_vbase; +static void __iomem *sirfsoc_rsc_vbase; static struct clk_onecell_data clk_data; /* @@ -188,7 +188,7 @@ static struct clk_ops std_pll_ops = { .set_rate = pll_clk_set_rate, }; -static const char *pll_clk_parents[] = { +static const char * const pll_clk_parents[] = { "osc", }; @@ -284,7 +284,7 @@ static struct clk_hw usb_pll_clk_hw = { * clock domains - cpu, mem, sys/io, dsp, gfx */ -static const char *dmn_clk_parents[] = { +static const char * const dmn_clk_parents[] = { "rtc", "osc", "pll1", @@ -673,7 +673,7 @@ static void std_clk_disable(struct clk_hw *hw) clkc_writel(val, reg); } 
-static const char *std_clk_io_parents[] = { +static const char * const std_clk_io_parents[] = { "io", }; @@ -949,7 +949,7 @@ static struct clk_std clk_pulse = { }, }; -static const char *std_clk_dsp_parents[] = { +static const char * const std_clk_dsp_parents[] = { "dsp", }; @@ -981,7 +981,7 @@ static struct clk_std clk_mf = { }, }; -static const char *std_clk_sys_parents[] = { +static const char * const std_clk_sys_parents[] = { "sys", }; @@ -999,7 +999,7 @@ static struct clk_std clk_security = { }, }; -static const char *std_clk_usb_parents[] = { +static const char * const std_clk_usb_parents[] = { "usb_pll", }; diff --git a/drivers/clk/socfpga/Makefile b/drivers/clk/socfpga/Makefile index 7e2d15a0c7b8..d8bb239753a4 100644 --- a/drivers/clk/socfpga/Makefile +++ b/drivers/clk/socfpga/Makefile @@ -2,3 +2,4 @@ obj-y += clk.o obj-y += clk-gate.o obj-y += clk-pll.o obj-y += clk-periph.o +obj-y += clk-pll-a10.o clk-periph-a10.o clk-gate-a10.o diff --git a/drivers/clk/socfpga/clk-gate-a10.c b/drivers/clk/socfpga/clk-gate-a10.c new file mode 100644 index 000000000000..83c6780ff4b2 --- /dev/null +++ b/drivers/clk/socfpga/clk-gate-a10.c @@ -0,0 +1,190 @@ +/* + * Copyright (C) 2015 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/mfd/syscon.h> +#include <linux/of.h> +#include <linux/regmap.h> + +#include "clk.h" + +#define streq(a, b) (strcmp((a), (b)) == 0) + +#define to_socfpga_gate_clk(p) container_of(p, struct socfpga_gate_clk, hw.hw) + +/* SDMMC Group for System Manager defines */ +#define SYSMGR_SDMMCGRP_CTRL_OFFSET 0x28 + +static unsigned long socfpga_gate_clk_recalc_rate(struct clk_hw *hwclk, + unsigned long parent_rate) +{ + struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk); + u32 div = 1, val; + + if (socfpgaclk->fixed_div) + div = socfpgaclk->fixed_div; + else if (socfpgaclk->div_reg) { + val = readl(socfpgaclk->div_reg) >> socfpgaclk->shift; + val &= div_mask(socfpgaclk->width); + div = (1 << val); + } + + return parent_rate / div; +} + +static int socfpga_clk_prepare(struct clk_hw *hwclk) +{ + struct socfpga_gate_clk *socfpgaclk = to_socfpga_gate_clk(hwclk); + int i; + u32 hs_timing; + u32 clk_phase[2]; + + if (socfpgaclk->clk_phase[0] || socfpgaclk->clk_phase[1]) { + for (i = 0; i < ARRAY_SIZE(clk_phase); i++) { + switch (socfpgaclk->clk_phase[i]) { + case 0: + clk_phase[i] = 0; + break; + case 45: + clk_phase[i] = 1; + break; + case 90: + clk_phase[i] = 2; + break; + case 135: + clk_phase[i] = 3; + break; + case 180: + clk_phase[i] = 4; + break; + case 225: + clk_phase[i] = 5; + break; + case 270: + clk_phase[i] = 6; + break; + case 315: + clk_phase[i] = 7; + break; + default: + clk_phase[i] = 0; + break; + } + } + + hs_timing = SYSMGR_SDMMC_CTRL_SET(clk_phase[0], clk_phase[1]); + if (!IS_ERR(socfpgaclk->sys_mgr_base_addr)) + regmap_write(socfpgaclk->sys_mgr_base_addr, + SYSMGR_SDMMCGRP_CTRL_OFFSET, hs_timing); + else + pr_err("%s: cannot set clk_phase because sys_mgr_base_addr is not available!\n", + __func__); + } + return 0; +} + +static struct clk_ops gateclk_ops = { + .prepare = socfpga_clk_prepare, + .recalc_rate = socfpga_gate_clk_recalc_rate, +}; + +static void __init __socfpga_gate_init(struct device_node *node, + const struct clk_ops *ops) +{ + u32 clk_gate[2]; + u32 div_reg[3]; + u32 clk_phase[2]; + u32 fixed_div; + struct clk *clk; + struct socfpga_gate_clk *socfpga_clk; + const char *clk_name = node->name; + const char *parent_name[SOCFPGA_MAX_PARENTS]; + struct clk_init_data init; + int rc; + int i = 0; + + socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL); + if (WARN_ON(!socfpga_clk)) + return; + + rc = of_property_read_u32_array(node, "clk-gate", clk_gate, 2); + if (rc) + clk_gate[0] = 0; + + if (clk_gate[0]) { + socfpga_clk->hw.reg = clk_mgr_a10_base_addr + clk_gate[0]; + socfpga_clk->hw.bit_idx = clk_gate[1]; + + gateclk_ops.enable = clk_gate_ops.enable; + gateclk_ops.disable = clk_gate_ops.disable; + } + + rc = of_property_read_u32(node, "fixed-divider", &fixed_div); + if (rc) + socfpga_clk->fixed_div = 0; + else + socfpga_clk->fixed_div = fixed_div; + + rc = of_property_read_u32_array(node, "div-reg", div_reg, 3); + if (!rc) { + socfpga_clk->div_reg = clk_mgr_a10_base_addr + div_reg[0]; + socfpga_clk->shift = div_reg[1]; + socfpga_clk->width = div_reg[2]; + } else { + socfpga_clk->div_reg = NULL; + } + + rc = of_property_read_u32_array(node, "clk-phase", clk_phase, 2); + if (!rc) { + socfpga_clk->clk_phase[0] = clk_phase[0]; + socfpga_clk->clk_phase[1] = clk_phase[1]; + + socfpga_clk->sys_mgr_base_addr = + syscon_regmap_lookup_by_compatible("altr,sys-mgr"); + if (IS_ERR(socfpga_clk->sys_mgr_base_addr)) { + pr_err("%s: failed to find altr,sys-mgr regmap!\n", + __func__); + return; + } 
+ } + + of_property_read_string(node, "clock-output-names", &clk_name); + + init.name = clk_name; + init.ops = ops; + init.flags = 0; + while (i < SOCFPGA_MAX_PARENTS && (parent_name[i] = + of_clk_get_parent_name(node, i)) != NULL) + i++; + + init.parent_names = parent_name; + init.num_parents = i; + socfpga_clk->hw.hw.init = &init; + + clk = clk_register(NULL, &socfpga_clk->hw.hw); + if (WARN_ON(IS_ERR(clk))) { + kfree(socfpga_clk); + return; + } + rc = of_clk_add_provider(node, of_clk_src_simple_get, clk); + if (WARN_ON(rc)) + return; +} + +void __init socfpga_a10_gate_init(struct device_node *node) +{ + __socfpga_gate_init(node, &gateclk_ops); +} diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c index dd3a78c64795..82449cd76fd7 100644 --- a/drivers/clk/socfpga/clk-gate.c +++ b/drivers/clk/socfpga/clk-gate.c @@ -32,14 +32,10 @@ #define SOCFPGA_MMC_CLK "sdmmc_clk" #define SOCFPGA_GPIO_DB_CLK_OFFSET 0xA8 -#define streq(a, b) (strcmp((a), (b)) == 0) - #define to_socfpga_gate_clk(p) container_of(p, struct socfpga_gate_clk, hw.hw) /* SDMMC Group for System Manager defines */ #define SYSMGR_SDMMCGRP_CTRL_OFFSET 0x108 -#define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) \ - ((((smplsel) & 0x7) << 3) | (((drvsel) & 0x7) << 0)) static u8 socfpga_clk_get_parent(struct clk_hw *hwclk) { @@ -194,7 +190,6 @@ static void __init __socfpga_gate_init(struct device_node *node, const char *parent_name[SOCFPGA_MAX_PARENTS]; struct clk_init_data init; int rc; - int i = 0; socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL); if (WARN_ON(!socfpga_clk)) @@ -224,7 +219,7 @@ static void __init __socfpga_gate_init(struct device_node *node, socfpga_clk->shift = div_reg[1]; socfpga_clk->width = div_reg[2]; } else { - socfpga_clk->div_reg = 0; + socfpga_clk->div_reg = NULL; } rc = of_property_read_u32_array(node, "clk-phase", clk_phase, 2); @@ -238,12 +233,9 @@ static void __init __socfpga_gate_init(struct device_node *node, init.name = clk_name; init.ops = ops; init.flags = 0; - while (i < SOCFPGA_MAX_PARENTS && (parent_name[i] = - of_clk_get_parent_name(node, i)) != NULL) - i++; + init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS); init.parent_names = parent_name; - init.num_parents = i; socfpga_clk->hw.hw.init = &init; clk = clk_register(NULL, &socfpga_clk->hw.hw); diff --git a/drivers/clk/socfpga/clk-periph-a10.c b/drivers/clk/socfpga/clk-periph-a10.c new file mode 100644 index 000000000000..9d0181b5a6a4 --- /dev/null +++ b/drivers/clk/socfpga/clk-periph-a10.c @@ -0,0 +1,138 @@ +/* + * Copyright (C) 2015 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> + +#include "clk.h" + +#define CLK_MGR_FREE_SHIFT 16 +#define CLK_MGR_FREE_MASK 0x7 + +#define SOCFPGA_MPU_FREE_CLK "mpu_free_clk" +#define SOCFPGA_NOC_FREE_CLK "noc_free_clk" +#define SOCFPGA_SDMMC_FREE_CLK "sdmmc_free_clk" +#define to_socfpga_periph_clk(p) container_of(p, struct socfpga_periph_clk, hw.hw) + +static unsigned long clk_periclk_recalc_rate(struct clk_hw *hwclk, + unsigned long parent_rate) +{ + struct socfpga_periph_clk *socfpgaclk = to_socfpga_periph_clk(hwclk); + u32 div; + + if (socfpgaclk->fixed_div) { + div = socfpgaclk->fixed_div; + } else if (socfpgaclk->div_reg) { + div = readl(socfpgaclk->div_reg) >> socfpgaclk->shift; + div &= div_mask(socfpgaclk->width); + div += 1; + } else { + div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1); + } + + return parent_rate / div; +} + +static u8 clk_periclk_get_parent(struct clk_hw *hwclk) +{ + struct socfpga_periph_clk *socfpgaclk = to_socfpga_periph_clk(hwclk); + u32 clk_src; + + clk_src = readl(socfpgaclk->hw.reg); + if (streq(hwclk->init->name, SOCFPGA_MPU_FREE_CLK) || + streq(hwclk->init->name, SOCFPGA_NOC_FREE_CLK) || + streq(hwclk->init->name, SOCFPGA_SDMMC_FREE_CLK)) + return (clk_src >> CLK_MGR_FREE_SHIFT) & + CLK_MGR_FREE_MASK; + else + return 0; +} + +static const struct clk_ops periclk_ops = { + .recalc_rate = clk_periclk_recalc_rate, + .get_parent = clk_periclk_get_parent, +}; + +static __init void __socfpga_periph_init(struct device_node *node, + const struct clk_ops *ops) +{ + u32 reg; + struct clk *clk; + struct socfpga_periph_clk *periph_clk; + const char *clk_name = node->name; + const char *parent_name; + struct clk_init_data init; + int rc; + u32 fixed_div; + u32 div_reg[3]; + + of_property_read_u32(node, "reg", &reg); + + periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL); + if (WARN_ON(!periph_clk)) + return; + + periph_clk->hw.reg = clk_mgr_a10_base_addr + reg; + + rc = of_property_read_u32_array(node, "div-reg", div_reg, 3); + if (!rc) { + periph_clk->div_reg = clk_mgr_a10_base_addr + div_reg[0]; + periph_clk->shift = div_reg[1]; + periph_clk->width = div_reg[2]; + } else { + periph_clk->div_reg = NULL; + } + + rc = of_property_read_u32(node, "fixed-divider", &fixed_div); + if (rc) + periph_clk->fixed_div = 0; + else + periph_clk->fixed_div = fixed_div; + + of_property_read_string(node, "clock-output-names", &clk_name); + + init.name = clk_name; + init.ops = ops; + init.flags = 0; + + parent_name = of_clk_get_parent_name(node, 0); + init.num_parents = 1; + init.parent_names = &parent_name; + + periph_clk->hw.hw.init = &init; + + clk = clk_register(NULL, &periph_clk->hw.hw); + if (WARN_ON(IS_ERR(clk))) { + kfree(periph_clk); + return; + } + rc = of_clk_add_provider(node, of_clk_src_simple_get, clk); + if (rc < 0) { + pr_err("Could not register clock provider for node:%s\n", + clk_name); + goto err_clk; + } + + return; + +err_clk: + clk_unregister(clk); +} + +void __init socfpga_a10_periph_init(struct device_node *node) +{ + __socfpga_periph_init(node, &periclk_ops); +} diff --git a/drivers/clk/socfpga/clk-periph.c b/drivers/clk/socfpga/clk-periph.c index 46531c34ec9b..83aeaa219d14 100644 --- a/drivers/clk/socfpga/clk-periph.c +++ b/drivers/clk/socfpga/clk-periph.c @@ -76,7 +76,7 @@ static __init void __socfpga_periph_init(struct device_node *node, periph_clk->shift = div_reg[1]; periph_clk->width = div_reg[2]; } else { - periph_clk->div_reg = 0; + periph_clk->div_reg = NULL; } rc = of_property_read_u32(node, "fixed-divider",
&fixed_div); diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c new file mode 100644 index 000000000000..1178b11babca --- /dev/null +++ b/drivers/clk/socfpga/clk-pll-a10.c @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2015 Altera Corporation. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> + +#include "clk.h" + +/* Clock Manager offsets */ +#define CLK_MGR_PLL_CLK_SRC_SHIFT 8 +#define CLK_MGR_PLL_CLK_SRC_MASK 0x3 + +/* Clock bypass bits */ +#define SOCFPGA_PLL_BG_PWRDWN 0 +#define SOCFPGA_PLL_PWR_DOWN 1 +#define SOCFPGA_PLL_EXT_ENA 2 +#define SOCFPGA_PLL_DIVF_MASK 0x00001FFF +#define SOCFPGA_PLL_DIVF_SHIFT 0 +#define SOCFPGA_PLL_DIVQ_MASK 0x003F0000 +#define SOCFPGA_PLL_DIVQ_SHIFT 16 +#define SOCFGPA_MAX_PARENTS 5 + +#define SOCFPGA_MAIN_PLL_CLK "main_pll" +#define SOCFPGA_PERIP_PLL_CLK "periph_pll" + +#define to_socfpga_clk(p) container_of(p, struct socfpga_pll, hw.hw) + +void __iomem *clk_mgr_a10_base_addr; + +static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, + unsigned long parent_rate) +{ + struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk); + unsigned long divf, divq, reg; + unsigned long long vco_freq; + + /* read VCO1 reg for numerator and denominator */ + reg = readl(socfpgaclk->hw.reg + 0x4); + divf = (reg & SOCFPGA_PLL_DIVF_MASK) >> SOCFPGA_PLL_DIVF_SHIFT; + divq = (reg & SOCFPGA_PLL_DIVQ_MASK) >> SOCFPGA_PLL_DIVQ_SHIFT; + vco_freq = (unsigned long long)parent_rate * (divf + 1); + do_div(vco_freq, (1 + divq)); + return (unsigned long)vco_freq; +} + +static u8 clk_pll_get_parent(struct clk_hw *hwclk) +{ + struct socfpga_pll *socfpgaclk = to_socfpga_clk(hwclk); + u32 pll_src; + + pll_src = readl(socfpgaclk->hw.reg); + + return (pll_src >> CLK_MGR_PLL_CLK_SRC_SHIFT) & + CLK_MGR_PLL_CLK_SRC_MASK; +} + +static struct clk_ops clk_pll_ops = { + .recalc_rate = clk_pll_recalc_rate, + .get_parent = clk_pll_get_parent, +}; + +static struct __init clk * __socfpga_pll_init(struct device_node *node, + const struct clk_ops *ops) +{ + u32 reg; + struct clk *clk; + struct socfpga_pll *pll_clk; + const char *clk_name = node->name; + const char *parent_name[SOCFGPA_MAX_PARENTS]; + struct clk_init_data init; + struct device_node *clkmgr_np; + int rc; + int i = 0; + + of_property_read_u32(node, "reg", &reg); + + pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL); + if (WARN_ON(!pll_clk)) + return NULL; + + clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr"); + clk_mgr_a10_base_addr = of_iomap(clkmgr_np, 0); + BUG_ON(!clk_mgr_a10_base_addr); + pll_clk->hw.reg = clk_mgr_a10_base_addr + reg; + + of_property_read_string(node, "clock-output-names", &clk_name); + + init.name = clk_name; + init.ops = ops; + init.flags = 0; + + while (i < SOCFGPA_MAX_PARENTS && (parent_name[i] = + of_clk_get_parent_name(node, i)) != NULL) + i++; + init.num_parents = i; + init.parent_names = 
parent_name; + pll_clk->hw.hw.init = &init; + + pll_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA; + clk_pll_ops.enable = clk_gate_ops.enable; + clk_pll_ops.disable = clk_gate_ops.disable; + + clk = clk_register(NULL, &pll_clk->hw.hw); + if (WARN_ON(IS_ERR(clk))) { + kfree(pll_clk); + return NULL; + } + rc = of_clk_add_provider(node, of_clk_src_simple_get, clk); + return clk; +} + +void __init socfpga_a10_pll_init(struct device_node *node) +{ + __socfpga_pll_init(node, &clk_pll_ops); +} diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c index de6da957a09d..8f26b5234947 100644 --- a/drivers/clk/socfpga/clk-pll.c +++ b/drivers/clk/socfpga/clk-pll.c @@ -92,7 +92,6 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node, struct clk_init_data init; struct device_node *clkmgr_np; int rc; - int i = 0; of_property_read_u32(node, "reg", &reg); @@ -111,11 +110,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node, init.ops = ops; init.flags = 0; - while (i < SOCFPGA_MAX_PARENTS && (parent_name[i] = - of_clk_get_parent_name(node, i)) != NULL) - i++; - - init.num_parents = i; + init.num_parents = of_clk_parent_fill(node, parent_name, SOCFPGA_MAX_PARENTS); init.parent_names = parent_name; pll_clk->hw.hw.init = &init; diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c index 43db947e5f0e..7564d2e35f32 100644 --- a/drivers/clk/socfpga/clk.c +++ b/drivers/clk/socfpga/clk.c @@ -24,4 +24,9 @@ CLK_OF_DECLARE(socfpga_pll_clk, "altr,socfpga-pll-clock", socfpga_pll_init); CLK_OF_DECLARE(socfpga_perip_clk, "altr,socfpga-perip-clk", socfpga_periph_init); CLK_OF_DECLARE(socfpga_gate_clk, "altr,socfpga-gate-clk", socfpga_gate_init); - +CLK_OF_DECLARE(socfpga_a10_pll_clk, "altr,socfpga-a10-pll-clock", + socfpga_a10_pll_init); +CLK_OF_DECLARE(socfpga_a10_perip_clk, "altr,socfpga-a10-perip-clk", + socfpga_a10_periph_init); +CLK_OF_DECLARE(socfpga_a10_gate_clk, "altr,socfpga-a10-gate-clk", + socfpga_a10_gate_init); diff --git a/drivers/clk/socfpga/clk.h b/drivers/clk/socfpga/clk.h index d291f60c46e1..603973ab7e29 100644 --- a/drivers/clk/socfpga/clk.h +++ b/drivers/clk/socfpga/clk.h @@ -26,14 +26,22 @@ #define CLKMGR_L4SRC 0x70 #define CLKMGR_PERPLL_SRC 0xAC -#define SOCFPGA_MAX_PARENTS 3 +#define SOCFPGA_MAX_PARENTS 5 #define div_mask(width) ((1 << (width)) - 1) +#define streq(a, b) (strcmp((a), (b)) == 0) +#define SYSMGR_SDMMC_CTRL_SET(smplsel, drvsel) \ + ((((smplsel) & 0x7) << 3) | (((drvsel) & 0x7) << 0)) + extern void __iomem *clk_mgr_base_addr; +extern void __iomem *clk_mgr_a10_base_addr; void __init socfpga_pll_init(struct device_node *node); void __init socfpga_periph_init(struct device_node *node); void __init socfpga_gate_init(struct device_node *node); +void socfpga_a10_pll_init(struct device_node *node); +void socfpga_a10_periph_init(struct device_node *node); +void socfpga_a10_gate_init(struct device_node *node); struct socfpga_pll { struct clk_gate hw; @@ -44,6 +52,7 @@ struct socfpga_gate_clk { char *parent_name; u32 fixed_div; void __iomem *div_reg; + struct regmap *sys_mgr_base_addr; u32 width; /* only valid if div_reg != 0 */ u32 shift; /* only valid if div_reg != 0 */ u32 clk_phase[2]; diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c index bf12a25eb3a2..657ca14ba709 100644 --- a/drivers/clk/st/clk-flexgen.c +++ b/drivers/clk/st/clk-flexgen.c @@ -116,7 +116,7 @@ static long flexgen_round_rate(struct clk_hw *hw, unsigned long rate, return *prate / div; } -unsigned long flexgen_recalc_rate(struct clk_hw *hw,
+static unsigned long flexgen_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct flexgen *flexgen = to_flexgen(hw); @@ -174,7 +174,7 @@ static const struct clk_ops flexgen_ops = { .set_rate = flexgen_set_rate, }; -struct clk *clk_register_flexgen(const char *name, +static struct clk *clk_register_flexgen(const char *name, const char **parent_names, u8 num_parents, void __iomem *reg, spinlock_t *lock, u32 idx, unsigned long flexgen_flags) { @@ -245,7 +245,7 @@ static const char ** __init flexgen_get_parents(struct device_node *np, const char **parents; int nparents, i; - nparents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + nparents = of_clk_get_parent_count(np); if (WARN_ON(nparents <= 0)) return NULL; @@ -260,7 +260,7 @@ static const char ** __init flexgen_get_parents(struct device_node *np, return parents; } -void __init st_of_flexgen_setup(struct device_node *np) +static void __init st_of_flexgen_setup(struct device_node *np) { struct device_node *pnode; void __iomem *reg; diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c index a917c4c7eaa9..e94197f04b0b 100644 --- a/drivers/clk/st/clkgen-fsyn.c +++ b/drivers/clk/st/clkgen-fsyn.c @@ -492,7 +492,7 @@ static int quadfs_pll_is_enabled(struct clk_hw *hw) return !!npda; } -int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs, +static int clk_fs660c32_vco_get_rate(unsigned long input, struct stm_fs *fs, unsigned long *rate) { unsigned long nd = fs->ndiv + 16; /* ndiv value */ @@ -519,7 +519,7 @@ static unsigned long quadfs_pll_fs660c32_recalc_rate(struct clk_hw *hw, return rate; } -int clk_fs660c32_vco_get_params(unsigned long input, +static int clk_fs660c32_vco_get_params(unsigned long input, unsigned long output, struct stm_fs *fs) { /* Formula diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c index fdcff10f6d30..4fbe6e099587 100644 --- a/drivers/clk/st/clkgen-mux.c +++ b/drivers/clk/st/clkgen-mux.c @@ -26,7 +26,7 @@ static const char ** __init clkgen_mux_get_parents(struct device_node *np, const char **parents; int nparents, i; - nparents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); + nparents = of_clk_get_parent_count(np); if (WARN_ON(nparents <= 0)) return ERR_PTR(-EINVAL); @@ -131,7 +131,7 @@ static int clkgena_divmux_is_enabled(struct clk_hw *hw) return (s8)clk_mux_ops.get_parent(mux_hw) > 0; } -u8 clkgena_divmux_get_parent(struct clk_hw *hw) +static u8 clkgena_divmux_get_parent(struct clk_hw *hw) { struct clkgena_divmux *genamux = to_clkgena_divmux(hw); struct clk_hw *mux_hw = &genamux->mux.hw; @@ -168,7 +168,7 @@ static int clkgena_divmux_set_parent(struct clk_hw *hw, u8 index) return 0; } -unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw, +static unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clkgena_divmux *genamux = to_clkgena_divmux(hw); @@ -215,7 +215,7 @@ static const struct clk_ops clkgena_divmux_ops = { /** * clk_register_genamux - register a genamux clock with the clock framework */ -struct clk *clk_register_genamux(const char *name, +static struct clk *clk_register_genamux(const char *name, const char **parent_names, u8 num_parents, void __iomem *reg, const struct clkgena_divmux_data *muxdata, @@ -385,7 +385,7 @@ static void __iomem * __init clkgen_get_register_base( return reg; } -void __init st_of_clkgena_divmux_setup(struct device_node *np) +static void __init st_of_clkgena_divmux_setup(struct device_node *np) { const struct of_device_id *match; const 
struct clkgena_divmux_data *data; @@ -485,7 +485,7 @@ static const struct of_device_id clkgena_prediv_of_match[] = { {} }; -void __init st_of_clkgena_prediv_setup(struct device_node *np) +static void __init st_of_clkgena_prediv_setup(struct device_node *np) { const struct of_device_id *match; void __iomem *reg; @@ -622,7 +622,7 @@ static const struct of_device_id mux_of_match[] = { {} }; -void __init st_of_clkgen_mux_setup(struct device_node *np) +static void __init st_of_clkgen_mux_setup(struct device_node *np) { const struct of_device_id *match; struct clk *clk; @@ -699,7 +699,7 @@ static const struct of_device_id vcc_of_match[] = { {} }; -void __init st_of_clkgen_vcc_setup(struct device_node *np) +static void __init st_of_clkgen_vcc_setup(struct device_node *np) { const struct of_device_id *match; void __iomem *reg; diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c index d204ba85db3a..106532207213 100644 --- a/drivers/clk/st/clkgen-pll.c +++ b/drivers/clk/st/clkgen-pll.c @@ -270,7 +270,7 @@ static int clkgen_pll_is_enabled(struct clk_hw *hw) return !poweroff; } -unsigned long recalc_stm_pll800c65(struct clk_hw *hw, +static unsigned long recalc_stm_pll800c65(struct clk_hw *hw, unsigned long parent_rate) { struct clkgen_pll *pll = to_clkgen_pll(hw); @@ -297,7 +297,7 @@ unsigned long recalc_stm_pll800c65(struct clk_hw *hw, } -unsigned long recalc_stm_pll1600c65(struct clk_hw *hw, +static unsigned long recalc_stm_pll1600c65(struct clk_hw *hw, unsigned long parent_rate) { struct clkgen_pll *pll = to_clkgen_pll(hw); @@ -321,7 +321,7 @@ unsigned long recalc_stm_pll1600c65(struct clk_hw *hw, return rate; } -unsigned long recalc_stm_pll3200c32(struct clk_hw *hw, +static unsigned long recalc_stm_pll3200c32(struct clk_hw *hw, unsigned long parent_rate) { struct clkgen_pll *pll = to_clkgen_pll(hw); @@ -343,7 +343,7 @@ unsigned long recalc_stm_pll3200c32(struct clk_hw *hw, return rate; } -unsigned long recalc_stm_pll1200c32(struct clk_hw *hw, +static unsigned long recalc_stm_pll1200c32(struct clk_hw *hw, unsigned long parent_rate) { struct clkgen_pll *pll = to_clkgen_pll(hw); @@ -544,7 +544,7 @@ CLK_OF_DECLARE(clkgena_c65_plls, "st,clkgena-plls-c65", clkgena_c65_pll_setup); static struct clk * __init clkgen_odf_register(const char *parent_name, - void * __iomem reg, + void __iomem *reg, struct clkgen_pll_data *pll_data, int odf, spinlock_t *odf_lock, diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c index ec8f5a1fca09..9d028aec58e5 100644 --- a/drivers/clk/sunxi/clk-mod0.c +++ b/drivers/clk/sunxi/clk-mod0.c @@ -128,7 +128,7 @@ static struct platform_driver sun4i_a10_mod0_clk_driver = { }, .probe = sun4i_a10_mod0_clk_probe, }; -module_platform_driver(sun4i_a10_mod0_clk_driver); +builtin_platform_driver(sun4i_a10_mod0_clk_driver); static const struct factors_data sun9i_a80_mod0_data __initconst = { .enable = 31, diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c index d8da77d72861..887f4ea161bb 100644 --- a/drivers/clk/sunxi/clk-sun9i-core.c +++ b/drivers/clk/sunxi/clk-sun9i-core.c @@ -93,7 +93,7 @@ static void __init sun9i_a80_pll4_setup(struct device_node *node) void __iomem *reg; reg = of_io_request_and_map(node, 0, of_node_full_name(node)); - if (!reg) { + if (IS_ERR(reg)) { pr_err("Could not get registers for a80-pll4-clk: %s\n", node->name); return; @@ -154,7 +154,7 @@ static void __init sun9i_a80_gt_setup(struct device_node *node) struct clk *gt; reg = of_io_request_and_map(node, 0, of_node_full_name(node)); - if 
(!reg) { + if (IS_ERR(reg)) { pr_err("Could not get registers for a80-gt-clk: %s\n", node->name); return; @@ -218,7 +218,7 @@ static void __init sun9i_a80_ahb_setup(struct device_node *node) void __iomem *reg; reg = of_io_request_and_map(node, 0, of_node_full_name(node)); - if (!reg) { + if (IS_ERR(reg)) { pr_err("Could not get registers for a80-ahb-clk: %s\n", node->name); return; @@ -244,7 +244,7 @@ static void __init sun9i_a80_apb0_setup(struct device_node *node) void __iomem *reg; reg = of_io_request_and_map(node, 0, of_node_full_name(node)); - if (!reg) { + if (IS_ERR(reg)) { pr_err("Could not get registers for a80-apb0-clk: %s\n", node->name); return; @@ -310,7 +310,7 @@ static void __init sun9i_a80_apb1_setup(struct device_node *node) void __iomem *reg; reg = of_io_request_and_map(node, 0, of_node_full_name(node)); - if (!reg) { + if (IS_ERR(reg)) { pr_err("Could not get registers for a80-apb1-clk: %s\n", node->name); return; diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index 7e1e2bd189b6..9a82f17d2d73 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -198,6 +198,8 @@ static void __init sun6i_ahb1_clk_setup(struct device_node *node) int i = 0; reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (IS_ERR(reg)) + return; /* we have a mux, we will have >1 parents */ while (i < SUN6I_AHB1_MAX_PARENTS && diff --git a/drivers/clk/sunxi/clk-usb.c b/drivers/clk/sunxi/clk-usb.c index a86ed2f8d7af..3a25f9588e67 100644 --- a/drivers/clk/sunxi/clk-usb.c +++ b/drivers/clk/sunxi/clk-usb.c @@ -204,6 +204,17 @@ static void __init sun6i_a31_usb_setup(struct device_node *node) } CLK_OF_DECLARE(sun6i_a31_usb, "allwinner,sun6i-a31-usb-clk", sun6i_a31_usb_setup); +static const struct usb_clk_data sun8i_a23_usb_clk_data __initconst = { + .clk_mask = BIT(16) | BIT(11) | BIT(10) | BIT(9) | BIT(8), + .reset_mask = BIT(2) | BIT(1) | BIT(0), +}; + +static void __init sun8i_a23_usb_setup(struct device_node *node) +{ + sunxi_usb_clk_setup(node, &sun8i_a23_usb_clk_data, &sun4i_a10_usb_lock); +} +CLK_OF_DECLARE(sun8i_a23_usb, "allwinner,sun8i-a23-usb-clk", sun8i_a23_usb_setup); + static const struct usb_clk_data sun9i_a80_usb_mod_data __initconst = { .clk_mask = BIT(6) | BIT(5) | BIT(4) | BIT(3) | BIT(2) | BIT(1), .reset_mask = BIT(19) | BIT(18) | BIT(17), diff --git a/drivers/clk/tegra/Kconfig b/drivers/clk/tegra/Kconfig new file mode 100644 index 000000000000..1ba30d1e14f2 --- /dev/null +++ b/drivers/clk/tegra/Kconfig @@ -0,0 +1,3 @@ +config TEGRA_CLK_EMC + def_bool y + depends on TEGRA124_EMC diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile index edb8358fa6ce..aec862ba7a17 100644 --- a/drivers/clk/tegra/Makefile +++ b/drivers/clk/tegra/Makefile @@ -11,6 +11,7 @@ obj-y += clk-tegra-periph.o obj-y += clk-tegra-pmc.o obj-y += clk-tegra-fixed.o obj-y += clk-tegra-super-gen4.o +obj-$(CONFIG_TEGRA_CLK_EMC) += clk-emc.o obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c new file mode 100644 index 000000000000..7649685c86bc --- /dev/null +++ b/drivers/clk/tegra/clk-emc.c @@ -0,0 +1,538 @@ +/* + * drivers/clk/tegra/clk-emc.c + * + * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. 
+ * + * Author: + * Mikko Perttunen <[email protected]> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk-provider.h> +#include <linux/clk.h> +#include <linux/clkdev.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/sort.h> +#include <linux/string.h> + +#include <soc/tegra/fuse.h> +#include <soc/tegra/emc.h> + +#include "clk.h" + +#define CLK_SOURCE_EMC 0x19c + +#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_SHIFT 0 +#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK 0xff +#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(x) (((x) & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK) << \ + CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_SHIFT) + +#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT 29 +#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK 0x7 +#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC(x) (((x) & CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK) << \ + CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT) + +static const char * const emc_parent_clk_names[] = { + "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", + "pll_c2", "pll_c3", "pll_c_ud" +}; + +/* + * List of clock sources for various parents the EMC clock can have. + * When we change the timing to a timing with a parent that has the same + * clock source as the current parent, we must first change to a backup + * timing that has a different clock source. + */ + +#define EMC_SRC_PLL_M 0 +#define EMC_SRC_PLL_C 1 +#define EMC_SRC_PLL_P 2 +#define EMC_SRC_CLK_M 3 +#define EMC_SRC_PLL_C2 4 +#define EMC_SRC_PLL_C3 5 + +static const char emc_parent_clk_sources[] = { + EMC_SRC_PLL_M, EMC_SRC_PLL_C, EMC_SRC_PLL_P, EMC_SRC_CLK_M, + EMC_SRC_PLL_M, EMC_SRC_PLL_C2, EMC_SRC_PLL_C3, EMC_SRC_PLL_C +}; + +struct emc_timing { + unsigned long rate, parent_rate; + u8 parent_index; + struct clk *parent; + u32 ram_code; +}; + +struct tegra_clk_emc { + struct clk_hw hw; + void __iomem *clk_regs; + struct clk *prev_parent; + bool changing_timing; + + struct device_node *emc_node; + struct tegra_emc *emc; + + int num_timings; + struct emc_timing *timings; + spinlock_t *lock; +}; + +/* Common clock framework callback implementations */ + +static unsigned long emc_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct tegra_clk_emc *tegra; + u32 val, div; + + tegra = container_of(hw, struct tegra_clk_emc, hw); + + /* + * CCF wrongly assumes that the parent won't change during set_rate, + * so get the parent rate explicitly. + */ + parent_rate = __clk_get_rate(__clk_get_parent(hw->clk)); + + val = readl(tegra->clk_regs + CLK_SOURCE_EMC); + div = val & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK; + + return parent_rate / (div + 2) * 2; +} + +/* + * Rounds up unless no higher rate exists, in which case down. This way is + * safer since things have EMC rate floors. Also don't touch parent_rate + * since we don't want the CCF to play with our parent clocks. 
+ */ +static long emc_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, + unsigned long *best_parent_rate, + struct clk_hw **best_parent_hw) +{ + struct tegra_clk_emc *tegra; + u8 ram_code = tegra_read_ram_code(); + struct emc_timing *timing = NULL; + int i; + + tegra = container_of(hw, struct tegra_clk_emc, hw); + + for (i = 0; i < tegra->num_timings; i++) { + if (tegra->timings[i].ram_code != ram_code) + continue; + + timing = tegra->timings + i; + + if (timing->rate > max_rate) { + i = min(i, 1); + return tegra->timings[i - 1].rate; + } + + if (timing->rate < min_rate) + continue; + + if (timing->rate >= rate) + return timing->rate; + } + + if (timing) + return timing->rate; + + return __clk_get_rate(hw->clk); +} + +static u8 emc_get_parent(struct clk_hw *hw) +{ + struct tegra_clk_emc *tegra; + u32 val; + + tegra = container_of(hw, struct tegra_clk_emc, hw); + + val = readl(tegra->clk_regs + CLK_SOURCE_EMC); + + return (val >> CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT) + & CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK; +} + +static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra) +{ + struct platform_device *pdev; + + if (tegra->emc) + return tegra->emc; + + if (!tegra->emc_node) + return NULL; + + pdev = of_find_device_by_node(tegra->emc_node); + if (!pdev) { + pr_err("%s: could not get external memory controller\n", + __func__); + return NULL; + } + + of_node_put(tegra->emc_node); + tegra->emc_node = NULL; + + tegra->emc = platform_get_drvdata(pdev); + if (!tegra->emc) { + pr_err("%s: cannot find EMC driver\n", __func__); + return NULL; + } + + return tegra->emc; +} + +static int emc_set_timing(struct tegra_clk_emc *tegra, + struct emc_timing *timing) +{ + int err; + u8 div; + u32 car_value; + unsigned long flags = 0; + struct tegra_emc *emc = emc_ensure_emc_driver(tegra); + + if (!emc) + return -ENOENT; + + pr_debug("going to rate %ld prate %ld p %s\n", timing->rate, + timing->parent_rate, __clk_get_name(timing->parent)); + + if (emc_get_parent(&tegra->hw) == timing->parent_index && + clk_get_rate(timing->parent) != timing->parent_rate) { + BUG(); + return -EINVAL; + } + + tegra->changing_timing = true; + + err = clk_set_rate(timing->parent, timing->parent_rate); + if (err) { + pr_err("cannot change parent %s rate to %ld: %d\n", + __clk_get_name(timing->parent), timing->parent_rate, + err); + + return err; + } + + err = clk_prepare_enable(timing->parent); + if (err) { + pr_err("cannot enable parent clock: %d\n", err); + return err; + } + + div = timing->parent_rate / (timing->rate / 2) - 2; + + err = tegra_emc_prepare_timing_change(emc, timing->rate); + if (err) + return err; + + spin_lock_irqsave(tegra->lock, flags); + + car_value = readl(tegra->clk_regs + CLK_SOURCE_EMC); + + car_value &= ~CLK_SOURCE_EMC_EMC_2X_CLK_SRC(~0); + car_value |= CLK_SOURCE_EMC_EMC_2X_CLK_SRC(timing->parent_index); + + car_value &= ~CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(~0); + car_value |= CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(div); + + writel(car_value, tegra->clk_regs + CLK_SOURCE_EMC); + + spin_unlock_irqrestore(tegra->lock, flags); + + tegra_emc_complete_timing_change(emc, timing->rate); + + clk_hw_reparent(&tegra->hw, __clk_get_hw(timing->parent)); + clk_disable_unprepare(tegra->prev_parent); + + tegra->prev_parent = timing->parent; + tegra->changing_timing = false; + + return 0; +} + +/* + * Get backup timing to use as an intermediate step when a change between + * two timings with the same clock source has been requested. 
First try to + * find a timing with a higher clock rate to avoid a rate below any set rate + * floors. If that is not possible, find a lower rate. + */ +static struct emc_timing *get_backup_timing(struct tegra_clk_emc *tegra, + int timing_index) +{ + int i; + u32 ram_code = tegra_read_ram_code(); + struct emc_timing *timing; + + for (i = timing_index+1; i < tegra->num_timings; i++) { + timing = tegra->timings + i; + if (timing->ram_code != ram_code) + continue; + + if (emc_parent_clk_sources[timing->parent_index] != + emc_parent_clk_sources[ + tegra->timings[timing_index].parent_index]) + return timing; + } + + for (i = timing_index-1; i >= 0; --i) { + timing = tegra->timings + i; + if (timing->ram_code != ram_code) + continue; + + if (emc_parent_clk_sources[timing->parent_index] != + emc_parent_clk_sources[ + tegra->timings[timing_index].parent_index]) + return timing; + } + + return NULL; +} + +static int emc_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct tegra_clk_emc *tegra; + struct emc_timing *timing = NULL; + int i, err; + u32 ram_code = tegra_read_ram_code(); + + tegra = container_of(hw, struct tegra_clk_emc, hw); + + if (__clk_get_rate(hw->clk) == rate) + return 0; + + /* + * When emc_set_timing changes the parent rate, CCF will propagate + * that downward to us, so ignore any set_rate calls while a rate + * change is already going on. + */ + if (tegra->changing_timing) + return 0; + + for (i = 0; i < tegra->num_timings; i++) { + if (tegra->timings[i].rate == rate && + tegra->timings[i].ram_code == ram_code) { + timing = tegra->timings + i; + break; + } + } + + if (!timing) { + pr_err("cannot switch to rate %ld without emc table\n", rate); + return -EINVAL; + } + + if (emc_parent_clk_sources[emc_get_parent(hw)] == + emc_parent_clk_sources[timing->parent_index] && + clk_get_rate(timing->parent) != timing->parent_rate) { + /* + * Parent clock source not changed but parent rate has changed, + * need to temporarily switch to another parent + */ + + struct emc_timing *backup_timing; + + backup_timing = get_backup_timing(tegra, i); + if (!backup_timing) { + pr_err("cannot find backup timing\n"); + return -EINVAL; + } + + pr_debug("using %ld as backup rate when going to %ld\n", + backup_timing->rate, rate); + + err = emc_set_timing(tegra, backup_timing); + if (err) { + pr_err("cannot set backup timing: %d\n", err); + return err; + } + } + + return emc_set_timing(tegra, timing); +} + +/* Initialization and deinitialization */ + +static int load_one_timing_from_dt(struct tegra_clk_emc *tegra, + struct emc_timing *timing, + struct device_node *node) +{ + int err, i; + u32 tmp; + + err = of_property_read_u32(node, "clock-frequency", &tmp); + if (err) { + pr_err("timing %s: failed to read rate\n", node->full_name); + return err; + } + + timing->rate = tmp; + + err = of_property_read_u32(node, "nvidia,parent-clock-frequency", &tmp); + if (err) { + pr_err("timing %s: failed to read parent rate\n", + node->full_name); + return err; + } + + timing->parent_rate = tmp; + + timing->parent = of_clk_get_by_name(node, "emc-parent"); + if (IS_ERR(timing->parent)) { + pr_err("timing %s: failed to get parent clock\n", + node->full_name); + return PTR_ERR(timing->parent); + } + + timing->parent_index = 0xff; + for (i = 0; i < ARRAY_SIZE(emc_parent_clk_names); i++) { + if (!strcmp(emc_parent_clk_names[i], + __clk_get_name(timing->parent))) { + timing->parent_index = i; + break; + } + } + if (timing->parent_index == 0xff) { + pr_err("timing %s: %s is not a valid 
parent\n", + node->full_name, __clk_get_name(timing->parent)); + clk_put(timing->parent); + return -EINVAL; + } + + return 0; +} + +static int cmp_timings(const void *_a, const void *_b) +{ + const struct emc_timing *a = _a; + const struct emc_timing *b = _b; + + if (a->rate < b->rate) + return -1; + else if (a->rate == b->rate) + return 0; + else + return 1; +} + +static int load_timings_from_dt(struct tegra_clk_emc *tegra, + struct device_node *node, + u32 ram_code) +{ + struct device_node *child; + int child_count = of_get_child_count(node); + int i = 0, err; + + tegra->timings = kcalloc(child_count, sizeof(struct emc_timing), + GFP_KERNEL); + if (!tegra->timings) + return -ENOMEM; + + tegra->num_timings = child_count; + + for_each_child_of_node(node, child) { + struct emc_timing *timing = tegra->timings + (i++); + + err = load_one_timing_from_dt(tegra, timing, child); + if (err) + return err; + + timing->ram_code = ram_code; + } + + sort(tegra->timings, tegra->num_timings, sizeof(struct emc_timing), + cmp_timings, NULL); + + return 0; +} + +static const struct clk_ops tegra_clk_emc_ops = { + .recalc_rate = emc_recalc_rate, + .determine_rate = emc_determine_rate, + .set_rate = emc_set_rate, + .get_parent = emc_get_parent, +}; + +struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np, + spinlock_t *lock) +{ + struct tegra_clk_emc *tegra; + struct clk_init_data init; + struct device_node *node; + u32 node_ram_code; + struct clk *clk; + int err; + + tegra = kcalloc(1, sizeof(*tegra), GFP_KERNEL); + if (!tegra) + return ERR_PTR(-ENOMEM); + + tegra->clk_regs = base; + tegra->lock = lock; + + tegra->num_timings = 0; + + for_each_child_of_node(np, node) { + err = of_property_read_u32(node, "nvidia,ram-code", + &node_ram_code); + if (err) { + of_node_put(node); + continue; + } + + /* + * Store timings for all ram codes as we cannot read the + * fuses until the apbmisc driver is loaded. 
+ */ + err = load_timings_from_dt(tegra, node, node_ram_code); + if (err) + return ERR_PTR(err); + of_node_put(node); + break; + } + + if (tegra->num_timings == 0) + pr_warn("%s: no memory timings registered\n", __func__); + + tegra->emc_node = of_parse_phandle(np, + "nvidia,external-memory-controller", 0); + if (!tegra->emc_node) + pr_warn("%s: couldn't find node for EMC driver\n", __func__); + + init.name = "emc"; + init.ops = &tegra_clk_emc_ops; + init.flags = 0; + init.parent_names = emc_parent_clk_names; + init.num_parents = ARRAY_SIZE(emc_parent_clk_names); + + tegra->hw.init = &init; + + clk = clk_register(NULL, &tegra->hw); + if (IS_ERR(clk)) + return clk; + + tegra->prev_parent = clk_get_parent_by_index( + tegra->hw.clk, emc_get_parent(&tegra->hw)); + tegra->changing_timing = false; + + /* Allow debugging tools to see the EMC clock */ + clk_register_clkdev(clk, "emc", "tegra-clk-debug"); + + clk_prepare_enable(clk); + + return clk; +}; diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c index 11f857cd5f6a..e8cca3eac007 100644 --- a/drivers/clk/tegra/clk-tegra124.c +++ b/drivers/clk/tegra/clk-tegra124.c @@ -152,11 +152,6 @@ static unsigned long tegra124_input_freq[] = { [12] = 260000000, }; -static const char *mux_pllmcp_clkm[] = { - "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3", -}; -#define mux_pllmcp_clkm_idx NULL - static struct div_nmp pllxc_nmp = { .divm_shift = 0, .divm_width = 8, @@ -791,7 +786,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = { [tegra_clk_i2c2] = { .dt_id = TEGRA124_CLK_I2C2, .present = true }, [tegra_clk_uartc] = { .dt_id = TEGRA124_CLK_UARTC, .present = true }, [tegra_clk_mipi_cal] = { .dt_id = TEGRA124_CLK_MIPI_CAL, .present = true }, - [tegra_clk_emc] = { .dt_id = TEGRA124_CLK_EMC, .present = true }, [tegra_clk_usb2] = { .dt_id = TEGRA124_CLK_USB2, .present = true }, [tegra_clk_usb3] = { .dt_id = TEGRA124_CLK_USB3, .present = true }, [tegra_clk_vde_8] = { .dt_id = TEGRA124_CLK_VDE, .present = true }, @@ -1127,13 +1121,7 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base, periph_clk_enb_refcnt); clks[TEGRA124_CLK_DSIB] = clk; - /* emc mux */ - clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm, - ARRAY_SIZE(mux_pllmcp_clkm), 0, - clk_base + CLK_SOURCE_EMC, - 29, 3, 0, &emc_lock); - - clk = tegra_clk_register_mc("mc", "emc_mux", clk_base + CLK_SOURCE_EMC, + clk = tegra_clk_register_mc("mc", "emc", clk_base + CLK_SOURCE_EMC, &emc_lock); clks[TEGRA124_CLK_MC] = clk; @@ -1389,7 +1377,6 @@ static struct tegra_clk_init_table common_init_table[] __initdata = { {TEGRA124_CLK_XUSB_HOST_SRC, TEGRA124_CLK_PLL_RE_OUT, 112000000, 0}, {TEGRA124_CLK_SATA, TEGRA124_CLK_PLL_P, 104000000, 0}, {TEGRA124_CLK_SATA_OOB, TEGRA124_CLK_PLL_P, 204000000, 0}, - {TEGRA124_CLK_EMC, TEGRA124_CLK_CLK_MAX, 0, 1}, {TEGRA124_CLK_MSELECT, TEGRA124_CLK_CLK_MAX, 0, 1}, {TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1}, {TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0}, @@ -1513,6 +1500,10 @@ static void __init tegra124_132_clock_init_post(struct device_node *np) tegra_super_clk_gen4_init(clk_base, pmc_base, tegra124_clks, &pll_x_params); tegra_add_of_provider(np); + + clks[TEGRA124_CLK_EMC] = tegra_clk_register_emc(clk_base, np, + &emc_lock); + tegra_register_devclks(devclks, ARRAY_SIZE(devclks)); tegra_cpu_car_ops = &tegra124_cpu_car_ops; diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c index 4b26509fc218..0af3e834dd24 100644 --- a/drivers/clk/tegra/clk-tegra30.c 
+++ b/drivers/clk/tegra/clk-tegra30.c @@ -679,7 +679,7 @@ static struct tegra_devclk devclks[] __initdata = { { .dev_id = "tegra30-dam.1", .dt_id = TEGRA30_CLK_DAM1 }, { .dev_id = "tegra30-dam.2", .dt_id = TEGRA30_CLK_DAM2 }, { .con_id = "hda", .dev_id = "tegra30-hda", .dt_id = TEGRA30_CLK_HDA }, - { .con_id = "hda2codec", .dev_id = "tegra30-hda", .dt_id = TEGRA30_CLK_HDA2CODEC_2X }, + { .con_id = "hda2codec_2x", .dev_id = "tegra30-hda", .dt_id = TEGRA30_CLK_HDA2CODEC_2X }, { .dev_id = "spi_tegra.0", .dt_id = TEGRA30_CLK_SBC1 }, { .dev_id = "spi_tegra.1", .dt_id = TEGRA30_CLK_SBC2 }, { .dev_id = "spi_tegra.2", .dt_id = TEGRA30_CLK_SBC3 }, diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h index d6ac00647faf..75ddc8ff8bd4 100644 --- a/drivers/clk/tegra/clk.h +++ b/drivers/clk/tegra/clk.h @@ -623,6 +623,18 @@ void tegra_super_clk_gen4_init(void __iomem *clk_base, void __iomem *pmc_base, struct tegra_clk *tegra_clks, struct tegra_clk_pll_params *pll_params); +#ifdef CONFIG_TEGRA_CLK_EMC +struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np, + spinlock_t *lock); +#else +static inline struct clk *tegra_clk_register_emc(void __iomem *base, + struct device_node *np, + spinlock_t *lock) +{ + return NULL; +} +#endif + void tegra114_clock_tune_cpu_trimmers_high(void); void tegra114_clock_tune_cpu_trimmers_low(void); void tegra114_clock_tune_cpu_trimmers_init(void); diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c index d86bc46b93bd..19e543a32e2b 100644 --- a/drivers/clk/ti/clk-dra7-atl.c +++ b/drivers/clk/ti/clk-dra7-atl.c @@ -155,7 +155,7 @@ static int atl_clk_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } -const struct clk_ops atl_clk_ops = { +static const struct clk_ops atl_clk_ops = { .enable = atl_clk_enable, .disable = atl_clk_disable, .is_enabled = atl_clk_is_enabled, @@ -167,7 +167,7 @@ const struct clk_ops atl_clk_ops = { static void __init of_dra7_atl_clock_setup(struct device_node *node) { struct dra7_atl_desc *clk_hw = NULL; - struct clk_init_data init = { 0 }; + struct clk_init_data init = { NULL }; const char **parent_names = NULL; struct clk *clk; @@ -252,6 +252,11 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev) } clk = of_clk_get_from_provider(&clkspec); + if (IS_ERR(clk)) { + pr_err("%s: failed to get atl clock %d from provider\n", + __func__, i); + return PTR_ERR(clk); + } cdesc = to_atl_desc(__clk_get_hw(clk)); cdesc->cinfo = cinfo; diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 0ebe5c51062b..64bb5e8a3b8c 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -122,14 +122,14 @@ void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index) if (i == CLK_MAX_MEMMAPS) { pr_err("clk-provider not found for %s!\n", node->name); - return ERR_PTR(-ENOENT); + return IOMEM_ERR_PTR(-ENOENT); } reg->index = i; if (of_property_read_u32_index(node, "reg", index, &val)) { pr_err("%s must have reg[%d]!\n", node->name, index); - return ERR_PTR(-EINVAL); + return IOMEM_ERR_PTR(-EINVAL); } reg->offset = val; diff --git a/drivers/clk/ti/clockdomain.c b/drivers/clk/ti/clockdomain.c index 35fe1085480c..b82ef07f3403 100644 --- a/drivers/clk/ti/clockdomain.c +++ b/drivers/clk/ti/clockdomain.c @@ -32,7 +32,7 @@ static void __init of_ti_clockdomain_setup(struct device_node *node) int i; int num_clks; - num_clks = of_count_phandle_with_args(node, "clocks", "#clock-cells"); + num_clks = of_clk_get_parent_count(node); for (i = 0; i < num_clks; i++) { clk = of_clk_get(node, i); diff 
--git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c index 11478a501c30..2aacf7a3bcae 100644 --- a/drivers/clk/ti/dpll.c +++ b/drivers/clk/ti/dpll.c @@ -177,7 +177,7 @@ cleanup: } #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS) -void __iomem *_get_reg(u8 module, u16 offset) +static void __iomem *_get_reg(u8 module, u16 offset) { u32 reg; struct clk_omap_reg *reg_setup; diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c index ffcd8e09e85b..730aa62454a2 100644 --- a/drivers/clk/ti/fapll.c +++ b/drivers/clk/ti/fapll.c @@ -621,13 +621,13 @@ static void __init ti_fapll_setup(struct device_node *node) /* Check for hardwired audio_pll_clk1 */ if (is_audio_pll_clk1(freq)) { - freq = 0; - div = 0; + freq = NULL; + div = NULL; } else { /* Does the synthesizer have a FREQ register? */ v = readl_relaxed(freq); if (!v) - freq = 0; + freq = NULL; } synth_clk = ti_fapll_synth_setup(fd, freq, div, output_instance, output_name, node->name, diff --git a/drivers/clk/ux500/u8500_clk.c b/drivers/clk/ux500/u8500_clk.c index 80069c370a47..4626b97b7d83 100644 --- a/drivers/clk/ux500/u8500_clk.c +++ b/drivers/clk/ux500/u8500_clk.c @@ -116,11 +116,12 @@ void u8500_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, clk_register_clkdev(clk, NULL, "hdmi"); clk_register_clkdev(clk, "hdmi", "mcde"); - clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, CLK_IS_ROOT); + clk = clk_reg_prcmu_scalable("apeatclk", NULL, PRCMU_APEATCLK, 0, + CLK_IS_ROOT|CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "apeat"); - clk = clk_reg_prcmu_gate("apetraceclk", NULL, PRCMU_APETRACECLK, - CLK_IS_ROOT); + clk = clk_reg_prcmu_scalable("apetraceclk", NULL, PRCMU_APETRACECLK, 0, + CLK_IS_ROOT|CLK_SET_RATE_GATE); clk_register_clkdev(clk, NULL, "apetrace"); clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, CLK_IS_ROOT); diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c index 7b55ef89baa5..e319ef912dc6 100644 --- a/drivers/clk/ux500/u8500_of_clk.c +++ b/drivers/clk/ux500/u8500_of_clk.c @@ -166,8 +166,8 @@ void u8500_of_clk_init(u32 clkrst1_base, u32 clkrst2_base, u32 clkrst3_base, clk = clk_reg_prcmu_gate("apeatclk", NULL, PRCMU_APEATCLK, CLK_IS_ROOT); prcmu_clk[PRCMU_APEATCLK] = clk; - clk = clk_reg_prcmu_gate("apetraceclk", NULL, PRCMU_APETRACECLK, - CLK_IS_ROOT); + clk = clk_reg_prcmu_scalable("apetraceclk", NULL, PRCMU_APETRACECLK, 0, + CLK_IS_ROOT|CLK_SET_RATE_GATE); prcmu_clk[PRCMU_APETRACECLK] = clk; clk = clk_reg_prcmu_gate("mcdeclk", NULL, PRCMU_MCDECLK, CLK_IS_ROOT); diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c index c6e86a9a2aa3..a96dd8e53fdb 100644 --- a/drivers/clk/versatile/clk-sp810.c +++ b/drivers/clk/versatile/clk-sp810.c @@ -135,7 +135,7 @@ static struct clk *clk_sp810_timerclken_of_get(struct of_phandle_args *clkspec, return sp810->timerclken[clkspec->args[0]].clk; } -void __init clk_sp810_of_setup(struct device_node *node) +static void __init clk_sp810_of_setup(struct device_node *node) { struct clk_sp810 *sp810 = kzalloc(sizeof(*sp810), GFP_KERNEL); const char *parent_names[2]; @@ -156,7 +156,7 @@ void __init clk_sp810_of_setup(struct device_node *node) "timclk"); parent_names[1] = of_clk_get_parent_name(node, sp810->timclk_index); - if (parent_names[0] <= 0 || parent_names[1] <= 0) { + if (!parent_names[0] || !parent_names[1]) { pr_warn("Failed to obtain parent clocks for SP810!\n"); return; } diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c index 40cb113be6af..de614384bb44 100644 --- 
a/drivers/clk/zynq/clkc.c +++ b/drivers/clk/zynq/clkc.c @@ -85,22 +85,29 @@ static DEFINE_SPINLOCK(canmioclk_lock); static DEFINE_SPINLOCK(dbgclk_lock); static DEFINE_SPINLOCK(aperclk_lock); -static const char *armpll_parents[] __initdata = {"armpll_int", "ps_clk"}; -static const char *ddrpll_parents[] __initdata = {"ddrpll_int", "ps_clk"}; -static const char *iopll_parents[] __initdata = {"iopll_int", "ps_clk"}; +static const char *const armpll_parents[] __initconst = {"armpll_int", + "ps_clk"}; +static const char *const ddrpll_parents[] __initconst = {"ddrpll_int", + "ps_clk"}; +static const char *const iopll_parents[] __initconst = {"iopll_int", + "ps_clk"}; static const char *gem0_mux_parents[] __initdata = {"gem0_div1", "dummy_name"}; static const char *gem1_mux_parents[] __initdata = {"gem1_div1", "dummy_name"}; -static const char *can0_mio_mux2_parents[] __initdata = {"can0_gate", +static const char *const can0_mio_mux2_parents[] __initconst = {"can0_gate", "can0_mio_mux"}; -static const char *can1_mio_mux2_parents[] __initdata = {"can1_gate", +static const char *const can1_mio_mux2_parents[] __initconst = {"can1_gate", "can1_mio_mux"}; static const char *dbg_emio_mux_parents[] __initdata = {"dbg_div", "dummy_name"}; -static const char *dbgtrc_emio_input_names[] __initdata = {"trace_emio_clk"}; -static const char *gem0_emio_input_names[] __initdata = {"gem0_emio_clk"}; -static const char *gem1_emio_input_names[] __initdata = {"gem1_emio_clk"}; -static const char *swdt_ext_clk_input_names[] __initdata = {"swdt_ext_clk"}; +static const char *const dbgtrc_emio_input_names[] __initconst = { + "trace_emio_clk"}; +static const char *const gem0_emio_input_names[] __initconst = { + "gem0_emio_clk"}; +static const char *const gem1_emio_input_names[] __initconst = { + "gem1_emio_clk"}; +static const char *const swdt_ext_clk_input_names[] __initconst = { + "swdt_ext_clk"}; static void __init zynq_clk_register_fclk(enum zynq_clk fclk, const char *clk_name, void __iomem *fclk_ctrl_reg, diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 935b05936dbd..9064ff743598 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -462,15 +462,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt) exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET); if (mct_int_type == MCT_INT_SPI) { - evt->irq = mct_irqs[MCT_L0_IRQ + cpu]; - if (request_irq(evt->irq, exynos4_mct_tick_isr, - IRQF_TIMER | IRQF_NOBALANCING, - evt->name, mevt)) { - pr_err("exynos-mct: cannot register IRQ %d\n", - evt->irq); + + if (evt->irq == -1) return -EIO; - } - irq_force_affinity(mct_irqs[MCT_L0_IRQ + cpu], cpumask_of(cpu)); + + irq_force_affinity(evt->irq, cpumask_of(cpu)); + enable_irq(evt->irq); } else { enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0); } @@ -483,10 +480,12 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt) static void exynos4_local_timer_stop(struct clock_event_device *evt) { evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); - if (mct_int_type == MCT_INT_SPI) - free_irq(evt->irq, this_cpu_ptr(&percpu_mct_tick)); - else + if (mct_int_type == MCT_INT_SPI) { + if (evt->irq != -1) + disable_irq_nosync(evt->irq); + } else { disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); + } } static int exynos4_mct_cpu_notify(struct notifier_block *self, @@ -518,7 +517,7 @@ static struct notifier_block exynos4_mct_cpu_nb = { static void __init exynos4_timer_resources(struct device_node *np, void __iomem *base) { - int err; + int err, cpu; 
struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); struct clk *mct_clk, *tick_clk; @@ -545,7 +544,25 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem WARN(err, "MCT: can't request IRQ %d (%d)\n", mct_irqs[MCT_L0_IRQ], err); } else { - irq_set_affinity(mct_irqs[MCT_L0_IRQ], cpumask_of(0)); + for_each_possible_cpu(cpu) { + int mct_irq = mct_irqs[MCT_L0_IRQ + cpu]; + struct mct_clock_event_device *pcpu_mevt = + per_cpu_ptr(&percpu_mct_tick, cpu); + + pcpu_mevt->evt.irq = -1; + + irq_set_status_flags(mct_irq, IRQ_NOAUTOEN); + if (request_irq(mct_irq, + exynos4_mct_tick_isr, + IRQF_TIMER | IRQF_NOBALANCING, + pcpu_mevt->name, pcpu_mevt)) { + pr_err("exynos-mct: cannot register IRQ (cpu%d)\n", + cpu); + + continue; + } + pcpu_mevt->evt.irq = mct_irq; + } } err = register_cpu_notifier(&exynos4_mct_cpu_nb); diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 611cb09239eb..cc8a71c267b8 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -36,17 +36,6 @@ config ARM_EXYNOS_CPUFREQ If in doubt, say N. -config ARM_EXYNOS4210_CPUFREQ - bool "SAMSUNG EXYNOS4210" - depends on CPU_EXYNOS4210 - depends on ARM_EXYNOS_CPUFREQ - default y - help - This adds the CPUFreq driver for Samsung EXYNOS4210 - SoC (S5PV310 or S5PC210). - - If in doubt, say N. - config ARM_EXYNOS4X12_CPUFREQ bool "SAMSUNG EXYNOS4x12" depends on SOC_EXYNOS4212 || SOC_EXYNOS4412 diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index cdce92ae2e8b..2169bf792db7 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -54,7 +54,6 @@ obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += arm-exynos-cpufreq.o arm-exynos-cpufreq-y := exynos-cpufreq.o -arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index 82d2fbb20f7e..ae5b2bd3a978 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c @@ -10,6 +10,7 @@ */ #include <linux/kernel.h> +#include <linux/module.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> @@ -168,10 +169,7 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) exynos_info->dev = &pdev->dev; - if (of_machine_is_compatible("samsung,exynos4210")) { - exynos_info->type = EXYNOS_SOC_4210; - ret = exynos4210_cpufreq_init(exynos_info); - } else if (of_machine_is_compatible("samsung,exynos4212")) { + if (of_machine_is_compatible("samsung,exynos4212")) { exynos_info->type = EXYNOS_SOC_4212; ret = exynos4x12_cpufreq_init(exynos_info); } else if (of_machine_is_compatible("samsung,exynos4412")) { diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h index 9f2062a7cc02..a3855e4d913d 100644 --- a/drivers/cpufreq/exynos-cpufreq.h +++ b/drivers/cpufreq/exynos-cpufreq.h @@ -18,7 +18,6 @@ enum cpufreq_level_index { }; enum exynos_soc_type { - EXYNOS_SOC_4210, EXYNOS_SOC_4212, EXYNOS_SOC_4412, EXYNOS_SOC_5250, @@ -53,14 +52,6 @@ struct exynos_dvfs_info { void __iomem *cmu_regs; }; -#ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ -extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *); -#else -static inline int exynos4210_cpufreq_init(struct 
exynos_dvfs_info *info) -{ - return -EOPNOTSUPP; -} -#endif #ifdef CONFIG_ARM_EXYNOS4X12_CPUFREQ extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *); #else diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c deleted file mode 100644 index 843ec824fd91..000000000000 --- a/drivers/cpufreq/exynos4210-cpufreq.c +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. - * http://www.samsung.com - * - * EXYNOS4210 - CPU frequency scaling support - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/err.h> -#include <linux/clk.h> -#include <linux/io.h> -#include <linux/slab.h> -#include <linux/cpufreq.h> -#include <linux/of.h> -#include <linux/of_address.h> - -#include "exynos-cpufreq.h" - -static struct clk *cpu_clk; -static struct clk *moutcore; -static struct clk *mout_mpll; -static struct clk *mout_apll; -static struct exynos_dvfs_info *cpufreq; - -static unsigned int exynos4210_volt_table[] = { - 1250000, 1150000, 1050000, 975000, 950000, -}; - -static struct cpufreq_frequency_table exynos4210_freq_table[] = { - {0, L0, 1200 * 1000}, - {0, L1, 1000 * 1000}, - {0, L2, 800 * 1000}, - {0, L3, 500 * 1000}, - {0, L4, 200 * 1000}, - {0, 0, CPUFREQ_TABLE_END}, -}; - -static struct apll_freq apll_freq_4210[] = { - /* - * values: - * freq - * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, RESERVED - * clock divider for COPY, HPM, RESERVED - * PLL M, P, S - */ - APLL_FREQ(1200, 0, 3, 7, 3, 4, 1, 7, 0, 5, 0, 0, 150, 3, 1), - APLL_FREQ(1000, 0, 3, 7, 3, 4, 1, 7, 0, 4, 0, 0, 250, 6, 1), - APLL_FREQ(800, 0, 3, 7, 3, 3, 1, 7, 0, 3, 0, 0, 200, 6, 1), - APLL_FREQ(500, 0, 3, 7, 3, 3, 1, 7, 0, 3, 0, 0, 250, 6, 2), - APLL_FREQ(200, 0, 1, 3, 1, 3, 1, 0, 0, 3, 0, 0, 200, 6, 3), -}; - -static void exynos4210_set_clkdiv(unsigned int div_index) -{ - unsigned int tmp; - - /* Change Divider - CPU0 */ - - tmp = apll_freq_4210[div_index].clk_div_cpu0; - - __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU); - - do { - tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU); - } while (tmp & 0x1111111); - - /* Change Divider - CPU1 */ - - tmp = apll_freq_4210[div_index].clk_div_cpu1; - - __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1); - - do { - tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1); - } while (tmp & 0x11); -} - -static void exynos4210_set_apll(unsigned int index) -{ - unsigned int tmp, freq = apll_freq_4210[index].freq; - - /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */ - clk_set_parent(moutcore, mout_mpll); - - do { - tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU) - >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT); - tmp &= 0x7; - } while (tmp != 0x2); - - clk_set_rate(mout_apll, freq * 1000); - - /* MUX_CORE_SEL = APLL */ - clk_set_parent(moutcore, mout_apll); - - do { - tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU); - tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK; - } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)); -} - -static void exynos4210_set_frequency(unsigned int old_index, - unsigned int new_index) -{ - if (old_index > new_index) { - exynos4210_set_clkdiv(new_index); - exynos4210_set_apll(new_index); - } else if (old_index < new_index) { - exynos4210_set_apll(new_index); - exynos4210_set_clkdiv(new_index); 
- } -} - -int exynos4210_cpufreq_init(struct exynos_dvfs_info *info) -{ - struct device_node *np; - unsigned long rate; - - /* - * HACK: This is a temporary workaround to get access to clock - * controller registers directly and remove static mappings and - * dependencies on platform headers. It is necessary to enable - * Exynos multi-platform support and will be removed together with - * this whole driver as soon as Exynos gets migrated to use - * cpufreq-dt driver. - */ - np = of_find_compatible_node(NULL, NULL, "samsung,exynos4210-clock"); - if (!np) { - pr_err("%s: failed to find clock controller DT node\n", - __func__); - return -ENODEV; - } - - info->cmu_regs = of_iomap(np, 0); - if (!info->cmu_regs) { - pr_err("%s: failed to map CMU registers\n", __func__); - return -EFAULT; - } - - cpu_clk = clk_get(NULL, "armclk"); - if (IS_ERR(cpu_clk)) - return PTR_ERR(cpu_clk); - - moutcore = clk_get(NULL, "moutcore"); - if (IS_ERR(moutcore)) - goto err_moutcore; - - mout_mpll = clk_get(NULL, "mout_mpll"); - if (IS_ERR(mout_mpll)) - goto err_mout_mpll; - - rate = clk_get_rate(mout_mpll) / 1000; - - mout_apll = clk_get(NULL, "mout_apll"); - if (IS_ERR(mout_apll)) - goto err_mout_apll; - - info->mpll_freq_khz = rate; - /* 800Mhz */ - info->pll_safe_idx = L2; - info->cpu_clk = cpu_clk; - info->volt_table = exynos4210_volt_table; - info->freq_table = exynos4210_freq_table; - info->set_freq = exynos4210_set_frequency; - - cpufreq = info; - - return 0; - -err_mout_apll: - clk_put(mout_mpll); -err_mout_mpll: - clk_put(moutcore); -err_moutcore: - clk_put(cpu_clk); - - pr_debug("%s: failed initialization\n", __func__); - return -EINVAL; -} diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c index b0dac7d6ba31..9e231f52150c 100644 --- a/drivers/cpufreq/s5pv210-cpufreq.c +++ b/drivers/cpufreq/s5pv210-cpufreq.c @@ -659,4 +659,4 @@ static struct platform_driver s5pv210_cpufreq_platdrv = { }, .probe = s5pv210_cpufreq_probe, }; -module_platform_driver(s5pv210_cpufreq_platdrv); +builtin_platform_driver(s5pv210_cpufreq_platdrv); diff --git a/drivers/cpuidle/cpuidle-at91.c b/drivers/cpuidle/cpuidle-at91.c index f2446c78d87c..9c5853b6ca4a 100644 --- a/drivers/cpuidle/cpuidle-at91.c +++ b/drivers/cpuidle/cpuidle-at91.c @@ -62,5 +62,4 @@ static struct platform_driver at91_cpuidle_driver = { }, .probe = at91_cpuidle_probe, }; - -module_platform_driver(at91_cpuidle_driver); +builtin_platform_driver(at91_cpuidle_driver); diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c index 9445e6cc02be..c13feec89ea1 100644 --- a/drivers/cpuidle/cpuidle-calxeda.c +++ b/drivers/cpuidle/cpuidle-calxeda.c @@ -75,5 +75,4 @@ static struct platform_driver calxeda_cpuidle_plat_driver = { }, .probe = calxeda_cpuidle_probe, }; - -module_platform_driver(calxeda_cpuidle_plat_driver); +builtin_platform_driver(calxeda_cpuidle_plat_driver); diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index 1e3ef5ec4784..845bafcfa792 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -67,6 +67,8 @@ static int nap_loop(struct cpuidle_device *dev, return index; } +/* Register for fastsleep only in oneshot mode of broadcast */ +#ifdef CONFIG_TICK_ONESHOT static int fastsleep_loop(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) @@ -90,7 +92,7 @@ static int fastsleep_loop(struct cpuidle_device *dev, return index; } - +#endif /* * States for dedicated partition case. 
*/ @@ -216,7 +218,14 @@ static int powernv_add_idle_states(void) powernv_states[nr_idle_states].flags = 0; powernv_states[nr_idle_states].target_residency = 100; powernv_states[nr_idle_states].enter = &nap_loop; - } else if (flags[i] & OPAL_PM_SLEEP_ENABLED || + } + + /* + * All cpuidle states with CPUIDLE_FLAG_TIMER_STOP set must come + * within this config dependency check. + */ +#ifdef CONFIG_TICK_ONESHOT + if (flags[i] & OPAL_PM_SLEEP_ENABLED || flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) { /* Add FASTSLEEP state */ strcpy(powernv_states[nr_idle_states].name, "FastSleep"); @@ -225,7 +234,7 @@ static int powernv_add_idle_states(void) powernv_states[nr_idle_states].target_residency = 300000; powernv_states[nr_idle_states].enter = &fastsleep_loop; } - +#endif powernv_states[nr_idle_states].exit_latency = ((unsigned int)latency_ns[i]) / 1000; diff --git a/drivers/cpuidle/cpuidle-zynq.c b/drivers/cpuidle/cpuidle-zynq.c index 543292b1d38e..6f4257fc56e5 100644 --- a/drivers/cpuidle/cpuidle-zynq.c +++ b/drivers/cpuidle/cpuidle-zynq.c @@ -73,5 +73,4 @@ static struct platform_driver zynq_cpuidle_driver = { }, .probe = zynq_cpuidle_probe, }; - -module_platform_driver(zynq_cpuidle_driver); +builtin_platform_driver(zynq_cpuidle_driver); diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c index a432633bced4..1c6f98dd88f4 100644 --- a/drivers/crypto/marvell/cesa.c +++ b/drivers/crypto/marvell/cesa.c @@ -321,9 +321,8 @@ static int mv_cesa_get_sram(struct platform_device *pdev, int idx) const char *res_name = "sram"; struct resource *res; - engine->pool = of_get_named_gen_pool(cesa->dev->of_node, - "marvell,crypto-srams", - idx); + engine->pool = of_gen_pool_get(cesa->dev->of_node, + "marvell,crypto-srams", idx); if (engine->pool) { engine->sram = gen_pool_dma_alloc(engine->pool, cesa->sram_size, diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c index 7f8b66c915ed..fdda8e7ae302 100644 --- a/drivers/crypto/qat/qat_common/adf_accel_engine.c +++ b/drivers/crypto/qat/qat_common/adf_accel_engine.c @@ -88,10 +88,7 @@ void adf_ae_fw_release(struct adf_accel_dev *accel_dev) qat_uclo_del_uof_obj(loader_data->fw_loader); qat_hal_deinit(loader_data->fw_loader); - - if (loader_data->uof_fw) - release_firmware(loader_data->uof_fw); - + release_firmware(loader_data->uof_fw); loader_data->uof_fw = NULL; loader_data->fw_loader = NULL; } diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index ccec327489da..db2926bff8a5 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -449,7 +449,7 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev, err: for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) { ring = &bank->rings[i]; - if (hw_data->tx_rings_mask & (1 << i) && ring->inflights) + if (hw_data->tx_rings_mask & (1 << i)) kfree(ring->inflights); } return -ENOMEM; diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 449e785def17..e683761e0f8f 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -657,7 +657,7 @@ static int mmp_tdma_probe(struct platform_device *pdev) INIT_LIST_HEAD(&tdev->device.channels); if (pdev->dev.of_node) - pool = of_get_named_gen_pool(pdev->dev.of_node, "asram", 0); + pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0); else pool = sram_get_gpool("asram"); if (!pool) { diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 
280bc0a63365..816dbe9f4b82 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -24,8 +24,6 @@ KASAN_SANITIZE := n lib-y := efi-stub-helper.o lib-$(CONFIG_EFI_ARMSTUB) += arm-stub.o fdt.o -CFLAGS_fdt.o += -I$(srctree)/scripts/dtc/libfdt/ - # # arm64 puts the stub in the kernel proper, which will unnecessarily retain all # code indefinitely unless it is annotated as __init/__initdata/__initconst etc. diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index 8333f878919c..40343fa92c7b 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c @@ -657,8 +657,9 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev) } for (i = 0; i < kona_gpio->num_bank; i++) { bank = &kona_gpio->banks[i]; - irq_set_chained_handler(bank->irq, bcm_kona_gpio_irq_handler); - irq_set_handler_data(bank->irq, bank); + irq_set_chained_handler_and_data(bank->irq, + bcm_kona_gpio_irq_handler, + bank); } spin_lock_init(&kona_gpio->lock); diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index 58faf04fce5d..55fa9853a7f2 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c @@ -348,8 +348,8 @@ static void dwapb_configure_irqs(struct dwapb_gpio *gpio, irq_gc->chip_types[1].handler = handle_edge_irq; if (!pp->irq_shared) { - irq_set_chained_handler(pp->irq, dwapb_irq_handler); - irq_set_handler_data(pp->irq, gpio); + irq_set_chained_handler_and_data(pp->irq, dwapb_irq_handler, + gpio); } else { /* * Request a shared IRQ since where MFD would have devices diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c index 01acf0a8cdb1..7bcfb87a5fa6 100644 --- a/drivers/gpio/gpio-msic.c +++ b/drivers/gpio/gpio-msic.c @@ -309,8 +309,7 @@ static int platform_msic_gpio_probe(struct platform_device *pdev) &msic_irqchip, handle_simple_irq); } - irq_set_chained_handler(mg->irq, msic_gpio_irq_handler); - irq_set_handler_data(mg->irq, mg); + irq_set_chained_handler_and_data(mg->irq, msic_gpio_irq_handler, mg); return 0; err: diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index be42ab368a80..bf4bd1d120c3 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -2052,14 +2052,14 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, if (is_of_node(fwnode)) { enum of_gpio_flags flags; - desc = of_get_named_gpiod_flags(of_node(fwnode), propname, 0, + desc = of_get_named_gpiod_flags(to_of_node(fwnode), propname, 0, &flags); if (!IS_ERR(desc)) active_low = flags & OF_GPIO_ACTIVE_LOW; } else if (is_acpi_node(fwnode)) { struct acpi_gpio_info info; - desc = acpi_get_gpiod_by_index(acpi_node(fwnode), propname, 0, + desc = acpi_get_gpiod_by_index(to_acpi_node(fwnode), propname, 0, &info); if (!IS_ERR(desc)) active_low = info.active_low; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 22866d1c3d69..01657830b470 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -425,6 +425,8 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, unsigned irq_type); int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, struct amdgpu_fence **fence); +int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner, + uint64_t seq, struct amdgpu_fence **fence); void amdgpu_fence_process(struct amdgpu_ring *ring); int amdgpu_fence_wait_next(struct amdgpu_ring *ring); int amdgpu_fence_wait_empty(struct amdgpu_ring *ring); @@ -435,9 +437,6 @@ int amdgpu_fence_wait(struct amdgpu_fence *fence, bool 
interruptible); int amdgpu_fence_wait_any(struct amdgpu_device *adev, struct amdgpu_fence **fences, bool intr); -long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, - u64 *target_seq, bool intr, - long timeout); struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence); void amdgpu_fence_unref(struct amdgpu_fence **fence); @@ -1622,6 +1621,7 @@ struct amdgpu_vce { unsigned fb_version; atomic_t handles[AMDGPU_MAX_VCE_HANDLES]; struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES]; + uint32_t img_size[AMDGPU_MAX_VCE_HANDLES]; struct delayed_work idle_work; const struct firmware *fw; /* VCE firmware */ struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 36d34e0afbc3..f82a2dd83874 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -30,6 +30,7 @@ #include <drm/drmP.h> #include "amdgpu.h" +#include "amdgpu_trace.h" static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv, struct amdgpu_bo_list **result, @@ -124,6 +125,8 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, gws_obj = entry->robj; if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA) oa_obj = entry->robj; + + trace_amdgpu_bo_list_set(list, entry->robj); } for (i = 0; i < list->num_entries; ++i) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index f09b2cba40ca..d63135bf29c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -181,8 +181,6 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) } p->chunks[i].chunk_id = user_chunk.chunk_id; p->chunks[i].length_dw = user_chunk.length_dw; - if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_IB) - p->num_ibs++; size = p->chunks[i].length_dw; cdata = (void __user *)(unsigned long)user_chunk.chunk_data; @@ -199,7 +197,12 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) goto out; } - if (p->chunks[i].chunk_id == AMDGPU_CHUNK_ID_FENCE) { + switch (p->chunks[i].chunk_id) { + case AMDGPU_CHUNK_ID_IB: + p->num_ibs++; + break; + + case AMDGPU_CHUNK_ID_FENCE: size = sizeof(struct drm_amdgpu_cs_chunk_fence); if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) { uint32_t handle; @@ -221,6 +224,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data) r = -EINVAL; goto out; } + break; + + case AMDGPU_CHUNK_ID_DEPENDENCIES: + break; + + default: + r = -EINVAL; + goto out; } } @@ -445,8 +456,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo for (i = 0; i < parser->nchunks; i++) drm_free_large(parser->chunks[i].kdata); kfree(parser->chunks); - for (i = 0; i < parser->num_ibs; i++) - amdgpu_ib_free(parser->adev, &parser->ibs[i]); + if (parser->ibs) + for (i = 0; i < parser->num_ibs; i++) + amdgpu_ib_free(parser->adev, &parser->ibs[i]); kfree(parser->ibs); if (parser->uf.bo) drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base); @@ -654,6 +666,55 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev, return 0; } +static int amdgpu_cs_dependencies(struct amdgpu_device *adev, + struct amdgpu_cs_parser *p) +{ + struct amdgpu_ib *ib; + int i, j, r; + + if (!p->num_ibs) + return 0; + + /* Add dependencies to first IB */ + ib = &p->ibs[0]; + for (i = 0; i < p->nchunks; ++i) { + struct drm_amdgpu_cs_chunk_dep *deps; + struct amdgpu_cs_chunk *chunk; + unsigned num_deps; + + chunk = &p->chunks[i]; + + if (chunk->chunk_id != 
AMDGPU_CHUNK_ID_DEPENDENCIES) + continue; + + deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata; + num_deps = chunk->length_dw * 4 / + sizeof(struct drm_amdgpu_cs_chunk_dep); + + for (j = 0; j < num_deps; ++j) { + struct amdgpu_fence *fence; + struct amdgpu_ring *ring; + + r = amdgpu_cs_get_ring(adev, deps[j].ip_type, + deps[j].ip_instance, + deps[j].ring, &ring); + if (r) + return r; + + r = amdgpu_fence_recreate(ring, p->filp, + deps[j].handle, + &fence); + if (r) + return r; + + amdgpu_sync_fence(&ib->sync, fence); + amdgpu_fence_unref(&fence); + } + } + + return 0; +} + int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct amdgpu_device *adev = dev->dev_private; @@ -688,11 +749,16 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) else DRM_ERROR("Failed to process the buffer list %d!\n", r); } - } else { + } + + if (!r) { reserved_buffers = true; r = amdgpu_cs_ib_fill(adev, &parser); } + if (!r) + r = amdgpu_cs_dependencies(adev, &parser); + if (r) { amdgpu_cs_parser_fini(&parser, r, reserved_buffers); up_read(&adev->exclusive_lock); @@ -730,9 +796,9 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, { union drm_amdgpu_wait_cs *wait = data; struct amdgpu_device *adev = dev->dev_private; - uint64_t seq[AMDGPU_MAX_RINGS] = {0}; - struct amdgpu_ring *ring = NULL; unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout); + struct amdgpu_fence *fence = NULL; + struct amdgpu_ring *ring = NULL; struct amdgpu_ctx *ctx; long r; @@ -745,9 +811,12 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, if (r) return r; - seq[ring->idx] = wait->in.handle; + r = amdgpu_fence_recreate(ring, filp, wait->in.handle, &fence); + if (r) + return r; - r = amdgpu_fence_wait_seq_timeout(adev, seq, true, timeout); + r = fence_wait_timeout(&fence->base, true, timeout); + amdgpu_fence_unref(&fence); amdgpu_ctx_put(ctx); if (r < 0) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index fec487d1c870..ba46be361c9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1191,7 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev) return -EINVAL; } - + adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL); + if (adev->ip_block_enabled == NULL) + return -ENOMEM; if (adev->ip_blocks == NULL) { DRM_ERROR("No IP blocks found!\n"); @@ -1575,8 +1577,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) amdgpu_fence_driver_fini(adev); amdgpu_fbdev_fini(adev); r = amdgpu_fini(adev); - if (adev->ip_block_enabled) - kfree(adev->ip_block_enabled); + kfree(adev->ip_block_enabled); adev->ip_block_enabled = NULL; adev->accel_working = false; /* free i2c buses */ @@ -2000,4 +2001,10 @@ int amdgpu_debugfs_init(struct drm_minor *minor) void amdgpu_debugfs_cleanup(struct drm_minor *minor) { } +#else +static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) +{ + return 0; +} +static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { } #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 5c9918d01bf9..a7189a1fa6a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -136,6 +136,38 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner, } /** + * amdgpu_fence_recreate - recreate a fence from an user fence + * + * @ring: ring the fence is associated with 
+ * @owner: creator of the fence + * @seq: user fence sequence number + * @fence: resulting amdgpu fence object + * + * Recreates a fence command from the user fence sequence number (all asics). + * Returns 0 on success, -ENOMEM on failure. + */ +int amdgpu_fence_recreate(struct amdgpu_ring *ring, void *owner, + uint64_t seq, struct amdgpu_fence **fence) +{ + struct amdgpu_device *adev = ring->adev; + + if (seq > ring->fence_drv.sync_seq[ring->idx]) + return -EINVAL; + + *fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL); + if ((*fence) == NULL) + return -ENOMEM; + + (*fence)->seq = seq; + (*fence)->ring = ring; + (*fence)->owner = owner; + fence_init(&(*fence)->base, &amdgpu_fence_ops, + &adev->fence_queue.lock, adev->fence_context + ring->idx, + (*fence)->seq); + return 0; +} + +/** * amdgpu_fence_check_signaled - callback from fence_queue * * this function is called with fence_queue lock held, which is also used @@ -517,12 +549,14 @@ static bool amdgpu_fence_any_seq_signaled(struct amdgpu_device *adev, u64 *seq) * the wait timeout, or an error for all other cases. * -EDEADLK is returned when a GPU lockup has been detected. */ -long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, u64 *target_seq, - bool intr, long timeout) +static long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev, + u64 *target_seq, bool intr, + long timeout) { uint64_t last_seq[AMDGPU_MAX_RINGS]; bool signaled; - int i, r; + int i; + long r; if (timeout == 0) { return amdgpu_fence_any_seq_signaled(adev, target_seq); @@ -1023,7 +1057,7 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data) amdgpu_fence_process(ring); - seq_printf(m, "--- ring %d ---\n", i); + seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name); seq_printf(m, "Last signaled fence 0x%016llx\n", (unsigned long long)atomic64_read(&ring->fence_drv.last_seq)); seq_printf(m, "Last emitted 0x%016llx\n", @@ -1031,7 +1065,8 @@ static int amdgpu_debugfs_fence_info(struct seq_file *m, void *data) for (j = 0; j < AMDGPU_MAX_RINGS; ++j) { struct amdgpu_ring *other = adev->rings[j]; - if (i != j && other && other->fence_drv.initialized) + if (i != j && other && other->fence_drv.initialized && + ring->fence_drv.sync_seq[j]) seq_printf(m, "Last sync to ring %d 0x%016llx\n", j, ring->fence_drv.sync_seq[j]); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 0ec222295fee..975edb1000a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -496,7 +496,7 @@ error_unreserve: error_free: drm_free_large(vm_bos); - if (r) + if (r && r != -ERESTARTSYS) DRM_ERROR("Couldn't update BO_VA (%d)\n", r); } @@ -525,8 +525,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - invalid_flags = ~(AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | - AMDGPU_VM_PAGE_EXECUTABLE); + invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE | + AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE); if ((args->flags & invalid_flags)) { dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n", args->flags, invalid_flags); @@ -579,7 +579,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, break; } - if (!r) + if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) amdgpu_gem_va_update_vm(adev, bo_va); drm_gem_object_unreference_unlocked(gobj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index b56dd64bd4ea..961d7265c286 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -30,19 +30,21 @@ TRACE_EVENT(amdgpu_cs, TP_PROTO(struct amdgpu_cs_parser *p, int i), TP_ARGS(p, i), TP_STRUCT__entry( + __field(struct amdgpu_bo_list *, bo_list) __field(u32, ring) __field(u32, dw) __field(u32, fences) ), TP_fast_assign( + __entry->bo_list = p->bo_list; __entry->ring = p->ibs[i].ring->idx; __entry->dw = p->ibs[i].length_dw; __entry->fences = amdgpu_fence_count_emitted( p->ibs[i].ring); ), - TP_printk("ring=%u, dw=%u, fences=%u", - __entry->ring, __entry->dw, + TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u", + __entry->bo_list, __entry->ring, __entry->dw, __entry->fences) ); @@ -61,6 +63,54 @@ TRACE_EVENT(amdgpu_vm_grab_id, TP_printk("vmid=%u, ring=%u", __entry->vmid, __entry->ring) ); +TRACE_EVENT(amdgpu_vm_bo_map, + TP_PROTO(struct amdgpu_bo_va *bo_va, + struct amdgpu_bo_va_mapping *mapping), + TP_ARGS(bo_va, mapping), + TP_STRUCT__entry( + __field(struct amdgpu_bo *, bo) + __field(long, start) + __field(long, last) + __field(u64, offset) + __field(u32, flags) + ), + + TP_fast_assign( + __entry->bo = bo_va->bo; + __entry->start = mapping->it.start; + __entry->last = mapping->it.last; + __entry->offset = mapping->offset; + __entry->flags = mapping->flags; + ), + TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x", + __entry->bo, __entry->start, __entry->last, + __entry->offset, __entry->flags) +); + +TRACE_EVENT(amdgpu_vm_bo_unmap, + TP_PROTO(struct amdgpu_bo_va *bo_va, + struct amdgpu_bo_va_mapping *mapping), + TP_ARGS(bo_va, mapping), + TP_STRUCT__entry( + __field(struct amdgpu_bo *, bo) + __field(long, start) + __field(long, last) + __field(u64, offset) + __field(u32, flags) + ), + + TP_fast_assign( + __entry->bo = bo_va->bo; + __entry->start = mapping->it.start; + __entry->last = mapping->it.last; + __entry->offset = mapping->offset; + __entry->flags = mapping->flags; + ), + TP_printk("bo=%p, start=%lx, last=%lx, offset=%010llx, flags=%08x", + __entry->bo, __entry->start, __entry->last, + __entry->offset, __entry->flags) +); + TRACE_EVENT(amdgpu_vm_bo_update, TP_PROTO(struct amdgpu_bo_va_mapping *mapping), TP_ARGS(mapping), @@ -121,6 +171,21 @@ TRACE_EVENT(amdgpu_vm_flush, __entry->pd_addr, __entry->ring, __entry->id) ); +TRACE_EVENT(amdgpu_bo_list_set, + TP_PROTO(struct amdgpu_bo_list *list, struct amdgpu_bo *bo), + TP_ARGS(list, bo), + TP_STRUCT__entry( + __field(struct amdgpu_bo_list *, list) + __field(struct amdgpu_bo *, bo) + ), + + TP_fast_assign( + __entry->list = list; + __entry->bo = bo; + ), + TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) +); + DECLARE_EVENT_CLASS(amdgpu_fence_request, TP_PROTO(struct drm_device *dev, int ring, u32 seqno), diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index d3706a498293..dd3415d2e45d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -674,7 +674,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm) return 0; if (gtt && gtt->userptr) { - ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL); + ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL); if (!ttm->sg) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 1127a504f118..d3ca73090e39 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -464,28 +464,42 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, * 
@p: parser context * @lo: address of lower dword + * @hi: address of higher dword + * @size: minimum size * * Patch relocation inside command stream with real buffer address */ -int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int hi) +static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, + int lo, int hi, unsigned size, uint32_t index) { struct amdgpu_bo_va_mapping *mapping; struct amdgpu_ib *ib = &p->ibs[ib_idx]; struct amdgpu_bo *bo; uint64_t addr; + if (index == 0xffffffff) + index = 0; + addr = ((uint64_t)amdgpu_get_ib_value(p, ib_idx, lo)) | ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32; + addr += ((uint64_t)size) * ((uint64_t)index); mapping = amdgpu_cs_find_mapping(p, addr, &bo); if (mapping == NULL) { - DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d\n", + DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n", + addr, lo, hi, size, index); + return -EINVAL; + } + + if ((addr + (uint64_t)size) > + ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) { + DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n", addr, lo, hi); return -EINVAL; } addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE; addr += amdgpu_bo_gpu_offset(bo); + addr -= ((uint64_t)size) * ((uint64_t)index); ib->ptr[lo] = addr & 0xFFFFFFFF; ib->ptr[hi] = addr >> 32; @@ -494,6 +508,48 @@ int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int } /** + * amdgpu_vce_validate_handle - validate stream handle + * + * @p: parser context + * @handle: handle to validate + * @allocated: allocated a new handle? + * + * Validates the handle and returns the found session index or -EINVAL + * if we don't have another free session index. + */ +static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p, + uint32_t handle, bool *allocated) +{ + unsigned i; + + *allocated = false; + + /* validate the handle */ + for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { + if (atomic_read(&p->adev->vce.handles[i]) == handle) { + if (p->adev->vce.filp[i] != p->filp) { + DRM_ERROR("VCE handle collision detected!\n"); + return -EINVAL; + } + return i; + } + } + + /* handle not found, try to alloc a new one */ + for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { + if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) { + p->adev->vce.filp[i] = p->filp; + p->adev->vce.img_size[i] = 0; + *allocated = true; + return i; + } + } + + DRM_ERROR("No more free VCE handles!\n"); + return -EINVAL; +} + +/** * amdgpu_vce_cs_parse - parse and validate the command stream * * @p: parser context @@ -501,10 +557,15 @@ int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx, int lo, int */ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) { - uint32_t handle = 0; - bool destroy = false; - int i, r, idx = 0; struct amdgpu_ib *ib = &p->ibs[ib_idx]; + unsigned fb_idx = 0, bs_idx = 0; + int session_idx = -1; + bool destroyed = false; + bool created = false; + bool allocated = false; + uint32_t tmp, handle = 0; + uint32_t *size = &tmp; + int i, r = 0, idx = 0; amdgpu_vce_note_usage(p->adev); @@ -514,16 +575,44 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) if ((len < 8) || (len & 3)) { DRM_ERROR("invalid VCE command length (%d)!\n", len); - return -EINVAL; + r = -EINVAL; + goto out; + } + + if (destroyed) { + DRM_ERROR("No other command allowed after destroy!\n"); + r = -EINVAL; + goto out; } switch (cmd) { case 0x00000001: // session handle = amdgpu_get_ib_value(p, ib_idx, idx + 2); + session_idx =
amdgpu_vce_validate_handle(p, handle, + &allocated); + if (session_idx < 0) + return session_idx; + size = &p->adev->vce.img_size[session_idx]; break; case 0x00000002: // task info + fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6); + bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7); + break; + case 0x01000001: // create + created = true; + if (!allocated) { + DRM_ERROR("Handle already in use!\n"); + r = -EINVAL; + goto out; + } + + *size = amdgpu_get_ib_value(p, ib_idx, idx + 8) * + amdgpu_get_ib_value(p, ib_idx, idx + 10) * + 8 * 3 / 2; + break; + case 0x04000001: // config extension case 0x04000002: // pic control case 0x04000005: // rate control @@ -534,60 +623,74 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) break; case 0x03000001: // encode - r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9); + r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9, + *size, 0); if (r) - return r; + goto out; - r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11); + r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 12, idx + 11, + *size / 3, 0); if (r) - return r; + goto out; break; case 0x02000001: // destroy - destroy = true; + destroyed = true; break; case 0x05000001: // context buffer + r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, + *size * 2, 0); + if (r) + goto out; + break; + case 0x05000004: // video bitstream buffer + tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4); + r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, + tmp, bs_idx); + if (r) + goto out; + break; + case 0x05000005: // feedback buffer - r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2); + r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, + 4096, fb_idx); if (r) - return r; + goto out; break; default: DRM_ERROR("invalid VCE command (0x%x)!\n", cmd); - return -EINVAL; + r = -EINVAL; + goto out; } - idx += len / 4; - } - - if (destroy) { - /* IB contains a destroy msg, free the handle */ - for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) - atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0); + if (session_idx == -1) { + DRM_ERROR("no session command at start of IB\n"); + r = -EINVAL; + goto out; + } - return 0; + idx += len / 4; } - /* create or encode, validate the handle */ - for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { - if (atomic_read(&p->adev->vce.handles[i]) == handle) - return 0; + if (allocated && !created) { + DRM_ERROR("New session without create command!\n"); + r = -ENOENT; } - /* handle not found try to alloc a new one */ - for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { - if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) { - p->adev->vce.filp[i] = p->filp; - return 0; - } +out: + if ((!r && destroyed) || (r && allocated)) { + /* + * IB contains a destroy msg or we have allocated an + * handle and got an error, anyway free the handle + */ + for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) + atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0); } - DRM_ERROR("No more free VCE handles!\n"); - - return -EINVAL; + return r; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index b6a9d0956c60..7ccdb5927da5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -33,7 +33,6 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, struct amdgpu_fence **fence); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); -int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, 
uint32_t ib_idx, int lo, int hi); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); bool amdgpu_vce_ring_emit_semaphore(struct amdgpu_ring *ring, struct amdgpu_semaphore *semaphore, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 407882b233c7..9a4e3b63f1cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1001,6 +1001,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, list_add(&mapping->list, &bo_va->mappings); interval_tree_insert(&mapping->it, &vm->va); + trace_amdgpu_vm_bo_map(bo_va, mapping); bo_va->addr = 0; @@ -1058,6 +1059,7 @@ error_free: mutex_lock(&vm->mutex); list_del(&mapping->list); interval_tree_remove(&mapping->it, &vm->va); + trace_amdgpu_vm_bo_unmap(bo_va, mapping); kfree(mapping); error_unlock: @@ -1099,6 +1101,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, mutex_lock(&vm->mutex); list_del(&mapping->list); interval_tree_remove(&mapping->it, &vm->va); + trace_amdgpu_vm_bo_unmap(bo_va, mapping); if (bo_va->addr) { /* clear the old address */ @@ -1139,6 +1142,7 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev, list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) { list_del(&mapping->list); interval_tree_remove(&mapping->it, &vm->va); + trace_amdgpu_vm_bo_unmap(bo_va, mapping); if (bo_va->addr) list_add(&mapping->list, &vm->freed); else diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 5dab578d6462..341c56681841 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -2256,10 +2256,6 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) return -EINVAL; } - adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL); - if (adev->ip_block_enabled == NULL) - return -ENOMEM; - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h index 220865a44814..d19085a97064 100644 --- a/drivers/gpu/drm/amd/amdgpu/cikd.h +++ b/drivers/gpu/drm/amd/amdgpu/cikd.h @@ -552,4 +552,10 @@ #define VCE_CMD_IB_AUTO 0x00000005 #define VCE_CMD_SEMAPHORE 0x00000006 +/* valid for both DEFAULT_MTYPE and APE1_MTYPE */ +enum { + MTYPE_CACHED = 0, + MTYPE_NONCACHED = 3 +}; + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index e4936a452bc6..f75a31df30bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -425,7 +425,7 @@ static int cz_dpm_init(struct amdgpu_device *adev) pi->mgcg_cgtt_local1 = 0x0; pi->clock_slow_down_step = 25000; pi->skip_clock_slow_down = 1; - pi->enable_nb_ps_policy = 1; + pi->enable_nb_ps_policy = 0; pi->caps_power_containment = true; pi->caps_cac = true; pi->didt_enabled = false; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h index 782a74107664..99e1afc89629 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.h @@ -46,7 +46,7 @@ /* Do not change the following, it is also defined in SMU8.h */ #define SMU_EnabledFeatureScoreboard_AcpDpmOn 0x00000001 -#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00100000 +#define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000 #define SMU_EnabledFeatureScoreboard_UvdDpmOn 0x00800000 #define SMU_EnabledFeatureScoreboard_VceDpmOn 0x01000000 diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 72c27ac915f2..aaca8d663f2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c 
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -3379,7 +3379,7 @@ static int dce_v8_0_hpd_irq(struct amdgpu_device *adev, uint32_t disp_int, mask, int_control, tmp; unsigned hpd; - if (entry->src_data > 6) { + if (entry->src_data >= adev->mode_info.num_hpd) { DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index cb7907447b81..2c188fb9fd22 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2010,6 +2010,46 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev, } /** + * gmc_v7_0_init_compute_vmid - gart enable + * + * @rdev: amdgpu_device pointer + * + * Initialize compute vmid sh_mem registers + * + */ +#define DEFAULT_SH_MEM_BASES (0x6000) +#define FIRST_COMPUTE_VMID (8) +#define LAST_COMPUTE_VMID (16) +static void gmc_v7_0_init_compute_vmid(struct amdgpu_device *adev) +{ + int i; + uint32_t sh_mem_config; + uint32_t sh_mem_bases; + + /* + * Configure apertures: + * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB) + * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB) + * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB) + */ + sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16); + sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED << + SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT; + sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT; + mutex_lock(&adev->srbm_mutex); + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + cik_srbm_select(adev, 0, 0, 0, i); + /* CP and shaders */ + WREG32(mmSH_MEM_CONFIG, sh_mem_config); + WREG32(mmSH_MEM_APE1_BASE, 1); + WREG32(mmSH_MEM_APE1_LIMIT, 0); + WREG32(mmSH_MEM_BASES, sh_mem_bases); + } + cik_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); +} + +/** * gfx_v7_0_gpu_init - setup the 3D engine * * @adev: amdgpu_device pointer @@ -2230,6 +2270,8 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) cik_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + gmc_v7_0_init_compute_vmid(adev); + WREG32(mmSX_DEBUG_1, 0x20); WREG32(mmTA_CNTL_AUX, 0x00010000); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 14242bd33363..7b683fb2173c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1894,6 +1894,51 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev, mutex_unlock(&adev->grbm_idx_mutex); } +/** + * gmc_v8_0_init_compute_vmid - gart enable + * + * @rdev: amdgpu_device pointer + * + * Initialize compute vmid sh_mem registers + * + */ +#define DEFAULT_SH_MEM_BASES (0x6000) +#define FIRST_COMPUTE_VMID (8) +#define LAST_COMPUTE_VMID (16) +static void gmc_v8_0_init_compute_vmid(struct amdgpu_device *adev) +{ + int i; + uint32_t sh_mem_config; + uint32_t sh_mem_bases; + + /* + * Configure apertures: + * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB) + * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB) + * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB) + */ + sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16); + + sh_mem_config = SH_MEM_ADDRESS_MODE_HSA64 << + SH_MEM_CONFIG__ADDRESS_MODE__SHIFT | + SH_MEM_ALIGNMENT_MODE_UNALIGNED << + SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT | + MTYPE_CC << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT | + SH_MEM_CONFIG__PRIVATE_ATC_MASK; + + mutex_lock(&adev->srbm_mutex); + for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) { + 
vi_srbm_select(adev, 0, 0, 0, i); + /* CP and shaders */ + WREG32(mmSH_MEM_CONFIG, sh_mem_config); + WREG32(mmSH_MEM_APE1_BASE, 1); + WREG32(mmSH_MEM_APE1_LIMIT, 0); + WREG32(mmSH_MEM_BASES, sh_mem_bases); + } + vi_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); +} + static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) { u32 gb_addr_config; @@ -2113,6 +2158,8 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) vi_srbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); + gmc_v8_0_init_compute_vmid(adev); + mutex_lock(&adev->grbm_idx_mutex); /* * making sure that the following register writes will be broadcasted @@ -3081,7 +3128,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, AMDGPU_DOORBELL_KIQ << 2); WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, - AMDGPU_DOORBELL_MEC_RING7 << 2); + 0x7FFFF << 2); } tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, @@ -3097,6 +3144,12 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control); + /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ + ring->wptr = 0; + mqd->cp_hqd_pq_wptr = ring->wptr; + WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr); + mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR); + /* set the vmid for the queue */ mqd->cp_hqd_vmid = 0; WREG32(mmCP_HQD_VMID, mqd->cp_hqd_vmid); diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index e3c1fde75363..7bb37b93993f 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -439,6 +439,31 @@ static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev) } /** + * sdma_v3_0_ctx_switch_enable - stop the async dma engines context switch + * + * @adev: amdgpu_device pointer + * @enable: enable/disable the DMA MEs context switch. + * + * Halt or unhalt the async dma engines context switch (VI). 
+ */ +static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable) +{ + u32 f32_cntl; + int i; + + for (i = 0; i < SDMA_MAX_INSTANCE; i++) { + f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]); + if (enable) + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + AUTO_CTXSW_ENABLE, 1); + else + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + AUTO_CTXSW_ENABLE, 0); + WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl); + } +} + +/** * sdma_v3_0_enable - stop the async dma engines * * @adev: amdgpu_device pointer @@ -648,6 +673,8 @@ static int sdma_v3_0_start(struct amdgpu_device *adev) /* unhalt the MEs */ sdma_v3_0_enable(adev, true); + /* enable sdma ring preemption */ + sdma_v3_0_ctx_switch_enable(adev, true); /* start the gfx rings and rlc compute queues */ r = sdma_v3_0_gfx_resume(adev); @@ -1079,6 +1106,7 @@ static int sdma_v3_0_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + sdma_v3_0_ctx_switch_enable(adev, false); sdma_v3_0_enable(adev, false); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 90fc93c2c1d0..fa5a4448531d 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -1189,10 +1189,6 @@ int vi_set_ip_blocks(struct amdgpu_device *adev) return -EINVAL; } - adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL); - if (adev->ip_block_enabled == NULL) - return -ENOMEM; - return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 619dad1b2386..9daa2883ac18 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -516,17 +516,17 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, struct page *page_table; if (WARN_ON(!ppgtt->pdp.page_directory[pdpe])) - continue; + break; pd = ppgtt->pdp.page_directory[pdpe]; if (WARN_ON(!pd->page_table[pde])) - continue; + break; pt = pd->page_table[pde]; if (WARN_ON(!pt->page)) - continue; + break; page_table = pt->page; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f5edb3504167..2030f602cbf8 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3491,6 +3491,7 @@ enum skl_disp_power_wells { #define BLM_POLARITY_PNV (1 << 0) /* pnv only */ #define BLC_HIST_CTL (dev_priv->info.display_mmio_offset + 0x61260) +#define BLM_HISTOGRAM_ENABLE (1 << 31) /* New registers for PCH-split platforms. Safe where new bits show up, the * register layout matches with gen4 BLC_PWM_CTL[12]. */ diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index dcb1d25d6f05..1b61f9810387 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13303,6 +13303,16 @@ intel_check_primary_plane(struct drm_plane *plane, intel_crtc->atomic.wait_vblank = true; } + /* + * FIXME: Actually, if we still have any other plane enabled + * on the pipe we could leave IPS enabled, but for + * now let's assume that when we make the primary plane invisible + * by setting DSPCNTR to 0 in the update_primary_plane function, + * IPS needs to be disabled.
+ */ + if (!state->visible || !fb) + intel_crtc->atomic.disable_ips = true; + intel_crtc->atomic.fb_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); @@ -13400,6 +13410,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc) if (intel_crtc->atomic.disable_fbc) intel_fbc_disable(dev); + if (intel_crtc->atomic.disable_ips) + hsw_disable_ips(intel_crtc); + if (intel_crtc->atomic.pre_disable_primary) intel_pre_disable_primary(crtc); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 76afc62373d7..6e8faa253792 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -1140,6 +1140,9 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock) static void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw) { + memset(&pipe_config->dpll_hw_state, 0, + sizeof(pipe_config->dpll_hw_state)); + switch (link_bw) { case DP_LINK_BW_1_62: pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 2afb31a46275..105928382e21 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -485,6 +485,7 @@ struct intel_crtc_atomic_commit { /* Sleepable operations to perform before commit */ bool wait_for_flips; bool disable_fbc; + bool disable_ips; bool pre_disable_primary; bool update_wm; unsigned disabled_planes; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 7d83527f95f7..55aad2322e10 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -907,6 +907,14 @@ static void i9xx_enable_backlight(struct intel_connector *connector) /* XXX: combine this into above write? */ intel_panel_actually_set_backlight(connector, panel->backlight.level); + + /* + * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is + * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2 + * that has backlight. + */ + if (IS_GEN2(dev)) + I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE); } static void i965_enable_backlight(struct intel_connector *connector) diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 0e690bf19fc9..af1ee517f372 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -555,10 +555,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, static inline void u_free(void *addr) { - if (!is_vmalloc_addr(addr)) - kfree(addr); - else - vfree(addr); + kvfree(addr); } static inline void * diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index b0688b0c8908..4ecf5caa8c6d 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -4604,6 +4604,31 @@ void cik_compute_set_wptr(struct radeon_device *rdev, WDOORBELL32(ring->doorbell_index, ring->wptr); } +static void cik_compute_stop(struct radeon_device *rdev, + struct radeon_ring *ring) +{ + u32 j, tmp; + + cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); + /* Disable wptr polling. */ + tmp = RREG32(CP_PQ_WPTR_POLL_CNTL); + tmp &= ~WPTR_POLL_EN; + WREG32(CP_PQ_WPTR_POLL_CNTL, tmp); + /* Disable HQD. 
*/ + if (RREG32(CP_HQD_ACTIVE) & 1) { + WREG32(CP_HQD_DEQUEUE_REQUEST, 1); + for (j = 0; j < rdev->usec_timeout; j++) { + if (!(RREG32(CP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + WREG32(CP_HQD_DEQUEUE_REQUEST, 0); + WREG32(CP_HQD_PQ_RPTR, 0); + WREG32(CP_HQD_PQ_WPTR, 0); + } + cik_srbm_select(rdev, 0, 0, 0, 0); +} + /** * cik_cp_compute_enable - enable/disable the compute CP MEs * @@ -4617,6 +4642,15 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable) if (enable) WREG32(CP_MEC_CNTL, 0); else { + /* + * To make hibernation reliable, we need to clear the compute ring + * configuration before halting the compute ring. + */ + mutex_lock(&rdev->srbm_mutex); + cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]); + cik_compute_stop(rdev, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]); + mutex_unlock(&rdev->srbm_mutex); + WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT)); rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index f86eb54e7763..d16f2eebd95e 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -268,6 +268,17 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev) } rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false; + + /* FIXME: use something other than this big hammer; after a few days we could + * not find a good combination, so reset the SDMA blocks as it seems we + * do not shut them down properly. This fixes hibernation and does not + * affect suspend to RAM. + */ + WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1); + (void)RREG32(SRBM_SOFT_RESET); + udelay(50); + WREG32(SRBM_SOFT_RESET, 0); + (void)RREG32(SRBM_SOFT_RESET); } /** diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index c89215275053..fa719c53449b 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c @@ -469,22 +469,22 @@ void radeon_audio_detect(struct drm_connector *connector, dig = radeon_encoder->enc_priv; if (status == connector_status_connected) { - struct radeon_connector *radeon_connector; - int sink_type; - if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) { radeon_encoder->audio = NULL; return; } - radeon_connector = to_radeon_connector(connector); - sink_type = radeon_dp_getsinktype(radeon_connector); + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { + struct radeon_connector *radeon_connector = to_radeon_connector(connector); - if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && - sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) - radeon_encoder->audio = rdev->audio.dp_funcs; - else + if (radeon_dp_getsinktype(radeon_connector) == + CONNECTOR_OBJECT_ID_DISPLAYPORT) + radeon_encoder->audio = rdev->audio.dp_funcs; + else + radeon_encoder->audio = rdev->audio.hdmi_funcs; + } else { radeon_encoder->audio = rdev->audio.hdmi_funcs; + } dig->afmt->pin = radeon_audio_get_pin(connector->encoder); radeon_audio_enable(rdev, dig->afmt->pin, 0xf); diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index aeb676708e60..634793ea8418 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -257,7 +257,6 @@ static int radeonfb_create(struct drm_fb_helper *helper, } info->par = rfbdev; - info->skip_vt_switch = true; ret = radeon_framebuffer_init(rdev->ddev,
&rfbdev->rfb, &mode_cmd, gobj); if (ret) { diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index edafd3c2b170..06ac59fe332a 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -719,7 +719,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm) return 0; if (gtt && gtt->userptr) { - ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL); + ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL); if (!ttm->sg) return -ENOMEM; diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index 3662157c2b15..ec10533a49b8 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -1129,12 +1129,12 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev, interval_tree_remove(&bo_va->it, &vm->va); spin_lock(&vm->status_lock); - if (list_empty(&bo_va->vm_status)) { + list_del(&bo_va->vm_status); + if (bo_va->it.start || bo_va->it.last) { bo_va->bo = radeon_bo_ref(bo_va->bo); list_add(&bo_va->vm_status, &vm->freed); } else { radeon_fence_unref(&bo_va->last_pt_update); - list_del(&bo_va->vm_status); kfree(bo_va); } spin_unlock(&vm->status_lock); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 3962176ee713..01b558fe3695 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -21,6 +21,7 @@ #include <drm/drm_fb_helper.h> #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> +#include <linux/module.h> #include <linux/of_graph.h> #include <linux/component.h> diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 4557f335a8a5..dc65161d7cad 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -19,6 +19,7 @@ #include <drm/drm_plane_helper.h> #include <linux/kernel.h> +#include <linux/module.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/of.h> diff --git a/drivers/hsi/controllers/omap_ssi.h b/drivers/hsi/controllers/omap_ssi.h index 9d056417d88c..f9aaf37262be 100644 --- a/drivers/hsi/controllers/omap_ssi.h +++ b/drivers/hsi/controllers/omap_ssi.h @@ -24,6 +24,7 @@ #define __LINUX_HSI_OMAP_SSI_H__ #include <linux/device.h> +#include <linux/module.h> #include <linux/platform_device.h> #include <linux/hsi/hsi.h> #include <linux/gpio.h> diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c index 8d594517cd29..7ffc748cb973 100644 --- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c +++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c @@ -245,10 +245,7 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue, ipz_queue_ctor_exit0: ehca_gen_err("Couldn't alloc pages queue=%p " "nr_of_pages=%x", queue, nr_of_pages); - if (is_vmalloc_addr(queue->queue_pages)) - vfree(queue->queue_pages); - else - kfree(queue->queue_pages); + kvfree(queue->queue_pages); return 0; } @@ -270,10 +267,7 @@ int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue) free_page((unsigned long)queue->queue_pages[i]); } - if (is_vmalloc_addr(queue->queue_pages)) - vfree(queue->queue_pages); - else - kfree(queue->queue_pages); + kvfree(queue->queue_pages); return 1; } diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index d3e5e9abe3b6..a57e9b749895 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -117,6 +117,7 @@ struct kmem_cache *amd_iommu_irq_cache; static void 
update_domain(struct protection_domain *domain); static int alloc_passthrough_domain(void); +static int protection_domain_init(struct protection_domain *domain); /**************************************************************************** * @@ -1881,12 +1882,9 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void) if (!dma_dom) return NULL; - spin_lock_init(&dma_dom->domain.lock); - - dma_dom->domain.id = domain_id_alloc(); - if (dma_dom->domain.id == 0) + if (protection_domain_init(&dma_dom->domain)) goto free_dma_dom; - INIT_LIST_HEAD(&dma_dom->domain.dev_list); + dma_dom->domain.mode = PAGE_MODE_2_LEVEL; dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); dma_dom->domain.flags = PD_DMA_OPS_MASK; @@ -2916,6 +2914,18 @@ static void protection_domain_free(struct protection_domain *domain) kfree(domain); } +static int protection_domain_init(struct protection_domain *domain) +{ + spin_lock_init(&domain->lock); + mutex_init(&domain->api_lock); + domain->id = domain_id_alloc(); + if (!domain->id) + return -ENOMEM; + INIT_LIST_HEAD(&domain->dev_list); + + return 0; +} + static struct protection_domain *protection_domain_alloc(void) { struct protection_domain *domain; @@ -2924,12 +2934,8 @@ static struct protection_domain *protection_domain_alloc(void) if (!domain) return NULL; - spin_lock_init(&domain->lock); - mutex_init(&domain->api_lock); - domain->id = domain_id_alloc(); - if (!domain->id) + if (protection_domain_init(domain)) goto out_err; - INIT_LIST_HEAD(&domain->dev_list); add_domain_to_list(domain); diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index f14130121298..8e9ec81ce4bb 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -1389,8 +1389,7 @@ static void arm_smmu_domain_free(struct iommu_domain *domain) struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_device *smmu = smmu_domain->smmu; - if (smmu_domain->pgtbl_ops) - free_io_pgtable_ops(smmu_domain->pgtbl_ops); + free_io_pgtable_ops(smmu_domain->pgtbl_ops); /* Free the CD and ASID, if we allocated them */ if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index dce041b1c139..4cd0c29cb585 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -1566,7 +1566,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) return -ENODEV; } - if ((id & ID0_S1TS) && ((smmu->version == 1) || (id & ID0_ATOSNS))) { + if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) { smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; dev_notice(smmu->dev, "\taddress translation ops\n"); } diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 49e7542510d1..f286090931cc 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -847,13 +847,24 @@ static int add_iommu_group(struct device *dev, void *data) { struct iommu_callback_data *cb = data; const struct iommu_ops *ops = cb->ops; + int ret; if (!ops->add_device) return 0; WARN_ON(dev->iommu_group); - return ops->add_device(dev); + ret = ops->add_device(dev); + + /* + * We ignore -ENODEV errors for now, as they just mean that the + * device is not translated by an IOMMU. We still care about + * other errors and fail to initialize when they happen. 
+ */ + if (ret == -ENODEV) + ret = 0; + + return ret; } static int remove_iommu_group(struct device *dev, void *data) diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 4191614c4651..9ad35f72ab4c 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -39,6 +39,32 @@ config LEDS_88PM860X This option enables support for on-chip LED drivers found on Marvell Semiconductor 88PM8606 PMIC. +config LEDS_AAT1290 + tristate "LED support for the AAT1290" + depends on LEDS_CLASS_FLASH + depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS + depends on GPIOLIB + depends on OF + depends on PINCTRL + help + This option enables support for the LEDs on the AAT1290. + +config LEDS_BCM6328 + tristate "LED Support for Broadcom BCM6328" + depends on LEDS_CLASS + depends on OF + help + This option enables support for LEDs connected to the BCM6328 + LED HW controller accessed via MMIO registers. + +config LEDS_BCM6358 + tristate "LED Support for Broadcom BCM6358" + depends on LEDS_CLASS + depends on OF + help + This option enables support for LEDs connected to the BCM6358 + LED HW controller accessed via MMIO registers. + config LEDS_LM3530 tristate "LCD Backlight driver for LM3530" depends on LEDS_CLASS @@ -179,7 +205,7 @@ config LEDS_PCA9532_GPIO config LEDS_GPIO tristate "LED Support for GPIO connected LEDs" depends on LEDS_CLASS - depends on GPIOLIB + depends on GPIOLIB || COMPILE_TEST help This option enables support for the LEDs connected to GPIO outputs. To be useful the particular board must have LEDs @@ -203,6 +229,7 @@ config LEDS_LP55XX_COMMON tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501" depends on LEDS_LP5521 || LEDS_LP5523 || LEDS_LP5562 || LEDS_LP8501 select FW_LOADER + select FW_LOADER_USER_HELPER_FALLBACK help This option supports common operations for LP5521/5523/55231/5562/8501 devices. @@ -464,6 +491,25 @@ config LEDS_TCA6507 LED driver chips accessed via the I2C bus. Driver support brightness control and hardware-assisted blinking. +config LEDS_TLC591XX + tristate "LED driver for TLC59108 and TLC59116 controllers" + depends on LEDS_CLASS && I2C + select REGMAP_I2C + help + This option enables support for Texas Instruments TLC59108 + and TLC59116 LED controllers. + +config LEDS_MAX77693 + tristate "LED support for MAX77693 Flash" + depends on LEDS_CLASS_FLASH + depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS + depends on MFD_MAX77693 + depends on OF + help + This option enables support for the flash part of the MAX77693 + multifunction device. It has build in control for two leds in flash + and torch mode. + config LEDS_MAX8997 tristate "LED support for MAX8997 PMIC" depends on LEDS_CLASS && MFD_MAX8997 @@ -495,6 +541,15 @@ config LEDS_MENF21BMC This driver can also be built as a module. If so the module will be called leds-menf21bmc. +config LEDS_KTD2692 + tristate "LED support for KTD2692 flash LED controller" + depends on LEDS_CLASS_FLASH && GPIOLIB && OF + help + This option enables support for KTD2692 LED flash connected + through ExpressWire interface. + + Say Y to enable this driver. 
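Editor's note (illustrative, not part of the patch): the new flash-LED entries above all depend on LEDS_CLASS_FLASH, and several can optionally integrate with V4L2_FLASH_LED_CLASS. Assuming the platform already provides the remaining dependencies listed in each entry (GPIOLIB, OF, PINCTRL, MFD_MAX77693 as applicable), a config fragment enabling the flash drivers as modules might look like:

	CONFIG_LEDS_CLASS_FLASH=y
	CONFIG_LEDS_AAT1290=m
	CONFIG_LEDS_MAX77693=m
	CONFIG_LEDS_KTD2692=m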
+ comment "LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)" config LEDS_BLINKM diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index bf4609338e10..8d6a24a2f513 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -7,6 +7,9 @@ obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o # LED Platform Drivers obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o +obj-$(CONFIG_LEDS_AAT1290) += leds-aat1290.o +obj-$(CONFIG_LEDS_BCM6328) += leds-bcm6328.o +obj-$(CONFIG_LEDS_BCM6358) += leds-bcm6358.o obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o @@ -31,6 +34,7 @@ obj-$(CONFIG_LEDS_LP8501) += leds-lp8501.o obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o obj-$(CONFIG_LEDS_LP8860) += leds-lp8860.o obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o +obj-$(CONFIG_LEDS_TLC591XX) += leds-tlc591xx.o obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o obj-$(CONFIG_LEDS_IPAQ_MICRO) += leds-ipaq-micro.o obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o @@ -52,6 +56,7 @@ obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o obj-$(CONFIG_LEDS_NS2) += leds-ns2.o obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o +obj-$(CONFIG_LEDS_MAX77693) += leds-max77693.o obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o @@ -59,6 +64,7 @@ obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o obj-$(CONFIG_LEDS_VERSATILE) += leds-versatile.o obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o obj-$(CONFIG_LEDS_PM8941_WLED) += leds-pm8941-wled.o +obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o # LED SPI Drivers obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 7fb2a19ac649..beabfbc6f7cd 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -121,6 +121,11 @@ static void led_timer_function(unsigned long data) brightness = led_get_brightness(led_cdev); if (!brightness) { /* Time to switch the LED on. */ + if (led_cdev->delayed_set_value) { + led_cdev->blink_brightness = + led_cdev->delayed_set_value; + led_cdev->delayed_set_value = 0; + } brightness = led_cdev->blink_brightness; delay = led_cdev->blink_delay_on; } else { diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index 9886dace5ad2..549de7e24cfd 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c @@ -119,10 +119,11 @@ void led_set_brightness(struct led_classdev *led_cdev, { int ret = 0; - /* delay brightness setting if need to stop soft-blink timer */ + /* delay brightness if soft-blink is active */ if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) { led_cdev->delayed_set_value = brightness; - schedule_work(&led_cdev->set_brightness_work); + if (brightness == LED_OFF) + schedule_work(&led_cdev->set_brightness_work); return; } diff --git a/drivers/leds/leds-aat1290.c b/drivers/leds/leds-aat1290.c new file mode 100644 index 000000000000..fd7c25fd29c1 --- /dev/null +++ b/drivers/leds/leds-aat1290.c @@ -0,0 +1,576 @@ +/* + * LED Flash class driver for the AAT1290 + * 1.5A Step-Up Current Regulator for Flash LEDs + * + * Copyright (C) 2015, Samsung Electronics Co., Ltd. + * Author: Jacek Anaszewski <[email protected]> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ */ + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/led-class-flash.h> +#include <linux/leds.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/pinctrl/consumer.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/workqueue.h> +#include <media/v4l2-flash-led-class.h> + +#define AAT1290_MOVIE_MODE_CURRENT_ADDR 17 +#define AAT1290_MAX_MM_CURR_PERCENT_0 16 +#define AAT1290_MAX_MM_CURR_PERCENT_100 1 + +#define AAT1290_FLASH_SAFETY_TIMER_ADDR 18 + +#define AAT1290_MOVIE_MODE_CONFIG_ADDR 19 +#define AAT1290_MOVIE_MODE_OFF 1 +#define AAT1290_MOVIE_MODE_ON 3 + +#define AAT1290_MM_CURRENT_RATIO_ADDR 20 +#define AAT1290_MM_TO_FL_1_92 1 + +#define AAT1290_MM_TO_FL_RATIO 1000 / 1920 +#define AAT1290_MAX_MM_CURRENT(fl_max) (fl_max * AAT1290_MM_TO_FL_RATIO) + +#define AAT1290_LATCH_TIME_MIN_US 500 +#define AAT1290_LATCH_TIME_MAX_US 1000 +#define AAT1290_EN_SET_TICK_TIME_US 1 +#define AAT1290_FLEN_OFF_DELAY_TIME_US 10 +#define AAT1290_FLASH_TM_NUM_LEVELS 16 +#define AAT1290_MM_CURRENT_SCALE_SIZE 15 + + +struct aat1290_led_config_data { + /* maximum LED current in movie mode */ + u32 max_mm_current; + /* maximum LED current in flash mode */ + u32 max_flash_current; + /* maximum flash timeout */ + u32 max_flash_tm; + /* external strobe capability */ + bool has_external_strobe; + /* max LED brightness level */ + enum led_brightness max_brightness; +}; + +struct aat1290_led { + /* platform device data */ + struct platform_device *pdev; + /* secures access to the device */ + struct mutex lock; + + /* corresponding LED Flash class device */ + struct led_classdev_flash fled_cdev; + /* V4L2 Flash device */ + struct v4l2_flash *v4l2_flash; + + /* FLEN pin */ + struct gpio_desc *gpio_fl_en; + /* EN|SET pin */ + struct gpio_desc *gpio_en_set; + /* movie mode current scale */ + int *mm_current_scale; + /* device mode */ + bool movie_mode; + + /* brightness cache */ + unsigned int torch_brightness; + /* assures led-triggers compatibility */ + struct work_struct work_brightness_set; +}; + +static struct aat1290_led *fled_cdev_to_led( + struct led_classdev_flash *fled_cdev) +{ + return container_of(fled_cdev, struct aat1290_led, fled_cdev); +} + +static void aat1290_as2cwire_write(struct aat1290_led *led, int addr, int value) +{ + int i; + + gpiod_direction_output(led->gpio_fl_en, 0); + gpiod_direction_output(led->gpio_en_set, 0); + + udelay(AAT1290_FLEN_OFF_DELAY_TIME_US); + + /* write address */ + for (i = 0; i < addr; ++i) { + udelay(AAT1290_EN_SET_TICK_TIME_US); + gpiod_direction_output(led->gpio_en_set, 0); + udelay(AAT1290_EN_SET_TICK_TIME_US); + gpiod_direction_output(led->gpio_en_set, 1); + } + + usleep_range(AAT1290_LATCH_TIME_MIN_US, AAT1290_LATCH_TIME_MAX_US); + + /* write data */ + for (i = 0; i < value; ++i) { + udelay(AAT1290_EN_SET_TICK_TIME_US); + gpiod_direction_output(led->gpio_en_set, 0); + udelay(AAT1290_EN_SET_TICK_TIME_US); + gpiod_direction_output(led->gpio_en_set, 1); + } + + usleep_range(AAT1290_LATCH_TIME_MIN_US, AAT1290_LATCH_TIME_MAX_US); +} + +static void aat1290_set_flash_safety_timer(struct aat1290_led *led, + unsigned int micro_sec) +{ + struct led_classdev_flash *fled_cdev = &led->fled_cdev; + struct led_flash_setting *flash_tm = &fled_cdev->timeout; + int flash_tm_reg = AAT1290_FLASH_TM_NUM_LEVELS - + (micro_sec / flash_tm->step) + 1; + + aat1290_as2cwire_write(led, AAT1290_FLASH_SAFETY_TIMER_ADDR, + flash_tm_reg); +} + +static void aat1290_brightness_set(struct aat1290_led *led, + 
enum led_brightness brightness) +{ + mutex_lock(&led->lock); + + if (brightness == 0) { + gpiod_direction_output(led->gpio_fl_en, 0); + gpiod_direction_output(led->gpio_en_set, 0); + led->movie_mode = false; + } else { + if (!led->movie_mode) { + aat1290_as2cwire_write(led, + AAT1290_MM_CURRENT_RATIO_ADDR, + AAT1290_MM_TO_FL_1_92); + led->movie_mode = true; + } + + aat1290_as2cwire_write(led, AAT1290_MOVIE_MODE_CURRENT_ADDR, + AAT1290_MAX_MM_CURR_PERCENT_0 - brightness); + aat1290_as2cwire_write(led, AAT1290_MOVIE_MODE_CONFIG_ADDR, + AAT1290_MOVIE_MODE_ON); + } + + mutex_unlock(&led->lock); +} + +/* LED subsystem callbacks */ + +static void aat1290_brightness_set_work(struct work_struct *work) +{ + struct aat1290_led *led = + container_of(work, struct aat1290_led, work_brightness_set); + + aat1290_brightness_set(led, led->torch_brightness); +} + +static void aat1290_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); + struct aat1290_led *led = fled_cdev_to_led(fled_cdev); + + led->torch_brightness = brightness; + schedule_work(&led->work_brightness_set); +} + +static int aat1290_led_brightness_set_sync(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); + struct aat1290_led *led = fled_cdev_to_led(fled_cdev); + + aat1290_brightness_set(led, brightness); + + return 0; +} + +static int aat1290_led_flash_strobe_set(struct led_classdev_flash *fled_cdev, + bool state) + +{ + struct aat1290_led *led = fled_cdev_to_led(fled_cdev); + struct led_classdev *led_cdev = &fled_cdev->led_cdev; + struct led_flash_setting *timeout = &fled_cdev->timeout; + + mutex_lock(&led->lock); + + if (state) { + aat1290_set_flash_safety_timer(led, timeout->val); + gpiod_direction_output(led->gpio_fl_en, 1); + } else { + gpiod_direction_output(led->gpio_fl_en, 0); + gpiod_direction_output(led->gpio_en_set, 0); + } + + /* + * To reenter movie mode after a flash event the part must be cycled + * off and back on to reset the movie mode and reprogrammed via the + * AS2Cwire. Therefore the brightness and movie_mode properties needs + * to be updated here to reflect the actual state. + */ + led_cdev->brightness = 0; + led->movie_mode = false; + + mutex_unlock(&led->lock); + + return 0; +} + +static int aat1290_led_flash_timeout_set(struct led_classdev_flash *fled_cdev, + u32 timeout) +{ + /* + * Don't do anything - flash timeout is cached in the led-class-flash + * core and will be applied in the strobe_set op, as writing the + * safety timer register spuriously turns the torch mode on. 
+ */ + + return 0; +} + +static int aat1290_led_parse_dt(struct aat1290_led *led, + struct aat1290_led_config_data *cfg, + struct device_node **sub_node) +{ + struct led_classdev *led_cdev = &led->fled_cdev.led_cdev; + struct device *dev = &led->pdev->dev; + struct device_node *child_node; +#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS) + struct pinctrl *pinctrl; +#endif + int ret = 0; + + led->gpio_fl_en = devm_gpiod_get(dev, "flen", GPIOD_ASIS); + if (IS_ERR(led->gpio_fl_en)) { + ret = PTR_ERR(led->gpio_fl_en); + dev_err(dev, "Unable to claim gpio \"flen\".\n"); + return ret; + } + + led->gpio_en_set = devm_gpiod_get(dev, "enset", GPIOD_ASIS); + if (IS_ERR(led->gpio_en_set)) { + ret = PTR_ERR(led->gpio_en_set); + dev_err(dev, "Unable to claim gpio \"enset\".\n"); + return ret; + } + +#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS) + pinctrl = devm_pinctrl_get_select_default(&led->pdev->dev); + if (IS_ERR(pinctrl)) { + cfg->has_external_strobe = false; + dev_info(dev, + "No support for external strobe detected.\n"); + } else { + cfg->has_external_strobe = true; + } +#endif + + child_node = of_get_next_available_child(dev->of_node, NULL); + if (!child_node) { + dev_err(dev, "No DT child node found for connected LED.\n"); + return -EINVAL; + } + + led_cdev->name = of_get_property(child_node, "label", NULL) ? : + child_node->name; + + ret = of_property_read_u32(child_node, "led-max-microamp", + &cfg->max_mm_current); + /* + * led-max-microamp will default to 1/20 of flash-max-microamp + * in case it is missing. + */ + if (ret < 0) + dev_warn(dev, + "led-max-microamp DT property missing\n"); + + ret = of_property_read_u32(child_node, "flash-max-microamp", + &cfg->max_flash_current); + if (ret < 0) { + dev_err(dev, + "flash-max-microamp DT property missing\n"); + return ret; + } + + ret = of_property_read_u32(child_node, "flash-max-timeout-us", + &cfg->max_flash_tm); + if (ret < 0) { + dev_err(dev, + "flash-max-timeout-us DT property missing\n"); + return ret; + } + + of_node_put(child_node); + + *sub_node = child_node; + + return ret; +} + +static void aat1290_led_validate_mm_current(struct aat1290_led *led, + struct aat1290_led_config_data *cfg) +{ + int i, b = 0, e = AAT1290_MM_CURRENT_SCALE_SIZE; + + while (e - b > 1) { + i = b + (e - b) / 2; + if (cfg->max_mm_current < led->mm_current_scale[i]) + e = i; + else + b = i; + } + + cfg->max_mm_current = led->mm_current_scale[b]; + cfg->max_brightness = b + 1; +} + +int init_mm_current_scale(struct aat1290_led *led, + struct aat1290_led_config_data *cfg) +{ + int max_mm_current_percent[] = { 20, 22, 25, 28, 32, 36, 40, 45, 50, 56, + 63, 71, 79, 89, 100 }; + int i, max_mm_current = + AAT1290_MAX_MM_CURRENT(cfg->max_flash_current); + + led->mm_current_scale = devm_kzalloc(&led->pdev->dev, + sizeof(max_mm_current_percent), + GFP_KERNEL); + if (!led->mm_current_scale) + return -ENOMEM; + + for (i = 0; i < AAT1290_MM_CURRENT_SCALE_SIZE; ++i) + led->mm_current_scale[i] = max_mm_current * + max_mm_current_percent[i] / 100; + + return 0; +} + +static int aat1290_led_get_configuration(struct aat1290_led *led, + struct aat1290_led_config_data *cfg, + struct device_node **sub_node) +{ + int ret; + + ret = aat1290_led_parse_dt(led, cfg, sub_node); + if (ret < 0) + return ret; + /* + * Init non-linear movie mode current scale basing + * on the max flash current from led configuration. 
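	 * Illustration added in editing (not part of the original patch),
	 * assuming flash-max-microamp = 1000000: AAT1290_MAX_MM_CURRENT()
	 * yields 1000000 * 1000 / 1920 = 520833 uA, and the 15-entry scale
	 * then spans 20%..100% of that, i.e. roughly 104166..520833 uA.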
+ */ + ret = init_mm_current_scale(led, cfg); + if (ret < 0) + return ret; + + aat1290_led_validate_mm_current(led, cfg); + +#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS) +#else + devm_kfree(&led->pdev->dev, led->mm_current_scale); +#endif + + return 0; +} + +static void aat1290_init_flash_timeout(struct aat1290_led *led, + struct aat1290_led_config_data *cfg) +{ + struct led_classdev_flash *fled_cdev = &led->fled_cdev; + struct led_flash_setting *setting; + + /* Init flash timeout setting */ + setting = &fled_cdev->timeout; + setting->min = cfg->max_flash_tm / AAT1290_FLASH_TM_NUM_LEVELS; + setting->max = cfg->max_flash_tm; + setting->step = setting->min; + setting->val = setting->max; +} + +#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS) +static enum led_brightness aat1290_intensity_to_brightness( + struct v4l2_flash *v4l2_flash, + s32 intensity) +{ + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct aat1290_led *led = fled_cdev_to_led(fled_cdev); + int i; + + for (i = AAT1290_MM_CURRENT_SCALE_SIZE - 1; i >= 0; --i) + if (intensity >= led->mm_current_scale[i]) + return i + 1; + + return 1; +} + +static s32 aat1290_brightness_to_intensity(struct v4l2_flash *v4l2_flash, + enum led_brightness brightness) +{ + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct aat1290_led *led = fled_cdev_to_led(fled_cdev); + + return led->mm_current_scale[brightness - 1]; +} + +static int aat1290_led_external_strobe_set(struct v4l2_flash *v4l2_flash, + bool enable) +{ + struct aat1290_led *led = fled_cdev_to_led(v4l2_flash->fled_cdev); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct led_classdev *led_cdev = &fled_cdev->led_cdev; + struct pinctrl *pinctrl; + + gpiod_direction_output(led->gpio_fl_en, 0); + gpiod_direction_output(led->gpio_en_set, 0); + + led->movie_mode = false; + led_cdev->brightness = 0; + + pinctrl = devm_pinctrl_get_select(&led->pdev->dev, + enable ? 
"isp" : "host"); + if (IS_ERR(pinctrl)) { + dev_warn(&led->pdev->dev, "Unable to switch strobe source.\n"); + return PTR_ERR(pinctrl); + } + + return 0; +} + +static void aat1290_init_v4l2_flash_config(struct aat1290_led *led, + struct aat1290_led_config_data *led_cfg, + struct v4l2_flash_config *v4l2_sd_cfg) +{ + struct led_classdev *led_cdev = &led->fled_cdev.led_cdev; + struct led_flash_setting *s; + + strlcpy(v4l2_sd_cfg->dev_name, led_cdev->name, + sizeof(v4l2_sd_cfg->dev_name)); + + s = &v4l2_sd_cfg->torch_intensity; + s->min = led->mm_current_scale[0]; + s->max = led_cfg->max_mm_current; + s->step = 1; + s->val = s->max; + + v4l2_sd_cfg->has_external_strobe = led_cfg->has_external_strobe; +} + +static const struct v4l2_flash_ops v4l2_flash_ops = { + .external_strobe_set = aat1290_led_external_strobe_set, + .intensity_to_led_brightness = aat1290_intensity_to_brightness, + .led_brightness_to_intensity = aat1290_brightness_to_intensity, +}; +#else +static inline void aat1290_init_v4l2_flash_config(struct aat1290_led *led, + struct aat1290_led_config_data *led_cfg, + struct v4l2_flash_config *v4l2_sd_cfg) +{ +} +static const struct v4l2_flash_ops v4l2_flash_ops; +#endif + +static const struct led_flash_ops flash_ops = { + .strobe_set = aat1290_led_flash_strobe_set, + .timeout_set = aat1290_led_flash_timeout_set, +}; + +static int aat1290_led_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *sub_node = NULL; + struct aat1290_led *led; + struct led_classdev *led_cdev; + struct led_classdev_flash *fled_cdev; + struct aat1290_led_config_data led_cfg = {}; + struct v4l2_flash_config v4l2_sd_cfg = {}; + int ret; + + led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL); + if (!led) + return -ENOMEM; + + led->pdev = pdev; + platform_set_drvdata(pdev, led); + + fled_cdev = &led->fled_cdev; + fled_cdev->ops = &flash_ops; + led_cdev = &fled_cdev->led_cdev; + + ret = aat1290_led_get_configuration(led, &led_cfg, &sub_node); + if (ret < 0) + return ret; + + mutex_init(&led->lock); + + /* Initialize LED Flash class device */ + led_cdev->brightness_set = aat1290_led_brightness_set; + led_cdev->brightness_set_sync = aat1290_led_brightness_set_sync; + led_cdev->max_brightness = led_cfg.max_brightness; + led_cdev->flags |= LED_DEV_CAP_FLASH; + INIT_WORK(&led->work_brightness_set, aat1290_brightness_set_work); + + aat1290_init_flash_timeout(led, &led_cfg); + + /* Register LED Flash class device */ + ret = led_classdev_flash_register(&pdev->dev, fled_cdev); + if (ret < 0) + goto err_flash_register; + + aat1290_init_v4l2_flash_config(led, &led_cfg, &v4l2_sd_cfg); + + /* Create V4L2 Flash subdev. 
*/ + led->v4l2_flash = v4l2_flash_init(dev, sub_node, fled_cdev, NULL, + &v4l2_flash_ops, &v4l2_sd_cfg); + if (IS_ERR(led->v4l2_flash)) { + ret = PTR_ERR(led->v4l2_flash); + goto error_v4l2_flash_init; + } + + return 0; + +error_v4l2_flash_init: + led_classdev_flash_unregister(fled_cdev); +err_flash_register: + mutex_destroy(&led->lock); + + return ret; +} + +static int aat1290_led_remove(struct platform_device *pdev) +{ + struct aat1290_led *led = platform_get_drvdata(pdev); + + v4l2_flash_release(led->v4l2_flash); + led_classdev_flash_unregister(&led->fled_cdev); + cancel_work_sync(&led->work_brightness_set); + + mutex_destroy(&led->lock); + + return 0; +} + +static const struct of_device_id aat1290_led_dt_match[] = { + { .compatible = "skyworks,aat1290" }, + {}, +}; + +static struct platform_driver aat1290_led_driver = { + .probe = aat1290_led_probe, + .remove = aat1290_led_remove, + .driver = { + .name = "aat1290", + .of_match_table = aat1290_led_dt_match, + }, +}; + +module_platform_driver(aat1290_led_driver); + +MODULE_AUTHOR("Jacek Anaszewski <[email protected]>"); +MODULE_DESCRIPTION("Skyworks Current Regulator for Flash LEDs"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c new file mode 100644 index 000000000000..986fe1e28f84 --- /dev/null +++ b/drivers/leds/leds-bcm6328.c @@ -0,0 +1,413 @@ +/* + * Driver for BCM6328 memory-mapped LEDs, based on leds-syscon.c + * + * Copyright 2015 Álvaro Fernández Rojas <[email protected]> + * Copyright 2015 Jonas Gorski <[email protected]> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#include <linux/io.h> +#include <linux/leds.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> + +#define BCM6328_REG_INIT 0x00 +#define BCM6328_REG_MODE_HI 0x04 +#define BCM6328_REG_MODE_LO 0x08 +#define BCM6328_REG_HWDIS 0x0c +#define BCM6328_REG_STROBE 0x10 +#define BCM6328_REG_LNKACTSEL_HI 0x14 +#define BCM6328_REG_LNKACTSEL_LO 0x18 +#define BCM6328_REG_RBACK 0x1c +#define BCM6328_REG_SERMUX 0x20 + +#define BCM6328_LED_MAX_COUNT 24 +#define BCM6328_LED_DEF_DELAY 500 +#define BCM6328_LED_INTERVAL_MS 20 + +#define BCM6328_LED_INTV_MASK 0x3f +#define BCM6328_LED_FAST_INTV_SHIFT 6 +#define BCM6328_LED_FAST_INTV_MASK (BCM6328_LED_INTV_MASK << \ + BCM6328_LED_FAST_INTV_SHIFT) +#define BCM6328_SERIAL_LED_EN BIT(12) +#define BCM6328_SERIAL_LED_MUX BIT(13) +#define BCM6328_SERIAL_LED_CLK_NPOL BIT(14) +#define BCM6328_SERIAL_LED_DATA_PPOL BIT(15) +#define BCM6328_SERIAL_LED_SHIFT_DIR BIT(16) +#define BCM6328_LED_SHIFT_TEST BIT(30) +#define BCM6328_LED_TEST BIT(31) + +#define BCM6328_LED_MODE_MASK 3 +#define BCM6328_LED_MODE_OFF 0 +#define BCM6328_LED_MODE_FAST 1 +#define BCM6328_LED_MODE_BLINK 2 +#define BCM6328_LED_MODE_ON 3 +#define BCM6328_LED_SHIFT(X) ((X) << 1) + +/** + * struct bcm6328_led - state container for bcm6328 based LEDs + * @cdev: LED class device for this LED + * @mem: memory resource + * @lock: memory lock + * @pin: LED pin number + * @blink_leds: blinking LEDs + * @blink_delay: blinking delay + * @active_low: LED is active low + */ +struct bcm6328_led { + struct led_classdev cdev; + void __iomem *mem; + spinlock_t *lock; + unsigned long pin; + unsigned long *blink_leds; + unsigned long *blink_delay; + bool active_low; +}; + +static void bcm6328_led_write(void __iomem *reg, unsigned long data) +{ + iowrite32be(data, reg); +} + +static unsigned long bcm6328_led_read(void __iomem *reg) +{ + return ioread32be(reg); +} + +/** + * LEDMode 64 bits / 24 LEDs + * bits [31:0] -> LEDs 8-23 + * bits [47:32] -> LEDs 0-7 + * bits [63:48] -> unused + */ +static unsigned long bcm6328_pin2shift(unsigned long pin) +{ + if (pin < 8) + return pin + 16; /* LEDs 0-7 (bits 47:32) */ + else + return pin - 8; /* LEDs 8-23 (bits 31:0) */ +} + +static void bcm6328_led_mode(struct bcm6328_led *led, unsigned long value) +{ + void __iomem *mode; + unsigned long val, shift; + + shift = bcm6328_pin2shift(led->pin); + if (shift / 16) + mode = led->mem + BCM6328_REG_MODE_HI; + else + mode = led->mem + BCM6328_REG_MODE_LO; + + val = bcm6328_led_read(mode); + val &= ~(BCM6328_LED_MODE_MASK << BCM6328_LED_SHIFT(shift % 16)); + val |= (value << BCM6328_LED_SHIFT(shift % 16)); + bcm6328_led_write(mode, val); +} + +static void bcm6328_led_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct bcm6328_led *led = + container_of(led_cdev, struct bcm6328_led, cdev); + unsigned long flags; + + spin_lock_irqsave(led->lock, flags); + *(led->blink_leds) &= ~BIT(led->pin); + if ((led->active_low && value == LED_OFF) || + (!led->active_low && value != LED_OFF)) + bcm6328_led_mode(led, BCM6328_LED_MODE_OFF); + else + bcm6328_led_mode(led, BCM6328_LED_MODE_ON); + spin_unlock_irqrestore(led->lock, flags); +} + +static int bcm6328_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, unsigned long *delay_off) +{ + struct bcm6328_led *led = + container_of(led_cdev, struct bcm6328_led, cdev); + unsigned long delay, flags; + + if (!*delay_on) + *delay_on = BCM6328_LED_DEF_DELAY; + if (!*delay_off) + *delay_off = 
BCM6328_LED_DEF_DELAY; + + if (*delay_on != *delay_off) { + dev_dbg(led_cdev->dev, + "fallback to soft blinking (delay_on != delay_off)\n"); + return -EINVAL; + } + + delay = *delay_on / BCM6328_LED_INTERVAL_MS; + if (delay == 0) + delay = 1; + else if (delay > BCM6328_LED_INTV_MASK) { + dev_dbg(led_cdev->dev, + "fallback to soft blinking (delay > %ums)\n", + BCM6328_LED_INTV_MASK * BCM6328_LED_INTERVAL_MS); + return -EINVAL; + } + + spin_lock_irqsave(led->lock, flags); + if (*(led->blink_leds) == 0 || + *(led->blink_leds) == BIT(led->pin) || + *(led->blink_delay) == delay) { + unsigned long val; + + *(led->blink_leds) |= BIT(led->pin); + *(led->blink_delay) = delay; + + val = bcm6328_led_read(led->mem + BCM6328_REG_INIT); + val &= ~BCM6328_LED_FAST_INTV_MASK; + val |= (delay << BCM6328_LED_FAST_INTV_SHIFT); + bcm6328_led_write(led->mem + BCM6328_REG_INIT, val); + + bcm6328_led_mode(led, BCM6328_LED_MODE_BLINK); + + spin_unlock_irqrestore(led->lock, flags); + } else { + spin_unlock_irqrestore(led->lock, flags); + dev_dbg(led_cdev->dev, + "fallback to soft blinking (delay already set)\n"); + return -EINVAL; + } + + return 0; +} + +static int bcm6328_hwled(struct device *dev, struct device_node *nc, u32 reg, + void __iomem *mem, spinlock_t *lock) +{ + int i, cnt; + unsigned long flags, val; + + spin_lock_irqsave(lock, flags); + val = bcm6328_led_read(mem + BCM6328_REG_HWDIS); + val &= ~BIT(reg); + bcm6328_led_write(mem + BCM6328_REG_HWDIS, val); + spin_unlock_irqrestore(lock, flags); + + /* Only LEDs 0-7 can be activity/link controlled */ + if (reg >= 8) + return 0; + + cnt = of_property_count_elems_of_size(nc, "brcm,link-signal-sources", + sizeof(u32)); + for (i = 0; i < cnt; i++) { + u32 sel; + void __iomem *addr; + + if (reg < 4) + addr = mem + BCM6328_REG_LNKACTSEL_LO; + else + addr = mem + BCM6328_REG_LNKACTSEL_HI; + + of_property_read_u32_index(nc, "brcm,link-signal-sources", i, + &sel); + + if (reg / 4 != sel / 4) { + dev_warn(dev, "invalid link signal source\n"); + continue; + } + + spin_lock_irqsave(lock, flags); + val = bcm6328_led_read(addr); + val |= (BIT(reg) << (((sel % 4) * 4) + 16)); + bcm6328_led_write(addr, val); + spin_unlock_irqrestore(lock, flags); + } + + cnt = of_property_count_elems_of_size(nc, + "brcm,activity-signal-sources", + sizeof(u32)); + for (i = 0; i < cnt; i++) { + u32 sel; + void __iomem *addr; + + if (reg < 4) + addr = mem + BCM6328_REG_LNKACTSEL_LO; + else + addr = mem + BCM6328_REG_LNKACTSEL_HI; + + of_property_read_u32_index(nc, "brcm,activity-signal-sources", + i, &sel); + + if (reg / 4 != sel / 4) { + dev_warn(dev, "invalid activity signal source\n"); + continue; + } + + spin_lock_irqsave(lock, flags); + val = bcm6328_led_read(addr); + val |= (BIT(reg) << ((sel % 4) * 4)); + bcm6328_led_write(addr, val); + spin_unlock_irqrestore(lock, flags); + } + + return 0; +} + +static int bcm6328_led(struct device *dev, struct device_node *nc, u32 reg, + void __iomem *mem, spinlock_t *lock, + unsigned long *blink_leds, unsigned long *blink_delay) +{ + struct bcm6328_led *led; + unsigned long flags; + const char *state; + int rc; + + led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL); + if (!led) + return -ENOMEM; + + led->pin = reg; + led->mem = mem; + led->lock = lock; + led->blink_leds = blink_leds; + led->blink_delay = blink_delay; + + if (of_property_read_bool(nc, "active-low")) + led->active_low = true; + + led->cdev.name = of_get_property(nc, "label", NULL) ? 
: nc->name; + led->cdev.default_trigger = of_get_property(nc, + "linux,default-trigger", + NULL); + + if (!of_property_read_string(nc, "default-state", &state)) { + spin_lock_irqsave(lock, flags); + if (!strcmp(state, "on")) { + led->cdev.brightness = LED_FULL; + bcm6328_led_mode(led, BCM6328_LED_MODE_ON); + } else if (!strcmp(state, "keep")) { + void __iomem *mode; + unsigned long val, shift; + + shift = bcm6328_pin2shift(led->pin); + if (shift / 16) + mode = mem + BCM6328_REG_MODE_HI; + else + mode = mem + BCM6328_REG_MODE_LO; + + val = bcm6328_led_read(mode) >> (shift % 16); + val &= BCM6328_LED_MODE_MASK; + if (val == BCM6328_LED_MODE_ON) + led->cdev.brightness = LED_FULL; + else { + led->cdev.brightness = LED_OFF; + bcm6328_led_mode(led, BCM6328_LED_MODE_OFF); + } + } else { + led->cdev.brightness = LED_OFF; + bcm6328_led_mode(led, BCM6328_LED_MODE_OFF); + } + spin_unlock_irqrestore(lock, flags); + } + + led->cdev.brightness_set = bcm6328_led_set; + led->cdev.blink_set = bcm6328_blink_set; + + rc = led_classdev_register(dev, &led->cdev); + if (rc < 0) + return rc; + + dev_dbg(dev, "registered LED %s\n", led->cdev.name); + + return 0; +} + +static int bcm6328_leds_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = pdev->dev.of_node; + struct device_node *child; + struct resource *mem_r; + void __iomem *mem; + spinlock_t *lock; + unsigned long val, *blink_leds, *blink_delay; + + mem_r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem_r) + return -EINVAL; + + mem = devm_ioremap_resource(dev, mem_r); + if (IS_ERR(mem)) + return PTR_ERR(mem); + + lock = devm_kzalloc(dev, sizeof(*lock), GFP_KERNEL); + if (!lock) + return -ENOMEM; + + blink_leds = devm_kzalloc(dev, sizeof(*blink_leds), GFP_KERNEL); + if (!blink_leds) + return -ENOMEM; + + blink_delay = devm_kzalloc(dev, sizeof(*blink_delay), GFP_KERNEL); + if (!blink_delay) + return -ENOMEM; + + spin_lock_init(lock); + + bcm6328_led_write(mem + BCM6328_REG_HWDIS, ~0); + bcm6328_led_write(mem + BCM6328_REG_LNKACTSEL_HI, 0); + bcm6328_led_write(mem + BCM6328_REG_LNKACTSEL_LO, 0); + + val = bcm6328_led_read(mem + BCM6328_REG_INIT); + val &= ~BCM6328_SERIAL_LED_EN; + if (of_property_read_bool(np, "brcm,serial-leds")) + val |= BCM6328_SERIAL_LED_EN; + bcm6328_led_write(mem + BCM6328_REG_INIT, val); + + for_each_available_child_of_node(np, child) { + int rc; + u32 reg; + + if (of_property_read_u32(child, "reg", ®)) + continue; + + if (reg >= BCM6328_LED_MAX_COUNT) { + dev_err(dev, "invalid LED (>= %d)\n", + BCM6328_LED_MAX_COUNT); + continue; + } + + if (of_property_read_bool(child, "brcm,hardware-controlled")) + rc = bcm6328_hwled(dev, child, reg, mem, lock); + else + rc = bcm6328_led(dev, child, reg, mem, lock, + blink_leds, blink_delay); + + if (rc < 0) + return rc; + } + + return 0; +} + +static const struct of_device_id bcm6328_leds_of_match[] = { + { .compatible = "brcm,bcm6328-leds", }, + { }, +}; + +static struct platform_driver bcm6328_leds_driver = { + .probe = bcm6328_leds_probe, + .driver = { + .name = "leds-bcm6328", + .of_match_table = bcm6328_leds_of_match, + }, +}; + +module_platform_driver(bcm6328_leds_driver); + +MODULE_AUTHOR("Álvaro Fernández Rojas <[email protected]>"); +MODULE_AUTHOR("Jonas Gorski <[email protected]>"); +MODULE_DESCRIPTION("LED driver for BCM6328 controllers"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:leds-bcm6328"); diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c new file mode 100644 index 000000000000..21f96930b3be 
--- /dev/null +++ b/drivers/leds/leds-bcm6358.c @@ -0,0 +1,243 @@ +/* + * Driver for BCM6358 memory-mapped LEDs, based on leds-syscon.c + * + * Copyright 2015 Álvaro Fernández Rojas <[email protected]> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/leds.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> + +#define BCM6358_REG_MODE 0x0 +#define BCM6358_REG_CTRL 0x4 + +#define BCM6358_SLED_CLKDIV_MASK 3 +#define BCM6358_SLED_CLKDIV_1 0 +#define BCM6358_SLED_CLKDIV_2 1 +#define BCM6358_SLED_CLKDIV_4 2 +#define BCM6358_SLED_CLKDIV_8 3 + +#define BCM6358_SLED_POLARITY BIT(2) +#define BCM6358_SLED_BUSY BIT(3) + +#define BCM6358_SLED_MAX_COUNT 32 +#define BCM6358_SLED_WAIT 100 + +/** + * struct bcm6358_led - state container for bcm6358 based LEDs + * @cdev: LED class device for this LED + * @mem: memory resource + * @lock: memory lock + * @pin: LED pin number + * @active_low: LED is active low + */ +struct bcm6358_led { + struct led_classdev cdev; + void __iomem *mem; + spinlock_t *lock; + unsigned long pin; + bool active_low; +}; + +static void bcm6358_led_write(void __iomem *reg, unsigned long data) +{ + iowrite32be(data, reg); +} + +static unsigned long bcm6358_led_read(void __iomem *reg) +{ + return ioread32be(reg); +} + +static unsigned long bcm6358_led_busy(void __iomem *mem) +{ + unsigned long val; + + while ((val = bcm6358_led_read(mem + BCM6358_REG_CTRL)) & + BCM6358_SLED_BUSY) + udelay(BCM6358_SLED_WAIT); + + return val; +} + +static void bcm6358_led_mode(struct bcm6358_led *led, unsigned long value) +{ + unsigned long val; + + bcm6358_led_busy(led->mem); + + val = bcm6358_led_read(led->mem + BCM6358_REG_MODE); + if ((led->active_low && value == LED_OFF) || + (!led->active_low && value != LED_OFF)) + val |= BIT(led->pin); + else + val &= ~(BIT(led->pin)); + bcm6358_led_write(led->mem + BCM6358_REG_MODE, val); +} + +static void bcm6358_led_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct bcm6358_led *led = + container_of(led_cdev, struct bcm6358_led, cdev); + unsigned long flags; + + spin_lock_irqsave(led->lock, flags); + bcm6358_led_mode(led, value); + spin_unlock_irqrestore(led->lock, flags); +} + +static int bcm6358_led(struct device *dev, struct device_node *nc, u32 reg, + void __iomem *mem, spinlock_t *lock) +{ + struct bcm6358_led *led; + unsigned long flags; + const char *state; + int rc; + + led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL); + if (!led) + return -ENOMEM; + + led->pin = reg; + led->mem = mem; + led->lock = lock; + + if (of_property_read_bool(nc, "active-low")) + led->active_low = true; + + led->cdev.name = of_get_property(nc, "label", NULL) ? 
: nc->name; + led->cdev.default_trigger = of_get_property(nc, + "linux,default-trigger", + NULL); + + spin_lock_irqsave(lock, flags); + if (!of_property_read_string(nc, "default-state", &state)) { + if (!strcmp(state, "on")) { + led->cdev.brightness = LED_FULL; + } else if (!strcmp(state, "keep")) { + unsigned long val; + + bcm6358_led_busy(led->mem); + + val = bcm6358_led_read(led->mem + BCM6358_REG_MODE); + val &= BIT(led->pin); + if ((led->active_low && !val) || + (!led->active_low && val)) + led->cdev.brightness = LED_FULL; + else + led->cdev.brightness = LED_OFF; + } else { + led->cdev.brightness = LED_OFF; + } + } else { + led->cdev.brightness = LED_OFF; + } + bcm6358_led_mode(led, led->cdev.brightness); + spin_unlock_irqrestore(lock, flags); + + led->cdev.brightness_set = bcm6358_led_set; + + rc = led_classdev_register(dev, &led->cdev); + if (rc < 0) + return rc; + + dev_dbg(dev, "registered LED %s\n", led->cdev.name); + + return 0; +} + +static int bcm6358_leds_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = pdev->dev.of_node; + struct device_node *child; + struct resource *mem_r; + void __iomem *mem; + spinlock_t *lock; /* memory lock */ + unsigned long val; + u32 clk_div; + + mem_r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem_r) + return -EINVAL; + + mem = devm_ioremap_resource(dev, mem_r); + if (IS_ERR(mem)) + return PTR_ERR(mem); + + lock = devm_kzalloc(dev, sizeof(*lock), GFP_KERNEL); + if (!lock) + return -ENOMEM; + + spin_lock_init(lock); + + val = bcm6358_led_busy(mem); + val &= ~(BCM6358_SLED_POLARITY | BCM6358_SLED_CLKDIV_MASK); + if (of_property_read_bool(np, "brcm,clk-dat-low")) + val |= BCM6358_SLED_POLARITY; + of_property_read_u32(np, "brcm,clk-div", &clk_div); + switch (clk_div) { + case 8: + val |= BCM6358_SLED_CLKDIV_8; + break; + case 4: + val |= BCM6358_SLED_CLKDIV_4; + break; + case 2: + val |= BCM6358_SLED_CLKDIV_2; + break; + default: + val |= BCM6358_SLED_CLKDIV_1; + break; + } + bcm6358_led_write(mem + BCM6358_REG_CTRL, val); + + for_each_available_child_of_node(np, child) { + int rc; + u32 reg; + + if (of_property_read_u32(child, "reg", ®)) + continue; + + if (reg >= BCM6358_SLED_MAX_COUNT) { + dev_err(dev, "invalid LED (%u >= %d)\n", reg, + BCM6358_SLED_MAX_COUNT); + continue; + } + + rc = bcm6358_led(dev, child, reg, mem, lock); + if (rc < 0) + return rc; + } + + return 0; +} + +static const struct of_device_id bcm6358_leds_of_match[] = { + { .compatible = "brcm,bcm6358-leds", }, + { }, +}; + +static struct platform_driver bcm6358_leds_driver = { + .probe = bcm6358_leds_probe, + .driver = { + .name = "leds-bcm6358", + .of_match_table = bcm6358_leds_of_match, + }, +}; + +module_platform_driver(bcm6358_leds_driver); + +MODULE_AUTHOR("Álvaro Fernández Rojas <[email protected]>"); +MODULE_DESCRIPTION("LED driver for BCM6358 controllers"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:leds-bcm6358"); diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c index 06dbe18a2065..b316df4a8c1e 100644 --- a/drivers/leds/leds-cobalt-raq.c +++ b/drivers/leds/leds-cobalt-raq.c @@ -108,20 +108,8 @@ err_null: return retval; } -static int cobalt_raq_led_remove(struct platform_device *pdev) -{ - led_classdev_unregister(&raq_power_off_led); - led_classdev_unregister(&raq_web_led); - - if (led_port) - led_port = NULL; - - return 0; -} - static struct platform_driver cobalt_raq_led_driver = { .probe = cobalt_raq_led_probe, - .remove = cobalt_raq_led_remove, .driver = { .name = 
"cobalt-raq-leds", }, @@ -131,5 +119,4 @@ static int __init cobalt_raq_led_init(void) { return platform_driver_register(&cobalt_raq_led_driver); } - -module_init(cobalt_raq_led_init); +device_initcall(cobalt_raq_led_init); diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index 15eb3f86f670..af1876a3a77c 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -16,6 +16,7 @@ #include <linux/kernel.h> #include <linux/leds.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/slab.h> @@ -191,15 +192,17 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev) goto err; } - np = of_node(child); + np = to_of_node(child); if (fwnode_property_present(child, "label")) { fwnode_property_read_string(child, "label", &led.name); } else { if (IS_ENABLED(CONFIG_OF) && !led.name && np) led.name = np->name; - if (!led.name) - return ERR_PTR(-EINVAL); + if (!led.name) { + ret = -EINVAL; + goto err; + } } fwnode_property_read_string(child, "linux,default-trigger", &led.default_trigger); @@ -217,18 +220,19 @@ static struct gpio_leds_priv *gpio_leds_create(struct platform_device *pdev) if (fwnode_property_present(child, "retain-state-suspended")) led.retain_state_suspended = 1; - ret = create_gpio_led(&led, &priv->leds[priv->num_leds++], + ret = create_gpio_led(&led, &priv->leds[priv->num_leds], dev, NULL); if (ret < 0) { fwnode_handle_put(child); goto err; } + priv->num_leds++; } return priv; err: - for (count = priv->num_leds - 2; count >= 0; count--) + for (count = priv->num_leds - 1; count >= 0; count--) delete_gpio_led(&priv->leds[count]); return ERR_PTR(ret); } diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c new file mode 100644 index 000000000000..2ae8c4d17ff8 --- /dev/null +++ b/drivers/leds/leds-ktd2692.c @@ -0,0 +1,443 @@ +/* + * LED driver : leds-ktd2692.c + * + * Copyright (C) 2015 Samsung Electronics + * Ingi Kim <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio/consumer.h> +#include <linux/led-class-flash.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/workqueue.h> + +/* Value related the movie mode */ +#define KTD2692_MOVIE_MODE_CURRENT_LEVELS 16 +#define KTD2692_MM_TO_FL_RATIO(x) ((x) / 3) +#define KTD2962_MM_MIN_CURR_THRESHOLD_SCALE 8 + +/* Value related the flash mode */ +#define KTD2692_FLASH_MODE_TIMEOUT_LEVELS 8 +#define KTD2692_FLASH_MODE_TIMEOUT_DISABLE 0 +#define KTD2692_FLASH_MODE_CURR_PERCENT(x) (((x) * 16) / 100) + +/* Macro for getting offset of flash timeout */ +#define GET_TIMEOUT_OFFSET(timeout, step) ((timeout) / (step)) + +/* Base register address */ +#define KTD2692_REG_LVP_BASE 0x00 +#define KTD2692_REG_FLASH_TIMEOUT_BASE 0x20 +#define KTD2692_REG_MM_MIN_CURR_THRESHOLD_BASE 0x40 +#define KTD2692_REG_MOVIE_CURRENT_BASE 0x60 +#define KTD2692_REG_FLASH_CURRENT_BASE 0x80 +#define KTD2692_REG_MODE_BASE 0xA0 + +/* Set bit coding time for expresswire interface */ +#define KTD2692_TIME_RESET_US 700 +#define KTD2692_TIME_DATA_START_TIME_US 10 +#define KTD2692_TIME_HIGH_END_OF_DATA_US 350 +#define KTD2692_TIME_LOW_END_OF_DATA_US 10 +#define KTD2692_TIME_SHORT_BITSET_US 4 +#define KTD2692_TIME_LONG_BITSET_US 12 + +/* KTD2692 default length of name */ +#define KTD2692_NAME_LENGTH 20 + +enum ktd2692_bitset { + KTD2692_LOW = 0, + KTD2692_HIGH, +}; + +/* Movie / Flash Mode Control */ +enum ktd2692_led_mode { + KTD2692_MODE_DISABLE = 0, /* default */ + KTD2692_MODE_MOVIE, + KTD2692_MODE_FLASH, +}; + +struct ktd2692_led_config_data { + /* maximum LED current in movie mode */ + u32 movie_max_microamp; + /* maximum LED current in flash mode */ + u32 flash_max_microamp; + /* maximum flash timeout */ + u32 flash_max_timeout; + /* max LED brightness level */ + enum led_brightness max_brightness; +}; + +struct ktd2692_context { + /* Related LED Flash class device */ + struct led_classdev_flash fled_cdev; + + /* secures access to the device */ + struct mutex lock; + struct regulator *regulator; + struct work_struct work_brightness_set; + + struct gpio_desc *aux_gpio; + struct gpio_desc *ctrl_gpio; + + enum ktd2692_led_mode mode; + enum led_brightness torch_brightness; +}; + +static struct ktd2692_context *fled_cdev_to_led( + struct led_classdev_flash *fled_cdev) +{ + return container_of(fled_cdev, struct ktd2692_context, fled_cdev); +} + +static void ktd2692_expresswire_start(struct ktd2692_context *led) +{ + gpiod_direction_output(led->ctrl_gpio, KTD2692_HIGH); + udelay(KTD2692_TIME_DATA_START_TIME_US); +} + +static void ktd2692_expresswire_reset(struct ktd2692_context *led) +{ + gpiod_direction_output(led->ctrl_gpio, KTD2692_LOW); + udelay(KTD2692_TIME_RESET_US); +} + +static void ktd2692_expresswire_end(struct ktd2692_context *led) +{ + gpiod_direction_output(led->ctrl_gpio, KTD2692_LOW); + udelay(KTD2692_TIME_LOW_END_OF_DATA_US); + gpiod_direction_output(led->ctrl_gpio, KTD2692_HIGH); + udelay(KTD2692_TIME_HIGH_END_OF_DATA_US); +} + +static void ktd2692_expresswire_set_bit(struct ktd2692_context *led, bool bit) +{ + /* + * The Low Bit(0) and High Bit(1) is based on a time detection + * algorithm between time low and time high + * Time_(L_LB) : Low time of the Low Bit(0) + * Time_(H_LB) : High time of the LOW Bit(0) + * Time_(L_HB) : Low time of the High Bit(1) + * Time_(H_HB) : High time of the High Bit(1) + * + * It can be simplified to: + * Low 
Bit(0) : 2 * Time_(H_LB) < Time_(L_LB) + * High Bit(1) : 2 * Time_(L_HB) < Time_(H_HB) + * HIGH ___ ____ _.. _________ ___ + * |_________| |_.. |____| |__| + * LOW <L_LB> <H_LB> <L_HB> <H_HB> + * [ Low Bit (0) ] [ High Bit(1) ] + */ + if (bit) { + gpiod_direction_output(led->ctrl_gpio, KTD2692_LOW); + udelay(KTD2692_TIME_SHORT_BITSET_US); + gpiod_direction_output(led->ctrl_gpio, KTD2692_HIGH); + udelay(KTD2692_TIME_LONG_BITSET_US); + } else { + gpiod_direction_output(led->ctrl_gpio, KTD2692_LOW); + udelay(KTD2692_TIME_LONG_BITSET_US); + gpiod_direction_output(led->ctrl_gpio, KTD2692_HIGH); + udelay(KTD2692_TIME_SHORT_BITSET_US); + } +} + +static void ktd2692_expresswire_write(struct ktd2692_context *led, u8 value) +{ + int i; + + ktd2692_expresswire_start(led); + for (i = 7; i >= 0; i--) + ktd2692_expresswire_set_bit(led, value & BIT(i)); + ktd2692_expresswire_end(led); +} + +static void ktd2692_brightness_set(struct ktd2692_context *led, + enum led_brightness brightness) +{ + mutex_lock(&led->lock); + + if (brightness == LED_OFF) { + led->mode = KTD2692_MODE_DISABLE; + gpiod_direction_output(led->aux_gpio, KTD2692_LOW); + } else { + ktd2692_expresswire_write(led, brightness | + KTD2692_REG_MOVIE_CURRENT_BASE); + led->mode = KTD2692_MODE_MOVIE; + } + + ktd2692_expresswire_write(led, led->mode | KTD2692_REG_MODE_BASE); + mutex_unlock(&led->lock); +} + +static void ktd2692_brightness_set_work(struct work_struct *work) +{ + struct ktd2692_context *led = + container_of(work, struct ktd2692_context, work_brightness_set); + + ktd2692_brightness_set(led, led->torch_brightness); +} + +static void ktd2692_led_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); + struct ktd2692_context *led = fled_cdev_to_led(fled_cdev); + + led->torch_brightness = brightness; + schedule_work(&led->work_brightness_set); +} + +static int ktd2692_led_brightness_set_sync(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); + struct ktd2692_context *led = fled_cdev_to_led(fled_cdev); + + ktd2692_brightness_set(led, brightness); + + return 0; +} + +static int ktd2692_led_flash_strobe_set(struct led_classdev_flash *fled_cdev, + bool state) +{ + struct ktd2692_context *led = fled_cdev_to_led(fled_cdev); + struct led_flash_setting *timeout = &fled_cdev->timeout; + u32 flash_tm_reg; + + mutex_lock(&led->lock); + + if (state) { + flash_tm_reg = GET_TIMEOUT_OFFSET(timeout->val, timeout->step); + ktd2692_expresswire_write(led, flash_tm_reg + | KTD2692_REG_FLASH_TIMEOUT_BASE); + + led->mode = KTD2692_MODE_FLASH; + gpiod_direction_output(led->aux_gpio, KTD2692_HIGH); + } else { + led->mode = KTD2692_MODE_DISABLE; + gpiod_direction_output(led->aux_gpio, KTD2692_LOW); + } + + ktd2692_expresswire_write(led, led->mode | KTD2692_REG_MODE_BASE); + + fled_cdev->led_cdev.brightness = LED_OFF; + led->mode = KTD2692_MODE_DISABLE; + + mutex_unlock(&led->lock); + + return 0; +} + +static int ktd2692_led_flash_timeout_set(struct led_classdev_flash *fled_cdev, + u32 timeout) +{ + return 0; +} + +static void ktd2692_init_movie_current_max(struct ktd2692_led_config_data *cfg) +{ + u32 offset, step; + u32 movie_current_microamp; + + offset = KTD2692_MOVIE_MODE_CURRENT_LEVELS; + step = KTD2692_MM_TO_FL_RATIO(cfg->flash_max_microamp) + / KTD2692_MOVIE_MODE_CURRENT_LEVELS; + + do { + movie_current_microamp = step * offset; + offset--; + } while ((movie_current_microamp > 
cfg->movie_max_microamp) && + (offset > 0)); + + cfg->max_brightness = offset; +} + +static void ktd2692_init_flash_timeout(struct led_classdev_flash *fled_cdev, + struct ktd2692_led_config_data *cfg) +{ + struct led_flash_setting *setting; + + setting = &fled_cdev->timeout; + setting->min = KTD2692_FLASH_MODE_TIMEOUT_DISABLE; + setting->max = cfg->flash_max_timeout; + setting->step = cfg->flash_max_timeout + / (KTD2692_FLASH_MODE_TIMEOUT_LEVELS - 1); + setting->val = cfg->flash_max_timeout; +} + +static void ktd2692_setup(struct ktd2692_context *led) +{ + led->mode = KTD2692_MODE_DISABLE; + ktd2692_expresswire_reset(led); + gpiod_direction_output(led->aux_gpio, KTD2692_LOW); + + ktd2692_expresswire_write(led, (KTD2962_MM_MIN_CURR_THRESHOLD_SCALE - 1) + | KTD2692_REG_MM_MIN_CURR_THRESHOLD_BASE); + ktd2692_expresswire_write(led, KTD2692_FLASH_MODE_CURR_PERCENT(45) + | KTD2692_REG_FLASH_CURRENT_BASE); +} + +static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev, + struct ktd2692_led_config_data *cfg) +{ + struct device_node *np = dev->of_node; + struct device_node *child_node; + int ret; + + if (!dev->of_node) + return -ENXIO; + + led->ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS); + if (IS_ERR(led->ctrl_gpio)) { + ret = PTR_ERR(led->ctrl_gpio); + dev_err(dev, "cannot get ctrl-gpios %d\n", ret); + return ret; + } + + led->aux_gpio = devm_gpiod_get(dev, "aux", GPIOD_ASIS); + if (IS_ERR(led->aux_gpio)) { + ret = PTR_ERR(led->aux_gpio); + dev_err(dev, "cannot get aux-gpios %d\n", ret); + return ret; + } + + led->regulator = devm_regulator_get(dev, "vin"); + if (IS_ERR(led->regulator)) + led->regulator = NULL; + + if (led->regulator) { + ret = regulator_enable(led->regulator); + if (ret) + dev_err(dev, "Failed to enable supply: %d\n", ret); + } + + child_node = of_get_next_available_child(np, NULL); + if (!child_node) { + dev_err(dev, "No DT child node found for connected LED.\n"); + return -EINVAL; + } + + led->fled_cdev.led_cdev.name = + of_get_property(child_node, "label", NULL) ? 
: child_node->name; + + ret = of_property_read_u32(child_node, "led-max-microamp", + &cfg->movie_max_microamp); + if (ret) { + dev_err(dev, "failed to parse led-max-microamp\n"); + return ret; + } + + ret = of_property_read_u32(child_node, "flash-max-microamp", + &cfg->flash_max_microamp); + if (ret) { + dev_err(dev, "failed to parse flash-max-microamp\n"); + return ret; + } + + ret = of_property_read_u32(child_node, "flash-max-timeout-us", + &cfg->flash_max_timeout); + if (ret) + dev_err(dev, "failed to parse flash-max-timeout-us\n"); + + of_node_put(child_node); + return ret; +} + +static const struct led_flash_ops flash_ops = { + .strobe_set = ktd2692_led_flash_strobe_set, + .timeout_set = ktd2692_led_flash_timeout_set, +}; + +static int ktd2692_probe(struct platform_device *pdev) +{ + struct ktd2692_context *led; + struct led_classdev *led_cdev; + struct led_classdev_flash *fled_cdev; + struct ktd2692_led_config_data led_cfg; + int ret; + + led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL); + if (!led) + return -ENOMEM; + + fled_cdev = &led->fled_cdev; + led_cdev = &fled_cdev->led_cdev; + + ret = ktd2692_parse_dt(led, &pdev->dev, &led_cfg); + if (ret) + return ret; + + ktd2692_init_flash_timeout(fled_cdev, &led_cfg); + ktd2692_init_movie_current_max(&led_cfg); + + fled_cdev->ops = &flash_ops; + + led_cdev->max_brightness = led_cfg.max_brightness; + led_cdev->brightness_set = ktd2692_led_brightness_set; + led_cdev->brightness_set_sync = ktd2692_led_brightness_set_sync; + led_cdev->flags |= LED_CORE_SUSPENDRESUME | LED_DEV_CAP_FLASH; + + mutex_init(&led->lock); + INIT_WORK(&led->work_brightness_set, ktd2692_brightness_set_work); + + platform_set_drvdata(pdev, led); + + ret = led_classdev_flash_register(&pdev->dev, fled_cdev); + if (ret) { + dev_err(&pdev->dev, "can't register LED %s\n", led_cdev->name); + mutex_destroy(&led->lock); + return ret; + } + + ktd2692_setup(led); + + return 0; +} + +static int ktd2692_remove(struct platform_device *pdev) +{ + struct ktd2692_context *led = platform_get_drvdata(pdev); + int ret; + + led_classdev_flash_unregister(&led->fled_cdev); + cancel_work_sync(&led->work_brightness_set); + + if (led->regulator) { + ret = regulator_disable(led->regulator); + if (ret) + dev_err(&pdev->dev, + "Failed to disable supply: %d\n", ret); + } + + mutex_destroy(&led->lock); + + return 0; +} + +static const struct of_device_id ktd2692_match[] = { + { .compatible = "kinetic,ktd2692", }, + { /* sentinel */ }, +}; + +static struct platform_driver ktd2692_driver = { + .driver = { + .name = "ktd2692", + .of_match_table = ktd2692_match, + }, + .probe = ktd2692_probe, + .remove = ktd2692_remove, +}; + +module_platform_driver(ktd2692_driver); + +MODULE_AUTHOR("Ingi Kim <[email protected]>"); +MODULE_DESCRIPTION("Kinetic KTD2692 LED driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c index 9e1716f8098c..584dbbcec659 100644 --- a/drivers/leds/leds-lp5523.c +++ b/drivers/leds/leds-lp5523.c @@ -50,6 +50,7 @@ #define LP5523_REG_OP_MODE 0x01 #define LP5523_REG_ENABLE_LEDS_MSB 0x04 #define LP5523_REG_ENABLE_LEDS_LSB 0x05 +#define LP5523_REG_LED_CTRL_BASE 0x06 #define LP5523_REG_LED_PWM_BASE 0x16 #define LP5523_REG_LED_CURRENT_BASE 0x26 #define LP5523_REG_CONFIG 0x36 @@ -57,6 +58,7 @@ #define LP5523_REG_RESET 0x3D #define LP5523_REG_LED_TEST_CTRL 0x41 #define LP5523_REG_LED_TEST_ADC 0x42 +#define LP5523_REG_MASTER_FADER_BASE 0x48 #define LP5523_REG_CH1_PROG_START 0x4C #define LP5523_REG_CH2_PROG_START 0x4D #define 
LP5523_REG_CH3_PROG_START 0x4E @@ -78,6 +80,9 @@ #define LP5523_EXT_CLK_USED 0x08 #define LP5523_ENG_STATUS_MASK 0x07 +#define LP5523_FADER_MAPPING_MASK 0xC0 +#define LP5523_FADER_MAPPING_SHIFT 6 + /* Memory Page Selection */ #define LP5523_PAGE_ENG1 0 #define LP5523_PAGE_ENG2 1 @@ -666,6 +671,137 @@ release_lock: return pos; } +#define show_fader(nr) \ +static ssize_t show_master_fader##nr(struct device *dev, \ + struct device_attribute *attr, \ + char *buf) \ +{ \ + return show_master_fader(dev, attr, buf, nr); \ +} + +#define store_fader(nr) \ +static ssize_t store_master_fader##nr(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t len) \ +{ \ + return store_master_fader(dev, attr, buf, len, nr); \ +} + +static ssize_t show_master_fader(struct device *dev, + struct device_attribute *attr, + char *buf, int nr) +{ + struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); + struct lp55xx_chip *chip = led->chip; + int ret; + u8 val; + + mutex_lock(&chip->lock); + ret = lp55xx_read(chip, LP5523_REG_MASTER_FADER_BASE + nr - 1, &val); + mutex_unlock(&chip->lock); + + if (ret == 0) + ret = sprintf(buf, "%u\n", val); + + return ret; +} +show_fader(1) +show_fader(2) +show_fader(3) + +static ssize_t store_master_fader(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len, int nr) +{ + struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); + struct lp55xx_chip *chip = led->chip; + int ret; + unsigned long val; + + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + if (val > 0xff) + return -EINVAL; + + mutex_lock(&chip->lock); + ret = lp55xx_write(chip, LP5523_REG_MASTER_FADER_BASE + nr - 1, + (u8)val); + mutex_unlock(&chip->lock); + + if (ret == 0) + ret = len; + + return ret; +} +store_fader(1) +store_fader(2) +store_fader(3) + +static ssize_t show_master_fader_leds(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); + struct lp55xx_chip *chip = led->chip; + int i, ret, pos = 0; + u8 val; + + mutex_lock(&chip->lock); + + for (i = 0; i < LP5523_MAX_LEDS; i++) { + ret = lp55xx_read(chip, LP5523_REG_LED_CTRL_BASE + i, &val); + if (ret) + goto leave; + + val = (val & LP5523_FADER_MAPPING_MASK) + >> LP5523_FADER_MAPPING_SHIFT; + if (val > 3) { + ret = -EINVAL; + goto leave; + } + buf[pos++] = val + '0'; + } + buf[pos++] = '\n'; + ret = pos; +leave: + mutex_unlock(&chip->lock); + return ret; +} + +static ssize_t store_master_fader_leds(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct lp55xx_led *led = i2c_get_clientdata(to_i2c_client(dev)); + struct lp55xx_chip *chip = led->chip; + int i, n, ret; + u8 val; + + n = min_t(int, len, LP5523_MAX_LEDS); + + mutex_lock(&chip->lock); + + for (i = 0; i < n; i++) { + if (buf[i] >= '0' && buf[i] <= '3') { + val = (buf[i] - '0') << LP5523_FADER_MAPPING_SHIFT; + ret = lp55xx_update_bits(chip, + LP5523_REG_LED_CTRL_BASE + i, + LP5523_FADER_MAPPING_MASK, + val); + if (ret) + goto leave; + } else { + ret = -EINVAL; + goto leave; + } + } + ret = len; +leave: + mutex_unlock(&chip->lock); + return ret; +} + static void lp5523_led_brightness_work(struct work_struct *work) { struct lp55xx_led *led = container_of(work, struct lp55xx_led, @@ -688,6 +824,14 @@ static LP55XX_DEV_ATTR_WO(engine1_load, store_engine1_load); static LP55XX_DEV_ATTR_WO(engine2_load, store_engine2_load); static LP55XX_DEV_ATTR_WO(engine3_load, store_engine3_load); static 
LP55XX_DEV_ATTR_RO(selftest, lp5523_selftest); +static LP55XX_DEV_ATTR_RW(master_fader1, show_master_fader1, + store_master_fader1); +static LP55XX_DEV_ATTR_RW(master_fader2, show_master_fader2, + store_master_fader2); +static LP55XX_DEV_ATTR_RW(master_fader3, show_master_fader3, + store_master_fader3); +static LP55XX_DEV_ATTR_RW(master_fader_leds, show_master_fader_leds, + store_master_fader_leds); static struct attribute *lp5523_attributes[] = { &dev_attr_engine1_mode.attr, @@ -700,6 +844,10 @@ static struct attribute *lp5523_attributes[] = { &dev_attr_engine2_leds.attr, &dev_attr_engine3_leds.attr, &dev_attr_selftest.attr, + &dev_attr_master_fader1.attr, + &dev_attr_master_fader2.attr, + &dev_attr_master_fader3.attr, + &dev_attr_master_fader_leds.attr, NULL, }; diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c index 77c26bc32eed..96d51e9879c9 100644 --- a/drivers/leds/leds-lp55xx-common.c +++ b/drivers/leds/leds-lp55xx-common.c @@ -223,7 +223,7 @@ static int lp55xx_request_firmware(struct lp55xx_chip *chip) const char *name = chip->cl->name; struct device *dev = &chip->cl->dev; - return request_firmware_nowait(THIS_MODULE, true, name, dev, + return request_firmware_nowait(THIS_MODULE, false, name, dev, GFP_KERNEL, chip, lp55xx_firmware_loaded); } diff --git a/drivers/leds/leds-max77693.c b/drivers/leds/leds-max77693.c new file mode 100644 index 000000000000..b8b0eec7b540 --- /dev/null +++ b/drivers/leds/leds-max77693.c @@ -0,0 +1,1097 @@ +/* + * LED Flash class driver for the flash cell of max77693 mfd. + * + * Copyright (C) 2015, Samsung Electronics Co., Ltd. + * + * Authors: Jacek Anaszewski <[email protected]> + * Andrzej Hajda <[email protected]> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. 
+ */ + +#include <linux/led-class-flash.h> +#include <linux/mfd/max77693.h> +#include <linux/mfd/max77693-private.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/workqueue.h> +#include <media/v4l2-flash-led-class.h> + +#define MODE_OFF 0 +#define MODE_FLASH(a) (1 << (a)) +#define MODE_TORCH(a) (1 << (2 + (a))) +#define MODE_FLASH_EXTERNAL(a) (1 << (4 + (a))) + +#define MODE_FLASH_MASK (MODE_FLASH(FLED1) | MODE_FLASH(FLED2) | \ + MODE_FLASH_EXTERNAL(FLED1) | \ + MODE_FLASH_EXTERNAL(FLED2)) +#define MODE_TORCH_MASK (MODE_TORCH(FLED1) | MODE_TORCH(FLED2)) + +#define FLED1_IOUT (1 << 0) +#define FLED2_IOUT (1 << 1) + +enum max77693_fled { + FLED1, + FLED2, +}; + +enum max77693_led_mode { + FLASH, + TORCH, +}; + +struct max77693_led_config_data { + const char *label[2]; + u32 iout_torch_max[2]; + u32 iout_flash_max[2]; + u32 flash_timeout_max[2]; + u32 num_leds; + u32 boost_mode; + u32 boost_vout; + u32 low_vsys; +}; + +struct max77693_sub_led { + /* corresponding FLED output identifier */ + int fled_id; + /* corresponding LED Flash class device */ + struct led_classdev_flash fled_cdev; + /* assures led-triggers compatibility */ + struct work_struct work_brightness_set; + /* V4L2 Flash device */ + struct v4l2_flash *v4l2_flash; + + /* brightness cache */ + unsigned int torch_brightness; + /* flash timeout cache */ + unsigned int flash_timeout; + /* flash faults that may have occurred */ + u32 flash_faults; +}; + +struct max77693_led_device { + /* parent mfd regmap */ + struct regmap *regmap; + /* platform device data */ + struct platform_device *pdev; + /* secures access to the device */ + struct mutex lock; + + /* sub led data */ + struct max77693_sub_led sub_leds[2]; + + /* maximum torch current values for FLED outputs */ + u32 iout_torch_max[2]; + /* maximum flash current values for FLED outputs */ + u32 iout_flash_max[2]; + + /* current flash timeout cache */ + unsigned int current_flash_timeout; + /* ITORCH register cache */ + u8 torch_iout_reg; + /* mode of fled outputs */ + unsigned int mode_flags; + /* recently strobed fled */ + int strobing_sub_led_id; + /* bitmask of FLED outputs use state (bit 0. - FLED1, bit 1. - FLED2) */ + u8 fled_mask; + /* FLED modes that can be set */ + u8 allowed_modes; + + /* arrangement of current outputs */ + bool iout_joint; +}; + +static u8 max77693_led_iout_to_reg(u32 ua) +{ + if (ua < FLASH_IOUT_MIN) + ua = FLASH_IOUT_MIN; + return (ua - FLASH_IOUT_MIN) / FLASH_IOUT_STEP; +} + +static u8 max77693_flash_timeout_to_reg(u32 us) +{ + return (us - FLASH_TIMEOUT_MIN) / FLASH_TIMEOUT_STEP; +} + +static inline struct max77693_sub_led *flcdev_to_sub_led( + struct led_classdev_flash *fled_cdev) +{ + return container_of(fled_cdev, struct max77693_sub_led, fled_cdev); +} + +static inline struct max77693_led_device *sub_led_to_led( + struct max77693_sub_led *sub_led) +{ + return container_of(sub_led, struct max77693_led_device, + sub_leds[sub_led->fled_id]); +} + +static inline u8 max77693_led_vsys_to_reg(u32 mv) +{ + return ((mv - MAX_FLASH1_VSYS_MIN) / MAX_FLASH1_VSYS_STEP) << 2; +} + +static inline u8 max77693_led_vout_to_reg(u32 mv) +{ + return (mv - FLASH_VOUT_MIN) / FLASH_VOUT_STEP + FLASH_VOUT_RMIN; +} + +static inline bool max77693_fled_used(struct max77693_led_device *led, + int fled_id) +{ + u8 fled_bit = (fled_id == FLED1) ? 
FLED1_IOUT : FLED2_IOUT; + + return led->fled_mask & fled_bit; +} + +static int max77693_set_mode_reg(struct max77693_led_device *led, u8 mode) +{ + struct regmap *rmap = led->regmap; + int ret, v = 0, i; + + for (i = FLED1; i <= FLED2; ++i) { + if (mode & MODE_TORCH(i)) + v |= FLASH_EN_ON << TORCH_EN_SHIFT(i); + + if (mode & MODE_FLASH(i)) { + v |= FLASH_EN_ON << FLASH_EN_SHIFT(i); + } else if (mode & MODE_FLASH_EXTERNAL(i)) { + v |= FLASH_EN_FLASH << FLASH_EN_SHIFT(i); + /* + * Enable hw triggering also for torch mode, as some + * camera sensors use torch led to fathom ambient light + * conditions before strobing the flash. + */ + v |= FLASH_EN_TORCH << TORCH_EN_SHIFT(i); + } + } + + /* Reset the register only prior setting flash modes */ + if (mode & ~(MODE_TORCH(FLED1) | MODE_TORCH(FLED2))) { + ret = regmap_write(rmap, MAX77693_LED_REG_FLASH_EN, 0); + if (ret < 0) + return ret; + } + + return regmap_write(rmap, MAX77693_LED_REG_FLASH_EN, v); +} + +static int max77693_add_mode(struct max77693_led_device *led, u8 mode) +{ + u8 new_mode_flags; + int i, ret; + + if (led->iout_joint) + /* Span the mode on FLED2 for joint iouts case */ + mode |= (mode << 1); + + /* + * FLASH_EXTERNAL mode activates FLASHEN and TORCHEN pins in the device. + * Corresponding register bit fields interfere with SW triggered modes, + * thus clear them to ensure proper device configuration. + */ + for (i = FLED1; i <= FLED2; ++i) + if (mode & MODE_FLASH_EXTERNAL(i)) + led->mode_flags &= (~MODE_TORCH(i) & ~MODE_FLASH(i)); + + new_mode_flags = mode | led->mode_flags; + new_mode_flags &= led->allowed_modes; + + if (new_mode_flags ^ led->mode_flags) + led->mode_flags = new_mode_flags; + else + return 0; + + ret = max77693_set_mode_reg(led, led->mode_flags); + if (ret < 0) + return ret; + + /* + * Clear flash mode flag after setting the mode to avoid spurious flash + * strobing on each subsequent torch mode setting. 
+ */ + if (mode & MODE_FLASH_MASK) + led->mode_flags &= ~mode; + + return ret; +} + +static int max77693_clear_mode(struct max77693_led_device *led, + u8 mode) +{ + if (led->iout_joint) + /* Clear mode also on FLED2 for joint iouts case */ + mode |= (mode << 1); + + led->mode_flags &= ~mode; + + return max77693_set_mode_reg(led, led->mode_flags); +} + +static void max77693_add_allowed_modes(struct max77693_led_device *led, + int fled_id, enum max77693_led_mode mode) +{ + if (mode == FLASH) + led->allowed_modes |= (MODE_FLASH(fled_id) | + MODE_FLASH_EXTERNAL(fled_id)); + else + led->allowed_modes |= MODE_TORCH(fled_id); +} + +static void max77693_distribute_currents(struct max77693_led_device *led, + int fled_id, enum max77693_led_mode mode, + u32 micro_amp, u32 iout_max[2], u32 iout[2]) +{ + if (!led->iout_joint) { + iout[fled_id] = micro_amp; + max77693_add_allowed_modes(led, fled_id, mode); + return; + } + + iout[FLED1] = min(micro_amp, iout_max[FLED1]); + iout[FLED2] = micro_amp - iout[FLED1]; + + if (mode == FLASH) + led->allowed_modes &= ~MODE_FLASH_MASK; + else + led->allowed_modes &= ~MODE_TORCH_MASK; + + max77693_add_allowed_modes(led, FLED1, mode); + + if (iout[FLED2]) + max77693_add_allowed_modes(led, FLED2, mode); +} + +static int max77693_set_torch_current(struct max77693_led_device *led, + int fled_id, u32 micro_amp) +{ + struct regmap *rmap = led->regmap; + u8 iout1_reg = 0, iout2_reg = 0; + u32 iout[2]; + + max77693_distribute_currents(led, fled_id, TORCH, micro_amp, + led->iout_torch_max, iout); + + if (fled_id == FLED1 || led->iout_joint) { + iout1_reg = max77693_led_iout_to_reg(iout[FLED1]); + led->torch_iout_reg &= TORCH_IOUT_MASK(TORCH_IOUT2_SHIFT); + } + if (fled_id == FLED2 || led->iout_joint) { + iout2_reg = max77693_led_iout_to_reg(iout[FLED2]); + led->torch_iout_reg &= TORCH_IOUT_MASK(TORCH_IOUT1_SHIFT); + } + + led->torch_iout_reg |= ((iout1_reg << TORCH_IOUT1_SHIFT) | + (iout2_reg << TORCH_IOUT2_SHIFT)); + + return regmap_write(rmap, MAX77693_LED_REG_ITORCH, + led->torch_iout_reg); +} + +static int max77693_set_flash_current(struct max77693_led_device *led, + int fled_id, + u32 micro_amp) +{ + struct regmap *rmap = led->regmap; + u8 iout1_reg, iout2_reg; + u32 iout[2]; + int ret = -EINVAL; + + max77693_distribute_currents(led, fled_id, FLASH, micro_amp, + led->iout_flash_max, iout); + + if (fled_id == FLED1 || led->iout_joint) { + iout1_reg = max77693_led_iout_to_reg(iout[FLED1]); + ret = regmap_write(rmap, MAX77693_LED_REG_IFLASH1, + iout1_reg); + if (ret < 0) + return ret; + } + if (fled_id == FLED2 || led->iout_joint) { + iout2_reg = max77693_led_iout_to_reg(iout[FLED2]); + ret = regmap_write(rmap, MAX77693_LED_REG_IFLASH2, + iout2_reg); + } + + return ret; +} + +static int max77693_set_timeout(struct max77693_led_device *led, u32 microsec) +{ + struct regmap *rmap = led->regmap; + u8 v; + int ret; + + v = max77693_flash_timeout_to_reg(microsec) | FLASH_TMR_LEVEL; + + ret = regmap_write(rmap, MAX77693_LED_REG_FLASH_TIMER, v); + if (ret < 0) + return ret; + + led->current_flash_timeout = microsec; + + return 0; +} + +static int max77693_get_strobe_status(struct max77693_led_device *led, + bool *state) +{ + struct regmap *rmap = led->regmap; + unsigned int v; + int ret; + + ret = regmap_read(rmap, MAX77693_LED_REG_FLASH_STATUS, &v); + if (ret < 0) + return ret; + + *state = v & FLASH_STATUS_FLASH_ON; + + return ret; +} + +static int max77693_get_flash_faults(struct max77693_sub_led *sub_led) +{ + struct max77693_led_device *led = sub_led_to_led(sub_led); + 
struct regmap *rmap = led->regmap; + unsigned int v; + u8 fault_open_mask, fault_short_mask; + int ret; + + sub_led->flash_faults = 0; + + if (led->iout_joint) { + fault_open_mask = FLASH_INT_FLED1_OPEN | FLASH_INT_FLED2_OPEN; + fault_short_mask = FLASH_INT_FLED1_SHORT | + FLASH_INT_FLED2_SHORT; + } else { + fault_open_mask = (sub_led->fled_id == FLED1) ? + FLASH_INT_FLED1_OPEN : + FLASH_INT_FLED2_OPEN; + fault_short_mask = (sub_led->fled_id == FLED1) ? + FLASH_INT_FLED1_SHORT : + FLASH_INT_FLED2_SHORT; + } + + ret = regmap_read(rmap, MAX77693_LED_REG_FLASH_INT, &v); + if (ret < 0) + return ret; + + if (v & fault_open_mask) + sub_led->flash_faults |= LED_FAULT_OVER_VOLTAGE; + if (v & fault_short_mask) + sub_led->flash_faults |= LED_FAULT_SHORT_CIRCUIT; + if (v & FLASH_INT_OVER_CURRENT) + sub_led->flash_faults |= LED_FAULT_OVER_CURRENT; + + return 0; +} + +static int max77693_setup(struct max77693_led_device *led, + struct max77693_led_config_data *led_cfg) +{ + struct regmap *rmap = led->regmap; + int i, first_led, last_led, ret; + u32 max_flash_curr[2]; + u8 v; + + /* + * Initialize only flash current. Torch current doesn't + * require initialization as ITORCH register is written with + * new value each time brightness_set op is called. + */ + if (led->iout_joint) { + first_led = FLED1; + last_led = FLED1; + max_flash_curr[FLED1] = led_cfg->iout_flash_max[FLED1] + + led_cfg->iout_flash_max[FLED2]; + } else { + first_led = max77693_fled_used(led, FLED1) ? FLED1 : FLED2; + last_led = max77693_fled_used(led, FLED2) ? FLED2 : FLED1; + max_flash_curr[FLED1] = led_cfg->iout_flash_max[FLED1]; + max_flash_curr[FLED2] = led_cfg->iout_flash_max[FLED2]; + } + + for (i = first_led; i <= last_led; ++i) { + ret = max77693_set_flash_current(led, i, + max_flash_curr[i]); + if (ret < 0) + return ret; + } + + v = TORCH_TMR_NO_TIMER | MAX77693_LED_TRIG_TYPE_LEVEL; + ret = regmap_write(rmap, MAX77693_LED_REG_ITORCHTIMER, v); + if (ret < 0) + return ret; + + if (led_cfg->low_vsys > 0) + v = max77693_led_vsys_to_reg(led_cfg->low_vsys) | + MAX_FLASH1_MAX_FL_EN; + else + v = 0; + + ret = regmap_write(rmap, MAX77693_LED_REG_MAX_FLASH1, v); + if (ret < 0) + return ret; + ret = regmap_write(rmap, MAX77693_LED_REG_MAX_FLASH2, 0); + if (ret < 0) + return ret; + + if (led_cfg->boost_mode == MAX77693_LED_BOOST_FIXED) + v = FLASH_BOOST_FIXED; + else + v = led_cfg->boost_mode | led_cfg->boost_mode << 1; + + if (max77693_fled_used(led, FLED1) && max77693_fled_used(led, FLED2)) + v |= FLASH_BOOST_LEDNUM_2; + + ret = regmap_write(rmap, MAX77693_LED_REG_VOUT_CNTL, v); + if (ret < 0) + return ret; + + v = max77693_led_vout_to_reg(led_cfg->boost_vout); + ret = regmap_write(rmap, MAX77693_LED_REG_VOUT_FLASH1, v); + if (ret < 0) + return ret; + + return max77693_set_mode_reg(led, MODE_OFF); +} + +static int __max77693_led_brightness_set(struct max77693_led_device *led, + int fled_id, enum led_brightness value) +{ + int ret; + + mutex_lock(&led->lock); + + if (value == 0) { + ret = max77693_clear_mode(led, MODE_TORCH(fled_id)); + if (ret < 0) + dev_dbg(&led->pdev->dev, + "Failed to clear torch mode (%d)\n", + ret); + goto unlock; + } + + ret = max77693_set_torch_current(led, fled_id, value * TORCH_IOUT_STEP); + if (ret < 0) { + dev_dbg(&led->pdev->dev, + "Failed to set torch current (%d)\n", + ret); + goto unlock; + } + + ret = max77693_add_mode(led, MODE_TORCH(fled_id)); + if (ret < 0) + dev_dbg(&led->pdev->dev, + "Failed to set torch mode (%d)\n", + ret); +unlock: + mutex_unlock(&led->lock); + return ret; +} + +static void 
max77693_led_brightness_set_work( + struct work_struct *work) +{ + struct max77693_sub_led *sub_led = + container_of(work, struct max77693_sub_led, + work_brightness_set); + struct max77693_led_device *led = sub_led_to_led(sub_led); + + __max77693_led_brightness_set(led, sub_led->fled_id, + sub_led->torch_brightness); +} + +/* LED subsystem callbacks */ + +static int max77693_led_brightness_set_sync( + struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); + struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev); + struct max77693_led_device *led = sub_led_to_led(sub_led); + + return __max77693_led_brightness_set(led, sub_led->fled_id, value); +} + +static void max77693_led_brightness_set( + struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct led_classdev_flash *fled_cdev = lcdev_to_flcdev(led_cdev); + struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev); + + sub_led->torch_brightness = value; + schedule_work(&sub_led->work_brightness_set); +} + +static int max77693_led_flash_brightness_set( + struct led_classdev_flash *fled_cdev, + u32 brightness) +{ + struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev); + struct max77693_led_device *led = sub_led_to_led(sub_led); + int ret; + + mutex_lock(&led->lock); + ret = max77693_set_flash_current(led, sub_led->fled_id, brightness); + mutex_unlock(&led->lock); + + return ret; +} + +static int max77693_led_flash_strobe_set( + struct led_classdev_flash *fled_cdev, + bool state) +{ + struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev); + struct max77693_led_device *led = sub_led_to_led(sub_led); + int fled_id = sub_led->fled_id; + int ret; + + mutex_lock(&led->lock); + + if (!state) { + ret = max77693_clear_mode(led, MODE_FLASH(fled_id)); + goto unlock; + } + + if (sub_led->flash_timeout != led->current_flash_timeout) { + ret = max77693_set_timeout(led, sub_led->flash_timeout); + if (ret < 0) + goto unlock; + } + + led->strobing_sub_led_id = fled_id; + + ret = max77693_add_mode(led, MODE_FLASH(fled_id)); + if (ret < 0) + goto unlock; + + ret = max77693_get_flash_faults(sub_led); + +unlock: + mutex_unlock(&led->lock); + return ret; +} + +static int max77693_led_flash_fault_get( + struct led_classdev_flash *fled_cdev, + u32 *fault) +{ + struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev); + + *fault = sub_led->flash_faults; + + return 0; +} + +static int max77693_led_flash_strobe_get( + struct led_classdev_flash *fled_cdev, + bool *state) +{ + struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev); + struct max77693_led_device *led = sub_led_to_led(sub_led); + int ret; + + if (!state) + return -EINVAL; + + mutex_lock(&led->lock); + + ret = max77693_get_strobe_status(led, state); + + *state = !!(*state && (led->strobing_sub_led_id == sub_led->fled_id)); + + mutex_unlock(&led->lock); + + return ret; +} + +static int max77693_led_flash_timeout_set( + struct led_classdev_flash *fled_cdev, + u32 timeout) +{ + struct max77693_sub_led *sub_led = flcdev_to_sub_led(fled_cdev); + struct max77693_led_device *led = sub_led_to_led(sub_led); + + mutex_lock(&led->lock); + sub_led->flash_timeout = timeout; + mutex_unlock(&led->lock); + + return 0; +} + +static int max77693_led_parse_dt(struct max77693_led_device *led, + struct max77693_led_config_data *cfg, + struct device_node **sub_nodes) +{ + struct device *dev = &led->pdev->dev; + struct max77693_sub_led *sub_leds = led->sub_leds; + struct device_node *node 
= dev->of_node, *child_node; + struct property *prop; + u32 led_sources[2]; + int i, ret, fled_id; + + of_property_read_u32(node, "maxim,boost-mode", &cfg->boost_mode); + of_property_read_u32(node, "maxim,boost-mvout", &cfg->boost_vout); + of_property_read_u32(node, "maxim,mvsys-min", &cfg->low_vsys); + + for_each_available_child_of_node(node, child_node) { + prop = of_find_property(child_node, "led-sources", NULL); + if (prop) { + const __be32 *srcs = NULL; + + for (i = 0; i < ARRAY_SIZE(led_sources); ++i) { + srcs = of_prop_next_u32(prop, srcs, + &led_sources[i]); + if (!srcs) + break; + } + } else { + dev_err(dev, + "led-sources DT property missing\n"); + of_node_put(child_node); + return -EINVAL; + } + + if (i == 2) { + fled_id = FLED1; + led->fled_mask = FLED1_IOUT | FLED2_IOUT; + } else if (led_sources[0] == FLED1) { + fled_id = FLED1; + led->fled_mask |= FLED1_IOUT; + } else if (led_sources[0] == FLED2) { + fled_id = FLED2; + led->fled_mask |= FLED2_IOUT; + } else { + dev_err(dev, + "Wrong led-sources DT property value.\n"); + of_node_put(child_node); + return -EINVAL; + } + + if (sub_nodes[fled_id]) { + dev_err(dev, + "Conflicting \"led-sources\" DT properties\n"); + return -EINVAL; + } + + sub_nodes[fled_id] = child_node; + sub_leds[fled_id].fled_id = fled_id; + + cfg->label[fled_id] = + of_get_property(child_node, "label", NULL) ? : + child_node->name; + + ret = of_property_read_u32(child_node, "led-max-microamp", + &cfg->iout_torch_max[fled_id]); + if (ret < 0) { + cfg->iout_torch_max[fled_id] = TORCH_IOUT_MIN; + dev_warn(dev, "led-max-microamp DT property missing\n"); + } + + ret = of_property_read_u32(child_node, "flash-max-microamp", + &cfg->iout_flash_max[fled_id]); + if (ret < 0) { + cfg->iout_flash_max[fled_id] = FLASH_IOUT_MIN; + dev_warn(dev, + "flash-max-microamp DT property missing\n"); + } + + ret = of_property_read_u32(child_node, "flash-max-timeout-us", + &cfg->flash_timeout_max[fled_id]); + if (ret < 0) { + cfg->flash_timeout_max[fled_id] = FLASH_TIMEOUT_MIN; + dev_warn(dev, + "flash-max-timeout-us DT property missing\n"); + } + + if (++cfg->num_leds == 2 || + (max77693_fled_used(led, FLED1) && + max77693_fled_used(led, FLED2))) { + of_node_put(child_node); + break; + } + } + + if (cfg->num_leds == 0) { + dev_err(dev, "No DT child node found for connected LED(s).\n"); + return -EINVAL; + } + + return 0; +} + +static void clamp_align(u32 *v, u32 min, u32 max, u32 step) +{ + *v = clamp_val(*v, min, max); + if (step > 1) + *v = (*v - min) / step * step + min; +} + +static void max77693_align_iout_current(struct max77693_led_device *led, + u32 *iout, u32 min, u32 max, u32 step) +{ + int i; + + if (led->iout_joint) { + if (iout[FLED1] > min) { + iout[FLED1] /= 2; + iout[FLED2] = iout[FLED1]; + } else { + iout[FLED1] = min; + iout[FLED2] = 0; + return; + } + } + + for (i = FLED1; i <= FLED2; ++i) + if (max77693_fled_used(led, i)) + clamp_align(&iout[i], min, max, step); + else + iout[i] = 0; +} + +static void max77693_led_validate_configuration(struct max77693_led_device *led, + struct max77693_led_config_data *cfg) +{ + u32 flash_iout_max = cfg->boost_mode ? 
FLASH_IOUT_MAX_2LEDS : + FLASH_IOUT_MAX_1LED; + int i; + + if (cfg->num_leds == 1 && + max77693_fled_used(led, FLED1) && max77693_fled_used(led, FLED2)) + led->iout_joint = true; + + cfg->boost_mode = clamp_val(cfg->boost_mode, MAX77693_LED_BOOST_NONE, + MAX77693_LED_BOOST_FIXED); + + /* Boost must be enabled if both current outputs are used */ + if ((cfg->boost_mode == MAX77693_LED_BOOST_NONE) && led->iout_joint) + cfg->boost_mode = MAX77693_LED_BOOST_FIXED; + + max77693_align_iout_current(led, cfg->iout_torch_max, + TORCH_IOUT_MIN, TORCH_IOUT_MAX, TORCH_IOUT_STEP); + + max77693_align_iout_current(led, cfg->iout_flash_max, + FLASH_IOUT_MIN, flash_iout_max, FLASH_IOUT_STEP); + + for (i = 0; i < ARRAY_SIZE(cfg->flash_timeout_max); ++i) + clamp_align(&cfg->flash_timeout_max[i], FLASH_TIMEOUT_MIN, + FLASH_TIMEOUT_MAX, FLASH_TIMEOUT_STEP); + + clamp_align(&cfg->boost_vout, FLASH_VOUT_MIN, FLASH_VOUT_MAX, + FLASH_VOUT_STEP); + + if (cfg->low_vsys) + clamp_align(&cfg->low_vsys, MAX_FLASH1_VSYS_MIN, + MAX_FLASH1_VSYS_MAX, MAX_FLASH1_VSYS_STEP); +} + +static int max77693_led_get_configuration(struct max77693_led_device *led, + struct max77693_led_config_data *cfg, + struct device_node **sub_nodes) +{ + int ret; + + ret = max77693_led_parse_dt(led, cfg, sub_nodes); + if (ret < 0) + return ret; + + max77693_led_validate_configuration(led, cfg); + + memcpy(led->iout_torch_max, cfg->iout_torch_max, + sizeof(led->iout_torch_max)); + memcpy(led->iout_flash_max, cfg->iout_flash_max, + sizeof(led->iout_flash_max)); + + return 0; +} + +static const struct led_flash_ops flash_ops = { + .flash_brightness_set = max77693_led_flash_brightness_set, + .strobe_set = max77693_led_flash_strobe_set, + .strobe_get = max77693_led_flash_strobe_get, + .timeout_set = max77693_led_flash_timeout_set, + .fault_get = max77693_led_flash_fault_get, +}; + +static void max77693_init_flash_settings(struct max77693_sub_led *sub_led, + struct max77693_led_config_data *led_cfg) +{ + struct led_classdev_flash *fled_cdev = &sub_led->fled_cdev; + struct max77693_led_device *led = sub_led_to_led(sub_led); + int fled_id = sub_led->fled_id; + struct led_flash_setting *setting; + + /* Init flash intensity setting */ + setting = &fled_cdev->brightness; + setting->min = FLASH_IOUT_MIN; + setting->max = led->iout_joint ? 
+ led_cfg->iout_flash_max[FLED1] + + led_cfg->iout_flash_max[FLED2] : + led_cfg->iout_flash_max[fled_id]; + setting->step = FLASH_IOUT_STEP; + setting->val = setting->max; + + /* Init flash timeout setting */ + setting = &fled_cdev->timeout; + setting->min = FLASH_TIMEOUT_MIN; + setting->max = led_cfg->flash_timeout_max[fled_id]; + setting->step = FLASH_TIMEOUT_STEP; + setting->val = setting->max; +} + +#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS) + +static int max77693_led_external_strobe_set( + struct v4l2_flash *v4l2_flash, + bool enable) +{ + struct max77693_sub_led *sub_led = + flcdev_to_sub_led(v4l2_flash->fled_cdev); + struct max77693_led_device *led = sub_led_to_led(sub_led); + int fled_id = sub_led->fled_id; + int ret; + + mutex_lock(&led->lock); + + if (enable) + ret = max77693_add_mode(led, MODE_FLASH_EXTERNAL(fled_id)); + else + ret = max77693_clear_mode(led, MODE_FLASH_EXTERNAL(fled_id)); + + mutex_unlock(&led->lock); + + return ret; +} + +static void max77693_init_v4l2_flash_config(struct max77693_sub_led *sub_led, + struct max77693_led_config_data *led_cfg, + struct v4l2_flash_config *v4l2_sd_cfg) +{ + struct max77693_led_device *led = sub_led_to_led(sub_led); + struct device *dev = &led->pdev->dev; + struct max77693_dev *iodev = dev_get_drvdata(dev->parent); + struct i2c_client *i2c = iodev->i2c; + struct led_flash_setting *s; + + snprintf(v4l2_sd_cfg->dev_name, sizeof(v4l2_sd_cfg->dev_name), + "%s %d-%04x", sub_led->fled_cdev.led_cdev.name, + i2c_adapter_id(i2c->adapter), i2c->addr); + + s = &v4l2_sd_cfg->torch_intensity; + s->min = TORCH_IOUT_MIN; + s->max = sub_led->fled_cdev.led_cdev.max_brightness * TORCH_IOUT_STEP; + s->step = TORCH_IOUT_STEP; + s->val = s->max; + + /* Init flash faults config */ + v4l2_sd_cfg->flash_faults = LED_FAULT_OVER_VOLTAGE | + LED_FAULT_SHORT_CIRCUIT | + LED_FAULT_OVER_CURRENT; + + v4l2_sd_cfg->has_external_strobe = true; +} + +static const struct v4l2_flash_ops v4l2_flash_ops = { + .external_strobe_set = max77693_led_external_strobe_set, +}; +#else +static inline void max77693_init_v4l2_flash_config( + struct max77693_sub_led *sub_led, + struct max77693_led_config_data *led_cfg, + struct v4l2_flash_config *v4l2_sd_cfg) +{ +} +static const struct v4l2_flash_ops v4l2_flash_ops; +#endif + +static void max77693_init_fled_cdev(struct max77693_sub_led *sub_led, + struct max77693_led_config_data *led_cfg) +{ + struct max77693_led_device *led = sub_led_to_led(sub_led); + int fled_id = sub_led->fled_id; + struct led_classdev_flash *fled_cdev; + struct led_classdev *led_cdev; + + /* Initialize LED Flash class device */ + fled_cdev = &sub_led->fled_cdev; + fled_cdev->ops = &flash_ops; + led_cdev = &fled_cdev->led_cdev; + + led_cdev->name = led_cfg->label[fled_id]; + + led_cdev->brightness_set = max77693_led_brightness_set; + led_cdev->brightness_set_sync = max77693_led_brightness_set_sync; + led_cdev->max_brightness = (led->iout_joint ? 
+ led_cfg->iout_torch_max[FLED1] + + led_cfg->iout_torch_max[FLED2] : + led_cfg->iout_torch_max[fled_id]) / + TORCH_IOUT_STEP; + led_cdev->flags |= LED_DEV_CAP_FLASH; + INIT_WORK(&sub_led->work_brightness_set, + max77693_led_brightness_set_work); + + max77693_init_flash_settings(sub_led, led_cfg); + + /* Init flash timeout cache */ + sub_led->flash_timeout = fled_cdev->timeout.val; +} + +static int max77693_register_led(struct max77693_sub_led *sub_led, + struct max77693_led_config_data *led_cfg, + struct device_node *sub_node) +{ + struct max77693_led_device *led = sub_led_to_led(sub_led); + struct led_classdev_flash *fled_cdev = &sub_led->fled_cdev; + struct device *dev = &led->pdev->dev; + struct v4l2_flash_config v4l2_sd_cfg = {}; + int ret; + + /* Register in the LED subsystem */ + ret = led_classdev_flash_register(dev, fled_cdev); + if (ret < 0) + return ret; + + max77693_init_v4l2_flash_config(sub_led, led_cfg, &v4l2_sd_cfg); + + /* Register in the V4L2 subsystem. */ + sub_led->v4l2_flash = v4l2_flash_init(dev, sub_node, fled_cdev, NULL, + &v4l2_flash_ops, &v4l2_sd_cfg); + if (IS_ERR(sub_led->v4l2_flash)) { + ret = PTR_ERR(sub_led->v4l2_flash); + goto err_v4l2_flash_init; + } + + return 0; + +err_v4l2_flash_init: + led_classdev_flash_unregister(fled_cdev); + return ret; +} + +static int max77693_led_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct max77693_dev *iodev = dev_get_drvdata(dev->parent); + struct max77693_led_device *led; + struct max77693_sub_led *sub_leds; + struct device_node *sub_nodes[2] = {}; + struct max77693_led_config_data led_cfg = {}; + int init_fled_cdev[2], i, ret; + + led = devm_kzalloc(dev, sizeof(*led), GFP_KERNEL); + if (!led) + return -ENOMEM; + + led->pdev = pdev; + led->regmap = iodev->regmap; + led->allowed_modes = MODE_FLASH_MASK; + sub_leds = led->sub_leds; + + platform_set_drvdata(pdev, led); + ret = max77693_led_get_configuration(led, &led_cfg, sub_nodes); + if (ret < 0) + return ret; + + ret = max77693_setup(led, &led_cfg); + if (ret < 0) + return ret; + + mutex_init(&led->lock); + + init_fled_cdev[FLED1] = + led->iout_joint || max77693_fled_used(led, FLED1); + init_fled_cdev[FLED2] = + !led->iout_joint && max77693_fled_used(led, FLED2); + + for (i = FLED1; i <= FLED2; ++i) { + if (!init_fled_cdev[i]) + continue; + + /* Initialize LED Flash class device */ + max77693_init_fled_cdev(&sub_leds[i], &led_cfg); + + /* + * Register LED Flash class device and corresponding + * V4L2 Flash device. + */ + ret = max77693_register_led(&sub_leds[i], &led_cfg, + sub_nodes[i]); + if (ret < 0) { + /* + * At this moment FLED1 might have been already + * registered and it needs to be released. 
+ */ + if (i == FLED2) + goto err_register_led2; + else + goto err_register_led1; + } + } + + return 0; + +err_register_led2: + /* It is possible than only FLED2 was to be registered */ + if (!init_fled_cdev[FLED1]) + goto err_register_led1; + v4l2_flash_release(sub_leds[FLED1].v4l2_flash); + led_classdev_flash_unregister(&sub_leds[FLED1].fled_cdev); +err_register_led1: + mutex_destroy(&led->lock); + + return ret; +} + +static int max77693_led_remove(struct platform_device *pdev) +{ + struct max77693_led_device *led = platform_get_drvdata(pdev); + struct max77693_sub_led *sub_leds = led->sub_leds; + + if (led->iout_joint || max77693_fled_used(led, FLED1)) { + v4l2_flash_release(sub_leds[FLED1].v4l2_flash); + led_classdev_flash_unregister(&sub_leds[FLED1].fled_cdev); + cancel_work_sync(&sub_leds[FLED1].work_brightness_set); + } + + if (!led->iout_joint && max77693_fled_used(led, FLED2)) { + v4l2_flash_release(sub_leds[FLED2].v4l2_flash); + led_classdev_flash_unregister(&sub_leds[FLED2].fled_cdev); + cancel_work_sync(&sub_leds[FLED2].work_brightness_set); + } + + mutex_destroy(&led->lock); + + return 0; +} + +static const struct of_device_id max77693_led_dt_match[] = { + { .compatible = "maxim,max77693-led" }, + {}, +}; + +static struct platform_driver max77693_led_driver = { + .probe = max77693_led_probe, + .remove = max77693_led_remove, + .driver = { + .name = "max77693-led", + .of_match_table = max77693_led_dt_match, + }, +}; + +module_platform_driver(max77693_led_driver); + +MODULE_AUTHOR("Jacek Anaszewski <[email protected]>"); +MODULE_AUTHOR("Andrzej Hajda <[email protected]>"); +MODULE_DESCRIPTION("Maxim MAX77693 led flash driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/leds/leds-tlc591xx.c b/drivers/leds/leds-tlc591xx.c new file mode 100644 index 000000000000..de16c29d7895 --- /dev/null +++ b/drivers/leds/leds-tlc591xx.c @@ -0,0 +1,300 @@ +/* + * Copyright 2014 Belkin Inc. + * Copyright 2015 Andrew Lunn <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. 
+ */ + +#include <linux/i2c.h> +#include <linux/leds.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/workqueue.h> + +#define TLC591XX_MAX_LEDS 16 + +#define TLC591XX_REG_MODE1 0x00 +#define MODE1_RESPON_ADDR_MASK 0xF0 +#define MODE1_NORMAL_MODE (0 << 4) +#define MODE1_SPEED_MODE (1 << 4) + +#define TLC591XX_REG_MODE2 0x01 +#define MODE2_DIM (0 << 5) +#define MODE2_BLINK (1 << 5) +#define MODE2_OCH_STOP (0 << 3) +#define MODE2_OCH_ACK (1 << 3) + +#define TLC591XX_REG_PWM(x) (0x02 + (x)) + +#define TLC591XX_REG_GRPPWM 0x12 +#define TLC591XX_REG_GRPFREQ 0x13 + +/* LED Driver Output State, determine the source that drives LED outputs */ +#define LEDOUT_OFF 0x0 /* Output LOW */ +#define LEDOUT_ON 0x1 /* Output HI-Z */ +#define LEDOUT_DIM 0x2 /* Dimming */ +#define LEDOUT_BLINK 0x3 /* Blinking */ +#define LEDOUT_MASK 0x3 + +#define ldev_to_led(c) container_of(c, struct tlc591xx_led, ldev) +#define work_to_led(work) container_of(work, struct tlc591xx_led, work) + +struct tlc591xx_led { + bool active; + unsigned int led_no; + struct led_classdev ldev; + struct work_struct work; + struct tlc591xx_priv *priv; +}; + +struct tlc591xx_priv { + struct tlc591xx_led leds[TLC591XX_MAX_LEDS]; + struct regmap *regmap; + unsigned int reg_ledout_offset; +}; + +struct tlc591xx { + unsigned int max_leds; + unsigned int reg_ledout_offset; +}; + +static const struct tlc591xx tlc59116 = { + .max_leds = 16, + .reg_ledout_offset = 0x14, +}; + +static const struct tlc591xx tlc59108 = { + .max_leds = 8, + .reg_ledout_offset = 0x0c, +}; + +static int +tlc591xx_set_mode(struct regmap *regmap, u8 mode) +{ + int err; + u8 val; + + err = regmap_write(regmap, TLC591XX_REG_MODE1, MODE1_NORMAL_MODE); + if (err) + return err; + + val = MODE2_OCH_STOP | mode; + + return regmap_write(regmap, TLC591XX_REG_MODE2, val); +} + +static int +tlc591xx_set_ledout(struct tlc591xx_priv *priv, struct tlc591xx_led *led, + u8 val) +{ + unsigned int i = (led->led_no % 4) * 2; + unsigned int mask = LEDOUT_MASK << i; + unsigned int addr = priv->reg_ledout_offset + (led->led_no >> 2); + + val = val << i; + + return regmap_update_bits(priv->regmap, addr, mask, val); +} + +static int +tlc591xx_set_pwm(struct tlc591xx_priv *priv, struct tlc591xx_led *led, + u8 brightness) +{ + u8 pwm = TLC591XX_REG_PWM(led->led_no); + + return regmap_write(priv->regmap, pwm, brightness); +} + +static void +tlc591xx_led_work(struct work_struct *work) +{ + struct tlc591xx_led *led = work_to_led(work); + struct tlc591xx_priv *priv = led->priv; + enum led_brightness brightness = led->ldev.brightness; + int err; + + switch (brightness) { + case 0: + err = tlc591xx_set_ledout(priv, led, LEDOUT_OFF); + break; + case LED_FULL: + err = tlc591xx_set_ledout(priv, led, LEDOUT_ON); + break; + default: + err = tlc591xx_set_ledout(priv, led, LEDOUT_DIM); + if (!err) + err = tlc591xx_set_pwm(priv, led, brightness); + } + + if (err) + dev_err(led->ldev.dev, "Failed setting brightness\n"); +} + +static void +tlc591xx_brightness_set(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct tlc591xx_led *led = ldev_to_led(led_cdev); + + led->ldev.brightness = brightness; + schedule_work(&led->work); +} + +static void +tlc591xx_destroy_devices(struct tlc591xx_priv *priv, unsigned int j) +{ + int i = j; + + while (--i >= 0) { + if (priv->leds[i].active) { + led_classdev_unregister(&priv->leds[i].ldev); + cancel_work_sync(&priv->leds[i].work); + } + } +} + +static int 
+tlc591xx_configure(struct device *dev, + struct tlc591xx_priv *priv, + const struct tlc591xx *tlc591xx) +{ + unsigned int i; + int err = 0; + + tlc591xx_set_mode(priv->regmap, MODE2_DIM); + for (i = 0; i < TLC591XX_MAX_LEDS; i++) { + struct tlc591xx_led *led = &priv->leds[i]; + + if (!led->active) + continue; + + led->priv = priv; + led->led_no = i; + led->ldev.brightness_set = tlc591xx_brightness_set; + led->ldev.max_brightness = LED_FULL; + INIT_WORK(&led->work, tlc591xx_led_work); + err = led_classdev_register(dev, &led->ldev); + if (err < 0) { + dev_err(dev, "couldn't register LED %s\n", + led->ldev.name); + goto exit; + } + } + + return 0; + +exit: + tlc591xx_destroy_devices(priv, i); + return err; +} + +static const struct regmap_config tlc591xx_regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x1e, +}; + +static const struct of_device_id of_tlc591xx_leds_match[] = { + { .compatible = "ti,tlc59116", + .data = &tlc59116 }, + { .compatible = "ti,tlc59108", + .data = &tlc59108 }, + {}, +}; +MODULE_DEVICE_TABLE(of, of_tlc591xx_leds_match); + +static int +tlc591xx_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device_node *np = client->dev.of_node, *child; + struct device *dev = &client->dev; + const struct of_device_id *match; + const struct tlc591xx *tlc591xx; + struct tlc591xx_priv *priv; + int err, count, reg; + + match = of_match_device(of_tlc591xx_leds_match, dev); + if (!match) + return -ENODEV; + + tlc591xx = match->data; + if (!np) + return -ENODEV; + + count = of_get_child_count(np); + if (!count || count > tlc591xx->max_leds) + return -EINVAL; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA)) + return -EIO; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->regmap = devm_regmap_init_i2c(client, &tlc591xx_regmap); + if (IS_ERR(priv->regmap)) { + err = PTR_ERR(priv->regmap); + dev_err(dev, "Failed to allocate register map: %d\n", err); + return err; + } + priv->reg_ledout_offset = tlc591xx->reg_ledout_offset; + + i2c_set_clientdata(client, priv); + + for_each_child_of_node(np, child) { + err = of_property_read_u32(child, "reg", ®); + if (err) + return err; + if (reg < 0 || reg >= tlc591xx->max_leds) + return -EINVAL; + if (priv->leds[reg].active) + return -EINVAL; + priv->leds[reg].active = true; + priv->leds[reg].ldev.name = + of_get_property(child, "label", NULL) ? 
: child->name; + priv->leds[reg].ldev.default_trigger = + of_get_property(child, "linux,default-trigger", NULL); + } + return tlc591xx_configure(dev, priv, tlc591xx); +} + +static int +tlc591xx_remove(struct i2c_client *client) +{ + struct tlc591xx_priv *priv = i2c_get_clientdata(client); + + tlc591xx_destroy_devices(priv, TLC591XX_MAX_LEDS); + + return 0; +} + +static const struct i2c_device_id tlc591xx_id[] = { + { "tlc59116" }, + { "tlc59108" }, + {}, +}; +MODULE_DEVICE_TABLE(i2c, tlc591xx_id); + +static struct i2c_driver tlc591xx_driver = { + .driver = { + .name = "tlc591xx", + .of_match_table = of_match_ptr(of_tlc591xx_leds_match), + }, + .probe = tlc591xx_probe, + .remove = tlc591xx_remove, + .id_table = tlc591xx_id, +}; + +module_i2c_driver(tlc591xx_driver); + +MODULE_AUTHOR("Andrew Lunn <[email protected]>"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("TLC591XX LED driver"); diff --git a/drivers/leds/leds.h b/drivers/leds/leds.h index 79efe57c7405..bc89d7ace2c4 100644 --- a/drivers/leds/leds.h +++ b/drivers/leds/leds.h @@ -13,7 +13,6 @@ #ifndef __LEDS_H_INCLUDED #define __LEDS_H_INCLUDED -#include <linux/device.h> #include <linux/rwsem.h> #include <linux/leds.h> @@ -50,27 +49,4 @@ void led_stop_software_blink(struct led_classdev *led_cdev); extern struct rw_semaphore leds_list_lock; extern struct list_head leds_list; -#ifdef CONFIG_LEDS_TRIGGERS -void led_trigger_set_default(struct led_classdev *led_cdev); -void led_trigger_set(struct led_classdev *led_cdev, - struct led_trigger *trigger); -void led_trigger_remove(struct led_classdev *led_cdev); - -static inline void *led_get_trigger_data(struct led_classdev *led_cdev) -{ - return led_cdev->trigger_data; -} - -#else -#define led_trigger_set_default(x) do {} while (0) -#define led_trigger_set(x, y) do {} while (0) -#define led_trigger_remove(x) do {} while (0) -#define led_get_trigger_data(x) (NULL) -#endif - -ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count); -ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr, - char *buf); - #endif /* __LEDS_H_INCLUDED */ diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c index f3755e0aa935..f80acb36ff07 100644 --- a/drivers/mailbox/pl320-ipc.c +++ b/drivers/mailbox/pl320-ipc.c @@ -195,4 +195,4 @@ static int __init ipc_init(void) { return amba_driver_register(&pl320_driver); } -module_init(ipc_init); +subsys_initcall(ipc_init); diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index fe080ad0e558..ce64fc851251 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -157,7 +157,7 @@ int bch_journal_read(struct cache_set *c, struct list_head *list) for_each_cache(ca, c, iter) { struct journal_device *ja = &ca->journal; - unsigned long bitmap[SB_JOURNAL_BUCKETS / BITS_PER_LONG]; + DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS); unsigned i, l, r, m; uint64_t seq; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 4dd2bb7167f0..94980bfca434 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -760,14 +760,8 @@ static void bcache_device_free(struct bcache_device *d) bio_split_pool_free(&d->bio_split_hook); if (d->bio_split) bioset_free(d->bio_split); - if (is_vmalloc_addr(d->full_dirty_stripes)) - vfree(d->full_dirty_stripes); - else - kfree(d->full_dirty_stripes); - if (is_vmalloc_addr(d->stripe_sectors_dirty)) - vfree(d->stripe_sectors_dirty); - else - kfree(d->stripe_sectors_dirty); + kvfree(d->full_dirty_stripes); + 
kvfree(d->stripe_sectors_dirty); closure_debug_destroy(&d->cl); } diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index 98df7572b5f7..1d04c4859c70 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -52,10 +52,7 @@ struct closure; #define free_heap(heap) \ do { \ - if (is_vmalloc_addr((heap)->data)) \ - vfree((heap)->data); \ - else \ - kfree((heap)->data); \ + kvfree((heap)->data); \ (heap)->data = NULL; \ } while (0) @@ -163,10 +160,7 @@ do { \ #define free_fifo(fifo) \ do { \ - if (is_vmalloc_addr((fifo)->data)) \ - vfree((fifo)->data); \ - else \ - kfree((fifo)->data); \ + kvfree((fifo)->data); \ (fifo)->data = NULL; \ } while (0) diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c index 6d6e0ca91fb4..58f65486de33 100644 --- a/drivers/media/platform/coda/coda-common.c +++ b/drivers/media/platform/coda/coda-common.c @@ -2155,9 +2155,9 @@ static int coda_probe(struct platform_device *pdev) } /* Get IRAM pool from device tree or platform data */ - pool = of_get_named_gen_pool(np, "iram", 0); + pool = of_gen_pool_get(np, "iram", 0); if (!pool && pdata) - pool = dev_get_gen_pool(pdata->iram_dev); + pool = gen_pool_get(pdata->iram_dev); if (!pool) { dev_err(&pdev->dev, "iram pool not available\n"); return -ENOMEM; diff --git a/drivers/media/v4l2-core/Kconfig b/drivers/media/v4l2-core/Kconfig index f7a01a72eb9e..b4b022933e29 100644 --- a/drivers/media/v4l2-core/Kconfig +++ b/drivers/media/v4l2-core/Kconfig @@ -44,6 +44,17 @@ config V4L2_MEM2MEM_DEV tristate depends on VIDEOBUF2_CORE +# Used by LED subsystem flash drivers +config V4L2_FLASH_LED_CLASS + tristate "V4L2 flash API for LED flash class devices" + depends on VIDEO_V4L2_SUBDEV_API + depends on LEDS_CLASS_FLASH + ---help--- + Say Y here to enable V4L2 flash API support for LED flash + class drivers. + + When in doubt, say N. 
+ # Used by drivers that need Videobuf modules config VIDEOBUF_GEN tristate diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile index 63d29f27538c..dc3de00d68b5 100644 --- a/drivers/media/v4l2-core/Makefile +++ b/drivers/media/v4l2-core/Makefile @@ -22,6 +22,8 @@ obj-$(CONFIG_VIDEO_TUNER) += tuner.o obj-$(CONFIG_V4L2_MEM2MEM_DEV) += v4l2-mem2mem.o +obj-$(CONFIG_V4L2_FLASH_LED_CLASS) += v4l2-flash-led-class.o + obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o obj-$(CONFIG_VIDEOBUF_DMA_CONTIG) += videobuf-dma-contig.o diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c index 85a6a34128a8..5bada202b2d3 100644 --- a/drivers/media/v4l2-core/v4l2-async.c +++ b/drivers/media/v4l2-core/v4l2-async.c @@ -22,10 +22,10 @@ #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> -static bool match_i2c(struct device *dev, struct v4l2_async_subdev *asd) +static bool match_i2c(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd) { #if IS_ENABLED(CONFIG_I2C) - struct i2c_client *client = i2c_verify_client(dev); + struct i2c_client *client = i2c_verify_client(sd->dev); return client && asd->match.i2c.adapter_id == client->adapter->nr && asd->match.i2c.address == client->addr; @@ -34,14 +34,24 @@ static bool match_i2c(struct device *dev, struct v4l2_async_subdev *asd) #endif } -static bool match_devname(struct device *dev, struct v4l2_async_subdev *asd) +static bool match_devname(struct v4l2_subdev *sd, + struct v4l2_async_subdev *asd) { - return !strcmp(asd->match.device_name.name, dev_name(dev)); + return !strcmp(asd->match.device_name.name, dev_name(sd->dev)); } -static bool match_of(struct device *dev, struct v4l2_async_subdev *asd) +static bool match_of(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd) { - return dev->of_node == asd->match.of.node; + return sd->of_node == asd->match.of.node; +} + +static bool match_custom(struct v4l2_subdev *sd, struct v4l2_async_subdev *asd) +{ + if (!asd->match.custom.match) + /* Match always */ + return true; + + return asd->match.custom.match(sd->dev, asd); } static LIST_HEAD(subdev_list); @@ -51,17 +61,14 @@ static DEFINE_MUTEX(list_lock); static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier, struct v4l2_subdev *sd) { + bool (*match)(struct v4l2_subdev *, struct v4l2_async_subdev *); struct v4l2_async_subdev *asd; - bool (*match)(struct device *, struct v4l2_async_subdev *); list_for_each_entry(asd, ¬ifier->waiting, list) { /* bus_type has been verified valid before */ switch (asd->match_type) { case V4L2_ASYNC_MATCH_CUSTOM: - match = asd->match.custom.match; - if (!match) - /* Match always */ - return asd; + match = match_custom; break; case V4L2_ASYNC_MATCH_DEVNAME: match = match_devname; @@ -79,7 +86,7 @@ static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier * } /* match cannot be NULL here */ - if (match(sd->dev, asd)) + if (match(sd, asd)) return asd; } @@ -266,6 +273,14 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd) { struct v4l2_async_notifier *notifier; + /* + * No reference taken. The reference is held by the device + * (struct v4l2_subdev.dev), and async sub-device does not + * exist independently of the device at any point of time. 
+ */ + if (!sd->of_node && sd->dev) + sd->of_node = sd->dev->of_node; + mutex_lock(&list_lock); INIT_LIST_HEAD(&sd->async_list); diff --git a/drivers/media/v4l2-core/v4l2-flash-led-class.c b/drivers/media/v4l2-core/v4l2-flash-led-class.c new file mode 100644 index 000000000000..5bdfb8d5263a --- /dev/null +++ b/drivers/media/v4l2-core/v4l2-flash-led-class.c @@ -0,0 +1,710 @@ +/* + * V4L2 flash LED sub-device registration helpers. + * + * Copyright (C) 2015 Samsung Electronics Co., Ltd + * Author: Jacek Anaszewski <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/led-class-flash.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <media/v4l2-flash-led-class.h> + +#define has_flash_op(v4l2_flash, op) \ + (v4l2_flash && v4l2_flash->ops->op) + +#define call_flash_op(v4l2_flash, op, arg) \ + (has_flash_op(v4l2_flash, op) ? \ + v4l2_flash->ops->op(v4l2_flash, arg) : \ + -EINVAL) + +enum ctrl_init_data_id { + LED_MODE, + TORCH_INTENSITY, + FLASH_INTENSITY, + INDICATOR_INTENSITY, + FLASH_TIMEOUT, + STROBE_SOURCE, + /* + * Only above values are applicable to + * the 'ctrls' array in the struct v4l2_flash. + */ + FLASH_STROBE, + STROBE_STOP, + STROBE_STATUS, + FLASH_FAULT, + NUM_FLASH_CTRLS, +}; + +static enum led_brightness __intensity_to_led_brightness( + struct v4l2_ctrl *ctrl, s32 intensity) +{ + intensity -= ctrl->minimum; + intensity /= (u32) ctrl->step; + + /* + * Indicator LEDs, unlike torch LEDs, are turned on/off basing on + * the state of V4L2_CID_FLASH_INDICATOR_INTENSITY control only. + * Therefore it must be possible to set it to 0 level which in + * the LED subsystem reflects LED_OFF state. + */ + if (ctrl->minimum) + ++intensity; + + return intensity; +} + +static s32 __led_brightness_to_intensity(struct v4l2_ctrl *ctrl, + enum led_brightness brightness) +{ + /* + * Indicator LEDs, unlike torch LEDs, are turned on/off basing on + * the state of V4L2_CID_FLASH_INDICATOR_INTENSITY control only. + * Do not decrement brightness read from the LED subsystem for + * indicator LED as it may equal 0. For torch LEDs this function + * is called only when V4L2_FLASH_LED_MODE_TORCH is set and the + * brightness read is guaranteed to be greater than 0. In the mode + * V4L2_FLASH_LED_MODE_NONE the cached torch intensity value is used. + */ + if (ctrl->id != V4L2_CID_FLASH_INDICATOR_INTENSITY) + --brightness; + + return (brightness * ctrl->step) + ctrl->minimum; +} + +static void v4l2_flash_set_led_brightness(struct v4l2_flash *v4l2_flash, + struct v4l2_ctrl *ctrl) +{ + struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; + enum led_brightness brightness; + + if (has_flash_op(v4l2_flash, intensity_to_led_brightness)) + brightness = call_flash_op(v4l2_flash, + intensity_to_led_brightness, + ctrl->val); + else + brightness = __intensity_to_led_brightness(ctrl, ctrl->val); + /* + * In case a LED Flash class driver provides ops for custom + * brightness <-> intensity conversion, it also must have defined + * related v4l2 control step == 1. In such a case a backward conversion + * from led brightness to v4l2 intensity is required to find out the + * the aligned intensity value. 
+ */ + if (has_flash_op(v4l2_flash, led_brightness_to_intensity)) + ctrl->val = call_flash_op(v4l2_flash, + led_brightness_to_intensity, + brightness); + + if (ctrl == ctrls[TORCH_INTENSITY]) { + if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH) + return; + + led_set_brightness(&v4l2_flash->fled_cdev->led_cdev, + brightness); + } else { + led_set_brightness(&v4l2_flash->iled_cdev->led_cdev, + brightness); + } +} + +static int v4l2_flash_update_led_brightness(struct v4l2_flash *v4l2_flash, + struct v4l2_ctrl *ctrl) +{ + struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; + struct led_classdev *led_cdev; + int ret; + + if (ctrl == ctrls[TORCH_INTENSITY]) { + /* + * Update torch brightness only if in TORCH_MODE. In other modes + * torch led is turned off, which would spuriously inform the + * user space that V4L2_CID_FLASH_TORCH_INTENSITY control value + * has changed to 0. + */ + if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH) + return 0; + led_cdev = &v4l2_flash->fled_cdev->led_cdev; + } else { + led_cdev = &v4l2_flash->iled_cdev->led_cdev; + } + + ret = led_update_brightness(led_cdev); + if (ret < 0) + return ret; + + if (has_flash_op(v4l2_flash, led_brightness_to_intensity)) + ctrl->val = call_flash_op(v4l2_flash, + led_brightness_to_intensity, + led_cdev->brightness); + else + ctrl->val = __led_brightness_to_intensity(ctrl, + led_cdev->brightness); + + return 0; +} + +static int v4l2_flash_g_volatile_ctrl(struct v4l2_ctrl *c) +{ + struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + bool is_strobing; + int ret; + + switch (c->id) { + case V4L2_CID_FLASH_TORCH_INTENSITY: + case V4L2_CID_FLASH_INDICATOR_INTENSITY: + return v4l2_flash_update_led_brightness(v4l2_flash, c); + case V4L2_CID_FLASH_INTENSITY: + ret = led_update_flash_brightness(fled_cdev); + if (ret < 0) + return ret; + /* + * No conversion is needed as LED Flash class also uses + * microamperes for flash intensity units. 
+ */ + c->val = fled_cdev->brightness.val; + return 0; + case V4L2_CID_FLASH_STROBE_STATUS: + ret = led_get_flash_strobe(fled_cdev, &is_strobing); + if (ret < 0) + return ret; + c->val = is_strobing; + return 0; + case V4L2_CID_FLASH_FAULT: + /* LED faults map directly to V4L2 flash faults */ + return led_get_flash_fault(fled_cdev, &c->val); + default: + return -EINVAL; + } +} + +static bool __software_strobe_mode_inactive(struct v4l2_ctrl **ctrls) +{ + return ((ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_FLASH) || + (ctrls[STROBE_SOURCE] && (ctrls[STROBE_SOURCE]->val != + V4L2_FLASH_STROBE_SOURCE_SOFTWARE))); +} + +static int v4l2_flash_s_ctrl(struct v4l2_ctrl *c) +{ + struct v4l2_flash *v4l2_flash = v4l2_ctrl_to_v4l2_flash(c); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct led_classdev *led_cdev = &fled_cdev->led_cdev; + struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; + bool external_strobe; + int ret = 0; + + switch (c->id) { + case V4L2_CID_FLASH_LED_MODE: + switch (c->val) { + case V4L2_FLASH_LED_MODE_NONE: + led_set_brightness(led_cdev, LED_OFF); + return led_set_flash_strobe(fled_cdev, false); + case V4L2_FLASH_LED_MODE_FLASH: + /* Turn the torch LED off */ + led_set_brightness(led_cdev, LED_OFF); + if (ctrls[STROBE_SOURCE]) { + external_strobe = (ctrls[STROBE_SOURCE]->val == + V4L2_FLASH_STROBE_SOURCE_EXTERNAL); + + ret = call_flash_op(v4l2_flash, + external_strobe_set, + external_strobe); + } + return ret; + case V4L2_FLASH_LED_MODE_TORCH: + if (ctrls[STROBE_SOURCE]) { + ret = call_flash_op(v4l2_flash, + external_strobe_set, + false); + if (ret < 0) + return ret; + } + /* Stop flash strobing */ + ret = led_set_flash_strobe(fled_cdev, false); + if (ret < 0) + return ret; + + v4l2_flash_set_led_brightness(v4l2_flash, + ctrls[TORCH_INTENSITY]); + return 0; + } + break; + case V4L2_CID_FLASH_STROBE_SOURCE: + external_strobe = (c->val == V4L2_FLASH_STROBE_SOURCE_EXTERNAL); + /* + * For some hardware arrangements setting strobe source may + * affect torch mode. Therefore, if not in the flash mode, + * cache only this setting. It will be applied upon switching + * to flash mode. + */ + if (ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_FLASH) + return 0; + + return call_flash_op(v4l2_flash, external_strobe_set, + external_strobe); + case V4L2_CID_FLASH_STROBE: + if (__software_strobe_mode_inactive(ctrls)) + return -EBUSY; + return led_set_flash_strobe(fled_cdev, true); + case V4L2_CID_FLASH_STROBE_STOP: + if (__software_strobe_mode_inactive(ctrls)) + return -EBUSY; + return led_set_flash_strobe(fled_cdev, false); + case V4L2_CID_FLASH_TIMEOUT: + /* + * No conversion is needed as LED Flash class also uses + * microseconds for flash timeout units. + */ + return led_set_flash_timeout(fled_cdev, c->val); + case V4L2_CID_FLASH_INTENSITY: + /* + * No conversion is needed as LED Flash class also uses + * microamperes for flash intensity units. 
+ */ + return led_set_flash_brightness(fled_cdev, c->val); + case V4L2_CID_FLASH_TORCH_INTENSITY: + case V4L2_CID_FLASH_INDICATOR_INTENSITY: + v4l2_flash_set_led_brightness(v4l2_flash, c); + return 0; + } + + return -EINVAL; +} + +static const struct v4l2_ctrl_ops v4l2_flash_ctrl_ops = { + .g_volatile_ctrl = v4l2_flash_g_volatile_ctrl, + .s_ctrl = v4l2_flash_s_ctrl, +}; + +static void __lfs_to_v4l2_ctrl_config(struct led_flash_setting *s, + struct v4l2_ctrl_config *c) +{ + c->min = s->min; + c->max = s->max; + c->step = s->step; + c->def = s->val; +} + +static void __fill_ctrl_init_data(struct v4l2_flash *v4l2_flash, + struct v4l2_flash_config *flash_cfg, + struct v4l2_flash_ctrl_data *ctrl_init_data) +{ + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + const struct led_flash_ops *fled_cdev_ops = fled_cdev->ops; + struct led_classdev *led_cdev = &fled_cdev->led_cdev; + struct v4l2_ctrl_config *ctrl_cfg; + u32 mask; + + /* Init FLASH_FAULT ctrl data */ + if (flash_cfg->flash_faults) { + ctrl_init_data[FLASH_FAULT].cid = V4L2_CID_FLASH_FAULT; + ctrl_cfg = &ctrl_init_data[FLASH_FAULT].config; + ctrl_cfg->id = V4L2_CID_FLASH_FAULT; + ctrl_cfg->max = flash_cfg->flash_faults; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_READ_ONLY; + } + + /* Init FLASH_LED_MODE ctrl data */ + mask = 1 << V4L2_FLASH_LED_MODE_NONE | + 1 << V4L2_FLASH_LED_MODE_TORCH; + if (led_cdev->flags & LED_DEV_CAP_FLASH) + mask |= 1 << V4L2_FLASH_LED_MODE_FLASH; + + ctrl_init_data[LED_MODE].cid = V4L2_CID_FLASH_LED_MODE; + ctrl_cfg = &ctrl_init_data[LED_MODE].config; + ctrl_cfg->id = V4L2_CID_FLASH_LED_MODE; + ctrl_cfg->max = V4L2_FLASH_LED_MODE_TORCH; + ctrl_cfg->menu_skip_mask = ~mask; + ctrl_cfg->def = V4L2_FLASH_LED_MODE_NONE; + ctrl_cfg->flags = 0; + + /* Init TORCH_INTENSITY ctrl data */ + ctrl_init_data[TORCH_INTENSITY].cid = V4L2_CID_FLASH_TORCH_INTENSITY; + ctrl_cfg = &ctrl_init_data[TORCH_INTENSITY].config; + __lfs_to_v4l2_ctrl_config(&flash_cfg->torch_intensity, ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_TORCH_INTENSITY; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + + /* Init INDICATOR_INTENSITY ctrl data */ + if (v4l2_flash->iled_cdev) { + ctrl_init_data[INDICATOR_INTENSITY].cid = + V4L2_CID_FLASH_INDICATOR_INTENSITY; + ctrl_cfg = &ctrl_init_data[INDICATOR_INTENSITY].config; + __lfs_to_v4l2_ctrl_config(&flash_cfg->indicator_intensity, + ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_INDICATOR_INTENSITY; + ctrl_cfg->min = 0; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + } + + if (!(led_cdev->flags & LED_DEV_CAP_FLASH)) + return; + + /* Init FLASH_STROBE ctrl data */ + ctrl_init_data[FLASH_STROBE].cid = V4L2_CID_FLASH_STROBE; + ctrl_cfg = &ctrl_init_data[FLASH_STROBE].config; + ctrl_cfg->id = V4L2_CID_FLASH_STROBE; + + /* Init STROBE_STOP ctrl data */ + ctrl_init_data[STROBE_STOP].cid = V4L2_CID_FLASH_STROBE_STOP; + ctrl_cfg = &ctrl_init_data[STROBE_STOP].config; + ctrl_cfg->id = V4L2_CID_FLASH_STROBE_STOP; + + /* Init FLASH_STROBE_SOURCE ctrl data */ + if (flash_cfg->has_external_strobe) { + mask = (1 << V4L2_FLASH_STROBE_SOURCE_SOFTWARE) | + (1 << V4L2_FLASH_STROBE_SOURCE_EXTERNAL); + ctrl_init_data[STROBE_SOURCE].cid = + V4L2_CID_FLASH_STROBE_SOURCE; + ctrl_cfg = &ctrl_init_data[STROBE_SOURCE].config; + ctrl_cfg->id = V4L2_CID_FLASH_STROBE_SOURCE; + ctrl_cfg->max = V4L2_FLASH_STROBE_SOURCE_EXTERNAL; + ctrl_cfg->menu_skip_mask = ~mask; + ctrl_cfg->def = V4L2_FLASH_STROBE_SOURCE_SOFTWARE; + } + + /* Init 
STROBE_STATUS ctrl data */ + if (fled_cdev_ops->strobe_get) { + ctrl_init_data[STROBE_STATUS].cid = + V4L2_CID_FLASH_STROBE_STATUS; + ctrl_cfg = &ctrl_init_data[STROBE_STATUS].config; + ctrl_cfg->id = V4L2_CID_FLASH_STROBE_STATUS; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_READ_ONLY; + } + + /* Init FLASH_TIMEOUT ctrl data */ + if (fled_cdev_ops->timeout_set) { + ctrl_init_data[FLASH_TIMEOUT].cid = V4L2_CID_FLASH_TIMEOUT; + ctrl_cfg = &ctrl_init_data[FLASH_TIMEOUT].config; + __lfs_to_v4l2_ctrl_config(&fled_cdev->timeout, ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_TIMEOUT; + } + + /* Init FLASH_INTENSITY ctrl data */ + if (fled_cdev_ops->flash_brightness_set) { + ctrl_init_data[FLASH_INTENSITY].cid = V4L2_CID_FLASH_INTENSITY; + ctrl_cfg = &ctrl_init_data[FLASH_INTENSITY].config; + __lfs_to_v4l2_ctrl_config(&fled_cdev->brightness, ctrl_cfg); + ctrl_cfg->id = V4L2_CID_FLASH_INTENSITY; + ctrl_cfg->flags = V4L2_CTRL_FLAG_VOLATILE | + V4L2_CTRL_FLAG_EXECUTE_ON_WRITE; + } +} + +static int v4l2_flash_init_controls(struct v4l2_flash *v4l2_flash, + struct v4l2_flash_config *flash_cfg) + +{ + struct v4l2_flash_ctrl_data *ctrl_init_data; + struct v4l2_ctrl *ctrl; + struct v4l2_ctrl_config *ctrl_cfg; + int i, ret, num_ctrls = 0; + + v4l2_flash->ctrls = devm_kzalloc(v4l2_flash->sd.dev, + sizeof(*v4l2_flash->ctrls) * + (STROBE_SOURCE + 1), GFP_KERNEL); + if (!v4l2_flash->ctrls) + return -ENOMEM; + + /* allocate memory dynamically so as not to exceed stack frame size */ + ctrl_init_data = kcalloc(NUM_FLASH_CTRLS, sizeof(*ctrl_init_data), + GFP_KERNEL); + if (!ctrl_init_data) + return -ENOMEM; + + __fill_ctrl_init_data(v4l2_flash, flash_cfg, ctrl_init_data); + + for (i = 0; i < NUM_FLASH_CTRLS; ++i) + if (ctrl_init_data[i].cid) + ++num_ctrls; + + v4l2_ctrl_handler_init(&v4l2_flash->hdl, num_ctrls); + + for (i = 0; i < NUM_FLASH_CTRLS; ++i) { + ctrl_cfg = &ctrl_init_data[i].config; + if (!ctrl_init_data[i].cid) + continue; + + if (ctrl_cfg->id == V4L2_CID_FLASH_LED_MODE || + ctrl_cfg->id == V4L2_CID_FLASH_STROBE_SOURCE) + ctrl = v4l2_ctrl_new_std_menu(&v4l2_flash->hdl, + &v4l2_flash_ctrl_ops, + ctrl_cfg->id, + ctrl_cfg->max, + ctrl_cfg->menu_skip_mask, + ctrl_cfg->def); + else + ctrl = v4l2_ctrl_new_std(&v4l2_flash->hdl, + &v4l2_flash_ctrl_ops, + ctrl_cfg->id, + ctrl_cfg->min, + ctrl_cfg->max, + ctrl_cfg->step, + ctrl_cfg->def); + + if (ctrl) + ctrl->flags |= ctrl_cfg->flags; + + if (i <= STROBE_SOURCE) + v4l2_flash->ctrls[i] = ctrl; + } + + kfree(ctrl_init_data); + + if (v4l2_flash->hdl.error) { + ret = v4l2_flash->hdl.error; + goto error_free_handler; + } + + v4l2_ctrl_handler_setup(&v4l2_flash->hdl); + + v4l2_flash->sd.ctrl_handler = &v4l2_flash->hdl; + + return 0; + +error_free_handler: + v4l2_ctrl_handler_free(&v4l2_flash->hdl); + return ret; +} + +static int __sync_device_with_v4l2_controls(struct v4l2_flash *v4l2_flash) +{ + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct v4l2_ctrl **ctrls = v4l2_flash->ctrls; + int ret = 0; + + v4l2_flash_set_led_brightness(v4l2_flash, ctrls[TORCH_INTENSITY]); + + if (ctrls[INDICATOR_INTENSITY]) + v4l2_flash_set_led_brightness(v4l2_flash, + ctrls[INDICATOR_INTENSITY]); + + if (ctrls[FLASH_TIMEOUT]) { + ret = led_set_flash_timeout(fled_cdev, + ctrls[FLASH_TIMEOUT]->val); + if (ret < 0) + return ret; + } + + if (ctrls[FLASH_INTENSITY]) { + ret = led_set_flash_brightness(fled_cdev, + ctrls[FLASH_INTENSITY]->val); + if (ret < 0) + return ret; + } + + /* + * For some hardware arrangements setting strobe source may affect + * 
torch mode. Synchronize strobe source setting only if not in torch + * mode. For torch mode case it will get synchronized upon switching + * to flash mode. + */ + if (ctrls[STROBE_SOURCE] && + ctrls[LED_MODE]->val != V4L2_FLASH_LED_MODE_TORCH) + ret = call_flash_op(v4l2_flash, external_strobe_set, + ctrls[STROBE_SOURCE]->val); + + return ret; +} + +/* + * V4L2 subdev internal operations + */ + +static int v4l2_flash_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct led_classdev *led_cdev = &fled_cdev->led_cdev; + struct led_classdev_flash *iled_cdev = v4l2_flash->iled_cdev; + struct led_classdev *led_cdev_ind = NULL; + int ret = 0; + + if (!v4l2_fh_is_singular(&fh->vfh)) + return 0; + + mutex_lock(&led_cdev->led_access); + + led_sysfs_disable(led_cdev); + led_trigger_remove(led_cdev); + + mutex_unlock(&led_cdev->led_access); + + if (iled_cdev) { + led_cdev_ind = &iled_cdev->led_cdev; + + mutex_lock(&led_cdev_ind->led_access); + + led_sysfs_disable(led_cdev_ind); + led_trigger_remove(led_cdev_ind); + + mutex_unlock(&led_cdev_ind->led_access); + } + + ret = __sync_device_with_v4l2_controls(v4l2_flash); + if (ret < 0) + goto out_sync_device; + + return 0; +out_sync_device: + mutex_lock(&led_cdev->led_access); + led_sysfs_enable(led_cdev); + mutex_unlock(&led_cdev->led_access); + + if (led_cdev_ind) { + mutex_lock(&led_cdev_ind->led_access); + led_sysfs_enable(led_cdev_ind); + mutex_unlock(&led_cdev_ind->led_access); + } + + return ret; +} + +static int v4l2_flash_close(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh) +{ + struct v4l2_flash *v4l2_flash = v4l2_subdev_to_v4l2_flash(sd); + struct led_classdev_flash *fled_cdev = v4l2_flash->fled_cdev; + struct led_classdev *led_cdev = &fled_cdev->led_cdev; + struct led_classdev_flash *iled_cdev = v4l2_flash->iled_cdev; + int ret = 0; + + if (!v4l2_fh_is_singular(&fh->vfh)) + return 0; + + mutex_lock(&led_cdev->led_access); + + if (v4l2_flash->ctrls[STROBE_SOURCE]) + ret = v4l2_ctrl_s_ctrl(v4l2_flash->ctrls[STROBE_SOURCE], + V4L2_FLASH_STROBE_SOURCE_SOFTWARE); + led_sysfs_enable(led_cdev); + + mutex_unlock(&led_cdev->led_access); + + if (iled_cdev) { + struct led_classdev *led_cdev_ind = &iled_cdev->led_cdev; + + mutex_lock(&led_cdev_ind->led_access); + led_sysfs_enable(led_cdev_ind); + mutex_unlock(&led_cdev_ind->led_access); + } + + return ret; +} + +static const struct v4l2_subdev_internal_ops v4l2_flash_subdev_internal_ops = { + .open = v4l2_flash_open, + .close = v4l2_flash_close, +}; + +static const struct v4l2_subdev_core_ops v4l2_flash_core_ops = { + .queryctrl = v4l2_subdev_queryctrl, + .querymenu = v4l2_subdev_querymenu, +}; + +static const struct v4l2_subdev_ops v4l2_flash_subdev_ops = { + .core = &v4l2_flash_core_ops, +}; + +struct v4l2_flash *v4l2_flash_init( + struct device *dev, struct device_node *of_node, + struct led_classdev_flash *fled_cdev, + struct led_classdev_flash *iled_cdev, + const struct v4l2_flash_ops *ops, + struct v4l2_flash_config *config) +{ + struct v4l2_flash *v4l2_flash; + struct led_classdev *led_cdev; + struct v4l2_subdev *sd; + int ret; + + if (!fled_cdev || !ops || !config) + return ERR_PTR(-EINVAL); + + led_cdev = &fled_cdev->led_cdev; + + v4l2_flash = devm_kzalloc(led_cdev->dev, sizeof(*v4l2_flash), + GFP_KERNEL); + if (!v4l2_flash) + return ERR_PTR(-ENOMEM); + + sd = &v4l2_flash->sd; + v4l2_flash->fled_cdev = fled_cdev; + v4l2_flash->iled_cdev = iled_cdev; + 
v4l2_flash->ops = ops; + sd->dev = dev; + sd->of_node = of_node; + v4l2_subdev_init(sd, &v4l2_flash_subdev_ops); + sd->internal_ops = &v4l2_flash_subdev_internal_ops; + sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + strlcpy(sd->name, config->dev_name, sizeof(sd->name)); + + ret = media_entity_init(&sd->entity, 0, NULL, 0); + if (ret < 0) + return ERR_PTR(ret); + + sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_FLASH; + + ret = v4l2_flash_init_controls(v4l2_flash, config); + if (ret < 0) + goto err_init_controls; + + if (sd->of_node) + of_node_get(sd->of_node); + else + of_node_get(led_cdev->dev->of_node); + + ret = v4l2_async_register_subdev(sd); + if (ret < 0) + goto err_async_register_sd; + + return v4l2_flash; + +err_async_register_sd: + of_node_put(led_cdev->dev->of_node); + v4l2_ctrl_handler_free(sd->ctrl_handler); +err_init_controls: + media_entity_cleanup(&sd->entity); + + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(v4l2_flash_init); + +void v4l2_flash_release(struct v4l2_flash *v4l2_flash) +{ + struct v4l2_subdev *sd; + struct led_classdev *led_cdev; + + if (IS_ERR_OR_NULL(v4l2_flash)) + return; + + sd = &v4l2_flash->sd; + led_cdev = &v4l2_flash->fled_cdev->led_cdev; + + v4l2_async_unregister_subdev(sd); + + if (sd->of_node) + of_node_put(sd->of_node); + else + of_node_put(led_cdev->dev->of_node); + + v4l2_ctrl_handler_free(sd->ctrl_handler); + media_entity_cleanup(&sd->entity); +} +EXPORT_SYMBOL_GPL(v4l2_flash_release); + +MODULE_AUTHOR("Jacek Anaszewski <[email protected]>"); +MODULE_DESCRIPTION("V4L2 Flash sub-device helpers"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c index aeabaa5aedf7..48db922075e2 100644 --- a/drivers/memstick/host/jmb38x_ms.c +++ b/drivers/memstick/host/jmb38x_ms.c @@ -419,10 +419,10 @@ static int jmb38x_ms_issue_cmd(struct memstick_host *msh) } if (host->cmd_flags & DMA_DATA) { - if (1 != pci_map_sg(host->chip->pdev, &host->req->sg, 1, + if (1 != dma_map_sg(&host->chip->pdev->dev, &host->req->sg, 1, host->req->data_dir == READ - ? PCI_DMA_FROMDEVICE - : PCI_DMA_TODEVICE)) { + ? DMA_FROM_DEVICE + : DMA_TO_DEVICE)) { host->req->error = -ENOMEM; return host->req->error; } @@ -487,9 +487,9 @@ static void jmb38x_ms_complete_cmd(struct memstick_host *msh, int last) writel(0, host->addr + DMA_CONTROL); if (host->cmd_flags & DMA_DATA) { - pci_unmap_sg(host->chip->pdev, &host->req->sg, 1, + dma_unmap_sg(&host->chip->pdev->dev, &host->req->sg, 1, host->req->data_dir == READ - ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); + ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE); } else { t_val = readl(host->addr + INT_STATUS_ENABLE); if (host->req->data_dir == READ) @@ -925,7 +925,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev, int pci_dev_busy = 0; int rc, cnt; - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (rc) return rc; diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c index e2a4f5f415b2..ef09ba0289d7 100644 --- a/drivers/memstick/host/r592.c +++ b/drivers/memstick/host/r592.c @@ -754,7 +754,7 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto error2; pci_set_master(pdev); - error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (error) goto error3; @@ -787,8 +787,8 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id) } /* This is just a precation, so don't fail */ - dev->dummy_dma_page = pci_alloc_consistent(pdev, PAGE_SIZE, - &dev->dummy_dma_page_physical_address); + dev->dummy_dma_page = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + &dev->dummy_dma_page_physical_address, GFP_KERNEL); r592_stop_dma(dev , 0); if (request_irq(dev->irq, &r592_irq, IRQF_SHARED, @@ -805,7 +805,7 @@ error7: free_irq(dev->irq, dev); error6: if (dev->dummy_dma_page) - pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page, + dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page, dev->dummy_dma_page_physical_address); kthread_stop(dev->io_thread); @@ -845,7 +845,7 @@ static void r592_remove(struct pci_dev *pdev) memstick_free_host(dev->host); if (dev->dummy_dma_page) - pci_free_consistent(pdev, PAGE_SIZE, dev->dummy_dma_page, + dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page, dev->dummy_dma_page_physical_address); } diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 977bd3a3eed0..120df5c08741 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c @@ -417,9 +417,8 @@ static int __init asic3_irq_probe(struct platform_device *pdev) asic3_write_register(asic, ASIC3_OFFSET(INTR, INT_MASK), ASIC3_INTMASK_GINTMASK); - irq_set_chained_handler(asic->irq_nr, asic3_irq_demux); + irq_set_chained_handler_and_data(asic->irq_nr, asic3_irq_demux, asic); irq_set_irq_type(asic->irq_nr, IRQ_TYPE_EDGE_RISING); - irq_set_handler_data(asic->irq_nr, asic); return 0; } diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 9df2b6801f76..b2b411da297b 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -43,6 +43,7 @@ #include <linux/regulator/consumer.h> #include <linux/pinctrl/consumer.h> #include <linux/pm_runtime.h> +#include <linux/pm_wakeirq.h> #include <linux/platform_data/hsmmc-omap.h> /* OMAP HSMMC Host Controller Registers */ @@ -218,7 +219,6 @@ struct omap_hsmmc_host { unsigned int flags; #define AUTO_CMD23 (1 << 0) /* Auto CMD23 support */ #define HSMMC_SDIO_IRQ_ENABLED (1 << 1) /* SDIO irq enabled */ -#define HSMMC_WAKE_IRQ_ENABLED (1 << 2) struct omap_hsmmc_next next_data; struct omap_hsmmc_platform_data *pdata; @@ -1117,22 +1117,6 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id) return IRQ_HANDLED; } -static irqreturn_t omap_hsmmc_wake_irq(int irq, void *dev_id) -{ - struct omap_hsmmc_host *host = dev_id; - - /* cirq is level triggered, disable to avoid infinite loop */ - spin_lock(&host->irq_lock); - if (host->flags & HSMMC_WAKE_IRQ_ENABLED) { - disable_irq_nosync(host->wake_irq); - host->flags &= ~HSMMC_WAKE_IRQ_ENABLED; - } - spin_unlock(&host->irq_lock); - 
pm_request_resume(host->dev); /* no use counter */ - - return IRQ_HANDLED; -} - static void set_sd_bus_power(struct omap_hsmmc_host *host) { unsigned long i; @@ -1665,7 +1649,6 @@ static void omap_hsmmc_enable_sdio_irq(struct mmc_host *mmc, int enable) static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host) { - struct mmc_host *mmc = host->mmc; int ret; /* @@ -1677,11 +1660,7 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host) if (!host->dev->of_node || !host->wake_irq) return -ENODEV; - /* Prevent auto-enabling of IRQ */ - irq_set_status_flags(host->wake_irq, IRQ_NOAUTOEN); - ret = devm_request_irq(host->dev, host->wake_irq, omap_hsmmc_wake_irq, - IRQF_TRIGGER_LOW | IRQF_ONESHOT, - mmc_hostname(mmc), host); + ret = dev_pm_set_dedicated_wake_irq(host->dev, host->wake_irq); if (ret) { dev_err(mmc_dev(host->mmc), "Unable to request wake IRQ\n"); goto err; @@ -1718,7 +1697,7 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host) return 0; err_free_irq: - devm_free_irq(host->dev, host->wake_irq, host); + dev_pm_clear_wake_irq(host->dev); err: dev_warn(host->dev, "no SDIO IRQ support, falling back to polling\n"); host->wake_irq = 0; @@ -2007,6 +1986,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev) omap_hsmmc_ops.multi_io_quirk = omap_hsmmc_multi_io_quirk; } + device_init_wakeup(&pdev->dev, true); pm_runtime_enable(host->dev); pm_runtime_get_sync(host->dev); pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY); @@ -2147,6 +2127,7 @@ err_slot_name: if (host->use_reg) omap_hsmmc_reg_put(host); err_irq: + device_init_wakeup(&pdev->dev, false); if (host->tx_chan) dma_release_channel(host->tx_chan); if (host->rx_chan) @@ -2178,6 +2159,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev) pm_runtime_put_sync(host->dev); pm_runtime_disable(host->dev); + device_init_wakeup(&pdev->dev, false); if (host->dbclk) clk_disable_unprepare(host->dbclk); @@ -2204,11 +2186,6 @@ static int omap_hsmmc_suspend(struct device *dev) OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); } - /* do not wake up due to sdio irq */ - if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && - !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ)) - disable_irq(host->wake_irq); - if (host->dbclk) clk_disable_unprepare(host->dbclk); @@ -2233,11 +2210,6 @@ static int omap_hsmmc_resume(struct device *dev) omap_hsmmc_conf_bus_power(host); omap_hsmmc_protect_card(host); - - if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && - !(host->mmc->pm_flags & MMC_PM_WAKE_SDIO_IRQ)) - enable_irq(host->wake_irq); - pm_runtime_mark_last_busy(host->dev); pm_runtime_put_autosuspend(host->dev); return 0; @@ -2277,10 +2249,6 @@ static int omap_hsmmc_runtime_suspend(struct device *dev) } pinctrl_pm_select_idle_state(dev); - - WARN_ON(host->flags & HSMMC_WAKE_IRQ_ENABLED); - enable_irq(host->wake_irq); - host->flags |= HSMMC_WAKE_IRQ_ENABLED; } else { pinctrl_pm_select_idle_state(dev); } @@ -2302,11 +2270,6 @@ static int omap_hsmmc_runtime_resume(struct device *dev) spin_lock_irqsave(&host->irq_lock, flags); if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && (host->flags & HSMMC_SDIO_IRQ_ENABLED)) { - /* sdio irq flag can't change while in runtime suspend */ - if (host->flags & HSMMC_WAKE_IRQ_ENABLED) { - disable_irq_nosync(host->wake_irq); - host->flags &= ~HSMMC_WAKE_IRQ_ENABLED; - } pinctrl_pm_select_default_state(host->dev); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index dd03ad865caf..661cdaa7ea96 100644 --- 
a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c @@ -268,7 +268,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata, int ret; /* Try to obtain pages, decreasing order if necessary */ - gfp |= __GFP_COLD | __GFP_COMP; + gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN; while (order >= 0) { pages = alloc_pages(gfp, order); if (pages) diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 95153b234c71..299eb4315fe6 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -948,7 +948,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) struct resource *res; void __iomem *base_addr; u32 offset; - int ret; + int ret = 0; pdev = pdata->pdev; dev = &pdev->dev; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 7a4aaa3c01b6..cd4ae76bbff2 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -530,7 +530,6 @@ enum bnx2x_tpa_mode_t { struct bnx2x_alloc_pool { struct page *page; - dma_addr_t dma; unsigned int offset; }; @@ -2418,10 +2417,13 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) -#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) +#define HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD \ + (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY) + +#define HW_PRTY_ASSERT_SET_3 (HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD | \ + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) #define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index e2a65334708d..a90d7364334f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -563,23 +563,20 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp, return -ENOMEM; } - pool->dma = dma_map_page(&bp->pdev->dev, pool->page, 0, - PAGE_SIZE, DMA_FROM_DEVICE); - if (unlikely(dma_mapping_error(&bp->pdev->dev, - pool->dma))) { - __free_pages(pool->page, PAGES_PER_SGE_SHIFT); - pool->page = NULL; - BNX2X_ERR("Can't map sge\n"); - return -ENOMEM; - } pool->offset = 0; } + mapping = dma_map_page(&bp->pdev->dev, pool->page, + pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { + BNX2X_ERR("Can't map sge\n"); + return -ENOMEM; + } + get_page(pool->page); sw_buf->page = pool->page; sw_buf->offset = pool->offset; - mapping = pool->dma + sw_buf->offset; dma_unmap_addr_set(sw_buf, mapping, mapping); sge->addr_hi = cpu_to_le32(U64_HI(mapping)); @@ -648,9 +645,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, return err; } - dma_unmap_single(&bp->pdev->dev, - dma_unmap_addr(&old_rx_pg, mapping), - SGE_PAGE_SIZE, DMA_FROM_DEVICE); + dma_unmap_page(&bp->pdev->dev, + dma_unmap_addr(&old_rx_pg, mapping), + SGE_PAGE_SIZE, DMA_FROM_DEVICE); /* Add one frag and update the appropriate fields 
in the skb */ if (fp->mode == TPA_MODE_LRO) skb_fill_page_desc(skb, j, old_rx_pg.page, @@ -3421,8 +3418,13 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb, u32 wnd_sum = 0; /* Headers length */ - hlen = (int)(skb_transport_header(skb) - skb->data) + - tcp_hdrlen(skb); + if (xmit_type & XMIT_GSO_ENC) + hlen = (int)(skb_inner_transport_header(skb) - + skb->data) + + inner_tcp_hdrlen(skb); + else + hlen = (int)(skb_transport_header(skb) - + skb->data) + tcp_hdrlen(skb); /* Amount of data (w/o headers) on linear part of SKB*/ first_bd_sz = skb_headlen(skb) - hlen; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index 2b30081ec26d..03b7404d5b9b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -807,8 +807,8 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp, /* Since many fragments can share the same page, make sure to * only unmap and free the page once. */ - dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), - SGE_PAGE_SIZE, DMA_FROM_DEVICE); + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), + SGE_PAGE_SIZE, DMA_FROM_DEVICE); put_page(page); @@ -974,14 +974,6 @@ static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp, if (!pool->page) return; - /* Page was not fully fragmented. Unmap unused space */ - if (pool->offset < PAGE_SIZE) { - dma_addr_t dma = pool->dma + pool->offset; - int size = PAGE_SIZE - pool->offset; - - dma_unmap_single(&bp->pdev->dev, dma, size, DMA_FROM_DEVICE); - } - put_page(pool->page); pool->page = NULL; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 48ed005ba73f..76b9052a961c 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -257,14 +257,15 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct bnx2x *bp = netdev_priv(dev); int cfg_idx = bnx2x_get_link_cfg_idx(bp); + u32 media_type; /* Dual Media boards present all available port types */ cmd->supported = bp->port.supported[cfg_idx] | (bp->port.supported[cfg_idx ^ 1] & (SUPPORTED_TP | SUPPORTED_FIBRE)); cmd->advertising = bp->port.advertising[cfg_idx]; - if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type == - ETH_PHY_SFP_1G_FIBER) { + media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type; + if (media_type == ETH_PHY_SFP_1G_FIBER) { cmd->supported &= ~(SUPPORTED_10000baseT_Full); cmd->advertising &= ~(ADVERTISED_10000baseT_Full); } @@ -312,12 +313,26 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) cmd->lp_advertising |= ADVERTISED_100baseT_Full; if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) cmd->lp_advertising |= ADVERTISED_1000baseT_Half; - if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_1000baseT_Full; + if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) { + if (media_type == ETH_PHY_KR) { + cmd->lp_advertising |= + ADVERTISED_1000baseKX_Full; + } else { + cmd->lp_advertising |= + ADVERTISED_1000baseT_Full; + } + } if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE) cmd->lp_advertising |= ADVERTISED_2500baseX_Full; - if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) - cmd->lp_advertising |= ADVERTISED_10000baseT_Full; + if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) { + if (media_type == ETH_PHY_KR) { + 
cmd->lp_advertising |= + ADVERTISED_10000baseKR_Full; + } else { + cmd->lp_advertising |= + ADVERTISED_10000baseT_Full; + } + } if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE) cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full; } @@ -564,15 +579,20 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) return -EINVAL; } - if (!(bp->port.supported[cfg_idx] & - SUPPORTED_1000baseT_Full)) { + if (bp->port.supported[cfg_idx] & + SUPPORTED_1000baseT_Full) { + advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_TP); + + } else if (bp->port.supported[cfg_idx] & + SUPPORTED_1000baseKX_Full) { + advertising = ADVERTISED_1000baseKX_Full; + } else { DP(BNX2X_MSG_ETHTOOL, "1G full not supported\n"); return -EINVAL; } - advertising = (ADVERTISED_1000baseT_Full | - ADVERTISED_TP); break; case SPEED_2500: @@ -600,17 +620,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) return -EINVAL; } phy_idx = bnx2x_get_cur_phy_idx(bp); - if (!(bp->port.supported[cfg_idx] - & SUPPORTED_10000baseT_Full) || - (bp->link_params.phy[phy_idx].media_type == + if ((bp->port.supported[cfg_idx] & + SUPPORTED_10000baseT_Full) && + (bp->link_params.phy[phy_idx].media_type != ETH_PHY_SFP_1G_FIBER)) { + advertising = (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + } else if (bp->port.supported[cfg_idx] & + SUPPORTED_10000baseKR_Full) { + advertising = (ADVERTISED_10000baseKR_Full | + ADVERTISED_FIBRE); + } else { DP(BNX2X_MSG_ETHTOOL, "10G full not supported\n"); return -EINVAL; } - advertising = (ADVERTISED_10000baseT_Full | - ADVERTISED_FIBRE); break; default: @@ -633,6 +658,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) bp->link_params.multi_phy_config = new_multi_phy_config; if (netif_running(dev)) { bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_force_link_reset(bp); bnx2x_link_set(bp); } @@ -1204,6 +1230,7 @@ static int bnx2x_acquire_nvram_lock(struct bnx2x *bp) if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "cannot get access to nvram interface\n"); + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM); return -EBUSY; } @@ -1944,6 +1971,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev, if (netif_running(dev)) { bnx2x_stats_handle(bp, STATS_EVENT_STOP); + bnx2x_force_link_reset(bp); bnx2x_link_set(bp); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 21a0d6afca4a..a0b03c27e0a3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -3392,9 +3392,9 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy, case BNX2X_FLOW_CTRL_AUTO: switch (params->req_fc_auto_adv) { case BNX2X_FLOW_CTRL_BOTH: + case BNX2X_FLOW_CTRL_RX: *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; break; - case BNX2X_FLOW_CTRL_RX: case BNX2X_FLOW_CTRL_TX: *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; @@ -3488,14 +3488,21 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params, bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val); } -static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) -{ /* LD LP */ +static void bnx2x_pause_resolve(struct bnx2x_phy *phy, + struct link_params *params, + struct link_vars *vars, + u32 pause_result) +{ + struct bnx2x *bp = params->bp; + /* LD LP */ switch (pause_result) { /* ASYM P ASYM P */ case 0xb: /* 1 0 1 1 */ + DP(NETIF_MSG_LINK, "Flow Control: TX 
only\n"); vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; break; case 0xe: /* 1 1 1 0 */ + DP(NETIF_MSG_LINK, "Flow Control: RX only\n"); vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; break; @@ -3503,10 +3510,22 @@ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) case 0x7: /* 0 1 1 1 */ case 0xd: /* 1 1 0 1 */ case 0xf: /* 1 1 1 1 */ - vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; + /* If the user selected to advertise RX ONLY, + * although we advertised both, need to enable + * RX only. + */ + if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) { + DP(NETIF_MSG_LINK, "Flow Control: RX & TX\n"); + vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; + } else { + DP(NETIF_MSG_LINK, "Flow Control: RX only\n"); + vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; + } break; default: + DP(NETIF_MSG_LINK, "Flow Control: None\n"); + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; break; } if (pause_result & (1<<0)) @@ -3567,7 +3586,7 @@ static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy, pause_result |= (lp_pause & MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", pause_result); - bnx2x_pause_resolve(vars, pause_result); + bnx2x_pause_resolve(phy, params, vars, pause_result); } @@ -5396,7 +5415,7 @@ static void bnx2x_update_adv_fc(struct bnx2x_phy *phy, MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", pause_result); } - bnx2x_pause_resolve(vars, pause_result); + bnx2x_pause_resolve(phy, params, vars, pause_result); } @@ -7129,7 +7148,7 @@ static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy, pause_result |= (lp_pause & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7; - bnx2x_pause_resolve(vars, pause_result); + bnx2x_pause_resolve(phy, params, vars, pause_result); DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n", pause_result); } @@ -11474,7 +11493,9 @@ static const struct bnx2x_phy phy_warpcore = { SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Full | + SUPPORTED_1000baseKX_Full | SUPPORTED_10000baseT_Full | + SUPPORTED_10000baseKR_Full | SUPPORTED_20000baseKR2_Full | SUPPORTED_20000baseMLD2_Full | SUPPORTED_FIBRE | @@ -11980,8 +12001,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, break; case PORT_HW_CFG_NET_SERDES_IF_KR: phy->media_type = ETH_PHY_KR; - phy->supported &= (SUPPORTED_1000baseT_Full | - SUPPORTED_10000baseT_Full | + phy->supported &= (SUPPORTED_1000baseKX_Full | + SUPPORTED_10000baseKR_Full | SUPPORTED_FIBRE | SUPPORTED_Autoneg | SUPPORTED_Pause | @@ -11999,8 +12020,8 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, phy->media_type = ETH_PHY_KR; phy->flags |= FLAGS_WC_DUAL_MODE; phy->supported &= (SUPPORTED_20000baseKR2_Full | - SUPPORTED_10000baseT_Full | - SUPPORTED_1000baseT_Full | + SUPPORTED_10000baseKR_Full | + SUPPORTED_1000baseKX_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE | SUPPORTED_Pause | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 33501bcddc48..c27af12314ed 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -2287,13 +2287,11 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode) void bnx2x_calc_fc_adv(struct bnx2x *bp) { u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); + + bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | + ADVERTISED_Pause); switch (bp->link_vars.ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { - case 
MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: - bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | - ADVERTISED_Pause); - break; - case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | ADVERTISED_Pause); @@ -2304,8 +2302,6 @@ void bnx2x_calc_fc_adv(struct bnx2x *bp) break; default: - bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | - ADVERTISED_Pause); break; } } @@ -2351,12 +2347,16 @@ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) if (load_mode == LOAD_DIAG) { struct link_params *lp = &bp->link_params; lp->loopback_mode = LOOPBACK_XGXS; - /* do PHY loopback at 10G speed, if possible */ - if (lp->req_line_speed[cfx_idx] < SPEED_10000) { + /* Prefer doing PHY loopback at highest speed */ + if (lp->req_line_speed[cfx_idx] < SPEED_20000) { if (lp->speed_cap_mask[cfx_idx] & - PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + PORT_HW_CFG_SPEED_CAPABILITY_D0_20G) lp->req_line_speed[cfx_idx] = - SPEED_10000; + SPEED_20000; + else if (lp->speed_cap_mask[cfx_idx] & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) + lp->req_line_speed[cfx_idx] = + SPEED_10000; else lp->req_line_speed[cfx_idx] = SPEED_1000; @@ -4867,9 +4867,7 @@ static bool bnx2x_check_blocks_with_parity3(struct bnx2x *bp, u32 sig, res = true; break; case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: - if (print) - _print_next_block((*par_num)++, - "MCP SCPAD"); + (*par_num)++; /* clear latched SCPAD PATIRY from MCP */ REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 1UL << 10); @@ -4931,6 +4929,7 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, (sig[3] & HW_PRTY_ASSERT_SET_3) || (sig[4] & HW_PRTY_ASSERT_SET_4)) { int par_num = 0; + DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", sig[0] & HW_PRTY_ASSERT_SET_0, @@ -4938,9 +4937,18 @@ static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, sig[2] & HW_PRTY_ASSERT_SET_2, sig[3] & HW_PRTY_ASSERT_SET_3, sig[4] & HW_PRTY_ASSERT_SET_4); - if (print) - netdev_err(bp->dev, - "Parity errors detected in blocks: "); + if (print) { + if (((sig[0] & HW_PRTY_ASSERT_SET_0) || + (sig[1] & HW_PRTY_ASSERT_SET_1) || + (sig[2] & HW_PRTY_ASSERT_SET_2) || + (sig[4] & HW_PRTY_ASSERT_SET_4)) || + (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) { + netdev_err(bp->dev, + "Parity errors detected in blocks: "); + } else { + print = false; + } + } res |= bnx2x_check_blocks_with_parity0(bp, sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print); res |= bnx2x_check_blocks_with_parity1(bp, @@ -8431,7 +8439,7 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) BNX2X_ETH_MAC, &ramrod_flags); } else { /* vf */ return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, - bp->fp->index, true); + bp->fp->index, set); } } @@ -9323,7 +9331,8 @@ unload_error: * function stop ramrod is sent, since as part of this ramrod FW access * PTP registers. */ - bnx2x_stop_ptp(bp); + if (bp->flags & PTP_SUPPORTED) + bnx2x_stop_ptp(bp); /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); @@ -11147,6 +11156,12 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) bp->port.advertising[idx] |= (ADVERTISED_1000baseT_Full | ADVERTISED_TP); + } else if (bp->port.supported[idx] & + SUPPORTED_1000baseKX_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_1000; + bp->port.advertising[idx] |= + ADVERTISED_1000baseKX_Full; } else { BNX2X_ERR("NVRAM config error. 
Invalid link_config 0x%x speed_cap_mask 0x%x\n", link_config, @@ -11179,6 +11194,13 @@ static void bnx2x_link_settings_requested(struct bnx2x *bp) bp->port.advertising[idx] |= (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE); + } else if (bp->port.supported[idx] & + SUPPORTED_10000baseKR_Full) { + bp->link_params.req_line_speed[idx] = + SPEED_10000; + bp->port.advertising[idx] |= + (ADVERTISED_10000baseKR_Full | + ADVERTISED_FIBRE); } else { BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n", link_config, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 07cdf9bbffef..4ad415ac8cfe 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -424,7 +424,7 @@ static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp, o->head_exe_request = false; o->saved_ramrod_flags = 0; rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags); - if (rc != 0) { + if ((rc != 0) && (rc != 1)) { BNX2X_ERR("execution of pending commands failed with rc %d\n", rc); #ifdef BNX2X_STOP_ON_ERROR diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index 6f2887a5e0be..6159deab8c98 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -594,6 +594,7 @@ struct bcmgenet_priv { wait_queue_head_t wq; struct phy_device *phydev; struct device_node *phy_dn; + struct device_node *mdio_dn; struct mii_bus *mii_bus; u16 gphy_rev; struct clk *clk_eee; diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 6bef04e2f735..adf23d2ac488 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -408,6 +408,52 @@ static int bcmgenet_mii_probe(struct net_device *dev) return 0; } +/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with + * their internal MDIO management controller making them fail to successfully + * be read from or written to for the first transaction. We insert a dummy + * BMSR read here to make sure that phy_get_device() and get_phy_id() can + * correctly read the PHY MII_PHYSID1/2 registers and successfully register a + * PHY device for this peripheral. + * + * Once the PHY driver is registered, we can workaround subsequent reads from + * there (e.g: during system-wide power management). + * + * bus->reset is invoked before mdiobus_scan during mdiobus_register and is + * therefore the right location to stick that workaround. Since we do not want + * to read from non-existing PHYs, we either use bus->phy_mask or do a manual + * Device Tree scan to limit the search area. 
+ */ +static int bcmgenet_mii_bus_reset(struct mii_bus *bus) +{ + struct net_device *dev = bus->priv; + struct bcmgenet_priv *priv = netdev_priv(dev); + struct device_node *np = priv->mdio_dn; + struct device_node *child = NULL; + u32 read_mask = 0; + int addr = 0; + + if (!np) { + read_mask = 1 << priv->phy_addr; + } else { + for_each_available_child_of_node(np, child) { + addr = of_mdio_parse_addr(&dev->dev, child); + if (addr < 0) + continue; + + read_mask |= 1 << addr; + } + } + + for (addr = 0; addr < PHY_MAX_ADDR; addr++) { + if (read_mask & 1 << addr) { + dev_dbg(&dev->dev, "Workaround for PHY @ %d\n", addr); + mdiobus_read(bus, addr, MII_BMSR); + } + } + + return 0; +} + static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv) { struct mii_bus *bus; @@ -427,6 +473,7 @@ static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv) bus->parent = &priv->pdev->dev; bus->read = bcmgenet_mii_read; bus->write = bcmgenet_mii_write; + bus->reset = bcmgenet_mii_bus_reset; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", priv->pdev->name, priv->pdev->id); @@ -443,7 +490,6 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) { struct device_node *dn = priv->pdev->dev.of_node; struct device *kdev = &priv->pdev->dev; - struct device_node *mdio_dn; char *compat; int ret; @@ -451,14 +497,14 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) if (!compat) return -ENOMEM; - mdio_dn = of_find_compatible_node(dn, NULL, compat); + priv->mdio_dn = of_find_compatible_node(dn, NULL, compat); kfree(compat); - if (!mdio_dn) { + if (!priv->mdio_dn) { dev_err(kdev, "unable to find MDIO bus node\n"); return -ENODEV; } - ret = of_mdiobus_register(priv->mii_bus, mdio_dn); + ret = of_mdiobus_register(priv->mii_bus, priv->mdio_dn); if (ret) { dev_err(kdev, "failed to register MDIO bus\n"); return ret; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 160f8077692c..29f330831784 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -434,8 +434,9 @@ static int lio_set_phys_id(struct net_device *netdev, if (ret) return ret; - octnet_mdio45_access(lio, 1, LIO68XX_LED_BEACON_ADDR, - &lio->phy_beacon_val); + ret = octnet_mdio45_access(lio, 1, + LIO68XX_LED_BEACON_ADDR, + &lio->phy_beacon_val); if (ret) return ret; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c index 0d3106b464b2..f67641a2ff9e 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -650,14 +650,12 @@ void octeon_free_device_mem(struct octeon_device *oct) for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { /* could check mask as well */ - if (oct->droq[i]) - vfree(oct->droq[i]); + vfree(oct->droq[i]); } for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { /* could check mask as well */ - if (oct->instr_queue[i]) - vfree(oct->instr_queue[i]); + vfree(oct->instr_queue[i]); } i = oct->octeon_id; @@ -1078,10 +1076,7 @@ octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode, oct->dispatch.count--; spin_unlock_bh(&oct->dispatch.lock); - - if (dfree) - vfree(dfree); - + vfree(dfree); return retval; } diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index 94b502a0cf33..4dba86eaa045 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ 
b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -216,9 +216,7 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no) dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); octeon_droq_destroy_ring_buffers(oct, droq); - - if (droq->recv_buf_list) - vfree(droq->recv_buf_list); + vfree(droq->recv_buf_list); if (droq->info_base_addr) cnnic_free_aligned_dma(oct->pci_dev, droq->info_list, diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c index 356796bf9b87..a2a24652c8f3 100644 --- a/drivers/net/ethernet/cavium/liquidio/request_manager.c +++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c @@ -175,8 +175,7 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no) desc_size = CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf)); - if (iq->request_list) - vfree(iq->request_list); + vfree(iq->request_list); if (iq->base_addr) { q_size = iq->max_count * desc_size; diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c index b0cbb2b7fd48..76684dcb874c 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -1169,10 +1169,7 @@ void *cxgb_alloc_mem(unsigned long size) */ void cxgb_free_mem(void *addr) { - if (is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); + kvfree(addr); } /* diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index c64b5a99bfef..351f3b1bf800 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -1150,10 +1150,7 @@ void *t4_alloc_mem(size_t size) */ void t4_free_mem(void *addr) { - if (is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); + kvfree(addr); } /* Send a Work Request to write the filter at a specified index. 
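The cxgb3 and cxgb4 hunks above replace an open-coded vmalloc/kmalloc dispatch with kvfree(), which frees memory obtained from either allocator. A minimal sketch of the before/after shape of that cleanup, using hypothetical example_* names rather than the drivers' own helpers:

#include <linux/mm.h>		/* is_vmalloc_addr(), kvfree() */
#include <linux/slab.h>		/* kfree() */
#include <linux/vmalloc.h>	/* vfree() */

/* Old pattern: the caller must know which allocator produced the buffer. */
static void example_free_mem_old(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

/* New pattern: kvfree() accepts both kmalloc()ed and vmalloc()ed memory. */
static void example_free_mem_new(void *addr)
{
	kvfree(addr);
}

The simplification is that kvfree() performs the is_vmalloc_addr() check internally, so every open-coded dispatch like the one above can collapse to a single call.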
We construct diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index eadae1b412c6..da2004e2a741 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1208,7 +1208,7 @@ static int enic_poll(struct napi_struct *napi, int budget) napi_complete(napi); vnic_intr_unmask(&enic->intr[intr]); } - enic_poll_unlock_napi(&enic->rq[cq_rq]); + enic_poll_unlock_napi(&enic->rq[cq_rq], napi); return rq_work_done; } @@ -1414,7 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget) */ enic_calc_int_moderation(enic, &enic->rq[rq]); - enic_poll_unlock_napi(&enic->rq[rq]); + enic_poll_unlock_napi(&enic->rq[rq], napi); if (work_done < work_to_do) { /* Some work done, but not enough to stay in polling, diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h index 8111d5202df2..b9c82f143d7e 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_rq.h +++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h @@ -21,6 +21,7 @@ #define _VNIC_RQ_H_ #include <linux/pci.h> +#include <linux/netdevice.h> #include "vnic_dev.h" #include "vnic_cq.h" @@ -75,6 +76,12 @@ struct vnic_rq_buf { uint64_t wr_id; }; +enum enic_poll_state { + ENIC_POLL_STATE_IDLE, + ENIC_POLL_STATE_NAPI, + ENIC_POLL_STATE_POLL +}; + struct vnic_rq { unsigned int index; struct vnic_dev *vdev; @@ -86,19 +93,7 @@ struct vnic_rq { void *os_buf_head; unsigned int pkts_outstanding; #ifdef CONFIG_NET_RX_BUSY_POLL -#define ENIC_POLL_STATE_IDLE 0 -#define ENIC_POLL_STATE_NAPI (1 << 0) /* NAPI owns this poll */ -#define ENIC_POLL_STATE_POLL (1 << 1) /* poll owns this poll */ -#define ENIC_POLL_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this poll */ -#define ENIC_POLL_STATE_POLL_YIELD (1 << 3) /* poll yielded this poll */ -#define ENIC_POLL_YIELD (ENIC_POLL_STATE_NAPI_YIELD | \ - ENIC_POLL_STATE_POLL_YIELD) -#define ENIC_POLL_LOCKED (ENIC_POLL_STATE_NAPI | \ - ENIC_POLL_STATE_POLL) -#define ENIC_POLL_USER_PEND (ENIC_POLL_STATE_POLL | \ - ENIC_POLL_STATE_POLL_YIELD) - unsigned int bpoll_state; - spinlock_t bpoll_lock; + atomic_t bpoll_state; #endif /* CONFIG_NET_RX_BUSY_POLL */ }; @@ -215,76 +210,43 @@ static inline int vnic_rq_fill(struct vnic_rq *rq, #ifdef CONFIG_NET_RX_BUSY_POLL static inline void enic_busy_poll_init_lock(struct vnic_rq *rq) { - spin_lock_init(&rq->bpoll_lock); - rq->bpoll_state = ENIC_POLL_STATE_IDLE; + atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); } static inline bool enic_poll_lock_napi(struct vnic_rq *rq) { - bool rc = true; - - spin_lock(&rq->bpoll_lock); - if (rq->bpoll_state & ENIC_POLL_LOCKED) { - WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI); - rq->bpoll_state |= ENIC_POLL_STATE_NAPI_YIELD; - rc = false; - } else { - rq->bpoll_state = ENIC_POLL_STATE_NAPI; - } - spin_unlock(&rq->bpoll_lock); + int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE, + ENIC_POLL_STATE_NAPI); - return rc; + return (rc == ENIC_POLL_STATE_IDLE); } -static inline bool enic_poll_unlock_napi(struct vnic_rq *rq) +static inline void enic_poll_unlock_napi(struct vnic_rq *rq, + struct napi_struct *napi) { - bool rc = false; - - spin_lock(&rq->bpoll_lock); - WARN_ON(rq->bpoll_state & - (ENIC_POLL_STATE_POLL | ENIC_POLL_STATE_NAPI_YIELD)); - if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD) - rc = true; - rq->bpoll_state = ENIC_POLL_STATE_IDLE; - spin_unlock(&rq->bpoll_lock); - - return rc; + WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_NAPI); + napi_gro_flush(napi, false); + 
atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); } static inline bool enic_poll_lock_poll(struct vnic_rq *rq) { - bool rc = true; - - spin_lock_bh(&rq->bpoll_lock); - if (rq->bpoll_state & ENIC_POLL_LOCKED) { - rq->bpoll_state |= ENIC_POLL_STATE_POLL_YIELD; - rc = false; - } else { - rq->bpoll_state |= ENIC_POLL_STATE_POLL; - } - spin_unlock_bh(&rq->bpoll_lock); + int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE, + ENIC_POLL_STATE_POLL); - return rc; + return (rc == ENIC_POLL_STATE_IDLE); } -static inline bool enic_poll_unlock_poll(struct vnic_rq *rq) -{ - bool rc = false; - spin_lock_bh(&rq->bpoll_lock); - WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI); - if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD) - rc = true; - rq->bpoll_state = ENIC_POLL_STATE_IDLE; - spin_unlock_bh(&rq->bpoll_lock); - - return rc; +static inline void enic_poll_unlock_poll(struct vnic_rq *rq) +{ + WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_POLL); + atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); } static inline bool enic_poll_busy_polling(struct vnic_rq *rq) { - WARN_ON(!(rq->bpoll_state & ENIC_POLL_LOCKED)); - return rq->bpoll_state & ENIC_POLL_USER_PEND; + return atomic_read(&rq->bpoll_state) & ENIC_POLL_STATE_POLL; } #else @@ -298,7 +260,8 @@ static inline bool enic_poll_lock_napi(struct vnic_rq *rq) return true; } -static inline bool enic_poll_unlock_napi(struct vnic_rq *rq) +static inline bool enic_poll_unlock_napi(struct vnic_rq *rq, + struct napi_struct *napi) { return false; } diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index b8de87b03046..ff76d4e9dc1b 100644 --- a/drivers/net/ethernet/freescale/Kconfig +++ b/drivers/net/ethernet/freescale/Kconfig @@ -83,12 +83,12 @@ config UGETH_TX_ON_DEMAND config GIANFAR tristate "Gianfar Ethernet" - depends on FSL_SOC select FSL_PQ_MDIO select PHYLIB select CRC32 ---help--- This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, - and MPC86xx family of chips, and the FEC on the 8540. + and MPC86xx family of chips, the eTSEC on LS1021A and the FEC + on the 8540. 
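The enic hunks above drop the per-queue busy-poll spinlock and its flag bits in favour of a single atomic state word: ownership of a receive queue is claimed with one compare-and-swap against the idle state and released with a plain atomic store. A minimal sketch of that locking scheme, with hypothetical example_* names (the driver's real helpers additionally flush GRO on the NAPI unlock path):

#include <linux/atomic.h>
#include <linux/types.h>

enum example_poll_state {
	EXAMPLE_POLL_IDLE,
	EXAMPLE_POLL_NAPI,
	EXAMPLE_POLL_BUSY,
};

struct example_rq {
	atomic_t poll_state;	/* holds one of enum example_poll_state */
};

/* Claim the queue for NAPI; succeeds only if it is currently idle. */
static inline bool example_poll_lock_napi(struct example_rq *rq)
{
	return atomic_cmpxchg(&rq->poll_state, EXAMPLE_POLL_IDLE,
			      EXAMPLE_POLL_NAPI) == EXAMPLE_POLL_IDLE;
}

/* Claim the queue for busy polling in the same way. */
static inline bool example_poll_lock_busy(struct example_rq *rq)
{
	return atomic_cmpxchg(&rq->poll_state, EXAMPLE_POLL_IDLE,
			      EXAMPLE_POLL_BUSY) == EXAMPLE_POLL_IDLE;
}

/* Release ownership; only the current owner calls this, so a store suffices. */
static inline void example_poll_unlock(struct example_rq *rq)
{
	atomic_set(&rq->poll_state, EXAMPLE_POLL_IDLE);
}

Compared with the old spinlock scheme there is no "yield" bookkeeping: a contender that loses the cmpxchg simply backs off and retries later, which matches how the simplified enic paths handle contention.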
endif # NET_VENDOR_FREESCALE diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index a86af8a7485d..1eee73cccdf5 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -428,6 +428,8 @@ struct bufdesc_ex { #define FEC_QUIRK_BUG_CAPTURE (1 << 10) /* Controller has only one MDIO bus */ #define FEC_QUIRK_SINGLE_MDIO (1 << 11) +/* Controller supports RACC register */ +#define FEC_QUIRK_HAS_RACC (1 << 12) struct fec_enet_priv_tx_q { int index; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index e464aeaeed2c..1f89c59b4353 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -85,28 +85,30 @@ static struct platform_device_id fec_devtype[] = { .driver_data = 0, }, { .name = "imx25-fec", - .driver_data = FEC_QUIRK_USE_GASKET, + .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC, }, { .name = "imx27-fec", - .driver_data = 0, + .driver_data = FEC_QUIRK_HAS_RACC, }, { .name = "imx28-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | - FEC_QUIRK_SINGLE_MDIO, + FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC, }, { .name = "imx6q-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358, + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | + FEC_QUIRK_HAS_RACC, }, { .name = "mvf600-fec", - .driver_data = FEC_QUIRK_ENET_MAC, + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC, }, { .name = "imx6sx-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | - FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE, + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC, }, { /* sentinel */ } @@ -970,13 +972,15 @@ fec_restart(struct net_device *ndev) writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); #if !defined(CONFIG_M5272) - /* set RX checksum */ - val = readl(fep->hwp + FEC_RACC); - if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) - val |= FEC_RACC_OPTIONS; - else - val &= ~FEC_RACC_OPTIONS; - writel(val, fep->hwp + FEC_RACC); + if (fep->quirks & FEC_QUIRK_HAS_RACC) { + /* set RX checksum */ + val = readl(fep->hwp + FEC_RACC); + if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) + val |= FEC_RACC_OPTIONS; + else + val &= ~FEC_RACC_OPTIONS; + writel(val, fep->hwp + FEC_RACC); + } #endif /* diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c index ff2903652f4b..c3b6af83f070 100644 --- a/drivers/net/ethernet/icplus/ipg.c +++ b/drivers/net/ethernet/icplus/ipg.c @@ -1028,7 +1028,7 @@ static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev) /* detailed rx_errors */ sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) + - ipg_r16(IPG_FRAMETOOLONGERRRORS); + ipg_r16(IPG_FRAMETOOLONGERRORS); sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS); /* Unutilized IPG statistic registers. 
*/ diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h index a21e4f5702b5..de606281f97b 100644 --- a/drivers/net/ethernet/icplus/ipg.h +++ b/drivers/net/ethernet/icplus/ipg.h @@ -102,7 +102,7 @@ enum ipg_regs { #define IPG_MCSTFRAMESRCVDOK 0xB8 #define IPG_BCSTFRAMESRCVDOK 0xBE #define IPG_MACCONTROLFRAMESRCVD 0xC6 -#define IPG_FRAMETOOLONGERRRORS 0xC8 +#define IPG_FRAMETOOLONGERRORS 0xC8 #define IPG_INRANGELENGTHERRORS 0xCA #define IPG_FRAMECHECKSEQERRORS 0xCC #define IPG_FRAMESLOSTRXERRORS 0xCE diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index b074b9a667b3..91a5a0ae9cd7 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -237,17 +237,19 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) if (ret_val) return false; out: - if ((hw->mac.type == e1000_pch_lpt) || - (hw->mac.type == e1000_pch_spt)) { - /* Unforce SMBus mode in PHY */ - e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); - phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; - e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); + if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) { + /* Only unforce SMBus if ME is not active */ + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + /* Unforce SMBus mode in PHY */ + e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); - /* Unforce SMBus mode in MAC */ - mac_reg = er32(CTRL_EXT); - mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; - ew32(CTRL_EXT, mac_reg); + /* Unforce SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + } } return true; @@ -1087,6 +1089,7 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) u32 mac_reg; s32 ret_val = 0; u16 phy_reg; + u16 oem_reg = 0; if ((hw->mac.type < e1000_pch_lpt) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) || @@ -1128,33 +1131,37 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) if (ret_val) goto out; + /* Force SMBus mode in PHY */ + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); + if (ret_val) + goto release; + phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; + e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Force SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + /* Si workaround for ULP entry flow on i127/rev6 h/w. 
Enable * LPLU and disable Gig speed when entering ULP */ if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) { ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS, - &phy_reg); + &oem_reg); if (ret_val) goto release; + + phy_reg = oem_reg; phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS; + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, phy_reg); + if (ret_val) goto release; } - /* Force SMBus mode in PHY */ - ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); - if (ret_val) - goto release; - phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; - e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); - - /* Force SMBus mode in MAC */ - mac_reg = er32(CTRL_EXT); - mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; - ew32(CTRL_EXT, mac_reg); - /* Set Inband ULP Exit, Reset to SMBus mode and * Disable SMBus Release on PERST# in PHY */ @@ -1166,10 +1173,15 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) if (to_sx) { if (er32(WUFC) & E1000_WUFC_LNKC) phy_reg |= I218_ULP_CONFIG1_WOL_HOST; + else + phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST; phy_reg |= I218_ULP_CONFIG1_STICKY_ULP; + phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT; } else { phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT; + phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP; + phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST; } e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); @@ -1181,6 +1193,15 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) /* Commit ULP changes in PHY by starting auto ULP configuration */ phy_reg |= I218_ULP_CONFIG1_START; e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) && + to_sx && (er32(STATUS) & E1000_STATUS_LU)) { + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, + oem_reg); + if (ret_val) + goto release; + } + release: hw->phy.ops.release(hw); out: @@ -1379,16 +1400,20 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (((hw->mac.type == e1000_pch2lan) || (hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) && link) { - u32 reg; + u16 speed, duplex; - reg = er32(STATUS); + e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex); tipg_reg = er32(TIPG); tipg_reg &= ~E1000_TIPG_IPGT_MASK; - if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { + if (duplex == HALF_DUPLEX && speed == SPEED_10) { tipg_reg |= 0xFF; /* Reduce Rx latency in analog PHY */ emi_val = 0; + } else if (hw->mac.type == e1000_pch_spt && + duplex == FULL_DUPLEX && speed != SPEED_1000) { + tipg_reg |= 0xC; + emi_val = 1; } else { /* Roll back the default values */ @@ -1412,14 +1437,59 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) if (ret_val) return ret_val; + + if (hw->mac.type == e1000_pch_spt) { + u16 data; + u16 ptr_gap; + + if (speed == SPEED_1000) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy_locked(hw, + PHY_REG(776, 20), + &data); + if (ret_val) { + hw->phy.ops.release(hw); + return ret_val; + } + + ptr_gap = (data & (0x3FF << 2)) >> 2; + if (ptr_gap < 0x18) { + data &= ~(0x3FF << 2); + data |= (0x18 << 2); + ret_val = + e1e_wphy_locked(hw, + PHY_REG(776, 20), + data); + } + hw->phy.ops.release(hw); + if (ret_val) + return ret_val; + } + } + } + + /* I217 Packet Loss issue: + * ensure that FEXTNVM4 Beacon Duration is set correctly + * on power up. 
+ * Set the Beacon Duration for I217 to 8 usec + */ + if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) { + u32 mac_reg; + + mac_reg = er32(FEXTNVM4); + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; + ew32(FEXTNVM4, mac_reg); } /* Work-around I218 hang issue */ if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) || (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) || - (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) || - (hw->mac.type == e1000_pch_spt)) { + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { ret_val = e1000_k1_workaround_lpt_lp(hw, link); if (ret_val) return ret_val; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index e62b9dcb91fe..89d788d8f263 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6354,13 +6354,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) } /** - * e1000e_disable_aspm - Disable ASPM states + * __e1000e_disable_aspm - Disable ASPM states * @pdev: pointer to PCI device struct * @state: bit-mask of ASPM states to disable + * @locked: indication if this context holds pci_bus_sem locked. * * Some devices *must* have certain ASPM states disabled per hardware errata. **/ -static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked) { struct pci_dev *parent = pdev->bus->self; u16 aspm_dis_mask = 0; @@ -6399,7 +6400,10 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) "L1" : ""); #ifdef CONFIG_PCIEASPM - pci_disable_link_state_locked(pdev, state); + if (locked) + pci_disable_link_state_locked(pdev, state); + else + pci_disable_link_state(pdev, state); /* Double-check ASPM control. If not disabled by the above, the * BIOS is preventing that from happening (or CONFIG_PCIEASPM is @@ -6422,6 +6426,32 @@ static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) aspm_dis_mask); } +/** + * e1000e_disable_aspm - Disable ASPM states. + * @pdev: pointer to PCI device struct + * @state: bit-mask of ASPM states to disable + * + * This function acquires the pci_bus_sem! + * Some devices *must* have certain ASPM states disabled per hardware errata. + **/ +static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +{ + __e1000e_disable_aspm(pdev, state, 0); +} + +/** + * e1000e_disable_aspm_locked Disable ASPM states. + * @pdev: pointer to PCI device struct + * @state: bit-mask of ASPM states to disable + * + * This function must be called with pci_bus_sem acquired! + * Some devices *must* have certain ASPM states disabled per hardware errata. 
+ **/ +static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state) +{ + __e1000e_disable_aspm(pdev, state, 1); +} + #ifdef CONFIG_PM static int __e1000_resume(struct pci_dev *pdev) { @@ -6435,7 +6465,7 @@ static int __e1000_resume(struct pci_dev *pdev) if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) aspm_disable_flag |= PCIE_LINK_STATE_L1; if (aspm_disable_flag) - e1000e_disable_aspm(pdev, aspm_disable_flag); + e1000e_disable_aspm_locked(pdev, aspm_disable_flag); pci_set_master(pdev); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index f54996f19629..395f32f226c0 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -484,6 +484,8 @@ int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring) if (!dev) return -ENOMEM; + /* warn if we are about to overwrite the pointer */ + WARN_ON(tx_ring->tx_bi); bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); if (!tx_ring->tx_bi) @@ -644,6 +646,8 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring) struct device *dev = rx_ring->dev; int bi_size; + /* warn if we are about to overwrite the pointer */ + WARN_ON(rx_ring->rx_bi); bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); if (!rx_ring->rx_bi) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h index 1b98c25b3092..fea3b75a9a35 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf.h +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h @@ -264,7 +264,6 @@ extern const char i40evf_driver_version[]; int i40evf_up(struct i40evf_adapter *adapter); void i40evf_down(struct i40evf_adapter *adapter); -void i40evf_reinit_locked(struct i40evf_adapter *adapter); void i40evf_reset(struct i40evf_adapter *adapter); void i40evf_set_ethtool_ops(struct net_device *netdev); void i40evf_update_stats(struct i40evf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c index f4e77665bc54..2b53c870e7f1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c @@ -267,8 +267,10 @@ static int i40evf_set_ringparam(struct net_device *netdev, adapter->tx_desc_count = new_tx_count; adapter->rx_desc_count = new_rx_count; - if (netif_running(netdev)) - i40evf_reinit_locked(adapter); + if (netif_running(netdev)) { + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + } return 0; } diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index 7c53aca4b5a6..4ab4ebba07a1 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -170,7 +170,8 @@ static void i40evf_tx_timeout(struct net_device *netdev) struct i40evf_adapter *adapter = netdev_priv(netdev); adapter->tx_timeout_count++; - if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { + if (!(adapter->flags & (I40EVF_FLAG_RESET_PENDING | + I40EVF_FLAG_RESET_NEEDED))) { adapter->flags |= I40EVF_FLAG_RESET_NEEDED; schedule_work(&adapter->reset_task); } @@ -1460,7 +1461,7 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter) for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { lut = 0; for (j = 0; j < 4; j++) { - if (cqueue == adapter->vsi_res->num_queue_pairs) + if (cqueue == adapter->num_active_queues) 
cqueue = 0; lut |= ((cqueue) << (8 * j)); cqueue++; @@ -1470,8 +1471,8 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter) i40e_flush(hw); } -#define I40EVF_RESET_WAIT_MS 100 -#define I40EVF_RESET_WAIT_COUNT 200 +#define I40EVF_RESET_WAIT_MS 10 +#define I40EVF_RESET_WAIT_COUNT 500 /** * i40evf_reset_task - Call-back task to handle hardware reset * @work: pointer to work_struct @@ -1495,10 +1496,17 @@ static void i40evf_reset_task(struct work_struct *work) &adapter->crit_section)) usleep_range(500, 1000); + i40evf_misc_irq_disable(adapter); if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { - dev_info(&adapter->pdev->dev, "Requesting reset from PF\n"); + adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED; + /* Restart the AQ here. If we have been reset but didn't + * detect it, or if the PF had to reinit, our AQ will be hosed. + */ + i40evf_shutdown_adminq(hw); + i40evf_init_adminq(hw); i40evf_request_reset(adapter); } + adapter->flags |= I40EVF_FLAG_RESET_PENDING; /* poll until we see the reset actually happen */ for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { @@ -1507,10 +1515,10 @@ static void i40evf_reset_task(struct work_struct *work) if ((rstat_val != I40E_VFR_VFACTIVE) && (rstat_val != I40E_VFR_COMPLETED)) break; - msleep(I40EVF_RESET_WAIT_MS); + usleep_range(500, 1000); } if (i == I40EVF_RESET_WAIT_COUNT) { - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + dev_info(&adapter->pdev->dev, "Never saw reset\n"); goto continue_reset; /* act like the reset happened */ } @@ -1518,11 +1526,12 @@ static void i40evf_reset_task(struct work_struct *work) for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & I40E_VFGEN_RSTAT_VFR_STATE_MASK; - if ((rstat_val == I40E_VFR_VFACTIVE) || - (rstat_val == I40E_VFR_COMPLETED)) + if (rstat_val == I40E_VFR_VFACTIVE) break; msleep(I40EVF_RESET_WAIT_MS); } + /* extra wait to make sure minimum wait is met */ + msleep(I40EVF_RESET_WAIT_MS); if (i == I40EVF_RESET_WAIT_COUNT) { struct i40evf_mac_filter *f, *ftmp; struct i40evf_vlan_filter *fv, *fvtmp; @@ -1534,11 +1543,10 @@ static void i40evf_reset_task(struct work_struct *work) if (netif_running(adapter->netdev)) { set_bit(__I40E_DOWN, &adapter->vsi.state); - i40evf_irq_disable(adapter); - i40evf_napi_disable_all(adapter); - netif_tx_disable(netdev); - netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); + netif_tx_disable(netdev); + i40evf_napi_disable_all(adapter); + i40evf_irq_disable(adapter); i40evf_free_traffic_irqs(adapter); i40evf_free_all_tx_resources(adapter); i40evf_free_all_rx_resources(adapter); @@ -1550,6 +1558,7 @@ static void i40evf_reset_task(struct work_struct *work) list_del(&f->list); kfree(f); } + list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) { list_del(&fv->list); @@ -1564,22 +1573,27 @@ static void i40evf_reset_task(struct work_struct *work) i40evf_shutdown_adminq(hw); adapter->netdev->flags &= ~IFF_UP; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); return; /* Do not attempt to reinit. It's dead, Jim. 
*/ } continue_reset: - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; - - i40evf_irq_disable(adapter); - if (netif_running(adapter->netdev)) { - i40evf_napi_disable_all(adapter); - netif_tx_disable(netdev); - netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + i40evf_napi_disable_all(adapter); } + i40evf_irq_disable(adapter); adapter->state = __I40EVF_RESETTING; + adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; + + /* free the Tx/Rx rings and descriptors, might be better to just + * re-use them sometime in the future + */ + i40evf_free_all_rx_resources(adapter); + i40evf_free_all_tx_resources(adapter); /* kill and reinit the admin queue */ if (i40evf_shutdown_adminq(hw)) @@ -1603,6 +1617,7 @@ continue_reset: adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER; adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); + i40evf_misc_irq_enable(adapter); mod_timer(&adapter->watchdog_timer, jiffies + 2); @@ -1624,7 +1639,10 @@ continue_reset: goto reset_err; i40evf_irq_enable(adapter, true); + } else { + adapter->state = __I40EVF_DOWN; } + return; reset_err: dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); @@ -1667,6 +1685,11 @@ static void i40evf_adminq_task(struct work_struct *work) memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); } while (pending); + if ((adapter->flags & + (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) || + adapter->state == __I40EVF_RESETTING) + goto freedom; + /* check for error indications */ val = rd32(hw, hw->aq.arq.len); oldval = val; @@ -1702,6 +1725,7 @@ static void i40evf_adminq_task(struct work_struct *work) if (oldval != val) wr32(hw, hw->aq.asq.len, val); +freedom: kfree(event.msg_buf); out: /* re-enable Admin queue interrupt cause */ @@ -1897,47 +1921,6 @@ static struct net_device_stats *i40evf_get_stats(struct net_device *netdev) } /** - * i40evf_reinit_locked - Software reinit - * @adapter: board private structure - * - * Reinititalizes the ring structures in response to a software configuration - * change. Roughly the same as close followed by open, but skips releasing - * and reallocating the interrupts. 
- **/ -void i40evf_reinit_locked(struct i40evf_adapter *adapter) -{ - struct net_device *netdev = adapter->netdev; - int err; - - WARN_ON(in_interrupt()); - - i40evf_down(adapter); - - /* allocate transmit descriptors */ - err = i40evf_setup_all_tx_resources(adapter); - if (err) - goto err_reinit; - - /* allocate receive descriptors */ - err = i40evf_setup_all_rx_resources(adapter); - if (err) - goto err_reinit; - - i40evf_configure(adapter); - - err = i40evf_up_complete(adapter); - if (err) - goto err_reinit; - - i40evf_irq_enable(adapter, true); - return; - -err_reinit: - dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); - i40evf_close(netdev); -} - -/** * i40evf_change_mtu - Change the Maximum Transfer Unit * @netdev: network interface device structure * @new_mtu: new value for maximum frame size @@ -1952,9 +1935,10 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu) if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER)) return -EINVAL; - /* must set new MTU before calling down or up */ netdev->mtu = new_mtu; - i40evf_reinit_locked(adapter); + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; + schedule_work(&adapter->reset_task); + return 0; } diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 0f69ef81751a..b0182dd31346 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1,5 +1,5 @@ /* Intel(R) Gigabit Ethernet Linux driver - * Copyright(c) 2007-2014 Intel Corporation. + * Copyright(c) 2007-2015 Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -1900,8 +1900,8 @@ static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable * @hw: pointer to the HW structure * - * After rx enable if managability is enabled then there is likely some - * bad data at the start of the fifo and possibly in the DMA fifo. This + * After rx enable if manageability is enabled then there is likely some + * bad data at the start of the fifo and possibly in the DMA fifo. This * function clears the fifos and flushes any packets that came in as rx was * being enabled. **/ @@ -1910,6 +1910,11 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw) u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; int i, ms_wait; + /* disable IPv6 options as per hardware errata */ + rfctl = rd32(E1000_RFCTL); + rfctl |= E1000_RFCTL_IPV6_EX_DIS; + wr32(E1000_RFCTL, rfctl); + if (hw->mac.type != e1000_82575 || !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) return; @@ -1937,7 +1942,6 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw) * incoming packets are rejected. 
Set enable and wait 2ms so that * any packet that was coming in as RCTL.EN was set is flushed */ - rfctl = rd32(E1000_RFCTL); wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); rlpml = rd32(E1000_RLPML); diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 217f8138851b..f8684aa285be 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -344,7 +344,8 @@ #define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ /* Header split receive */ -#define E1000_RFCTL_LEF 0x00040000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_LEF 0x00040000 /* Collision related configuration parameters */ #define E1000_COLLISION_THRESHOLD 15 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index f287186192bb..2f70a9b152bd 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -58,7 +58,7 @@ #define MAJ 5 #define MIN 2 -#define BUILD 15 +#define BUILD 18 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ __stringify(BUILD) "-k" char igb_driver_name[] = "igb"; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 5bdf78231a4e..370e20ed224c 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -310,6 +310,7 @@ struct mvneta_port { unsigned int link; unsigned int duplex; unsigned int speed; + unsigned int tx_csum_limit; int use_inband_status:1; }; @@ -2508,8 +2509,10 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) dev->mtu = mtu; - if (!netif_running(dev)) + if (!netif_running(dev)) { + netdev_update_features(dev); return 0; + } /* The interface is running, so we have to force a * reallocation of the queues @@ -2538,9 +2541,26 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu) mvneta_start_dev(pp); mvneta_port_up(pp); + netdev_update_features(dev); + return 0; } +static netdev_features_t mvneta_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct mvneta_port *pp = netdev_priv(dev); + + if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { + features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); + netdev_info(dev, + "Disable IP checksum for MTU greater than %dB\n", + pp->tx_csum_limit); + } + + return features; +} + /* Get mac address */ static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr) { @@ -2862,6 +2882,7 @@ static const struct net_device_ops mvneta_netdev_ops = { .ndo_set_rx_mode = mvneta_set_rx_mode, .ndo_set_mac_address = mvneta_set_mac_addr, .ndo_change_mtu = mvneta_change_mtu, + .ndo_fix_features = mvneta_fix_features, .ndo_get_stats64 = mvneta_get_stats64, .ndo_do_ioctl = mvneta_ioctl, }; @@ -3107,6 +3128,9 @@ static int mvneta_probe(struct platform_device *pdev) } } + if (of_device_is_compatible(dn, "marvell,armada-370-neta")) + pp->tx_csum_limit = 1600; + pp->tx_ring_size = MVNETA_MAX_TXD; pp->rx_ring_size = MVNETA_MAX_RXD; @@ -3185,6 +3209,7 @@ static int mvneta_remove(struct platform_device *pdev) static const struct of_device_id mvneta_match[] = { { .compatible = "marvell,armada-370-neta" }, + { .compatible = "marvell,armada-xp-neta" }, { } }; MODULE_DEVICE_TABLE(of, mvneta_match); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 77179d7ae4cc..e0de2fd1ce12 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -1977,10 +1977,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv) mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); } - if (priv->base_tx_qpn) { - mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num); - priv->base_tx_qpn = 0; - } } int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 35f726c17e48..7a4f20bb7fcb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -718,7 +718,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, } #endif static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, - int hwtstamp_rx_filter) + netdev_features_t dev_features) { __wsum hw_checksum = 0; @@ -726,14 +726,8 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, hw_checksum = csum_unfold((__force __sum16)cqe->checksum); - if (((struct ethhdr *)va)->h_proto == htons(ETH_P_8021Q) && - hwtstamp_rx_filter != HWTSTAMP_FILTER_NONE) { - /* next protocol non IPv4 or IPv6 */ - if (((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto - != htons(ETH_P_IP) && - ((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto - != htons(ETH_P_IPV6)) - return -1; + if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) && + !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) { hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr); hdr += sizeof(struct vlan_hdr); } @@ -896,7 +890,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud if (ip_summed == CHECKSUM_COMPLETE) { void *va = skb_frag_address(skb_shinfo(gro_skb)->frags); - if (check_csum(cqe, gro_skb, va, ring->hwtstamp_rx_filter)) { + if (check_csum(cqe, gro_skb, va, + dev->features)) { ip_summed = CHECKSUM_NONE; ring->csum_none++; ring->csum_complete--; @@ -951,7 +946,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud } if (ip_summed == CHECKSUM_COMPLETE) { - if (check_csum(cqe, skb, skb->data, ring->hwtstamp_rx_filter)) { + if (check_csum(cqe, skb, skb->data, dev->features)) { ip_summed = CHECKSUM_NONE; ring->csum_complete--; ring->csum_none++; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 7bed3a88579f..c10d98f6ad96 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -66,6 +66,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring->size = size; ring->size_mask = size - 1; ring->stride = stride; + ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS; tmp = size * sizeof(struct mlx4_en_tx_info); ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node); @@ -180,6 +181,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, mlx4_bf_free(mdev->dev, &ring->bf); mlx4_qp_remove(mdev->dev, &ring->qp); mlx4_qp_free(mdev->dev, &ring->qp); + mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1); mlx4_en_unmap_buffer(&ring->wqres.buf); mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); kfree(ring->bounce_buf); @@ -231,6 +233,11 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); } +static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring) +{ + return ring->prod - ring->cons > ring->full_size; +} + static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, int index, u8 owner) @@ 
-473,11 +480,10 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev, netdev_tx_completed_queue(ring->tx_queue, packets, bytes); - /* - * Wakeup Tx queue if this stopped, and at least 1 packet - * was completed + /* Wakeup Tx queue if this stopped, and ring is not full. */ - if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) { + if (netif_tx_queue_stopped(ring->tx_queue) && + !mlx4_en_is_tx_ring_full(ring)) { netif_tx_wake_queue(ring->tx_queue); ring->wake_queue++; } @@ -921,8 +927,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) skb_tx_timestamp(skb); /* Check available TXBBs And 2K spare for prefetch */ - stop_queue = (int)(ring->prod - ring_cons) > - ring->size - HEADROOM - MAX_DESC_TXBBS; + stop_queue = mlx4_en_is_tx_ring_full(ring); if (unlikely(stop_queue)) { netif_tx_stop_queue(ring->tx_queue); ring->queue_stopped++; @@ -991,8 +996,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) smp_rmb(); ring_cons = ACCESS_ONCE(ring->cons); - if (unlikely(((int)(ring->prod - ring_cons)) <= - ring->size - HEADROOM - MAX_DESC_TXBBS)) { + if (unlikely(!mlx4_en_is_tx_ring_full(ring))) { netif_tx_wake_queue(ring->tx_queue); ring->wake_queue++; } diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 6fce58718837..0d80aed59043 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -93,8 +93,14 @@ int mlx4_register_interface(struct mlx4_interface *intf) mutex_lock(&intf_mutex); list_add_tail(&intf->list, &intf_list); - list_for_each_entry(priv, &dev_list, dev_list) + list_for_each_entry(priv, &dev_list, dev_list) { + if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) { + mlx4_dbg(&priv->dev, + "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol); + intf->flags &= ~MLX4_INTFF_BONDING; + } mlx4_add_device(intf, priv); + } mutex_unlock(&intf_mutex); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index d5f9adb6a784..666d1669eb52 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -279,6 +279,7 @@ struct mlx4_en_tx_ring { u32 size; /* number of TXBBs */ u32 size_mask; u16 stride; + u32 full_size; u16 cqn; /* index of port CQ associated with this ring */ u32 buf_size; __be32 doorbell_qpn; @@ -580,7 +581,6 @@ struct mlx4_en_priv { int vids[128]; bool wol; struct device *ddev; - int base_tx_qpn; struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE]; struct hwtstamp_config hwtstamp_config; u32 counter_index; diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index 42656da50500..7a8ce920c49e 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -116,8 +116,10 @@ static int ravb_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) priv->ptp.current_addend = addend; gccr = ravb_read(ndev, GCCR); - if (gccr & GCCR_LTI) + if (gccr & GCCR_LTI) { + spin_unlock_irqrestore(&priv->lock, flags); return -EBUSY; + } ravb_write(ndev, addend & GTI_TIV, GTI); ravb_write(ndev, gccr | GCCR_LTI, GCCR); diff --git a/drivers/net/ethernet/sis/sis900.h b/drivers/net/ethernet/sis/sis900.h index 1341f33e6084..7d430d322931 100644 --- a/drivers/net/ethernet/sis/sis900.h +++ b/drivers/net/ethernet/sis/sis900.h @@ -56,7 +56,7 @@ enum sis900_configuration_register_bits { EDB_MASTER_EN = 0x00002000 }; -enum sis900_eeprom_access_reigster_bits { 
+enum sis900_eeprom_access_register_bits { MDC = 0x00000040, MDDIR = 0x00000020, MDIO = 0x00000010, /* 7016 specific */ EECS = 0x00000008, EECLK = 0x00000004, EEDO = 0x00000002, EEDI = 0x00000001 @@ -73,7 +73,7 @@ enum sis900_interrupt_register_bits { RxERR = 0x00000004, RxDESC = 0x00000002, RxOK = 0x00000001 }; -enum sis900_interrupt_enable_reigster_bits { +enum sis900_interrupt_enable_register_bits { IE = 0x00000001 }; diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index 08c483bd2ec7..3f20bb1fe570 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -73,7 +73,7 @@ #define MMC_RX_OCTETCOUNT_G 0x00000188 #define MMC_RX_BROADCASTFRAME_G 0x0000018c #define MMC_RX_MULTICASTFRAME_G 0x00000190 -#define MMC_RX_CRC_ERRROR 0x00000194 +#define MMC_RX_CRC_ERROR 0x00000194 #define MMC_RX_ALIGN_ERROR 0x00000198 #define MMC_RX_RUN_ERROR 0x0000019C #define MMC_RX_JABBER_ERROR 0x000001A0 @@ -196,7 +196,7 @@ void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc) mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G); mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G); mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G); - mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR); + mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERROR); mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR); mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR); mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR); diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig index 8b0b1d6aca72..2f1264b882b9 100644 --- a/drivers/net/ethernet/via/Kconfig +++ b/drivers/net/ethernet/via/Kconfig @@ -18,6 +18,7 @@ if NET_VENDOR_VIA config VIA_RHINE tristate "VIA Rhine support" depends on (PCI || OF_IRQ) + depends on HAS_DMA select CRC32 select MII ---help--- @@ -42,6 +43,7 @@ config VIA_RHINE_MMIO config VIA_VELOCITY tristate "VIA Velocity support" depends on (PCI || (OF_ADDRESS && OF_IRQ)) + depends on HAS_DMA select CRC32 select CRC_CCITT select MII diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index 4dea85bfc545..6b701b3ded74 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -246,6 +246,13 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev) pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n", dev_name(&phydev->dev), phydev->drv->name, rev, patch); + /* Dummy read to a register to workaround an issue upon reset where the + * internal inverter may not allow the first MDIO transaction to pass + * the MDIO management controller and make us return 0xffff for such + * reads. + */ + phy_read(phydev, MII_BMSR); + switch (rev) { case 0xb0: ret = bcm7xxx_28nm_b0_afe_config_init(phydev); diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c index fc7abc50b4f1..6a52a7f0fa0d 100644 --- a/drivers/net/phy/mdio-bcm-unimac.c +++ b/drivers/net/phy/mdio-bcm-unimac.c @@ -120,6 +120,48 @@ static int unimac_mdio_write(struct mii_bus *bus, int phy_id, return 0; } +/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with + * their internal MDIO management controller making them fail to successfully + * be read from or written to for the first transaction. 
We insert a dummy + * BMSR read here to make sure that phy_get_device() and get_phy_id() can + * correctly read the PHY MII_PHYSID1/2 registers and successfully register a + * PHY device for this peripheral. + * + * Once the PHY driver is registered, we can workaround subsequent reads from + * there (e.g: during system-wide power management). + * + * bus->reset is invoked before mdiobus_scan during mdiobus_register and is + * therefore the right location to stick that workaround. Since we do not want + * to read from non-existing PHYs, we either use bus->phy_mask or do a manual + * Device Tree scan to limit the search area. + */ +static int unimac_mdio_reset(struct mii_bus *bus) +{ + struct device_node *np = bus->dev.of_node; + struct device_node *child; + u32 read_mask = 0; + int addr; + + if (!np) { + read_mask = ~bus->phy_mask; + } else { + for_each_available_child_of_node(np, child) { + addr = of_mdio_parse_addr(&bus->dev, child); + if (addr < 0) + continue; + + read_mask |= 1 << addr; + } + } + + for (addr = 0; addr < PHY_MAX_ADDR; addr++) { + if (read_mask & 1 << addr) + mdiobus_read(bus, addr, MII_BMSR); + } + + return 0; +} + static int unimac_mdio_probe(struct platform_device *pdev) { struct unimac_mdio_priv *priv; @@ -155,6 +197,7 @@ static int unimac_mdio_probe(struct platform_device *pdev) bus->parent = &pdev->dev; bus->read = unimac_mdio_read; bus->write = unimac_mdio_write; + bus->reset = unimac_mdio_reset; snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index bdfe51fc3a65..0302483de240 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -230,7 +230,7 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, for (i = 1; i < num_ids && c45_ids->devices_in_package == 0; i++) { - reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS2; +retry: reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS2; phy_reg = mdiobus_read(bus, addr, reg_addr); if (phy_reg < 0) return -EIO; @@ -242,12 +242,20 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, return -EIO; c45_ids->devices_in_package |= (phy_reg & 0xffff); - /* If mostly Fs, there is no device there, - * let's get out of here. - */ if ((c45_ids->devices_in_package & 0x1fffffff) == 0x1fffffff) { - *phy_id = 0xffffffff; - return 0; + if (i) { + /* If mostly Fs, there is no device there, + * then let's continue to probe more, as some + * 10G PHYs have zero Devices In package, + * e.g. Cortina CS4315/CS4340 PHY. 
+ */ + i = 0; + goto retry; + } else { + /* no device there, let's get out of here */ + *phy_id = 0xffffffff; + return 0; + } } } @@ -796,10 +804,11 @@ static int genphy_config_advert(struct phy_device *phydev) if (phydev->supported & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) { adv |= ethtool_adv_to_mii_ctrl1000_t(advertise); - if (adv != oldadv) - changed = 1; } + if (adv != oldadv) + changed = 1; + err = phy_write(phydev, MII_CTRL1000, adv); if (err < 0) return err; diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 76cad712ddb2..17cad185169d 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -66,6 +66,7 @@ #define PHY_ID_VSC8244 0x000fc6c0 #define PHY_ID_VSC8514 0x00070670 #define PHY_ID_VSC8574 0x000704a0 +#define PHY_ID_VSC8641 0x00070431 #define PHY_ID_VSC8662 0x00070660 #define PHY_ID_VSC8221 0x000fc550 #define PHY_ID_VSC8211 0x000fc4b0 @@ -272,6 +273,18 @@ static struct phy_driver vsc82xx_driver[] = { .config_intr = &vsc82xx_config_intr, .driver = { .owner = THIS_MODULE,}, }, { + .phy_id = PHY_ID_VSC8641, + .name = "Vitesse VSC8641", + .phy_id_mask = 0x000ffff0, + .features = PHY_GBIT_FEATURES, + .flags = PHY_HAS_INTERRUPT, + .config_init = &vsc824x_config_init, + .config_aneg = &vsc82x4_config_aneg, + .read_status = &genphy_read_status, + .ack_interrupt = &vsc824x_ack_interrupt, + .config_intr = &vsc82xx_config_intr, + .driver = { .owner = THIS_MODULE,}, +}, { .phy_id = PHY_ID_VSC8662, .name = "Vitesse VSC8662", .phy_id_mask = 0x000ffff0, @@ -318,6 +331,7 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = { { PHY_ID_VSC8244, 0x000fffc0 }, { PHY_ID_VSC8514, 0x000ffff0 }, { PHY_ID_VSC8574, 0x000ffff0 }, + { PHY_ID_VSC8641, 0x000ffff0 }, { PHY_ID_VSC8662, 0x000ffff0 }, { PHY_ID_VSC8221, 0x000ffff0 }, { PHY_ID_VSC8211, 0x000ffff0 }, diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index e9f1075f7d4c..2652245631d1 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -69,10 +69,10 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.3.5.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.4.2.0-k" /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01030500 +#define VMXNET3_DRIVER_VERSION_NUM 0x01040200 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index b3e9491bc8ec..f948c46d5132 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -1244,10 +1244,6 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) np = netdev_priv(netdev); np->xbdev = dev; - /* No need to use rtnl_lock() before the call below as it - * happens before register_netdev(). - */ - netif_set_real_num_tx_queues(netdev, 0); np->queues = NULL; err = -ENOMEM; @@ -1899,9 +1895,6 @@ abort_transaction_no_dev_fatal: xennet_disconnect_backend(info); kfree(info->queues); info->queues = NULL; - rtnl_lock(); - netif_set_real_num_tx_queues(info->netdev, 0); - rtnl_unlock(); out: return err; } diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 07bb3c8f191b..8df1b1777745 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -1,15 +1,20 @@ config DTC bool -config OF - bool +menuconfig OF + bool "Device Tree and Open Firmware support" + help + This option enables the device tree infrastructure. 
+ It is automatically selected by platforms that need it or can + be enabled manually for unittests, overlays or + compile-coverage. -menu "Device Tree and Open Firmware support" - depends on OF +if OF config OF_UNITTEST bool "Device Tree runtime unit tests" - depends on OF_IRQ && OF_EARLY_FLATTREE + depends on OF_IRQ + select OF_EARLY_FLATTREE select OF_RESOLVE help This option builds in test cases for the device tree infrastructure @@ -97,4 +102,4 @@ config OF_OVERLAY While this option is selected automatically when needed, you can enable it manually to improve device tree unit test coverage. -endmenu # OF +endif # OF diff --git a/drivers/of/Makefile b/drivers/of/Makefile index fcacb186a67b..156c072b3117 100644 --- a/drivers/of/Makefile +++ b/drivers/of/Makefile @@ -16,6 +16,3 @@ obj-$(CONFIG_OF_RESOLVE) += resolver.o obj-$(CONFIG_OF_OVERLAY) += overlay.o obj-$(CONFIG_OF_UNITTEST) += unittest-data/ - -CFLAGS_fdt.o = -I$(src)/../../scripts/dtc/libfdt -CFLAGS_fdt_address.o = -I$(src)/../../scripts/dtc/libfdt diff --git a/drivers/of/address.c b/drivers/of/address.c index 6906a3f61bd8..8bfda6ade2c0 100644 --- a/drivers/of/address.c +++ b/drivers/of/address.c @@ -712,7 +712,7 @@ int __weak pci_register_io_range(phys_addr_t addr, resource_size_t size) } /* add the range to the list */ - range = kzalloc(sizeof(*range), GFP_KERNEL); + range = kzalloc(sizeof(*range), GFP_ATOMIC); if (!range) { err = -ENOMEM; goto end_register; diff --git a/drivers/of/base.c b/drivers/of/base.c index 6f85ff74c10d..8b5a187a7682 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -89,7 +89,7 @@ EXPORT_SYMBOL(of_n_size_cells); #ifdef CONFIG_NUMA int __weak of_node_to_nid(struct device_node *np) { - return numa_node_id(); + return NUMA_NO_NODE; } #endif diff --git a/drivers/of/device.c b/drivers/of/device.c index 20c1332a0018..8b91ea241b10 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -163,6 +163,18 @@ void of_device_unregister(struct platform_device *ofdev) } EXPORT_SYMBOL(of_device_unregister); +const void *of_device_get_match_data(const struct device *dev) +{ + const struct of_device_id *match; + + match = of_match_device(dev->driver->of_match_table, dev); + if (!match) + return NULL; + + return match->data; +} +EXPORT_SYMBOL(of_device_get_match_data); + ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len) { const char *compat; diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index f2dd23a32267..07496560e5b9 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -164,11 +164,14 @@ static void *unflatten_dt_alloc(void **mem, unsigned long size, * unflatten_dt_node - Alloc and populate a device_node from the flat tree * @blob: The parent device tree blob * @mem: Memory chunk to use for allocating device nodes and properties - * @p: pointer to node in flat tree + * @poffset: pointer to node in flat tree * @dad: Parent struct device_node + * @nodepp: The device_node tree created by the call * @fpsize: Size of the node path up at the current depth. 
+ * @dryrun: If true, do not allocate device nodes but still calculate needed + * memory size */ -static void * unflatten_dt_node(void *blob, +static void * unflatten_dt_node(const void *blob, void *mem, int *poffset, struct device_node *dad, @@ -378,7 +381,7 @@ static void * unflatten_dt_node(void *blob, * @dt_alloc: An allocator that provides a virtual address to memory * for the resulting tree */ -static void __unflatten_device_tree(void *blob, +static void __unflatten_device_tree(const void *blob, struct device_node **mynodes, void * (*dt_alloc)(u64 size, u64 align)) { @@ -441,7 +444,7 @@ static void *kernel_tree_alloc(u64 size, u64 align) * pointers of the nodes so the normal device-tree walking functions * can be used. */ -void of_fdt_unflatten_tree(unsigned long *blob, +void of_fdt_unflatten_tree(const unsigned long *blob, struct device_node **mynodes) { __unflatten_device_tree(blob, mynodes, &kernel_tree_alloc); @@ -1024,6 +1027,11 @@ void * __init __weak early_init_dt_alloc_memory_arch(u64 size, u64 align) return __va(memblock_alloc(size, align)); } #else +void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) +{ + WARN_ON(1); +} + int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, bool nomap) { @@ -1031,6 +1039,12 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base, &base, &size, nomap ? " (nomap)" : ""); return -ENOSYS; } + +void * __init __weak early_init_dt_alloc_memory_arch(u64 size, u64 align) +{ + WARN_ON(1); + return NULL; +} #endif bool __init early_init_dt_verify(void *params) diff --git a/drivers/of/irq.c b/drivers/of/irq.c index 1a7980692f25..3cf7a01f557f 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c @@ -252,8 +252,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) * Successfully parsed an interrrupt-map translation; copy new * interrupt specifier into the out_irq structure */ - out_irq->np = newpar; - match_array = imap - newaddrsize - newintsize; for (i = 0; i < newintsize; i++) out_irq->args[i] = be32_to_cpup(imap - newintsize + i); @@ -262,6 +260,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq) skiplevel: /* Iterate again with new parent */ + out_irq->np = newpar; pr_debug(" -> new parent: %s\n", of_node_full_name(newpar)); of_node_put(ipar); ipar = newpar; @@ -469,7 +468,7 @@ int of_irq_to_resource_table(struct device_node *dev, struct resource *res, } EXPORT_SYMBOL_GPL(of_irq_to_resource_table); -struct intc_desc { +struct of_intc_desc { struct list_head list; struct device_node *dev; struct device_node *interrupt_parent; @@ -485,7 +484,7 @@ struct intc_desc { void __init of_irq_init(const struct of_device_id *matches) { struct device_node *np, *parent = NULL; - struct intc_desc *desc, *temp_desc; + struct of_intc_desc *desc, *temp_desc; struct list_head intc_desc_list, intc_parent_list; INIT_LIST_HEAD(&intc_desc_list); @@ -496,7 +495,7 @@ void __init of_irq_init(const struct of_device_id *matches) !of_device_is_available(np)) continue; /* - * Here, we allocate and populate an intc_desc with the node + * Here, we allocate and populate an of_intc_desc with the node * pointer, interrupt-parent device_node etc. */ desc = kzalloc(sizeof(*desc), GFP_KERNEL); diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index dee9270ba547..24e025f79299 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -333,7 +333,7 @@ static DEFINE_IDR(ov_idr); * of the overlay in a list. 
This list can be used to prevent * illegal overlay removals. * - * Returns the id of the created overlay, or an negative error number + * Returns the id of the created overlay, or a negative error number */ int of_overlay_create(struct device_node *tree) { @@ -481,7 +481,7 @@ static int overlay_removal_is_ok(struct of_overlay *ov) * * Removes an overlay if it is permissible. * - * Returns 0 on success, or an negative error number + * Returns 0 on success, or a negative error number */ int of_overlay_destroy(int id) { @@ -528,7 +528,7 @@ EXPORT_SYMBOL_GPL(of_overlay_destroy); * * Removes all overlays from the system in the correct order. * - * Returns 0 on success, or an negative error number + * Returns 0 on success, or a negative error number */ int of_overlay_destroy_all(void) { diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c index b75d684aefcd..734da589cdfb 100644 --- a/drivers/pci/host/pci-keystone.c +++ b/drivers/pci/host/pci-keystone.c @@ -221,10 +221,9 @@ static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) /* MSI IRQ */ if (IS_ENABLED(CONFIG_PCI_MSI)) { for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) { - irq_set_chained_handler(ks_pcie->msi_host_irqs[i], - ks_pcie_msi_irq_handler); - irq_set_handler_data(ks_pcie->msi_host_irqs[i], - ks_pcie); + irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i], + ks_pcie_msi_irq_handler, + ks_pcie); } } } diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c index 4c04360f378b..b2a189507fc3 100644 --- a/drivers/pcmcia/xxs1500_ss.c +++ b/drivers/pcmcia/xxs1500_ss.c @@ -11,6 +11,7 @@ #include <linux/io.h> #include <linux/ioport.h> #include <linux/mm.h> +#include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/resource.h> diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c index c4fc77aa766e..ad1ea1695b4a 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c @@ -1351,8 +1351,7 @@ int mtk_pctrl_init(struct platform_device *pdev, set_irq_flags(virq, IRQF_VALID); }; - irq_set_chained_handler(irq, mtk_eint_irq_handler); - irq_set_handler_data(irq, pctl); + irq_set_chained_handler_and_data(irq, mtk_eint_irq_handler, pctl); set_irq_flags(irq, IRQF_VALID); return 0; diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c index 873433da0f2c..c3c3d2345fc6 100644 --- a/drivers/pinctrl/pinctrl-adi2.c +++ b/drivers/pinctrl/pinctrl-adi2.c @@ -865,8 +865,8 @@ static int adi_gpio_pint_probe(struct platform_device *pdev) pint->pint_map_port = adi_pint_map_port; platform_set_drvdata(pdev, pint); - irq_set_chained_handler(pint->irq, adi_gpio_handle_pint_irq); - irq_set_handler_data(pint->irq, pint); + irq_set_chained_handler_and_data(pint->irq, adi_gpio_handle_pint_irq, + pint); list_add_tail(&pint->node, &adi_pint_list); diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index d34ac879af9e..c262e5f35c28 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c @@ -1661,8 +1661,8 @@ static int st_pctl_probe_dt(struct platform_device *pdev, if (IS_ERR(info->irqmux_base)) return PTR_ERR(info->irqmux_base); - irq_set_chained_handler(irq, st_gpio_irqmux_handler); - irq_set_handler_data(irq, info); + irq_set_chained_handler_and_data(irq, st_gpio_irqmux_handler, + info); } diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index 
0b7afa50121a..b18dabba03a4 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -563,8 +563,8 @@ static int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) return -ENOMEM; } - irq_set_chained_handler(irq, exynos_irq_demux_eint16_31); - irq_set_handler_data(irq, muxed_data); + irq_set_chained_handler_and_data(irq, exynos_irq_demux_eint16_31, + muxed_data); bank = d->pin_banks; idx = 0; diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c index f1993f42114c..01b43dbfb795 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c @@ -514,8 +514,7 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d) } eint_data->parents[i] = irq; - irq_set_chained_handler(irq, handlers[i]); - irq_set_handler_data(irq, eint_data); + irq_set_chained_handler_and_data(irq, handlers[i], eint_data); } bank = d->pin_banks; diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c index 7756c1e9e763..ec8cc3b47621 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c @@ -506,8 +506,7 @@ static int s3c64xx_eint_gpio_init(struct samsung_pinctrl_drv_data *d) data->domains[nr_domains++] = bank->irq_domain; } - irq_set_chained_handler(d->irq, s3c64xx_eint_gpio_irq); - irq_set_handler_data(d->irq, data); + irq_set_chained_handler_and_data(d->irq, s3c64xx_eint_gpio_irq, data); return 0; } @@ -731,8 +730,9 @@ static int s3c64xx_eint_eint0_init(struct samsung_pinctrl_drv_data *d) return -ENXIO; } - irq_set_chained_handler(irq, s3c64xx_eint0_handlers[i]); - irq_set_handler_data(irq, data); + irq_set_chained_handler_and_data(irq, + s3c64xx_eint0_handlers[i], + data); } bank = d->pin_banks; diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index d7857c72e627..f09573e13203 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -1005,9 +1005,9 @@ int sunxi_pinctrl_init(struct platform_device *pdev, writel(0xffffffff, pctl->membase + sunxi_irq_status_reg_from_bank(i)); - irq_set_chained_handler(pctl->irq[i], - sunxi_pinctrl_irq_handler); - irq_set_handler_data(pctl->irq[i], pctl); + irq_set_chained_handler_and_data(pctl->irq[i], + sunxi_pinctrl_irq_handler, + pctl); } dev_info(&pdev->dev, "initialized sunXi PIO driver\n"); diff --git a/drivers/platform/goldfish/pdev_bus.c b/drivers/platform/goldfish/pdev_bus.c index 8c43589c3edb..1f52462f4cdd 100644 --- a/drivers/platform/goldfish/pdev_bus.c +++ b/drivers/platform/goldfish/pdev_bus.c @@ -220,20 +220,10 @@ free_resources: return ret; } -static int goldfish_pdev_bus_remove(struct platform_device *pdev) -{ - iounmap(pdev_bus_base); - free_irq(pdev_bus_irq, pdev); - release_mem_region(pdev_bus_addr, pdev_bus_len); - return 0; -} - static struct platform_driver goldfish_pdev_bus_driver = { .probe = goldfish_pdev_bus_probe, - .remove = goldfish_pdev_bus_remove, .driver = { .name = "goldfish_pdev_bus" } }; - -module_platform_driver(goldfish_pdev_bus_driver); +builtin_platform_driver(goldfish_pdev_bus_driver); diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 7137a077b9a6..1e2f57f6d297 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -141,6 +141,22 @@ config DELL_SMO8800 To compile this driver as a module, choose M here: the module will be called dell-smo8800. 
+config DELL_RBTN + tristate "Dell Airplane Mode Switch driver" + depends on ACPI + depends on INPUT + depends on RFKILL + ---help--- + Say Y here if you want to support Dell Airplane Mode Switch ACPI + device on Dell laptops. Sometimes it has names: DELLABCE or DELRBTN. + This driver register rfkill device or input hotkey device depending + on hardware type (hw switch slider or keyboard toggle button). For + rfkill devices it receive HW switch events and set correct hard + rfkill state. + + To compile this driver as a module, choose M here: the module will + be called dell-rbtn. + config FUJITSU_LAPTOP tristate "Fujitsu Laptop Extras" @@ -622,7 +638,6 @@ config ACPI_TOSHIBA select NEW_LEDS depends on BACKLIGHT_CLASS_DEVICE depends on INPUT - depends on RFKILL || RFKILL = n depends on SERIO_I8042 || SERIO_I8042 = n depends on ACPI_VIDEO || ACPI_VIDEO = n select INPUT_POLLDEV @@ -653,6 +668,7 @@ config ACPI_TOSHIBA config TOSHIBA_BT_RFKILL tristate "Toshiba Bluetooth RFKill switch support" depends on ACPI + depends on RFKILL || RFKILL = n ---help--- This driver adds support for Bluetooth events for the RFKill switch on modern Toshiba laptops with full ACPI support and diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index f82232b1fc4d..b3e54ed863c3 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -14,6 +14,7 @@ obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o obj-$(CONFIG_DELL_WMI) += dell-wmi.o obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o +obj-$(CONFIG_DELL_RBTN) += dell-rbtn.o obj-$(CONFIG_ACER_WMI) += acer-wmi.o obj-$(CONFIG_ACERHDF) += acerhdf.o obj-$(CONFIG_HP_ACCEL) += hp_accel.o diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 6f8558f744a4..efbc3f0c592b 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -78,6 +78,7 @@ MODULE_LICENSE("GPL"); #define ASUS_WMI_METHODID_GPID 0x44495047 /* Get Panel ID?? (Resol) */ #define ASUS_WMI_METHODID_QMOD 0x444F4D51 /* Quiet MODe */ #define ASUS_WMI_METHODID_SPLV 0x4C425053 /* Set Panel Light Value */ +#define ASUS_WMI_METHODID_AGFN 0x4E464741 /* FaN? */ #define ASUS_WMI_METHODID_SFUN 0x4E554653 /* FUNCtionalities */ #define ASUS_WMI_METHODID_SDSP 0x50534453 /* Set DiSPlay output */ #define ASUS_WMI_METHODID_GDSP 0x50534447 /* Get DiSPlay output */ @@ -150,12 +151,38 @@ MODULE_LICENSE("GPL"); #define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF #define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00 +#define ASUS_FAN_DESC "cpu_fan" +#define ASUS_FAN_MFUN 0x13 +#define ASUS_FAN_SFUN_READ 0x06 +#define ASUS_FAN_SFUN_WRITE 0x07 +#define ASUS_FAN_CTRL_MANUAL 1 +#define ASUS_FAN_CTRL_AUTO 2 + struct bios_args { u32 arg0; u32 arg1; } __packed; /* + * Struct that's used for all methods called via AGFN. Naming is + * identically to the AML code. 
+ */ +struct agfn_args { + u16 mfun; /* probably "Multi-function" to be called */ + u16 sfun; /* probably "Sub-function" to be called */ + u16 len; /* size of the hole struct, including subfunction fields */ + u8 stas; /* not used by now */ + u8 err; /* zero on success */ +} __packed; + +/* struct used for calling fan read and write methods */ +struct fan_args { + struct agfn_args agfn; /* common fields */ + u8 fan; /* fan number: 0: set auto mode 1: 1st fan */ + u32 speed; /* read: RPM/100 - write: 0-255 */ +} __packed; + +/* * <platform>/ - debugfs root directory * dev_id - current dev_id * ctrl_param - current ctrl_param @@ -204,6 +231,10 @@ struct asus_wmi { struct asus_rfkill gps; struct asus_rfkill uwb; + bool asus_hwmon_fan_manual_mode; + int asus_hwmon_num_fans; + int asus_hwmon_pwm; + struct hotplug_slot *hotplug_slot; struct mutex hotplug_lock; struct mutex wmi_lock; @@ -294,6 +325,36 @@ exit: return 0; } +static int asus_wmi_evaluate_method_agfn(const struct acpi_buffer args) +{ + struct acpi_buffer input; + u64 phys_addr; + u32 retval; + u32 status = -1; + + /* + * Copy to dma capable address otherwise memory corruption occurs as + * bios has to be able to access it. + */ + input.pointer = kzalloc(args.length, GFP_DMA | GFP_KERNEL); + input.length = args.length; + if (!input.pointer) + return -ENOMEM; + phys_addr = virt_to_phys(input.pointer); + memcpy(input.pointer, args.pointer, args.length); + + status = asus_wmi_evaluate_method(ASUS_WMI_METHODID_AGFN, + phys_addr, 0, &retval); + if (!status) + memcpy(args.pointer, input.pointer, args.length); + + kfree(input.pointer); + if (status) + return -ENXIO; + + return retval; +} + static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval) { return asus_wmi_evaluate_method(asus->dsts_id, dev_id, 0, retval); @@ -1022,35 +1083,228 @@ exit: /* * Hwmon device */ -static ssize_t asus_hwmon_pwm1(struct device *dev, - struct device_attribute *attr, - char *buf) +static int asus_hwmon_agfn_fan_speed_read(struct asus_wmi *asus, int fan, + int *speed) +{ + struct fan_args args = { + .agfn.len = sizeof(args), + .agfn.mfun = ASUS_FAN_MFUN, + .agfn.sfun = ASUS_FAN_SFUN_READ, + .fan = fan, + .speed = 0, + }; + struct acpi_buffer input = { (acpi_size) sizeof(args), &args }; + int status; + + if (fan != 1) + return -EINVAL; + + status = asus_wmi_evaluate_method_agfn(input); + + if (status || args.agfn.err) + return -ENXIO; + + if (speed) + *speed = args.speed; + + return 0; +} + +static int asus_hwmon_agfn_fan_speed_write(struct asus_wmi *asus, int fan, + int *speed) +{ + struct fan_args args = { + .agfn.len = sizeof(args), + .agfn.mfun = ASUS_FAN_MFUN, + .agfn.sfun = ASUS_FAN_SFUN_WRITE, + .fan = fan, + .speed = speed ? *speed : 0, + }; + struct acpi_buffer input = { (acpi_size) sizeof(args), &args }; + int status; + + /* 1: for setting 1st fan's speed 0: setting auto mode */ + if (fan != 1 && fan != 0) + return -EINVAL; + + status = asus_wmi_evaluate_method_agfn(input); + + if (status || args.agfn.err) + return -ENXIO; + + if (speed && fan == 1) + asus->asus_hwmon_pwm = *speed; + + return 0; +} + +/* + * Check if we can read the speed of one fan. If true we assume we can also + * control it. 
+ */ +static int asus_hwmon_get_fan_number(struct asus_wmi *asus, int *num_fans) +{ + int status; + int speed = 0; + + *num_fans = 0; + + status = asus_hwmon_agfn_fan_speed_read(asus, 1, &speed); + if (!status) + *num_fans = 1; + + return 0; +} + +static int asus_hwmon_fan_set_auto(struct asus_wmi *asus) +{ + int status; + + status = asus_hwmon_agfn_fan_speed_write(asus, 0, NULL); + if (status) + return -ENXIO; + + asus->asus_hwmon_fan_manual_mode = false; + + return 0; +} + +static int asus_hwmon_fan_rpm_show(struct device *dev, int fan) { struct asus_wmi *asus = dev_get_drvdata(dev); - u32 value; + int value; + int ret; + + /* no speed readable on manual mode */ + if (asus->asus_hwmon_fan_manual_mode) + return -ENXIO; + + ret = asus_hwmon_agfn_fan_speed_read(asus, fan+1, &value); + if (ret) { + pr_warn("reading fan speed failed: %d\n", ret); + return -ENXIO; + } + + return value; +} + +static void asus_hwmon_pwm_show(struct asus_wmi *asus, int fan, int *value) +{ int err; - err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_CTRL, &value); + if (asus->asus_hwmon_pwm >= 0) { + *value = asus->asus_hwmon_pwm; + return; + } + err = asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_FAN_CTRL, value); if (err < 0) - return err; + return; - value &= 0xFF; - - if (value == 1) /* Low Speed */ - value = 85; - else if (value == 2) - value = 170; - else if (value == 3) - value = 255; - else if (value != 0) { - pr_err("Unknown fan speed %#x\n", value); - value = -1; + *value &= 0xFF; + + if (*value == 1) /* Low Speed */ + *value = 85; + else if (*value == 2) + *value = 170; + else if (*value == 3) + *value = 255; + else if (*value) { + pr_err("Unknown fan speed %#x\n", *value); + *value = -1; } +} + +static ssize_t pwm1_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct asus_wmi *asus = dev_get_drvdata(dev); + int value; + + asus_hwmon_pwm_show(asus, 0, &value); return sprintf(buf, "%d\n", value); } +static ssize_t pwm1_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { + struct asus_wmi *asus = dev_get_drvdata(dev); + int value; + int state; + int ret; + + ret = kstrtouint(buf, 10, &value); + + if (ret) + return ret; + + value = clamp(value, 0, 255); + + state = asus_hwmon_agfn_fan_speed_write(asus, 1, &value); + if (state) + pr_warn("Setting fan speed failed: %d\n", state); + else + asus->asus_hwmon_fan_manual_mode = true; + + return count; +} + +static ssize_t fan1_input_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int value = asus_hwmon_fan_rpm_show(dev, 0); + + return sprintf(buf, "%d\n", value < 0 ? 
-1 : value*100); + +} + +static ssize_t pwm1_enable_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct asus_wmi *asus = dev_get_drvdata(dev); + + if (asus->asus_hwmon_fan_manual_mode) + return sprintf(buf, "%d\n", ASUS_FAN_CTRL_MANUAL); + + return sprintf(buf, "%d\n", ASUS_FAN_CTRL_AUTO); +} + +static ssize_t pwm1_enable_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct asus_wmi *asus = dev_get_drvdata(dev); + int status = 0; + int state; + int ret; + + ret = kstrtouint(buf, 10, &state); + + if (ret) + return ret; + + if (state == ASUS_FAN_CTRL_MANUAL) + asus->asus_hwmon_fan_manual_mode = true; + else + status = asus_hwmon_fan_set_auto(asus); + + if (status) + return status; + + return count; +} + +static ssize_t fan1_label_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%s\n", ASUS_FAN_DESC); +} + static ssize_t asus_hwmon_temp1(struct device *dev, struct device_attribute *attr, char *buf) @@ -1069,11 +1323,21 @@ static ssize_t asus_hwmon_temp1(struct device *dev, return sprintf(buf, "%d\n", value); } -static DEVICE_ATTR(pwm1, S_IRUGO, asus_hwmon_pwm1, NULL); +/* Fan1 */ +static DEVICE_ATTR_RW(pwm1); +static DEVICE_ATTR_RW(pwm1_enable); +static DEVICE_ATTR_RO(fan1_input); +static DEVICE_ATTR_RO(fan1_label); + +/* Temperature */ static DEVICE_ATTR(temp1_input, S_IRUGO, asus_hwmon_temp1, NULL); static struct attribute *hwmon_attributes[] = { &dev_attr_pwm1.attr, + &dev_attr_pwm1_enable.attr, + &dev_attr_fan1_input.attr, + &dev_attr_fan1_label.attr, + &dev_attr_temp1_input.attr, NULL }; @@ -1084,19 +1348,28 @@ static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj, struct device *dev = container_of(kobj, struct device, kobj); struct platform_device *pdev = to_platform_device(dev->parent); struct asus_wmi *asus = platform_get_drvdata(pdev); - bool ok = true; int dev_id = -1; + int fan_attr = -1; u32 value = ASUS_WMI_UNSUPPORTED_METHOD; + bool ok = true; if (attr == &dev_attr_pwm1.attr) dev_id = ASUS_WMI_DEVID_FAN_CTRL; else if (attr == &dev_attr_temp1_input.attr) dev_id = ASUS_WMI_DEVID_THERMAL_CTRL; + + if (attr == &dev_attr_fan1_input.attr + || attr == &dev_attr_fan1_label.attr + || attr == &dev_attr_pwm1.attr + || attr == &dev_attr_pwm1_enable.attr) { + fan_attr = 1; + } + if (dev_id != -1) { int err = asus_wmi_get_devstate(asus, dev_id, &value); - if (err < 0) + if (err < 0 && fan_attr == -1) return 0; /* can't return negative here */ } @@ -1112,10 +1385,16 @@ static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj, if (value == ASUS_WMI_UNSUPPORTED_METHOD || value & 0xFFF80000 || (!asus->sfun && !(value & ASUS_WMI_DSTS_PRESENCE_BIT))) ok = false; + else + ok = fan_attr <= asus->asus_hwmon_num_fans; } else if (dev_id == ASUS_WMI_DEVID_THERMAL_CTRL) { /* If value is zero, something is clearly wrong */ - if (value == 0) + if (!value) ok = false; + } else if (fan_attr <= asus->asus_hwmon_num_fans && fan_attr != -1) { + ok = true; + } else { + ok = false; } return ok ? 
attr->mode : 0; @@ -1723,6 +2002,25 @@ error_debugfs: return -ENOMEM; } +static int asus_wmi_fan_init(struct asus_wmi *asus) +{ + int status; + + asus->asus_hwmon_pwm = -1; + asus->asus_hwmon_num_fans = -1; + asus->asus_hwmon_fan_manual_mode = false; + + status = asus_hwmon_get_fan_number(asus, &asus->asus_hwmon_num_fans); + if (status) { + asus->asus_hwmon_num_fans = 0; + pr_warn("Could not determine number of fans: %d\n", status); + return -ENXIO; + } + + pr_info("Number of fans: %d\n", asus->asus_hwmon_num_fans); + return 0; +} + /* * WMI Driver */ @@ -1756,6 +2054,9 @@ static int asus_wmi_add(struct platform_device *pdev) if (err) goto fail_input; + err = asus_wmi_fan_init(asus); /* probably no problems on error */ + asus_hwmon_fan_set_auto(asus); + err = asus_wmi_hwmon_init(asus); if (err) goto fail_hwmon; @@ -1831,6 +2132,7 @@ static int asus_wmi_remove(struct platform_device *device) asus_wmi_rfkill_exit(asus); asus_wmi_debugfs_exit(asus); asus_wmi_platform_exit(asus); + asus_hwmon_fan_set_auto(asus); kfree(asus); return 0; diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 01d081052b50..35de903cb506 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -33,6 +33,7 @@ #include <linux/seq_file.h> #include <acpi/video.h> #include "../../firmware/dcdbas.h" +#include "dell-rbtn.h" #define BRIGHTNESS_TOKEN 0x7d #define KBD_LED_OFF_TOKEN 0x01E1 @@ -643,6 +644,20 @@ static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str, return false; } +static int (*dell_rbtn_notifier_register_func)(struct notifier_block *); +static int (*dell_rbtn_notifier_unregister_func)(struct notifier_block *); + +static int dell_laptop_rbtn_notifier_call(struct notifier_block *nb, + unsigned long action, void *data) +{ + schedule_delayed_work(&dell_rfkill_work, 0); + return NOTIFY_OK; +} + +static struct notifier_block dell_laptop_rbtn_notifier = { + .notifier_call = dell_laptop_rbtn_notifier_call, +}; + static int __init dell_setup_rfkill(void) { int status, ret, whitelisted; @@ -719,10 +734,62 @@ static int __init dell_setup_rfkill(void) goto err_wwan; } - ret = i8042_install_filter(dell_laptop_i8042_filter); - if (ret) { - pr_warn("Unable to install key filter\n"); + /* + * The Dell Airplane Mode Switch driver (dell-rbtn) supports ACPI + * devices which can receive events from the HW slider switch. + * + * Dell SMBIOS on whitelisted models supports controlling radio devices + * but does not support receiving HW button switch events. We can use + * the i8042 filter hook function to receive keyboard data and handle + * the keycode for the HW button. + * + * So, if possible, we will use the Dell Airplane Mode Switch ACPI + * driver for receiving HW events and Dell SMBIOS for setting rfkill + * states. If the ACPI driver or device is not available, we will fall + * back to the i8042 filter hook function. + * + * To prevent duplicate rfkill devices which control the same thing, + * the dell-rbtn driver will automatically remove its own rfkill + * devices once dell_rbtn_notifier_register() is called.
+ */ + + dell_rbtn_notifier_register_func = + symbol_request(dell_rbtn_notifier_register); + if (dell_rbtn_notifier_register_func) { + dell_rbtn_notifier_unregister_func = + symbol_request(dell_rbtn_notifier_unregister); + if (!dell_rbtn_notifier_unregister_func) { + symbol_put(dell_rbtn_notifier_register); + dell_rbtn_notifier_register_func = NULL; + } + } + + if (dell_rbtn_notifier_register_func) { + ret = dell_rbtn_notifier_register_func( + &dell_laptop_rbtn_notifier); + symbol_put(dell_rbtn_notifier_register); + dell_rbtn_notifier_register_func = NULL; + if (ret != 0) { + symbol_put(dell_rbtn_notifier_unregister); + dell_rbtn_notifier_unregister_func = NULL; + } + } else { + pr_info("Symbols from dell-rbtn acpi driver are not available\n"); + ret = -ENODEV; + } + + if (ret == 0) { + pr_info("Using dell-rbtn acpi driver for receiving events\n"); + } else if (ret != -ENODEV) { + pr_warn("Unable to register dell rbtn notifier\n"); goto err_filter; + } else { + ret = i8042_install_filter(dell_laptop_i8042_filter); + if (ret) { + pr_warn("Unable to install key filter\n"); + goto err_filter; + } + pr_info("Using i8042 filter function for receiving events\n"); } return 0; @@ -745,6 +812,14 @@ err_wifi: static void dell_cleanup_rfkill(void) { + if (dell_rbtn_notifier_unregister_func) { + dell_rbtn_notifier_unregister_func(&dell_laptop_rbtn_notifier); + symbol_put(dell_rbtn_notifier_unregister); + dell_rbtn_notifier_unregister_func = NULL; + } else { + i8042_remove_filter(dell_laptop_i8042_filter); + } + cancel_delayed_work_sync(&dell_rfkill_work); if (wifi_rfkill) { rfkill_unregister(wifi_rfkill); rfkill_destroy(wifi_rfkill); @@ -1957,8 +2032,6 @@ static int __init dell_init(void) return 0; fail_backlight: - i8042_remove_filter(dell_laptop_i8042_filter); - cancel_delayed_work_sync(&dell_rfkill_work); dell_cleanup_rfkill(); fail_rfkill: free_page((unsigned long)bufferpage); @@ -1979,8 +2052,6 @@ static void __exit dell_exit(void) if (quirks && quirks->touchpad_led) touchpad_led_exit(); kbd_led_exit(); - i8042_remove_filter(dell_laptop_i8042_filter); - cancel_delayed_work_sync(&dell_rfkill_work); backlight_device_unregister(dell_backlight_device); dell_cleanup_rfkill(); if (platform_device) { @@ -1991,7 +2062,14 @@ static void __exit dell_exit(void) free_page((unsigned long)buffer); } -module_init(dell_init); +/* + * dell-rbtn.c exports functions which will not work correctly (and could + * cause a kernel crash) if they are called before the dell-rbtn.c init code + * has run. This is not a problem when dell-rbtn.c is compiled as an external + * module. When both files (dell-rbtn.c and dell-laptop.c) are compiled + * statically into the kernel, we need to ensure that dell_init() is called + * after dell-rbtn has been initialized. This can be achieved by using + * late_initcall() instead of module_init(). + */ +late_initcall(dell_init); module_exit(dell_exit); MODULE_AUTHOR("Matthew Garrett <[email protected]>"); diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c new file mode 100644 index 000000000000..cd410e392550 --- /dev/null +++ b/drivers/platform/x86/dell-rbtn.c @@ -0,0 +1,423 @@ +/* + Dell Airplane Mode Switch driver + Copyright (C) 2014-2015 Pali Rohár <[email protected]> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version.
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. +*/ + +#include <linux/module.h> +#include <linux/acpi.h> +#include <linux/rfkill.h> +#include <linux/input.h> + +enum rbtn_type { + RBTN_UNKNOWN, + RBTN_TOGGLE, + RBTN_SLIDER, +}; + +struct rbtn_data { + enum rbtn_type type; + struct rfkill *rfkill; + struct input_dev *input_dev; +}; + + +/* + * acpi functions + */ + +static enum rbtn_type rbtn_check(struct acpi_device *device) +{ + unsigned long long output; + acpi_status status; + + status = acpi_evaluate_integer(device->handle, "CRBT", NULL, &output); + if (ACPI_FAILURE(status)) + return RBTN_UNKNOWN; + + switch (output) { + case 0: + case 1: + return RBTN_TOGGLE; + case 2: + case 3: + return RBTN_SLIDER; + default: + return RBTN_UNKNOWN; + } +} + +static int rbtn_get(struct acpi_device *device) +{ + unsigned long long output; + acpi_status status; + + status = acpi_evaluate_integer(device->handle, "GRBT", NULL, &output); + if (ACPI_FAILURE(status)) + return -EINVAL; + + return !output; +} + +static int rbtn_acquire(struct acpi_device *device, bool enable) +{ + struct acpi_object_list input; + union acpi_object param; + acpi_status status; + + param.type = ACPI_TYPE_INTEGER; + param.integer.value = enable; + input.count = 1; + input.pointer = ¶m; + + status = acpi_evaluate_object(device->handle, "ARBT", &input, NULL); + if (ACPI_FAILURE(status)) + return -EINVAL; + + return 0; +} + + +/* + * rfkill device + */ + +static void rbtn_rfkill_query(struct rfkill *rfkill, void *data) +{ + struct acpi_device *device = data; + int state; + + state = rbtn_get(device); + if (state < 0) + return; + + rfkill_set_states(rfkill, state, state); +} + +static int rbtn_rfkill_set_block(void *data, bool blocked) +{ + /* NOTE: setting soft rfkill state is not supported */ + return -EINVAL; +} + +static struct rfkill_ops rbtn_ops = { + .query = rbtn_rfkill_query, + .set_block = rbtn_rfkill_set_block, +}; + +static int rbtn_rfkill_init(struct acpi_device *device) +{ + struct rbtn_data *rbtn_data = device->driver_data; + int ret; + + if (rbtn_data->rfkill) + return 0; + + /* + * NOTE: rbtn controls all radio devices, not only WLAN + * but rfkill interface does not support "ANY" type + * so "WLAN" type is used + */ + rbtn_data->rfkill = rfkill_alloc("dell-rbtn", &device->dev, + RFKILL_TYPE_WLAN, &rbtn_ops, device); + if (!rbtn_data->rfkill) + return -ENOMEM; + + ret = rfkill_register(rbtn_data->rfkill); + if (ret) { + rfkill_destroy(rbtn_data->rfkill); + rbtn_data->rfkill = NULL; + return ret; + } + + return 0; +} + +static void rbtn_rfkill_exit(struct acpi_device *device) +{ + struct rbtn_data *rbtn_data = device->driver_data; + + if (!rbtn_data->rfkill) + return; + + rfkill_unregister(rbtn_data->rfkill); + rfkill_destroy(rbtn_data->rfkill); + rbtn_data->rfkill = NULL; +} + +static void rbtn_rfkill_event(struct acpi_device *device) +{ + struct rbtn_data *rbtn_data = device->driver_data; + + if (rbtn_data->rfkill) + rbtn_rfkill_query(rbtn_data->rfkill, device); +} + + +/* + * input device + */ + +static int rbtn_input_init(struct rbtn_data *rbtn_data) +{ + int ret; + + rbtn_data->input_dev = input_allocate_device(); + if (!rbtn_data->input_dev) + return -ENOMEM; + + rbtn_data->input_dev->name = "DELL Wireless hotkeys"; + rbtn_data->input_dev->phys = "dellabce/input0"; + rbtn_data->input_dev->id.bustype = BUS_HOST; + 
rbtn_data->input_dev->evbit[0] = BIT(EV_KEY); + set_bit(KEY_RFKILL, rbtn_data->input_dev->keybit); + + ret = input_register_device(rbtn_data->input_dev); + if (ret) { + input_free_device(rbtn_data->input_dev); + rbtn_data->input_dev = NULL; + return ret; + } + + return 0; +} + +static void rbtn_input_exit(struct rbtn_data *rbtn_data) +{ + input_unregister_device(rbtn_data->input_dev); + rbtn_data->input_dev = NULL; +} + +static void rbtn_input_event(struct rbtn_data *rbtn_data) +{ + input_report_key(rbtn_data->input_dev, KEY_RFKILL, 1); + input_sync(rbtn_data->input_dev); + input_report_key(rbtn_data->input_dev, KEY_RFKILL, 0); + input_sync(rbtn_data->input_dev); +} + + +/* + * acpi driver + */ + +static int rbtn_add(struct acpi_device *device); +static int rbtn_remove(struct acpi_device *device); +static void rbtn_notify(struct acpi_device *device, u32 event); + +static const struct acpi_device_id rbtn_ids[] = { + { "DELRBTN", 0 }, + { "DELLABCE", 0 }, + { "", 0 }, +}; + +static struct acpi_driver rbtn_driver = { + .name = "dell-rbtn", + .ids = rbtn_ids, + .ops = { + .add = rbtn_add, + .remove = rbtn_remove, + .notify = rbtn_notify, + }, + .owner = THIS_MODULE, +}; + + +/* + * notifier export functions + */ + +static bool auto_remove_rfkill = true; + +static ATOMIC_NOTIFIER_HEAD(rbtn_chain_head); + +static int rbtn_inc_count(struct device *dev, void *data) +{ + struct acpi_device *device = to_acpi_device(dev); + struct rbtn_data *rbtn_data = device->driver_data; + int *count = data; + + if (rbtn_data->type == RBTN_SLIDER) + (*count)++; + + return 0; +} + +static int rbtn_switch_dev(struct device *dev, void *data) +{ + struct acpi_device *device = to_acpi_device(dev); + struct rbtn_data *rbtn_data = device->driver_data; + bool enable = data; + + if (rbtn_data->type != RBTN_SLIDER) + return 0; + + if (enable) + rbtn_rfkill_init(device); + else + rbtn_rfkill_exit(device); + + return 0; +} + +int dell_rbtn_notifier_register(struct notifier_block *nb) +{ + bool first; + int count; + int ret; + + count = 0; + ret = driver_for_each_device(&rbtn_driver.drv, NULL, &count, + rbtn_inc_count); + if (ret || count == 0) + return -ENODEV; + + first = !rbtn_chain_head.head; + + ret = atomic_notifier_chain_register(&rbtn_chain_head, nb); + if (ret != 0) + return ret; + + if (auto_remove_rfkill && first) + ret = driver_for_each_device(&rbtn_driver.drv, NULL, + (void *)false, rbtn_switch_dev); + + return ret; +} +EXPORT_SYMBOL_GPL(dell_rbtn_notifier_register); + +int dell_rbtn_notifier_unregister(struct notifier_block *nb) +{ + int ret; + + ret = atomic_notifier_chain_unregister(&rbtn_chain_head, nb); + if (ret != 0) + return ret; + + if (auto_remove_rfkill && !rbtn_chain_head.head) + ret = driver_for_each_device(&rbtn_driver.drv, NULL, + (void *)true, rbtn_switch_dev); + + return ret; +} +EXPORT_SYMBOL_GPL(dell_rbtn_notifier_unregister); + + +/* + * acpi driver functions + */ + +static int rbtn_add(struct acpi_device *device) +{ + struct rbtn_data *rbtn_data; + enum rbtn_type type; + int ret = 0; + + type = rbtn_check(device); + if (type == RBTN_UNKNOWN) { + dev_info(&device->dev, "Unknown device type\n"); + return -EINVAL; + } + + ret = rbtn_acquire(device, true); + if (ret < 0) { + dev_err(&device->dev, "Cannot enable device\n"); + return ret; + } + + rbtn_data = devm_kzalloc(&device->dev, sizeof(*rbtn_data), GFP_KERNEL); + if (!rbtn_data) + return -ENOMEM; + + rbtn_data->type = type; + device->driver_data = rbtn_data; + + switch (rbtn_data->type) { + case RBTN_TOGGLE: + ret = rbtn_input_init(rbtn_data); 
+ break; + case RBTN_SLIDER: + if (auto_remove_rfkill && rbtn_chain_head.head) + ret = 0; + else + ret = rbtn_rfkill_init(device); + break; + default: + ret = -EINVAL; + } + + return ret; + +} + +static int rbtn_remove(struct acpi_device *device) +{ + struct rbtn_data *rbtn_data = device->driver_data; + + switch (rbtn_data->type) { + case RBTN_TOGGLE: + rbtn_input_exit(rbtn_data); + break; + case RBTN_SLIDER: + rbtn_rfkill_exit(device); + break; + default: + break; + } + + rbtn_acquire(device, false); + device->driver_data = NULL; + + return 0; +} + +static void rbtn_notify(struct acpi_device *device, u32 event) +{ + struct rbtn_data *rbtn_data = device->driver_data; + + if (event != 0x80) { + dev_info(&device->dev, "Received unknown event (0x%x)\n", + event); + return; + } + + switch (rbtn_data->type) { + case RBTN_TOGGLE: + rbtn_input_event(rbtn_data); + break; + case RBTN_SLIDER: + rbtn_rfkill_event(device); + atomic_notifier_call_chain(&rbtn_chain_head, event, device); + break; + default: + break; + } +} + + +/* + * module functions + */ + +module_acpi_driver(rbtn_driver); + +module_param(auto_remove_rfkill, bool, 0444); + +MODULE_PARM_DESC(auto_remove_rfkill, "Automatically remove rfkill devices when " + "other modules start receiving events " + "from this module and re-add them when " + "the last module stops receiving events " + "(default true)"); +MODULE_DEVICE_TABLE(acpi, rbtn_ids); +MODULE_DESCRIPTION("Dell Airplane Mode Switch driver"); +MODULE_AUTHOR("Pali Rohár <[email protected]>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/dell-rbtn.h b/drivers/platform/x86/dell-rbtn.h new file mode 100644 index 000000000000..c59cc6b8ec2b --- /dev/null +++ b/drivers/platform/x86/dell-rbtn.h @@ -0,0 +1,24 @@ +/* + Dell Airplane Mode Switch driver + Copyright (C) 2014-2015 Pali Rohár <[email protected]> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+*/ + +#ifndef _DELL_RBTN_H_ +#define _DELL_RBTN_H_ + +struct notifier_block; + +int dell_rbtn_notifier_register(struct notifier_block *nb); +int dell_rbtn_notifier_unregister(struct notifier_block *nb); + +#endif diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index bea022830944..76b57388d01b 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -465,8 +465,9 @@ static const struct ideapad_rfk_data ideapad_rfk_data[] = { static int ideapad_rfk_set(void *data, bool blocked) { struct ideapad_rfk_priv *priv = data; + int opcode = ideapad_rfk_data[priv->dev].opcode; - return write_ec_cmd(priv->priv->adev->handle, priv->dev, !blocked); + return write_ec_cmd(priv->priv->adev->handle, opcode, !blocked); } static struct rfkill_ops ideapad_rfk_ops = { @@ -838,6 +839,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { }, }, { + .ident = "Lenovo G50-30", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G50-30"), + }, + }, + { .ident = "Lenovo Yoga 2 11 / 13 / Pro", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), diff --git a/drivers/platform/x86/pvpanic.c b/drivers/platform/x86/pvpanic.c index 073a90a63dbc..fd86daba7ffd 100644 --- a/drivers/platform/x86/pvpanic.c +++ b/drivers/platform/x86/pvpanic.c @@ -92,13 +92,13 @@ pvpanic_walk_resources(struct acpi_resource *res, void *context) static int pvpanic_add(struct acpi_device *device) { - acpi_status status; - u64 ret; + int ret; - status = acpi_evaluate_integer(device->handle, "_STA", NULL, - &ret); + ret = acpi_bus_get_status(device); + if (ret < 0) + return ret; - if (ACPI_FAILURE(status) || (ret & 0x0B) != 0x0B) + if (!device->status.enabled || !device->status.functional) return -ENODEV; acpi_walk_resources(device->handle, METHOD_NAME__CRS, diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 59bf27ed72d6..3ad7b1fa24ce 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -31,7 +31,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#define TOSHIBA_ACPI_VERSION "0.21" +#define TOSHIBA_ACPI_VERSION "0.22" #define PROC_INTERFACE_VERSION 1 #include <linux/kernel.h> @@ -41,7 +41,6 @@ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/backlight.h> -#include <linux/rfkill.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <linux/leds.h> @@ -82,7 +81,7 @@ MODULE_LICENSE("GPL"); #define TCI_WORDS 6 -/* operations */ +/* Operations */ #define HCI_SET 0xff00 #define HCI_GET 0xfe00 #define SCI_OPEN 0xf100 @@ -90,7 +89,7 @@ MODULE_LICENSE("GPL"); #define SCI_GET 0xf300 #define SCI_SET 0xf400 -/* return codes */ +/* Return codes */ #define TOS_SUCCESS 0x0000 #define TOS_OPEN_CLOSE_OK 0x0044 #define TOS_FAILURE 0x1000 @@ -105,7 +104,7 @@ MODULE_LICENSE("GPL"); #define TOS_NOT_INITIALIZED 0x8d50 #define TOS_NOT_INSTALLED 0x8e00 -/* registers */ +/* Registers */ #define HCI_FAN 0x0004 #define HCI_TR_BACKLIGHT 0x0005 #define HCI_SYSTEM_EVENT 0x0016 @@ -127,7 +126,7 @@ MODULE_LICENSE("GPL"); #define SCI_TOUCHPAD 0x050e #define SCI_KBD_FUNCTION_KEYS 0x0522 -/* field definitions */ +/* Field definitions */ #define HCI_ACCEL_MASK 0x7fff #define HCI_HOTKEY_DISABLE 0x0b #define HCI_HOTKEY_ENABLE 0x09 @@ -165,7 +164,6 @@ MODULE_LICENSE("GPL"); struct toshiba_acpi_dev { struct acpi_device *acpi_dev; const char *method_hci; - struct rfkill *bt_rfk; struct input_dev *hotkey_dev; struct work_struct 
hotkey_work; struct backlight_device *backlight_dev; @@ -202,8 +200,6 @@ struct toshiba_acpi_dev { unsigned int panel_power_on_supported:1; unsigned int usb_three_supported:1; unsigned int sysfs_created:1; - - struct mutex mutex; }; static struct toshiba_acpi_dev *toshiba_acpi; @@ -330,13 +326,13 @@ static acpi_status tci_raw(struct toshiba_acpi_dev *dev, } /* - * Common hci tasks (get or set one or two value) + * Common hci tasks * * In addition to the ACPI status, the HCI system returns a result which * may be useful (such as "not supported"). */ -static u32 hci_write1(struct toshiba_acpi_dev *dev, u32 reg, u32 in1) +static u32 hci_write(struct toshiba_acpi_dev *dev, u32 reg, u32 in1) { u32 in[TCI_WORDS] = { HCI_SET, reg, in1, 0, 0, 0 }; u32 out[TCI_WORDS]; @@ -345,7 +341,7 @@ static u32 hci_write1(struct toshiba_acpi_dev *dev, u32 reg, u32 in1) return ACPI_SUCCESS(status) ? out[0] : TOS_FAILURE; } -static u32 hci_read1(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1) +static u32 hci_read(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1) { u32 in[TCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 }; u32 out[TCI_WORDS]; @@ -359,31 +355,6 @@ static u32 hci_read1(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1) return out[0]; } -static u32 hci_write2(struct toshiba_acpi_dev *dev, u32 reg, u32 in1, u32 in2) -{ - u32 in[TCI_WORDS] = { HCI_SET, reg, in1, in2, 0, 0 }; - u32 out[TCI_WORDS]; - acpi_status status = tci_raw(dev, in, out); - - return ACPI_SUCCESS(status) ? out[0] : TOS_FAILURE; -} - -static u32 hci_read2(struct toshiba_acpi_dev *dev, - u32 reg, u32 *out1, u32 *out2) -{ - u32 in[TCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 }; - u32 out[TCI_WORDS]; - acpi_status status = tci_raw(dev, in, out); - - if (ACPI_FAILURE(status)) - return TOS_FAILURE; - - *out1 = out[2]; - *out2 = out[3]; - - return out[0]; -} - /* * Common sci tasks */ @@ -395,7 +366,7 @@ static int sci_open(struct toshiba_acpi_dev *dev) acpi_status status; status = tci_raw(dev, in, out); - if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) { + if (ACPI_FAILURE(status)) { pr_err("ACPI call to open SCI failed\n"); return 0; } @@ -433,7 +404,7 @@ static void sci_close(struct toshiba_acpi_dev *dev) acpi_status status; status = tci_raw(dev, in, out); - if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) { + if (ACPI_FAILURE(status)) { pr_err("ACPI call to close SCI failed\n"); return; } @@ -481,7 +452,7 @@ static int toshiba_illumination_available(struct toshiba_acpi_dev *dev) status = tci_raw(dev, in, out); sci_close(dev); - if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) { + if (ACPI_FAILURE(status)) { pr_err("ACPI call to query Illumination support failed\n"); return 0; } else if (out[0] == TOS_NOT_SUPPORTED) { @@ -522,7 +493,7 @@ static enum led_brightness toshiba_illumination_get(struct led_classdev *cdev) struct toshiba_acpi_dev, led_dev); u32 state, result; - /* First request : initialize communication. */ + /* First request : initialize communication. 
*/ if (!sci_open(dev)) return LED_OFF; @@ -625,7 +596,7 @@ static enum led_brightness toshiba_kbd_backlight_get(struct led_classdev *cdev) u32 state, result; /* Check the keyboard backlight state */ - result = hci_read1(dev, HCI_KBD_ILLUMINATION, &state); + result = hci_read(dev, HCI_KBD_ILLUMINATION, &state); if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) { pr_err("ACPI call to get the keyboard backlight failed\n"); return LED_OFF; @@ -646,7 +617,7 @@ static void toshiba_kbd_backlight_set(struct led_classdev *cdev, /* Set the keyboard backlight state */ state = brightness ? 1 : 0; - result = hci_write1(dev, HCI_KBD_ILLUMINATION, state); + result = hci_write(dev, HCI_KBD_ILLUMINATION, state); if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) { pr_err("ACPI call to set KBD Illumination mode failed\n"); return; @@ -703,7 +674,7 @@ static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev) u32 out[TCI_WORDS]; status = tci_raw(dev, in, out); - if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) { + if (ACPI_FAILURE(status)) { pr_err("ACPI call to get ECO led failed\n"); } else if (out[0] == TOS_NOT_INSTALLED) { pr_info("ECO led not installed"); @@ -825,7 +796,7 @@ static void toshiba_usb_sleep_charge_available(struct toshiba_acpi_dev *dev) return; status = tci_raw(dev, in, out); - if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) { + if (ACPI_FAILURE(status)) { pr_err("ACPI call to get USB Sleep and Charge mode failed\n"); sci_close(dev); return; @@ -839,7 +810,7 @@ static void toshiba_usb_sleep_charge_available(struct toshiba_acpi_dev *dev) in[5] = SCI_USB_CHARGE_BAT_LVL; status = tci_raw(dev, in, out); - if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) { + if (ACPI_FAILURE(status)) { pr_err("ACPI call to get USB Sleep and Charge mode failed\n"); sci_close(dev); return; @@ -919,7 +890,7 @@ static int toshiba_sleep_functions_status_get(struct toshiba_acpi_dev *dev, in[5] = SCI_USB_CHARGE_BAT_LVL; status = tci_raw(dev, in, out); sci_close(dev); - if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) { + if (ACPI_FAILURE(status)) { pr_err("ACPI call to get USB S&C battery level failed\n"); return -EIO; } else if (out[0] == TOS_NOT_SUPPORTED) { @@ -948,7 +919,7 @@ static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev, in[5] = SCI_USB_CHARGE_BAT_LVL; status = tci_raw(dev, in, out); sci_close(dev); - if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) { + if (ACPI_FAILURE(status)) { pr_err("ACPI call to set USB S&C battery level failed\n"); return -EIO; } else if (out[0] == TOS_NOT_SUPPORTED) { @@ -974,7 +945,7 @@ static int toshiba_usb_rapid_charge_get(struct toshiba_acpi_dev *dev, in[5] = SCI_USB_CHARGE_RAPID_DSP; status = tci_raw(dev, in, out); sci_close(dev); - if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) { + if (ACPI_FAILURE(status)) { pr_err("ACPI call to get USB Rapid Charge failed\n"); return -EIO; } else if (out[0] == TOS_NOT_SUPPORTED || @@ -1002,7 +973,7 @@ static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev, in[5] = SCI_USB_CHARGE_RAPID_DSP; status = tci_raw(dev, in, out); sci_close(dev); - if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) { + if (ACPI_FAILURE(status)) { pr_err("ACPI call to set USB Rapid Charge failed\n"); return -EIO; } else if (out[0] == TOS_NOT_SUPPORTED) { @@ -1194,121 +1165,31 @@ static int toshiba_usb_three_set(struct toshiba_acpi_dev *dev, u32 state) static int toshiba_hotkey_event_type_get(struct toshiba_acpi_dev *dev, u32 *type) { - u32 val1 = 0x03; - u32 val2 = 0; - u32 result; + u32 
in[TCI_WORDS] = { HCI_GET, HCI_SYSTEM_INFO, 0x03, 0, 0, 0 }; + u32 out[TCI_WORDS]; + acpi_status status; - result = hci_read2(dev, HCI_SYSTEM_INFO, &val1, &val2); - if (result == TOS_FAILURE) { + status = tci_raw(dev, in, out); + if (ACPI_FAILURE(status)) { pr_err("ACPI call to get System type failed\n"); return -EIO; - } else if (result == TOS_NOT_SUPPORTED) { + } else if (out[0] == TOS_NOT_SUPPORTED) { pr_info("System type not supported\n"); return -ENODEV; } - *type = val2; + *type = out[3]; return 0; } -/* Bluetooth rfkill handlers */ - -static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present) -{ - u32 hci_result; - u32 value, value2; - - value = 0; - value2 = 0; - hci_result = hci_read2(dev, HCI_WIRELESS, &value, &value2); - if (hci_result == TOS_SUCCESS) - *present = (value & HCI_WIRELESS_BT_PRESENT) ? true : false; - - return hci_result; -} - -static u32 hci_get_radio_state(struct toshiba_acpi_dev *dev, bool *radio_state) -{ - u32 hci_result; - u32 value, value2; - - value = 0; - value2 = 0x0001; - hci_result = hci_read2(dev, HCI_WIRELESS, &value, &value2); - - *radio_state = value & HCI_WIRELESS_KILL_SWITCH; - return hci_result; -} - -static int bt_rfkill_set_block(void *data, bool blocked) -{ - struct toshiba_acpi_dev *dev = data; - u32 result1, result2; - u32 value; - int err; - bool radio_state; - - value = (blocked == false); - - mutex_lock(&dev->mutex); - if (hci_get_radio_state(dev, &radio_state) != TOS_SUCCESS) { - err = -EIO; - goto out; - } - - if (!radio_state) { - err = 0; - goto out; - } - - result1 = hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_POWER); - result2 = hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_ATTACH); - - if (result1 != TOS_SUCCESS || result2 != TOS_SUCCESS) - err = -EIO; - else - err = 0; - out: - mutex_unlock(&dev->mutex); - return err; -} - -static void bt_rfkill_poll(struct rfkill *rfkill, void *data) -{ - bool new_rfk_state; - bool value; - u32 hci_result; - struct toshiba_acpi_dev *dev = data; - - mutex_lock(&dev->mutex); - - hci_result = hci_get_radio_state(dev, &value); - if (hci_result != TOS_SUCCESS) { - /* Can't do anything useful */ - mutex_unlock(&dev->mutex); - return; - } - - new_rfk_state = value; - - mutex_unlock(&dev->mutex); - - if (rfkill_set_hw_state(rfkill, !new_rfk_state)) - bt_rfkill_set_block(data, true); -} - -static const struct rfkill_ops toshiba_rfk_ops = { - .set_block = bt_rfkill_set_block, - .poll = bt_rfkill_poll, -}; - +/* Transflective Backlight */ static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled) { u32 hci_result; u32 status; - hci_result = hci_read1(dev, HCI_TR_BACKLIGHT, &status); + hci_result = hci_read(dev, HCI_TR_BACKLIGHT, &status); *enabled = !status; return hci_result == TOS_SUCCESS ? 0 : -EIO; } @@ -1318,12 +1199,13 @@ static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable) u32 hci_result; u32 value = !enable; - hci_result = hci_write1(dev, HCI_TR_BACKLIGHT, value); + hci_result = hci_write(dev, HCI_TR_BACKLIGHT, value); return hci_result == TOS_SUCCESS ? 
0 : -EIO; } -static struct proc_dir_entry *toshiba_proc_dir /*= 0*/; +static struct proc_dir_entry *toshiba_proc_dir; +/* LCD Brightness */ static int __get_lcd_brightness(struct toshiba_acpi_dev *dev) { u32 hci_result; @@ -1341,7 +1223,7 @@ static int __get_lcd_brightness(struct toshiba_acpi_dev *dev) brightness++; } - hci_result = hci_read1(dev, HCI_LCD_BRIGHTNESS, &value); + hci_result = hci_read(dev, HCI_LCD_BRIGHTNESS, &value); if (hci_result == TOS_SUCCESS) return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT); @@ -1396,7 +1278,7 @@ static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value) } value = value << HCI_LCD_BRIGHTNESS_SHIFT; - hci_result = hci_write1(dev, HCI_LCD_BRIGHTNESS, value); + hci_result = hci_write(dev, HCI_LCD_BRIGHTNESS, value); return hci_result == TOS_SUCCESS ? 0 : -EIO; } @@ -1446,7 +1328,7 @@ static int get_video_status(struct toshiba_acpi_dev *dev, u32 *status) { u32 hci_result; - hci_result = hci_read1(dev, HCI_VIDEO_OUT, status); + hci_result = hci_read(dev, HCI_VIDEO_OUT, status); return hci_result == TOS_SUCCESS ? 0 : -EIO; } @@ -1531,7 +1413,8 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf, _set_bit(&new_video_out, HCI_VIDEO_OUT_TV, tv_out); /* * To avoid unnecessary video disruption, only write the new - * video setting if something changed. */ + * video setting if something changed. + */ if (new_video_out != video_out) ret = write_acpi_int(METHOD_VIDEO_OUT, new_video_out); } @@ -1552,7 +1435,7 @@ static int get_fan_status(struct toshiba_acpi_dev *dev, u32 *status) { u32 hci_result; - hci_result = hci_read1(dev, HCI_FAN, status); + hci_result = hci_read(dev, HCI_FAN, status); return hci_result == TOS_SUCCESS ? 0 : -EIO; } @@ -1592,7 +1475,7 @@ static ssize_t fan_proc_write(struct file *file, const char __user *buf, if (sscanf(cmd, " force_on : %i", &value) == 1 && value >= 0 && value <= 1) { - hci_result = hci_write1(dev, HCI_FAN, value); + hci_result = hci_write(dev, HCI_FAN, value); if (hci_result == TOS_SUCCESS) dev->force_fan = value; else @@ -1620,7 +1503,7 @@ static int keys_proc_show(struct seq_file *m, void *v) u32 value; if (!dev->key_event_valid && dev->system_event_supported) { - hci_result = hci_read1(dev, HCI_SYSTEM_EVENT, &value); + hci_result = hci_read(dev, HCI_SYSTEM_EVENT, &value); if (hci_result == TOS_SUCCESS) { dev->key_event_valid = 1; dev->last_key_event = value; @@ -1632,7 +1515,7 @@ static int keys_proc_show(struct seq_file *m, void *v) * some machines where system events sporadically * become disabled. 
*/ - hci_result = hci_write1(dev, HCI_SYSTEM_EVENT, 1); + hci_result = hci_write(dev, HCI_SYSTEM_EVENT, 1); pr_notice("Re-enabled hotkeys\n"); } else { pr_err("Error reading hotkey status\n"); @@ -1769,7 +1652,7 @@ static ssize_t fan_store(struct device *dev, if (state != 0 && state != 1) return -EINVAL; - result = hci_write1(toshiba, HCI_FAN, state); + result = hci_write(toshiba, HCI_FAN, state); if (result == TOS_FAILURE) return -EIO; else if (result == TOS_NOT_SUPPORTED) @@ -2391,7 +2274,7 @@ static int toshiba_acpi_enable_hotkeys(struct toshiba_acpi_dev *dev) if (ACPI_FAILURE(status)) return -ENODEV; - result = hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE); + result = hci_write(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE); if (result == TOS_FAILURE) return -EIO; else if (result == TOS_NOT_SUPPORTED) @@ -2408,8 +2291,8 @@ static void toshiba_acpi_enable_special_functions(struct toshiba_acpi_dev *dev) * Re-activate the hotkeys, but this time, we are using the * "Special Functions" mode. */ - result = hci_write1(dev, HCI_HOTKEY_EVENT, - HCI_HOTKEY_SPECIAL_FUNCTIONS); + result = hci_write(dev, HCI_HOTKEY_EVENT, + HCI_HOTKEY_SPECIAL_FUNCTIONS); if (result != TOS_SUCCESS) pr_err("Could not enable the Special Function mode\n"); } @@ -2490,7 +2373,7 @@ static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev) toshiba_acpi_report_hotkey(dev, scancode); } else if (dev->system_event_supported) { do { - hci_result = hci_read1(dev, HCI_SYSTEM_EVENT, &value); + hci_result = hci_read(dev, HCI_SYSTEM_EVENT, &value); switch (hci_result) { case TOS_SUCCESS: toshiba_acpi_report_hotkey(dev, (int)value); @@ -2502,7 +2385,7 @@ static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev) * sporadically become disabled. */ hci_result = - hci_write1(dev, HCI_SYSTEM_EVENT, 1); + hci_write(dev, HCI_SYSTEM_EVENT, 1); pr_notice("Re-enabled hotkeys\n"); /* Fall through */ default: @@ -2579,7 +2462,7 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev) if (acpi_has_method(dev->acpi_dev->handle, "INFO")) dev->info_supported = 1; else { - hci_result = hci_write1(dev, HCI_SYSTEM_EVENT, 1); + hci_result = hci_write(dev, HCI_SYSTEM_EVENT, 1); if (hci_result == TOS_SUCCESS) dev->system_event_supported = 1; } @@ -2689,11 +2572,6 @@ static int toshiba_acpi_remove(struct acpi_device *acpi_dev) sparse_keymap_free(dev->hotkey_dev); } - if (dev->bt_rfk) { - rfkill_unregister(dev->bt_rfk); - rfkill_destroy(dev->bt_rfk); - } - backlight_device_unregister(dev->backlight_dev); if (dev->illumination_supported) @@ -2730,7 +2608,6 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev) const char *hci_method; u32 special_functions; u32 dummy; - bool bt_present; int ret = 0; if (toshiba_acpi) @@ -2766,33 +2643,10 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev) if (toshiba_acpi_setup_keyboard(dev)) pr_info("Unable to activate hotkeys\n"); - mutex_init(&dev->mutex); - ret = toshiba_acpi_setup_backlight(dev); if (ret) goto error; - /* Register rfkill switch for Bluetooth */ - if (hci_get_bt_present(dev, &bt_present) == TOS_SUCCESS && bt_present) { - dev->bt_rfk = rfkill_alloc("Toshiba Bluetooth", - &acpi_dev->dev, - RFKILL_TYPE_BLUETOOTH, - &toshiba_rfk_ops, - dev); - if (!dev->bt_rfk) { - pr_err("unable to allocate rfkill device\n"); - ret = -ENOMEM; - goto error; - } - - ret = rfkill_register(dev->bt_rfk); - if (ret) { - pr_err("unable to register rfkill device\n"); - rfkill_destroy(dev->bt_rfk); - goto error; - } - } - if (toshiba_illumination_available(dev)) { 
dev->led_dev.name = "toshiba::illumination"; dev->led_dev.max_brightness = 1; @@ -2930,7 +2784,7 @@ static int toshiba_acpi_suspend(struct device *device) u32 result; if (dev->hotkey_dev) - result = hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_DISABLE); + result = hci_write(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_DISABLE); return 0; } diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c index 249800763362..c5e45089ac51 100644 --- a/drivers/platform/x86/toshiba_bluetooth.c +++ b/drivers/platform/x86/toshiba_bluetooth.c @@ -10,12 +10,6 @@ * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. - * - * Note the Toshiba Bluetooth RFKill switch seems to be a strange - * fish. It only provides a BT event when the switch is flipped to - * the 'on' position. When flipping it to 'off', the USB device is - * simply pulled away underneath us, without any BT event being - * delivered. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -25,6 +19,7 @@ #include <linux/init.h> #include <linux/types.h> #include <linux/acpi.h> +#include <linux/rfkill.h> #define BT_KILLSWITCH_MASK 0x01 #define BT_PLUGGED_MASK 0x40 @@ -34,6 +29,15 @@ MODULE_AUTHOR("Jes Sorensen <[email protected]>"); MODULE_DESCRIPTION("Toshiba Laptop ACPI Bluetooth Enable Driver"); MODULE_LICENSE("GPL"); +struct toshiba_bluetooth_dev { + struct acpi_device *acpi_dev; + struct rfkill *rfk; + + bool killswitch; + bool plugged; + bool powered; +}; + static int toshiba_bt_rfkill_add(struct acpi_device *device); static int toshiba_bt_rfkill_remove(struct acpi_device *device); static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event); @@ -95,41 +99,12 @@ static int toshiba_bluetooth_status(acpi_handle handle) return -ENXIO; } - pr_info("Bluetooth status %llu\n", status); - return status; } static int toshiba_bluetooth_enable(acpi_handle handle) { acpi_status result; - bool killswitch; - bool powered; - bool plugged; - int status; - - /* - * Query ACPI to verify RFKill switch is set to 'on'. - * If not, we return silently, no need to report it as - * an error. - */ - status = toshiba_bluetooth_status(handle); - if (status < 0) - return status; - - killswitch = (status & BT_KILLSWITCH_MASK) ? true : false; - powered = (status & BT_POWER_MASK) ? true : false; - plugged = (status & BT_PLUGGED_MASK) ? true : false; - - if (!killswitch) - return 0; - /* - * This check ensures to only enable the device if it is powered - * off or detached, as some recent devices somehow pass the killswitch - * test, causing a loop enabling/disabling the device, see bug 93911. - */ - if (powered || plugged) - return 0; result = acpi_evaluate_object(handle, "AUSB", NULL, NULL); if (ACPI_FAILURE(result)) { @@ -165,20 +140,102 @@ static int toshiba_bluetooth_disable(acpi_handle handle) return 0; } +/* Helper function */ +static int toshiba_bluetooth_sync_status(struct toshiba_bluetooth_dev *bt_dev) +{ + int status; + + status = toshiba_bluetooth_status(bt_dev->acpi_dev->handle); + if (status < 0) { + pr_err("Could not sync bluetooth device status\n"); + return status; + } + + bt_dev->killswitch = (status & BT_KILLSWITCH_MASK) ? true : false; + bt_dev->plugged = (status & BT_PLUGGED_MASK) ? true : false; + bt_dev->powered = (status & BT_POWER_MASK) ? 
true : false; + + pr_debug("Bluetooth status %d killswitch %d plugged %d powered %d\n", + status, bt_dev->killswitch, bt_dev->plugged, bt_dev->powered); + + return 0; +} + +/* RFKill handlers */ +static int bt_rfkill_set_block(void *data, bool blocked) +{ + struct toshiba_bluetooth_dev *bt_dev = data; + int ret; + + ret = toshiba_bluetooth_sync_status(bt_dev); + if (ret) + return ret; + + if (!bt_dev->killswitch) + return 0; + + if (blocked) + ret = toshiba_bluetooth_disable(bt_dev->acpi_dev->handle); + else + ret = toshiba_bluetooth_enable(bt_dev->acpi_dev->handle); + + return ret; +} + +static void bt_rfkill_poll(struct rfkill *rfkill, void *data) +{ + struct toshiba_bluetooth_dev *bt_dev = data; + + if (toshiba_bluetooth_sync_status(bt_dev)) + return; + + /* + * Note the Toshiba Bluetooth RFKill switch seems to be a strange + * fish. It only provides a BT event when the switch is flipped to + * the 'on' position. When flipping it to 'off', the USB device is + * simply pulled away underneath us, without any BT event being + * delivered. + */ + rfkill_set_hw_state(bt_dev->rfk, !bt_dev->killswitch); +} + +static const struct rfkill_ops rfk_ops = { + .set_block = bt_rfkill_set_block, + .poll = bt_rfkill_poll, +}; + +/* ACPI driver functions */ static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event) { - toshiba_bluetooth_enable(device->handle); + struct toshiba_bluetooth_dev *bt_dev = acpi_driver_data(device); + + if (toshiba_bluetooth_sync_status(bt_dev)) + return; + + rfkill_set_hw_state(bt_dev->rfk, !bt_dev->killswitch); } #ifdef CONFIG_PM_SLEEP static int toshiba_bt_resume(struct device *dev) { - return toshiba_bluetooth_enable(to_acpi_device(dev)->handle); + struct toshiba_bluetooth_dev *bt_dev; + int ret; + + bt_dev = acpi_driver_data(to_acpi_device(dev)); + + ret = toshiba_bluetooth_sync_status(bt_dev); + if (ret) + return ret; + + rfkill_set_hw_state(bt_dev->rfk, !bt_dev->killswitch); + + return 0; } #endif static int toshiba_bt_rfkill_add(struct acpi_device *device) { + struct toshiba_bluetooth_dev *bt_dev; int result; result = toshiba_bluetooth_present(device->handle); @@ -187,17 +244,54 @@ static int toshiba_bt_rfkill_add(struct acpi_device *device) pr_info("Toshiba ACPI Bluetooth device driver\n"); - /* Enable the BT device */ - result = toshiba_bluetooth_enable(device->handle); - if (result) + bt_dev = kzalloc(sizeof(*bt_dev), GFP_KERNEL); + if (!bt_dev) + return -ENOMEM; + bt_dev->acpi_dev = device; + device->driver_data = bt_dev; + dev_set_drvdata(&device->dev, bt_dev); + + result = toshiba_bluetooth_sync_status(bt_dev); + if (result) { + kfree(bt_dev); return result; + } + + bt_dev->rfk = rfkill_alloc("Toshiba Bluetooth", + &device->dev, + RFKILL_TYPE_BLUETOOTH, + &rfk_ops, + bt_dev); + if (!bt_dev->rfk) { + pr_err("Unable to allocate rfkill device\n"); + kfree(bt_dev); + return -ENOMEM; + } + + rfkill_set_hw_state(bt_dev->rfk, !bt_dev->killswitch); + + result = rfkill_register(bt_dev->rfk); + if (result) { + pr_err("Unable to register rfkill device\n"); + rfkill_destroy(bt_dev->rfk); + kfree(bt_dev); + } return result; } static int toshiba_bt_rfkill_remove(struct acpi_device *device) { + struct toshiba_bluetooth_dev *bt_dev = acpi_driver_data(device); + /* clean up */ + if (bt_dev->rfk) { + rfkill_unregister(bt_dev->rfk); + rfkill_destroy(bt_dev->rfk); + } + + kfree(bt_dev); + return toshiba_bluetooth_disable(device->handle); } diff --git a/drivers/platform/x86/toshiba_haps.c b/drivers/platform/x86/toshiba_haps.c index 65300b6a84b9..7f2afc6b5eb9 100644 
--- a/drivers/platform/x86/toshiba_haps.c +++ b/drivers/platform/x86/toshiba_haps.c @@ -78,15 +78,20 @@ static ssize_t protection_level_store(struct device *dev, const char *buf, size_t count) { struct toshiba_haps_dev *haps = dev_get_drvdata(dev); - int level, ret; - - if (sscanf(buf, "%d", &level) != 1 || level < 0 || level > 3) - return -EINVAL; + int level; + int ret; - /* Set the sensor level. - * Acceptable levels are: + ret = kstrtoint(buf, 0, &level); + if (ret) + return ret; + /* + * Check for supported levels, which can be: * 0 - Disabled | 1 - Low | 2 - Medium | 3 - High */ + if (level < 0 || level > 3) + return -EINVAL; + + /* Set the sensor level */ ret = toshiba_haps_protection_level(haps->acpi_dev->handle, level); if (ret != 0) return ret; @@ -95,15 +100,21 @@ static ssize_t protection_level_store(struct device *dev, return count; } +static DEVICE_ATTR_RW(protection_level); static ssize_t reset_protection_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct toshiba_haps_dev *haps = dev_get_drvdata(dev); - int reset, ret; + int reset; + int ret; - if (sscanf(buf, "%d", &reset) != 1 || reset != 1) + ret = kstrtoint(buf, 0, &reset); + if (ret) + return ret; + /* The only accepted value is 1 */ + if (reset != 1) return -EINVAL; /* Reset the protection interface */ @@ -113,10 +124,7 @@ static ssize_t reset_protection_store(struct device *dev, return count; } - -static DEVICE_ATTR(protection_level, S_IRUGO | S_IWUSR, - protection_level_show, protection_level_store); -static DEVICE_ATTR(reset_protection, S_IWUSR, NULL, reset_protection_store); +static DEVICE_ATTR_WO(reset_protection); static struct attribute *haps_attributes[] = { &dev_attr_protection_level.attr, diff --git a/drivers/power/reset/syscon-reboot.c b/drivers/power/reset/syscon-reboot.c index d3c7d245ae63..7d0d269a0837 100644 --- a/drivers/power/reset/syscon-reboot.c +++ b/drivers/power/reset/syscon-reboot.c @@ -88,4 +88,4 @@ static struct platform_driver syscon_reboot_driver = { .of_match_table = syscon_reboot_of_match, }, }; -module_platform_driver(syscon_reboot_driver); +builtin_platform_driver(syscon_reboot_driver); diff --git a/drivers/regulator/max77802.c b/drivers/regulator/max77802.c index 6af41abccacb..c07ee13bd470 100644 --- a/drivers/regulator/max77802.c +++ b/drivers/regulator/max77802.c @@ -27,6 +27,7 @@ #include <linux/gpio.h> #include <linux/slab.h> #include <linux/gpio/consumer.h> +#include <linux/module.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index b3e5bd1d5d9c..9842301f7980 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -685,10 +685,7 @@ static inline void *cxgbi_alloc_big_mem(unsigned int size, static inline void cxgbi_free_big_mem(void *addr) { - if (is_vmalloc_addr(addr)) - vfree(addr); - else - kfree(addr); + kvfree(addr); } static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr) diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 1f8e2dc9c616..30268bb2ddb6 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -2363,17 +2363,13 @@ do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write) u64 block, rest = 0; struct scsi_data_buffer *sdb; enum dma_data_direction dir; - size_t (*func)(struct scatterlist *, unsigned int, void *, size_t, - off_t); if (do_write) { sdb = scsi_out(scmd); dir = 
DMA_TO_DEVICE; - func = sg_pcopy_to_buffer; } else { sdb = scsi_in(scmd); dir = DMA_FROM_DEVICE; - func = sg_pcopy_from_buffer; } if (!sdb->length) @@ -2385,16 +2381,16 @@ do_device_access(struct scsi_cmnd *scmd, u64 lba, u32 num, bool do_write) if (block + num > sdebug_store_sectors) rest = block + num - sdebug_store_sectors; - ret = func(sdb->table.sgl, sdb->table.nents, + ret = sg_copy_buffer(sdb->table.sgl, sdb->table.nents, fake_storep + (block * scsi_debug_sector_size), - (num - rest) * scsi_debug_sector_size, 0); + (num - rest) * scsi_debug_sector_size, 0, do_write); if (ret != (num - rest) * scsi_debug_sector_size) return ret; if (rest) { - ret += func(sdb->table.sgl, sdb->table.nents, + ret += sg_copy_buffer(sdb->table.sgl, sdb->table.nents, fake_storep, rest * scsi_debug_sector_size, - (num - rest) * scsi_debug_sector_size); + (num - rest) * scsi_debug_sector_size, do_write); } return ret; diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c index 81f22980b2de..156b790072b4 100644 --- a/drivers/sh/intc/core.c +++ b/drivers/sh/intc/core.c @@ -366,8 +366,9 @@ int __init register_intc_controller(struct intc_desc *desc) /* redirect this interrupts to the first one */ irq_set_chip(irq2, &dummy_irq_chip); - irq_set_chained_handler(irq2, intc_redirect_irq); - irq_set_handler_data(irq2, (void *)irq); + irq_set_chained_handler_and_data(irq2, + intc_redirect_irq, + (void *)irq); } } diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c index f30ac9354ff2..f5f1b821241a 100644 --- a/drivers/sh/intc/virq.c +++ b/drivers/sh/intc/virq.c @@ -243,8 +243,9 @@ restart: */ irq_set_nothread(irq); - irq_set_chained_handler(entry->pirq, intc_virq_handler); + /* Set handler data before installing the handler */ add_virq_to_pirq(entry->pirq, irq); + irq_set_chained_handler(entry->pirq, intc_virq_handler); radix_tree_tag_clear(&d->tree, entry->enum_id, INTC_TAG_VIRQ_NEEDS_ALLOC); diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c index b562af816c0a..b04b05a0904e 100644 --- a/drivers/soc/qcom/spm.c +++ b/drivers/soc/qcom/spm.c @@ -260,7 +260,7 @@ static int __init qcom_cpuidle_init(struct device_node *cpu_node, int cpu) /* We have atleast one power down mode */ cpumask_clear(&mask); cpumask_set_cpu(cpu, &mask); - qcom_scm_set_warm_boot_addr(cpu_resume, &mask); + qcom_scm_set_warm_boot_addr(cpu_resume_arm, &mask); } per_cpu(qcom_idle_ops, cpu) = fns; diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index cc119d15dd16..75d0457a77b7 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -1021,7 +1021,7 @@ static struct platform_driver tegra_pmc_driver = { }, .probe = tegra_pmc_probe, }; -module_platform_driver(tegra_pmc_driver); +builtin_platform_driver(tegra_pmc_driver); /* * Early initialization to allow access to registers in the very early boot diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c index 1a07bf540fec..e642c4540dda 100644 --- a/drivers/soc/versatile/soc-realview.c +++ b/drivers/soc/versatile/soc-realview.c @@ -142,4 +142,4 @@ static struct platform_driver realview_soc_driver = { .of_match_table = realview_soc_of_match, }, }; -module_platform_driver(realview_soc_driver); +builtin_platform_driver(realview_soc_driver); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 0b4e24217564..cd3bfc16d25f 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -279,10 +279,7 @@ int transport_alloc_session_tags(struct 
se_session *se_sess, if (rc < 0) { pr_err("Unable to init se_sess->sess_tag_pool," " tag_num: %u\n", tag_num); - if (is_vmalloc_addr(se_sess->sess_cmd_map)) - vfree(se_sess->sess_cmd_map); - else - kfree(se_sess->sess_cmd_map); + kvfree(se_sess->sess_cmd_map); se_sess->sess_cmd_map = NULL; return -ENOMEM; } @@ -489,10 +486,7 @@ void transport_free_session(struct se_session *se_sess) { if (se_sess->sess_cmd_map) { percpu_ida_destroy(&se_sess->sess_tag_pool); - if (is_vmalloc_addr(se_sess->sess_cmd_map)) - vfree(se_sess->sess_cmd_map); - else - kfree(se_sess->sess_cmd_map); + kvfree(se_sess->sess_cmd_map); } kmem_cache_free(se_sess_cache, se_sess); } diff --git a/drivers/tty/metag_da.c b/drivers/tty/metag_da.c index 3774600741d8..9325262289f9 100644 --- a/drivers/tty/metag_da.c +++ b/drivers/tty/metag_da.c @@ -640,25 +640,7 @@ err_destroy_ports: put_tty_driver(channel_driver); return ret; } - -static void dashtty_exit(void) -{ - int nport; - struct dashtty_port *dport; - - del_timer_sync(&put_timer); - kthread_stop(dashtty_thread); - del_timer_sync(&poll_timer); - tty_unregister_driver(channel_driver); - for (nport = 0; nport < NUM_TTY_CHANNELS; nport++) { - dport = &dashtty_ports[nport]; - tty_port_destroy(&dport->port); - } - put_tty_driver(channel_driver); -} - -module_init(dashtty_init); -module_exit(dashtty_exit); +device_initcall(dashtty_init); #ifdef CONFIG_DA_CONSOLE diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 978204333c94..d75a66c72750 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -22,6 +22,7 @@ #include <linux/pm_runtime.h> #include <linux/console.h> #include <linux/pm_qos.h> +#include <linux/pm_wakeirq.h> #include <linux/dma-mapping.h> #include "8250.h" @@ -552,17 +553,6 @@ static void omap8250_uart_qos_work(struct work_struct *work) pm_qos_update_request(&priv->pm_qos_request, priv->latency); } -static irqreturn_t omap_wake_irq(int irq, void *dev_id) -{ - struct uart_port *port = dev_id; - int ret; - - ret = port->handle_irq(port); - if (ret) - return IRQ_HANDLED; - return IRQ_NONE; -} - #ifdef CONFIG_SERIAL_8250_DMA static int omap_8250_dma_handle_irq(struct uart_port *port); #endif @@ -596,11 +586,9 @@ static int omap_8250_startup(struct uart_port *port) int ret; if (priv->wakeirq) { - ret = request_irq(priv->wakeirq, omap_wake_irq, - port->irqflags, "uart wakeup irq", port); + ret = dev_pm_set_dedicated_wake_irq(port->dev, priv->wakeirq); if (ret) return ret; - disable_irq(priv->wakeirq); } pm_runtime_get_sync(port->dev); @@ -649,8 +637,7 @@ static int omap_8250_startup(struct uart_port *port) err: pm_runtime_mark_last_busy(port->dev); pm_runtime_put_autosuspend(port->dev); - if (priv->wakeirq) - free_irq(priv->wakeirq, port); + dev_pm_clear_wake_irq(port->dev); return ret; } @@ -682,10 +669,8 @@ static void omap_8250_shutdown(struct uart_port *port) pm_runtime_mark_last_busy(port->dev); pm_runtime_put_autosuspend(port->dev); - free_irq(port->irq, port); - if (priv->wakeirq) - free_irq(priv->wakeirq, port); + dev_pm_clear_wake_irq(port->dev); } static void omap_8250_throttle(struct uart_port *port) @@ -1226,31 +1211,6 @@ static int omap8250_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM - -static inline void omap8250_enable_wakeirq(struct omap8250_priv *priv, - bool enable) -{ - if (!priv->wakeirq) - return; - - if (enable) - enable_irq(priv->wakeirq); - else - disable_irq_nosync(priv->wakeirq); -} - -static void omap8250_enable_wakeup(struct omap8250_priv 
*priv, - bool enable) -{ - if (enable == priv->wakeups_enabled) - return; - - omap8250_enable_wakeirq(priv, enable); - priv->wakeups_enabled = enable; -} -#endif - #ifdef CONFIG_PM_SLEEP static int omap8250_prepare(struct device *dev) { @@ -1277,11 +1237,6 @@ static int omap8250_suspend(struct device *dev) serial8250_suspend_port(priv->line); flush_work(&priv->qos_work); - - if (device_may_wakeup(dev)) - omap8250_enable_wakeup(priv, true); - else - omap8250_enable_wakeup(priv, false); return 0; } @@ -1289,9 +1244,6 @@ static int omap8250_resume(struct device *dev) { struct omap8250_priv *priv = dev_get_drvdata(dev); - if (device_may_wakeup(dev)) - omap8250_enable_wakeup(priv, false); - serial8250_resume_port(priv->line); return 0; } @@ -1333,7 +1285,6 @@ static int omap8250_runtime_suspend(struct device *dev) return -EBUSY; } - omap8250_enable_wakeup(priv, true); if (up->dma) omap_8250_rx_dma(up, UART_IIR_RX_TIMEOUT); @@ -1354,7 +1305,6 @@ static int omap8250_runtime_resume(struct device *dev) return 0; up = serial8250_get_port(priv->line); - omap8250_enable_wakeup(priv, false); loss_cntx = omap8250_lost_context(up); if (loss_cntx) diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 7f49172ccd86..7a2172b5e93c 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -38,6 +38,7 @@ #include <linux/serial_core.h> #include <linux/irq.h> #include <linux/pm_runtime.h> +#include <linux/pm_wakeirq.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/gpio.h> @@ -160,7 +161,6 @@ struct uart_omap_port { unsigned long port_activity; int context_loss_cnt; u32 errata; - u8 wakeups_enabled; u32 features; int rts_gpio; @@ -209,28 +209,11 @@ static int serial_omap_get_context_loss_count(struct uart_omap_port *up) return pdata->get_context_loss_count(up->dev); } -static inline void serial_omap_enable_wakeirq(struct uart_omap_port *up, - bool enable) -{ - if (!up->wakeirq) - return; - - if (enable) - enable_irq(up->wakeirq); - else - disable_irq_nosync(up->wakeirq); -} - +/* REVISIT: Remove this when omap3 boots in device tree only mode */ static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable) { struct omap_uart_port_info *pdata = dev_get_platdata(up->dev); - if (enable == up->wakeups_enabled) - return; - - serial_omap_enable_wakeirq(up, enable); - up->wakeups_enabled = enable; - if (!pdata || !pdata->enable_wakeup) return; @@ -750,13 +733,11 @@ static int serial_omap_startup(struct uart_port *port) /* Optional wake-up IRQ */ if (up->wakeirq) { - retval = request_irq(up->wakeirq, serial_omap_irq, - up->port.irqflags, up->name, up); + retval = dev_pm_set_dedicated_wake_irq(up->dev, up->wakeirq); if (retval) { free_irq(up->port.irq, up); return retval; } - disable_irq(up->wakeirq); } dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line); @@ -845,8 +826,7 @@ static void serial_omap_shutdown(struct uart_port *port) pm_runtime_mark_last_busy(up->dev); pm_runtime_put_autosuspend(up->dev); free_irq(up->port.irq, up); - if (up->wakeirq) - free_irq(up->wakeirq, up); + dev_pm_clear_wake_irq(up->dev); } static void serial_omap_uart_qos_work(struct work_struct *work) @@ -1139,13 +1119,6 @@ serial_omap_pm(struct uart_port *port, unsigned int state, serial_out(up, UART_EFR, efr); serial_out(up, UART_LCR, 0); - if (!device_may_wakeup(up->dev)) { - if (!state) - pm_runtime_forbid(up->dev); - else - pm_runtime_allow(up->dev); - } - pm_runtime_mark_last_busy(up->dev); pm_runtime_put_autosuspend(up->dev); } diff 
--git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 262647bbc614..241fafde42cb 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -1,3 +1,4 @@ + # # Watchdog device configuration # @@ -96,6 +97,15 @@ config DA9063_WATCHDOG This driver can be built as a module. The module name is da9063_wdt. +config DA9062_WATCHDOG + tristate "Dialog DA9062 Watchdog" + depends on MFD_DA9062 + select WATCHDOG_CORE + help + Support for the watchdog in the DA9062 PMIC. + + This driver can be built as a module. The module name is da9062_wdt. + config GPIO_WATCHDOG tristate "Watchdog device controlled through GPIO-line" depends on OF_GPIO @@ -104,6 +114,17 @@ config GPIO_WATCHDOG If you say yes here you get support for watchdog device controlled through GPIO-line. +config GPIO_WATCHDOG_ARCH_INITCALL + bool "Register the watchdog as early as possible" + depends on GPIO_WATCHDOG=y + help + In some situations, the default initcall level (module_init) + in not early enough in the boot process to avoid the watchdog + to be triggered. + If you say yes here, the initcall level would be raised to + arch_initcall. + If in doubt, say N. + config MENF21BMC_WATCHDOG tristate "MEN 14F021P00 BMC Watchdog" depends on MFD_MENF21BMC @@ -169,6 +190,7 @@ config AT91SAM9X_WATCHDOG config CADENCE_WATCHDOG tristate "Cadence Watchdog Timer" + depends on HAS_IOMEM select WATCHDOG_CORE help Say Y here if you want to include support for the watchdog @@ -408,7 +430,7 @@ config TS72XX_WATCHDOG config MAX63XX_WATCHDOG tristate "Max63xx watchdog" - depends on ARM && HAS_IOMEM + depends on HAS_IOMEM select WATCHDOG_CORE help Support for memory mapped max63{69,70,71,72,73,74} watchdog timer. @@ -526,6 +548,16 @@ config MEDIATEK_WATCHDOG To compile this driver as a module, choose M here: the module will be called mtk_wdt. +config DIGICOLOR_WATCHDOG + tristate "Conexant Digicolor SoCs watchdog support" + depends on ARCH_DIGICOLOR + select WATCHDOG_CORE + help + Say Y here to include support for the watchdog timer + in Conexant Digicolor SoCs. + To compile this driver as a module, choose M here: the + module will be called digicolor_wdt. + # AVR32 Architecture config AT32AP700X_WDT @@ -1355,7 +1387,7 @@ config BOOKE_WDT_DEFAULT_TIMEOUT config MEN_A21_WDT tristate "MEN A21 VME CPU Carrier Board Watchdog Timer" select WATCHDOG_CORE - depends on GPIOLIB + depends on GPIOLIB || COMPILE_TEST help Watchdog driver for MEN A21 VMEbus CPU Carrier Boards. 
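Editorial aside: the GPIO_WATCHDOG_ARCH_INITCALL option added above works by switching the driver's registration from the usual module_init level to arch_initcall, which runs earlier in boot. A minimal sketch of that pattern follows (hypothetical "example_wdt" names and config symbol, not part of this patch; the real change is the gpio_wdt.c hunk further down):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_wdt_probe(struct platform_device *pdev)
{
        return 0;       /* claim and service the hardware here */
}

static struct platform_driver example_wdt_driver = {
        .probe  = example_wdt_probe,
        .driver = {
                .name = "example-wdt",
        },
};

#ifdef CONFIG_EXAMPLE_WDT_ARCH_INITCALL
/* early registration: runs at arch_initcall time, before subsys_initcall */
static int __init example_wdt_init(void)
{
        return platform_driver_register(&example_wdt_driver);
}
arch_initcall(example_wdt_init);
#else
/* default: module_init()-level registration */
module_platform_driver(example_wdt_driver);
#endif

MODULE_LICENSE("GPL");

Registering that early is also what motivates the deferred-registration change to watchdog_core.c later in this patch, since the watchdog character device cannot be created before the misc subsystem is up.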
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index d98768c7d928..59ea9a1b8e76 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -65,6 +65,7 @@ obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o obj-$(CONFIG_MEDIATEK_WATCHDOG) += mtk_wdt.o +obj-$(CONFIG_DIGICOLOR_WATCHDOG) += digicolor_wdt.o # AVR32 Architecture obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o @@ -180,6 +181,7 @@ obj-$(CONFIG_XEN_WDT) += xen_wdt.o # Architecture Independent obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o +obj-$(CONFIG_DA9062_WATCHDOG) += da9062_wdt.o obj-$(CONFIG_DA9063_WATCHDOG) += da9063_wdt.o obj-$(CONFIG_GPIO_WATCHDOG) += gpio_wdt.o obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c index 1443b3c391de..e4698f7c5f93 100644 --- a/drivers/watchdog/at91sam9_wdt.c +++ b/drivers/watchdog/at91sam9_wdt.c @@ -40,9 +40,9 @@ #define DRV_NAME "AT91SAM9 Watchdog" #define wdt_read(wdt, field) \ - __raw_readl((wdt)->base + (field)) + readl_relaxed((wdt)->base + (field)) #define wdt_write(wtd, field, val) \ - __raw_writel((val), (wdt)->base + (field)) + writel_relaxed((val), (wdt)->base + (field)) /* AT91SAM9 watchdog runs a 12bit counter @ 256Hz, * use this to convert a watchdog diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c new file mode 100644 index 000000000000..b3a870ce85be --- /dev/null +++ b/drivers/watchdog/da9062_wdt.c @@ -0,0 +1,253 @@ +/* + * da9062_wdt.c - WDT device driver for DA9062 + * Copyright (C) 2015 Dialog Semiconductor Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/watchdog.h> +#include <linux/platform_device.h> +#include <linux/uaccess.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/jiffies.h> +#include <linux/mfd/da9062/registers.h> +#include <linux/mfd/da9062/core.h> +#include <linux/regmap.h> +#include <linux/of.h> + +static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 }; +#define DA9062_TWDSCALE_DISABLE 0 +#define DA9062_TWDSCALE_MIN 1 +#define DA9062_TWDSCALE_MAX (ARRAY_SIZE(wdt_timeout) - 1) +#define DA9062_WDT_MIN_TIMEOUT wdt_timeout[DA9062_TWDSCALE_MIN] +#define DA9062_WDT_MAX_TIMEOUT wdt_timeout[DA9062_TWDSCALE_MAX] +#define DA9062_WDG_DEFAULT_TIMEOUT wdt_timeout[DA9062_TWDSCALE_MAX-1] +#define DA9062_RESET_PROTECTION_MS 300 + +struct da9062_watchdog { + struct da9062 *hw; + struct watchdog_device wdtdev; + unsigned long j_time_stamp; +}; + +static void da9062_set_window_start(struct da9062_watchdog *wdt) +{ + wdt->j_time_stamp = jiffies; +} + +static void da9062_apply_window_protection(struct da9062_watchdog *wdt) +{ + unsigned long delay = msecs_to_jiffies(DA9062_RESET_PROTECTION_MS); + unsigned long timeout = wdt->j_time_stamp + delay; + unsigned long now = jiffies; + unsigned int diff_ms; + + /* if time-limit has not elapsed then wait for remainder */ + if (time_before(now, timeout)) { + diff_ms = jiffies_to_msecs(timeout-now); + dev_dbg(wdt->hw->dev, + "Kicked too quickly. Delaying %u msecs\n", diff_ms); + msleep(diff_ms); + } +} + +static unsigned int da9062_wdt_timeout_to_sel(unsigned int secs) +{ + unsigned int i; + + for (i = DA9062_TWDSCALE_MIN; i <= DA9062_TWDSCALE_MAX; i++) { + if (wdt_timeout[i] >= secs) + return i; + } + + return DA9062_TWDSCALE_MAX; +} + +static int da9062_reset_watchdog_timer(struct da9062_watchdog *wdt) +{ + int ret; + + da9062_apply_window_protection(wdt); + + ret = regmap_update_bits(wdt->hw->regmap, + DA9062AA_CONTROL_F, + DA9062AA_WATCHDOG_MASK, + DA9062AA_WATCHDOG_MASK); + + da9062_set_window_start(wdt); + + return ret; +} + +static int da9062_wdt_update_timeout_register(struct da9062_watchdog *wdt, + unsigned int regval) +{ + struct da9062 *chip = wdt->hw; + int ret; + + ret = da9062_reset_watchdog_timer(wdt); + if (ret) + return ret; + + return regmap_update_bits(chip->regmap, + DA9062AA_CONTROL_D, + DA9062AA_TWDSCALE_MASK, + regval); +} + +static int da9062_wdt_start(struct watchdog_device *wdd) +{ + struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd); + unsigned int selector; + int ret; + + selector = da9062_wdt_timeout_to_sel(wdt->wdtdev.timeout); + ret = da9062_wdt_update_timeout_register(wdt, selector); + if (ret) + dev_err(wdt->hw->dev, "Watchdog failed to start (err = %d)\n", + ret); + + return ret; +} + +static int da9062_wdt_stop(struct watchdog_device *wdd) +{ + struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd); + int ret; + + ret = da9062_reset_watchdog_timer(wdt); + if (ret) { + dev_err(wdt->hw->dev, "Failed to ping the watchdog (err = %d)\n", + ret); + return ret; + } + + ret = regmap_update_bits(wdt->hw->regmap, + DA9062AA_CONTROL_D, + DA9062AA_TWDSCALE_MASK, + DA9062_TWDSCALE_DISABLE); + if (ret) + dev_err(wdt->hw->dev, "Watchdog failed to stop (err = %d)\n", + ret); + + return ret; +} + +static int da9062_wdt_ping(struct watchdog_device *wdd) +{ + struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd); + int ret; + + ret = da9062_reset_watchdog_timer(wdt); + if (ret) + dev_err(wdt->hw->dev, "Failed to ping the watchdog (err = %d)\n", + ret); + + return 
ret; +} + +static int da9062_wdt_set_timeout(struct watchdog_device *wdd, + unsigned int timeout) +{ + struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd); + unsigned int selector; + int ret; + + selector = da9062_wdt_timeout_to_sel(timeout); + ret = da9062_wdt_update_timeout_register(wdt, selector); + if (ret) + dev_err(wdt->hw->dev, "Failed to set watchdog timeout (err = %d)\n", + ret); + else + wdd->timeout = wdt_timeout[selector]; + + return ret; +} + +static const struct watchdog_info da9062_watchdog_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, + .identity = "DA9062 WDT", +}; + +static const struct watchdog_ops da9062_watchdog_ops = { + .owner = THIS_MODULE, + .start = da9062_wdt_start, + .stop = da9062_wdt_stop, + .ping = da9062_wdt_ping, + .set_timeout = da9062_wdt_set_timeout, +}; + +static int da9062_wdt_probe(struct platform_device *pdev) +{ + int ret; + struct da9062 *chip; + struct da9062_watchdog *wdt; + + chip = dev_get_drvdata(pdev->dev.parent); + if (!chip) + return -EINVAL; + + wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); + if (!wdt) + return -ENOMEM; + + wdt->hw = chip; + + wdt->wdtdev.info = &da9062_watchdog_info; + wdt->wdtdev.ops = &da9062_watchdog_ops; + wdt->wdtdev.min_timeout = DA9062_WDT_MIN_TIMEOUT; + wdt->wdtdev.max_timeout = DA9062_WDT_MAX_TIMEOUT; + wdt->wdtdev.timeout = DA9062_WDG_DEFAULT_TIMEOUT; + wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS; + + watchdog_set_drvdata(&wdt->wdtdev, wdt); + dev_set_drvdata(&pdev->dev, wdt); + + ret = watchdog_register_device(&wdt->wdtdev); + if (ret < 0) { + dev_err(wdt->hw->dev, + "watchdog registration failed (%d)\n", ret); + return ret; + } + + da9062_set_window_start(wdt); + + ret = da9062_wdt_ping(&wdt->wdtdev); + if (ret < 0) + watchdog_unregister_device(&wdt->wdtdev); + + return ret; +} + +static int da9062_wdt_remove(struct platform_device *pdev) +{ + struct da9062_watchdog *wdt = dev_get_drvdata(&pdev->dev); + + watchdog_unregister_device(&wdt->wdtdev); + return 0; +} + +static struct platform_driver da9062_wdt_driver = { + .probe = da9062_wdt_probe, + .remove = da9062_wdt_remove, + .driver = { + .name = "da9062-watchdog", + }, +}; +module_platform_driver(da9062_wdt_driver); + +MODULE_AUTHOR("S Twiss <[email protected]>"); +MODULE_DESCRIPTION("WDT device driver for Dialog DA9062"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:da9062-watchdog"); diff --git a/drivers/watchdog/digicolor_wdt.c b/drivers/watchdog/digicolor_wdt.c new file mode 100644 index 000000000000..31d8e4936611 --- /dev/null +++ b/drivers/watchdog/digicolor_wdt.c @@ -0,0 +1,205 @@ +/* + * Watchdog driver for Conexant Digicolor + * + * Copyright (C) 2015 Paradox Innovation Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
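/*
 * Editorial aside on the DA9062 driver above: the hardware only supports
 * the discrete periods in wdt_timeout[], so da9062_wdt_timeout_to_sel()
 * rounds a requested timeout up to the next supported TWDSCALE step and
 * da9062_wdt_set_timeout() reports the rounded value back in wdd->timeout.
 * The sketch below mirrors that rounding in standalone user-space C; it is
 * illustration only and not part of the patch.
 */
#include <stdio.h>

static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 };
#define TWDSCALE_MIN 1
#define TWDSCALE_MAX 7

/* same loop as da9062_wdt_timeout_to_sel(): first selector >= request */
static unsigned int timeout_to_sel(unsigned int secs)
{
        unsigned int i;

        for (i = TWDSCALE_MIN; i <= TWDSCALE_MAX; i++)
                if (wdt_timeout[i] >= secs)
                        return i;
        return TWDSCALE_MAX;
}

int main(void)
{
        /* a 10 s request yields selector 4, i.e. an effective 16 s period */
        unsigned int sel = timeout_to_sel(10);

        printf("TWDSCALE %u -> %u s\n", sel, wdt_timeout[sel]);
        return 0;
}

/*
 * Note also that every kick goes through da9062_apply_window_protection(),
 * so a ping arriving within DA9062_RESET_PROTECTION_MS (300 ms) of the
 * previous one is simply delayed with msleep() rather than rejected.
 */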
+ */ + +#include <linux/types.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/clk.h> +#include <linux/watchdog.h> +#include <linux/reboot.h> +#include <linux/platform_device.h> +#include <linux/of_address.h> + +#define TIMER_A_CONTROL 0 +#define TIMER_A_COUNT 4 + +#define TIMER_A_ENABLE_COUNT BIT(0) +#define TIMER_A_ENABLE_WATCHDOG BIT(1) + +struct dc_wdt { + void __iomem *base; + struct clk *clk; + struct notifier_block restart_handler; + spinlock_t lock; +}; + +static unsigned timeout; +module_param(timeout, uint, 0); +MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds"); + +static void dc_wdt_set(struct dc_wdt *wdt, u32 ticks) +{ + unsigned long flags; + + spin_lock_irqsave(&wdt->lock, flags); + + writel_relaxed(0, wdt->base + TIMER_A_CONTROL); + writel_relaxed(ticks, wdt->base + TIMER_A_COUNT); + writel_relaxed(TIMER_A_ENABLE_COUNT | TIMER_A_ENABLE_WATCHDOG, + wdt->base + TIMER_A_CONTROL); + + spin_unlock_irqrestore(&wdt->lock, flags); +} + +static int dc_restart_handler(struct notifier_block *this, unsigned long mode, + void *cmd) +{ + struct dc_wdt *wdt = container_of(this, struct dc_wdt, restart_handler); + + dc_wdt_set(wdt, 1); + /* wait for reset to assert... */ + mdelay(500); + + return NOTIFY_DONE; +} + +static int dc_wdt_start(struct watchdog_device *wdog) +{ + struct dc_wdt *wdt = watchdog_get_drvdata(wdog); + + dc_wdt_set(wdt, wdog->timeout * clk_get_rate(wdt->clk)); + + return 0; +} + +static int dc_wdt_stop(struct watchdog_device *wdog) +{ + struct dc_wdt *wdt = watchdog_get_drvdata(wdog); + + writel_relaxed(0, wdt->base + TIMER_A_CONTROL); + + return 0; +} + +static int dc_wdt_set_timeout(struct watchdog_device *wdog, unsigned int t) +{ + struct dc_wdt *wdt = watchdog_get_drvdata(wdog); + + dc_wdt_set(wdt, t * clk_get_rate(wdt->clk)); + wdog->timeout = t; + + return 0; +} + +static unsigned int dc_wdt_get_timeleft(struct watchdog_device *wdog) +{ + struct dc_wdt *wdt = watchdog_get_drvdata(wdog); + uint32_t count = readl_relaxed(wdt->base + TIMER_A_COUNT); + + return count / clk_get_rate(wdt->clk); +} + +static struct watchdog_ops dc_wdt_ops = { + .owner = THIS_MODULE, + .start = dc_wdt_start, + .stop = dc_wdt_stop, + .set_timeout = dc_wdt_set_timeout, + .get_timeleft = dc_wdt_get_timeleft, +}; + +static struct watchdog_info dc_wdt_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE + | WDIOF_KEEPALIVEPING, + .identity = "Conexant Digicolor Watchdog", +}; + +static struct watchdog_device dc_wdt_wdd = { + .info = &dc_wdt_info, + .ops = &dc_wdt_ops, + .min_timeout = 1, +}; + +static int dc_wdt_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct dc_wdt *wdt; + int ret; + + wdt = devm_kzalloc(dev, sizeof(struct dc_wdt), GFP_KERNEL); + if (!wdt) + return -ENOMEM; + platform_set_drvdata(pdev, wdt); + + wdt->base = of_iomap(np, 0); + if (!wdt->base) { + dev_err(dev, "Failed to remap watchdog regs"); + return -ENODEV; + } + + wdt->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(wdt->clk)) { + ret = PTR_ERR(wdt->clk); + goto err_iounmap; + } + dc_wdt_wdd.max_timeout = U32_MAX / clk_get_rate(wdt->clk); + dc_wdt_wdd.timeout = dc_wdt_wdd.max_timeout; + + spin_lock_init(&wdt->lock); + + watchdog_set_drvdata(&dc_wdt_wdd, wdt); + watchdog_init_timeout(&dc_wdt_wdd, timeout, dev); + ret = watchdog_register_device(&dc_wdt_wdd); + if (ret) { + dev_err(dev, "Failed to register watchdog device"); + goto err_iounmap; + } + + wdt->restart_handler.notifier_call = 
dc_restart_handler; + wdt->restart_handler.priority = 128; + ret = register_restart_handler(&wdt->restart_handler); + if (ret) + dev_warn(&pdev->dev, "cannot register restart handler\n"); + + return 0; + +err_iounmap: + iounmap(wdt->base); + return ret; +} + +static int dc_wdt_remove(struct platform_device *pdev) +{ + struct dc_wdt *wdt = platform_get_drvdata(pdev); + + unregister_restart_handler(&wdt->restart_handler); + watchdog_unregister_device(&dc_wdt_wdd); + iounmap(wdt->base); + + return 0; +} + +static void dc_wdt_shutdown(struct platform_device *pdev) +{ + dc_wdt_stop(&dc_wdt_wdd); +} + +static const struct of_device_id dc_wdt_of_match[] = { + { .compatible = "cnxt,cx92755-wdt", }, + {}, +}; +MODULE_DEVICE_TABLE(of, dc_wdt_of_match); + +static struct platform_driver dc_wdt_driver = { + .probe = dc_wdt_probe, + .remove = dc_wdt_remove, + .shutdown = dc_wdt_shutdown, + .driver = { + .name = "digicolor-wdt", + .of_match_table = dc_wdt_of_match, + }, +}; +module_platform_driver(dc_wdt_driver); + +MODULE_AUTHOR("Baruch Siach <[email protected]>"); +MODULE_DESCRIPTION("Driver for Conexant Digicolor watchdog timer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index d0bb9499d12c..6ea0634345e9 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -35,7 +35,6 @@ #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/reboot.h> -#include <linux/spinlock.h> #include <linux/timer.h> #include <linux/uaccess.h> #include <linux/watchdog.h> @@ -61,7 +60,6 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " #define WDT_TIMEOUT (HZ / 2) static struct { - spinlock_t lock; void __iomem *regs; struct clk *clk; unsigned long in_use; @@ -177,7 +175,6 @@ static int dw_wdt_open(struct inode *inode, struct file *filp) /* Make sure we don't get unloaded. */ __module_get(THIS_MODULE); - spin_lock(&dw_wdt.lock); if (!dw_wdt_is_enabled()) { /* * The watchdog is not currently enabled. 
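/*
 * Editorial sketch of the restart-handler pattern used just above by the
 * digicolor driver (and again by the imgpdc driver later in this patch):
 * a notifier block at priority 128, the default level, whose callback
 * forces an immediate reset. Names prefixed "example_" are hypothetical.
 */
#include <linux/notifier.h>
#include <linux/reboot.h>

static int example_wdt_restart(struct notifier_block *this,
                               unsigned long mode, void *cmd)
{
        /* load the shortest possible watchdog period here, then give the
         * hardware time (e.g. mdelay) to assert the reset line */
        return NOTIFY_DONE;
}

static struct notifier_block example_restart_handler = {
        .notifier_call  = example_wdt_restart,
        .priority       = 128,
};

/* probe():  register_restart_handler(&example_restart_handler);
 * remove(): unregister_restart_handler(&example_restart_handler); */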
Set the timeout to @@ -190,8 +187,6 @@ static int dw_wdt_open(struct inode *inode, struct file *filp) dw_wdt_set_next_heartbeat(); - spin_unlock(&dw_wdt.lock); - return nonseekable_open(inode, filp); } @@ -220,6 +215,7 @@ static ssize_t dw_wdt_write(struct file *filp, const char __user *buf, } dw_wdt_set_next_heartbeat(); + dw_wdt_keepalive(); mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT); return len; @@ -348,8 +344,6 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) if (ret) return ret; - spin_lock_init(&dw_wdt.lock); - ret = misc_register(&dw_wdt_miscdev); if (ret) goto out_disable_clk; diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c index cbc313d37c59..1687cc2d7122 100644 --- a/drivers/watchdog/gpio_wdt.c +++ b/drivers/watchdog/gpio_wdt.c @@ -267,7 +267,16 @@ static struct platform_driver gpio_wdt_driver = { .probe = gpio_wdt_probe, .remove = gpio_wdt_remove, }; + +#ifdef CONFIG_GPIO_WATCHDOG_ARCH_INITCALL +static int __init gpio_wdt_init(void) +{ + return platform_driver_register(&gpio_wdt_driver); +} +arch_initcall(gpio_wdt_init); +#else module_platform_driver(gpio_wdt_driver); +#endif MODULE_AUTHOR("Alexander Shiyan <[email protected]>"); MODULE_DESCRIPTION("GPIO Watchdog"); diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index ada3e44f9932..286369d4f0f5 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -588,7 +588,7 @@ static long hpwdt_ioctl(struct file *file, unsigned int cmd, { void __user *argp = (void __user *)arg; int __user *p = argp; - int new_margin; + int new_margin, options; int ret = -ENOTTY; switch (cmd) { @@ -608,6 +608,20 @@ static long hpwdt_ioctl(struct file *file, unsigned int cmd, ret = 0; break; + case WDIOC_SETOPTIONS: + ret = get_user(options, p); + if (ret) + break; + + if (options & WDIOS_DISABLECARD) + hpwdt_stop(); + + if (options & WDIOS_ENABLECARD) { + hpwdt_start(); + hpwdt_ping(); + } + break; + case WDIOC_SETTIMEOUT: ret = get_user(new_margin, p); if (ret) diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c index 0deaa4f971f5..0f73621827ab 100644 --- a/drivers/watchdog/imgpdc_wdt.c +++ b/drivers/watchdog/imgpdc_wdt.c @@ -9,6 +9,35 @@ * * Based on drivers/watchdog/sunxi_wdt.c Copyright (c) 2013 Carlo Caione * 2012 Henrik Nordstrom + * + * Notes + * ----- + * The timeout value is rounded to the next power of two clock cycles. + * This is configured using the PDC_WDT_CONFIG register, according to this + * formula: + * + * timeout = 2^(delay + 1) clock cycles + * + * Where 'delay' is the value written in PDC_WDT_CONFIG register. + * + * Therefore, the hardware only allows to program watchdog timeouts, expressed + * as a power of two number of watchdog clock cycles. The current implementation + * guarantees that the actual watchdog timeout will be _at least_ the value + * programmed in the imgpdg_wdt driver. + * + * The following table shows how the user-configured timeout relates + * to the actual hardware timeout (watchdog clock @ 40000 Hz): + * + * input timeout | WD_DELAY | actual timeout + * ----------------------------------- + * 10 | 18 | 13 seconds + * 20 | 19 | 26 seconds + * 30 | 20 | 52 seconds + * 60 | 21 | 104 seconds + * + * Albeit coarse, this granularity would suffice most watchdog uses. + * If the platform allows it, the user should be able to change the watchdog + * clock rate and achieve a finer timeout granularity. 
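/*
 * Editorial worked example of the rounding described in the imgpdc notes
 * above, assuming the same 40000 Hz watchdog clock as the table (not part
 * of the patch). A requested timeout of 10 s gives:
 *
 *   cycles = 10 * 40000               = 400000
 *   delay  = order_base_2(400000) - 1 = 19 - 1 = 18
 *   actual = 2^(18 + 1) cycles        = 524288 / 40000 ~= 13 s
 *
 * which matches the "10 -> WD_DELAY 18 -> 13 seconds" row: the effective
 * timeout is always rounded up to the next power-of-two number of clock
 * cycles, never down.
 */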
*/ #include <linux/clk.h> @@ -16,6 +45,7 @@ #include <linux/log2.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/reboot.h> #include <linux/slab.h> #include <linux/watchdog.h> @@ -42,7 +72,7 @@ #define PDC_WDT_MIN_TIMEOUT 1 #define PDC_WDT_DEF_TIMEOUT 64 -static int heartbeat = PDC_WDT_DEF_TIMEOUT; +static int heartbeat; module_param(heartbeat, int, 0); MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds " "(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")"); @@ -57,6 +87,7 @@ struct pdc_wdt_dev { struct clk *wdt_clk; struct clk *sys_clk; void __iomem *base; + struct notifier_block restart_handler; }; static int pdc_wdt_keepalive(struct watchdog_device *wdt_dev) @@ -84,18 +115,24 @@ static int pdc_wdt_stop(struct watchdog_device *wdt_dev) return 0; } +static void __pdc_wdt_set_timeout(struct pdc_wdt_dev *wdt) +{ + unsigned long clk_rate = clk_get_rate(wdt->wdt_clk); + unsigned int val; + + val = readl(wdt->base + PDC_WDT_CONFIG) & ~PDC_WDT_CONFIG_DELAY_MASK; + val |= order_base_2(wdt->wdt_dev.timeout * clk_rate) - 1; + writel(val, wdt->base + PDC_WDT_CONFIG); +} + static int pdc_wdt_set_timeout(struct watchdog_device *wdt_dev, unsigned int new_timeout) { - unsigned int val; struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev); - unsigned long clk_rate = clk_get_rate(wdt->wdt_clk); wdt->wdt_dev.timeout = new_timeout; - val = readl(wdt->base + PDC_WDT_CONFIG) & ~PDC_WDT_CONFIG_DELAY_MASK; - val |= order_base_2(new_timeout * clk_rate) - 1; - writel(val, wdt->base + PDC_WDT_CONFIG); + __pdc_wdt_set_timeout(wdt); return 0; } @@ -106,6 +143,8 @@ static int pdc_wdt_start(struct watchdog_device *wdt_dev) unsigned int val; struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev); + __pdc_wdt_set_timeout(wdt); + val = readl(wdt->base + PDC_WDT_CONFIG); val |= PDC_WDT_CONFIG_ENABLE; writel(val, wdt->base + PDC_WDT_CONFIG); @@ -128,8 +167,21 @@ static const struct watchdog_ops pdc_wdt_ops = { .set_timeout = pdc_wdt_set_timeout, }; +static int pdc_wdt_restart(struct notifier_block *this, unsigned long mode, + void *cmd) +{ + struct pdc_wdt_dev *wdt = container_of(this, struct pdc_wdt_dev, + restart_handler); + + /* Assert SOFT_RESET */ + writel(0x1, wdt->base + PDC_WDT_SOFT_RESET); + + return NOTIFY_OK; +} + static int pdc_wdt_probe(struct platform_device *pdev) { + u64 div; int ret, val; unsigned long clk_rate; struct resource *res; @@ -189,16 +241,15 @@ static int pdc_wdt_probe(struct platform_device *pdev) pdc_wdt->wdt_dev.info = &pdc_wdt_info; pdc_wdt->wdt_dev.ops = &pdc_wdt_ops; - pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK; + + div = 1ULL << (PDC_WDT_CONFIG_DELAY_MASK + 1); + do_div(div, clk_rate); + pdc_wdt->wdt_dev.max_timeout = div; + pdc_wdt->wdt_dev.timeout = PDC_WDT_DEF_TIMEOUT; pdc_wdt->wdt_dev.parent = &pdev->dev; watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt); - ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev); - if (ret < 0) { - pdc_wdt->wdt_dev.timeout = pdc_wdt->wdt_dev.max_timeout; - dev_warn(&pdev->dev, - "Initial timeout out of range! 
setting max timeout\n"); - } + watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev); pdc_wdt_stop(&pdc_wdt->wdt_dev); @@ -238,6 +289,13 @@ static int pdc_wdt_probe(struct platform_device *pdev) if (ret) goto disable_wdt_clk; + pdc_wdt->restart_handler.notifier_call = pdc_wdt_restart; + pdc_wdt->restart_handler.priority = 128; + ret = register_restart_handler(&pdc_wdt->restart_handler); + if (ret) + dev_warn(&pdev->dev, "failed to register restart handler: %d\n", + ret); + return 0; disable_wdt_clk: diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index 5e6d808d358a..0bb1a1d1b170 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c @@ -166,6 +166,8 @@ static int imx2_wdt_set_timeout(struct watchdog_device *wdog, { struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); + wdog->timeout = new_timeout; + regmap_update_bits(wdev->regmap, IMX2_WDT_WCR, IMX2_WDT_WCR_WT, WDOG_SEC_TO_COUNT(new_timeout)); return 0; @@ -256,8 +258,11 @@ static int __init imx2_wdt_probe(struct platform_device *pdev) wdog->ops = &imx2_wdt_ops; wdog->min_timeout = 1; wdog->max_timeout = IMX2_WDT_MAX_TIME; + wdog->parent = &pdev->dev; - clk_prepare_enable(wdev->clk); + ret = clk_prepare_enable(wdev->clk); + if (ret) + return ret; regmap_read(wdev->regmap, IMX2_WDT_WRSR, &val); wdog->bootstatus = val & IMX2_WDT_WRSR_TOUT ? WDIOF_CARDRESET : 0; @@ -286,7 +291,7 @@ static int __init imx2_wdt_probe(struct platform_device *pdev) ret = watchdog_register_device(wdog); if (ret) { dev_err(&pdev->dev, "cannot register watchdog device\n"); - return ret; + goto disable_clk; } wdev->restart_handler.notifier_call = imx2_restart_handler; @@ -299,6 +304,10 @@ static int __init imx2_wdt_probe(struct platform_device *pdev) wdog->timeout, nowayout); return 0; + +disable_clk: + clk_disable_unprepare(wdev->clk); + return ret; } static int __exit imx2_wdt_remove(struct platform_device *pdev) @@ -362,8 +371,11 @@ static int imx2_wdt_resume(struct device *dev) { struct watchdog_device *wdog = dev_get_drvdata(dev); struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); + int ret; - clk_prepare_enable(wdev->clk); + ret = clk_prepare_enable(wdev->clk); + if (ret) + return ret; if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) { /* diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c index 08da3114accb..f36ca4be0720 100644 --- a/drivers/watchdog/max63xx_wdt.c +++ b/drivers/watchdog/max63xx_wdt.c @@ -39,10 +39,22 @@ static bool nowayout = WATCHDOG_NOWAYOUT; #define MAX6369_WDSET (7 << 0) #define MAX6369_WDI (1 << 3) -static DEFINE_SPINLOCK(io_lock); +#define MAX6369_WDSET_DISABLED 3 static int nodelay; -static void __iomem *wdt_base; + +struct max63xx_wdt { + struct watchdog_device wdd; + const struct max63xx_timeout *timeout; + + /* memory mapping */ + void __iomem *base; + spinlock_t lock; + + /* WDI and WSET bits write access routines */ + void (*ping)(struct max63xx_wdt *wdt); + void (*set)(struct max63xx_wdt *wdt, u8 set); +}; /* * The timeout values used are actually the absolute minimum the chip @@ -59,25 +71,25 @@ static void __iomem *wdt_base; /* Timeouts in second */ struct max63xx_timeout { - u8 wdset; - u8 tdelay; - u8 twd; + const u8 wdset; + const u8 tdelay; + const u8 twd; }; -static struct max63xx_timeout max6369_table[] = { +static const struct max63xx_timeout max6369_table[] = { { 5, 1, 1 }, { 6, 10, 10 }, { 7, 60, 60 }, { }, }; -static struct max63xx_timeout max6371_table[] = { +static const struct max63xx_timeout max6371_table[] = { { 6, 
60, 3 }, { 7, 60, 60 }, { }, }; -static struct max63xx_timeout max6373_table[] = { +static const struct max63xx_timeout max6373_table[] = { { 2, 60, 1 }, { 5, 0, 1 }, { 1, 3, 3 }, @@ -86,8 +98,6 @@ static struct max63xx_timeout max6373_table[] = { { }, }; -static struct max63xx_timeout *current_timeout; - static struct max63xx_timeout * max63xx_select_timeout(struct max63xx_timeout *table, int value) { @@ -108,59 +118,32 @@ max63xx_select_timeout(struct max63xx_timeout *table, int value) static int max63xx_wdt_ping(struct watchdog_device *wdd) { - u8 val; - - spin_lock(&io_lock); + struct max63xx_wdt *wdt = watchdog_get_drvdata(wdd); - val = __raw_readb(wdt_base); - - __raw_writeb(val | MAX6369_WDI, wdt_base); - __raw_writeb(val & ~MAX6369_WDI, wdt_base); - - spin_unlock(&io_lock); + wdt->ping(wdt); return 0; } static int max63xx_wdt_start(struct watchdog_device *wdd) { - struct max63xx_timeout *entry = watchdog_get_drvdata(wdd); - u8 val; + struct max63xx_wdt *wdt = watchdog_get_drvdata(wdd); - spin_lock(&io_lock); - - val = __raw_readb(wdt_base); - val &= ~MAX6369_WDSET; - val |= entry->wdset; - __raw_writeb(val, wdt_base); - - spin_unlock(&io_lock); + wdt->set(wdt, wdt->timeout->wdset); /* check for a edge triggered startup */ - if (entry->tdelay == 0) - max63xx_wdt_ping(wdd); + if (wdt->timeout->tdelay == 0) + wdt->ping(wdt); return 0; } static int max63xx_wdt_stop(struct watchdog_device *wdd) { - u8 val; + struct max63xx_wdt *wdt = watchdog_get_drvdata(wdd); - spin_lock(&io_lock); - - val = __raw_readb(wdt_base); - val &= ~MAX6369_WDSET; - val |= 3; - __raw_writeb(val, wdt_base); - - spin_unlock(&io_lock); + wdt->set(wdt, MAX6369_WDSET_DISABLED); return 0; } -static const struct watchdog_info max63xx_wdt_info = { - .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, - .identity = "max63xx Watchdog", -}; - static const struct watchdog_ops max63xx_wdt_ops = { .owner = THIS_MODULE, .start = max63xx_wdt_start, @@ -168,53 +151,108 @@ static const struct watchdog_ops max63xx_wdt_ops = { .ping = max63xx_wdt_ping, }; -static struct watchdog_device max63xx_wdt_dev = { - .info = &max63xx_wdt_info, - .ops = &max63xx_wdt_ops, +static const struct watchdog_info max63xx_wdt_info = { + .options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, + .identity = "max63xx Watchdog", }; +static void max63xx_mmap_ping(struct max63xx_wdt *wdt) +{ + u8 val; + + spin_lock(&wdt->lock); + + val = __raw_readb(wdt->base); + + __raw_writeb(val | MAX6369_WDI, wdt->base); + __raw_writeb(val & ~MAX6369_WDI, wdt->base); + + spin_unlock(&wdt->lock); +} + +static void max63xx_mmap_set(struct max63xx_wdt *wdt, u8 set) +{ + u8 val; + + spin_lock(&wdt->lock); + + val = __raw_readb(wdt->base); + val &= ~MAX6369_WDSET; + val |= set & MAX6369_WDSET; + __raw_writeb(val, wdt->base); + + spin_unlock(&wdt->lock); +} + +static int max63xx_mmap_init(struct platform_device *p, struct max63xx_wdt *wdt) +{ + struct resource *mem = platform_get_resource(p, IORESOURCE_MEM, 0); + + wdt->base = devm_ioremap_resource(&p->dev, mem); + if (IS_ERR(wdt->base)) + return PTR_ERR(wdt->base); + + spin_lock_init(&wdt->lock); + + wdt->ping = max63xx_mmap_ping; + wdt->set = max63xx_mmap_set; + return 0; +} + static int max63xx_wdt_probe(struct platform_device *pdev) { - struct resource *wdt_mem; + struct max63xx_wdt *wdt; struct max63xx_timeout *table; + int err; + + wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); + if (!wdt) + return -ENOMEM; table = (struct max63xx_timeout *)pdev->id_entry->driver_data; if (heartbeat < 1 || heartbeat > 
MAX_HEARTBEAT) heartbeat = DEFAULT_HEARTBEAT; - dev_info(&pdev->dev, "requesting %ds heartbeat\n", heartbeat); - current_timeout = max63xx_select_timeout(table, heartbeat); - - if (!current_timeout) { - dev_err(&pdev->dev, "unable to satisfy heartbeat request\n"); + wdt->timeout = max63xx_select_timeout(table, heartbeat); + if (!wdt->timeout) { + dev_err(&pdev->dev, "unable to satisfy %ds heartbeat request\n", + heartbeat); return -EINVAL; } - dev_info(&pdev->dev, "using %ds heartbeat with %ds initial delay\n", - current_timeout->twd, current_timeout->tdelay); + err = max63xx_mmap_init(pdev, wdt); + if (err) + return err; + + platform_set_drvdata(pdev, &wdt->wdd); + watchdog_set_drvdata(&wdt->wdd, wdt); - heartbeat = current_timeout->twd; + wdt->wdd.parent = &pdev->dev; + wdt->wdd.timeout = wdt->timeout->twd; + wdt->wdd.info = &max63xx_wdt_info; + wdt->wdd.ops = &max63xx_wdt_ops; - wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - wdt_base = devm_ioremap_resource(&pdev->dev, wdt_mem); - if (IS_ERR(wdt_base)) - return PTR_ERR(wdt_base); + watchdog_set_nowayout(&wdt->wdd, nowayout); - max63xx_wdt_dev.timeout = heartbeat; - watchdog_set_nowayout(&max63xx_wdt_dev, nowayout); - watchdog_set_drvdata(&max63xx_wdt_dev, current_timeout); + err = watchdog_register_device(&wdt->wdd); + if (err) + return err; - return watchdog_register_device(&max63xx_wdt_dev); + dev_info(&pdev->dev, "using %ds heartbeat with %ds initial delay\n", + wdt->timeout->twd, wdt->timeout->tdelay); + return 0; } static int max63xx_wdt_remove(struct platform_device *pdev) { - watchdog_unregister_device(&max63xx_wdt_dev); + struct watchdog_device *wdd = platform_get_drvdata(pdev); + + watchdog_unregister_device(wdd); return 0; } -static struct platform_device_id max63xx_id_table[] = { +static const struct platform_device_id max63xx_id_table[] = { { "max6369_wdt", (kernel_ulong_t)max6369_table, }, { "max6370_wdt", (kernel_ulong_t)max6369_table, }, { "max6371_wdt", (kernel_ulong_t)max6371_table, }, diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c index 96dbba980579..d193a5e79c38 100644 --- a/drivers/watchdog/mena21_wdt.c +++ b/drivers/watchdog/mena21_wdt.c @@ -208,14 +208,15 @@ static int a21_wdt_probe(struct platform_device *pdev) else if (reset == 7) a21_wdt.bootstatus |= WDIOF_EXTERN2; + drv->wdt = a21_wdt; + dev_set_drvdata(&pdev->dev, drv); + ret = watchdog_register_device(&a21_wdt); if (ret) { dev_err(&pdev->dev, "Cannot register watchdog device\n"); goto err_register_wd; } - dev_set_drvdata(&pdev->dev, drv); - dev_info(&pdev->dev, "MEN A21 watchdog timer driver enabled\n"); return 0; diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c index 1e6be9e40577..de911c7e477c 100644 --- a/drivers/watchdog/omap_wdt.c +++ b/drivers/watchdog/omap_wdt.c @@ -53,7 +53,15 @@ static unsigned timer_margin; module_param(timer_margin, uint, 0); MODULE_PARM_DESC(timer_margin, "initial watchdog timeout (in seconds)"); +#define to_omap_wdt_dev(_wdog) container_of(_wdog, struct omap_wdt_dev, wdog) + +static bool early_enable; +module_param(early_enable, bool, 0); +MODULE_PARM_DESC(early_enable, + "Watchdog is started on module insertion (default=0)"); + struct omap_wdt_dev { + struct watchdog_device wdog; void __iomem *base; /* physical */ struct device *dev; bool omap_wdt_users; @@ -123,7 +131,7 @@ static void omap_wdt_set_timer(struct omap_wdt_dev *wdev, static int omap_wdt_start(struct watchdog_device *wdog) { - struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog); + struct omap_wdt_dev 
*wdev = to_omap_wdt_dev(wdog); void __iomem *base = wdev->base; mutex_lock(&wdev->lock); @@ -132,6 +140,13 @@ static int omap_wdt_start(struct watchdog_device *wdog) pm_runtime_get_sync(wdev->dev); + /* + * Make sure the watchdog is disabled. This is unfortunately required + * because writing to various registers with the watchdog running has no + * effect. + */ + omap_wdt_disable(wdev); + /* initialize prescaler */ while (readl_relaxed(base + OMAP_WATCHDOG_WPS) & 0x01) cpu_relax(); @@ -151,7 +166,7 @@ static int omap_wdt_start(struct watchdog_device *wdog) static int omap_wdt_stop(struct watchdog_device *wdog) { - struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog); + struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog); mutex_lock(&wdev->lock); omap_wdt_disable(wdev); @@ -163,7 +178,7 @@ static int omap_wdt_stop(struct watchdog_device *wdog) static int omap_wdt_ping(struct watchdog_device *wdog) { - struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog); + struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog); mutex_lock(&wdev->lock); omap_wdt_reload(wdev); @@ -175,7 +190,7 @@ static int omap_wdt_ping(struct watchdog_device *wdog) static int omap_wdt_set_timeout(struct watchdog_device *wdog, unsigned int timeout) { - struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog); + struct omap_wdt_dev *wdev = to_omap_wdt_dev(wdog); mutex_lock(&wdev->lock); omap_wdt_disable(wdev); @@ -188,6 +203,16 @@ static int omap_wdt_set_timeout(struct watchdog_device *wdog, return 0; } +static unsigned int omap_wdt_get_timeleft(struct watchdog_device *wdog) +{ + struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog); + void __iomem *base = wdev->base; + u32 value; + + value = readl_relaxed(base + OMAP_WATCHDOG_CRR); + return GET_WCCR_SECS(value); +} + static const struct watchdog_info omap_wdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .identity = "OMAP Watchdog", @@ -199,21 +224,16 @@ static const struct watchdog_ops omap_wdt_ops = { .stop = omap_wdt_stop, .ping = omap_wdt_ping, .set_timeout = omap_wdt_set_timeout, + .get_timeleft = omap_wdt_get_timeleft, }; static int omap_wdt_probe(struct platform_device *pdev) { struct omap_wd_timer_platform_data *pdata = dev_get_platdata(&pdev->dev); - struct watchdog_device *omap_wdt; struct resource *res; struct omap_wdt_dev *wdev; - u32 rs; int ret; - omap_wdt = devm_kzalloc(&pdev->dev, sizeof(*omap_wdt), GFP_KERNEL); - if (!omap_wdt) - return -ENOMEM; - wdev = devm_kzalloc(&pdev->dev, sizeof(*wdev), GFP_KERNEL); if (!wdev) return -ENOMEM; @@ -229,35 +249,30 @@ static int omap_wdt_probe(struct platform_device *pdev) if (IS_ERR(wdev->base)) return PTR_ERR(wdev->base); - omap_wdt->info = &omap_wdt_info; - omap_wdt->ops = &omap_wdt_ops; - omap_wdt->min_timeout = TIMER_MARGIN_MIN; - omap_wdt->max_timeout = TIMER_MARGIN_MAX; + wdev->wdog.info = &omap_wdt_info; + wdev->wdog.ops = &omap_wdt_ops; + wdev->wdog.min_timeout = TIMER_MARGIN_MIN; + wdev->wdog.max_timeout = TIMER_MARGIN_MAX; - if (timer_margin >= TIMER_MARGIN_MIN && - timer_margin <= TIMER_MARGIN_MAX) - omap_wdt->timeout = timer_margin; - else - omap_wdt->timeout = TIMER_MARGIN_DEFAULT; + if (watchdog_init_timeout(&wdev->wdog, timer_margin, &pdev->dev) < 0) + wdev->wdog.timeout = TIMER_MARGIN_DEFAULT; - watchdog_set_drvdata(omap_wdt, wdev); - watchdog_set_nowayout(omap_wdt, nowayout); + watchdog_set_nowayout(&wdev->wdog, nowayout); - platform_set_drvdata(pdev, omap_wdt); + platform_set_drvdata(pdev, wdev); pm_runtime_enable(wdev->dev); pm_runtime_get_sync(wdev->dev); - if (pdata && 
pdata->read_reset_sources) - rs = pdata->read_reset_sources(); - else - rs = 0; - omap_wdt->bootstatus = (rs & (1 << OMAP_MPU_WD_RST_SRC_ID_SHIFT)) ? - WDIOF_CARDRESET : 0; + if (pdata && pdata->read_reset_sources) { + u32 rs = pdata->read_reset_sources(); + if (rs & (1 << OMAP_MPU_WD_RST_SRC_ID_SHIFT)) + wdev->wdog.bootstatus = WDIOF_CARDRESET; + } omap_wdt_disable(wdev); - ret = watchdog_register_device(omap_wdt); + ret = watchdog_register_device(&wdev->wdog); if (ret) { pm_runtime_disable(wdev->dev); return ret; @@ -265,17 +280,19 @@ static int omap_wdt_probe(struct platform_device *pdev) pr_info("OMAP Watchdog Timer Rev 0x%02x: initial timeout %d sec\n", readl_relaxed(wdev->base + OMAP_WATCHDOG_REV) & 0xFF, - omap_wdt->timeout); + wdev->wdog.timeout); pm_runtime_put_sync(wdev->dev); + if (early_enable) + omap_wdt_start(&wdev->wdog); + return 0; } static void omap_wdt_shutdown(struct platform_device *pdev) { - struct watchdog_device *wdog = platform_get_drvdata(pdev); - struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog); + struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); mutex_lock(&wdev->lock); if (wdev->omap_wdt_users) { @@ -287,11 +304,10 @@ static void omap_wdt_shutdown(struct platform_device *pdev) static int omap_wdt_remove(struct platform_device *pdev) { - struct watchdog_device *wdog = platform_get_drvdata(pdev); - struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog); + struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); pm_runtime_disable(wdev->dev); - watchdog_unregister_device(wdog); + watchdog_unregister_device(&wdev->wdog); return 0; } @@ -306,8 +322,7 @@ static int omap_wdt_remove(struct platform_device *pdev) static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state) { - struct watchdog_device *wdog = platform_get_drvdata(pdev); - struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog); + struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); mutex_lock(&wdev->lock); if (wdev->omap_wdt_users) { @@ -321,8 +336,7 @@ static int omap_wdt_suspend(struct platform_device *pdev, pm_message_t state) static int omap_wdt_resume(struct platform_device *pdev) { - struct watchdog_device *wdog = platform_get_drvdata(pdev); - struct omap_wdt_dev *wdev = watchdog_get_drvdata(wdog); + struct omap_wdt_dev *wdev = platform_get_drvdata(pdev); mutex_lock(&wdev->lock); if (wdev->omap_wdt_users) { diff --git a/drivers/watchdog/omap_wdt.h b/drivers/watchdog/omap_wdt.h index 09b774cf75b9..42f31ec5e90d 100644 --- a/drivers/watchdog/omap_wdt.h +++ b/drivers/watchdog/omap_wdt.h @@ -50,5 +50,6 @@ #define PTV 0 /* prescale */ #define GET_WLDR_VAL(secs) (0xffffffff - ((secs) * (32768/(1<<PTV))) + 1) +#define GET_WCCR_SECS(val) ((0xffffffff - (val) + 1) / (32768/(1<<PTV))) #endif /* _OMAP_WATCHDOG_H */ diff --git a/drivers/watchdog/st_lpc_wdt.c b/drivers/watchdog/st_lpc_wdt.c index f32be155212a..6785afdc0fca 100644 --- a/drivers/watchdog/st_lpc_wdt.c +++ b/drivers/watchdog/st_lpc_wdt.c @@ -197,7 +197,7 @@ static int st_wdog_probe(struct platform_device *pdev) return -EINVAL; } - /* LPC can either run in RTC or WDT mode */ + /* LPC can either run as a Clocksource or in RTC or WDT mode */ if (mode != ST_LPC_MODE_WDT) return -ENODEV; diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c index cec9b559647d..1a8059455413 100644 --- a/drivers/watchdog/watchdog_core.c +++ b/drivers/watchdog/watchdog_core.c @@ -43,6 +43,45 @@ static DEFINE_IDA(watchdog_ida); static struct class *watchdog_class; +/* + * Deferred Registration infrastructure. 
+ * + * Sometimes watchdog drivers needs to be loaded as soon as possible, + * for example when it's impossible to disable it. To do so, + * raising the initcall level of the watchdog driver is a solution. + * But in such case, the miscdev is maybe not ready (subsys_initcall), and + * watchdog_core need miscdev to register the watchdog as a char device. + * + * The deferred registration infrastructure offer a way for the watchdog + * subsystem to register a watchdog properly, even before miscdev is ready. + */ + +static DEFINE_MUTEX(wtd_deferred_reg_mutex); +static LIST_HEAD(wtd_deferred_reg_list); +static bool wtd_deferred_reg_done; + +static int watchdog_deferred_registration_add(struct watchdog_device *wdd) +{ + list_add_tail(&wdd->deferred, + &wtd_deferred_reg_list); + return 0; +} + +static void watchdog_deferred_registration_del(struct watchdog_device *wdd) +{ + struct list_head *p, *n; + struct watchdog_device *wdd_tmp; + + list_for_each_safe(p, n, &wtd_deferred_reg_list) { + wdd_tmp = list_entry(p, struct watchdog_device, + deferred); + if (wdd_tmp == wdd) { + list_del(&wdd_tmp->deferred); + break; + } + } +} + static void watchdog_check_min_max_timeout(struct watchdog_device *wdd) { /* @@ -98,17 +137,7 @@ int watchdog_init_timeout(struct watchdog_device *wdd, } EXPORT_SYMBOL_GPL(watchdog_init_timeout); -/** - * watchdog_register_device() - register a watchdog device - * @wdd: watchdog device - * - * Register a watchdog device with the kernel so that the - * watchdog timer can be accessed from userspace. - * - * A zero is returned on success and a negative errno code for - * failure. - */ -int watchdog_register_device(struct watchdog_device *wdd) +static int __watchdog_register_device(struct watchdog_device *wdd) { int ret, id, devno; @@ -164,16 +193,33 @@ int watchdog_register_device(struct watchdog_device *wdd) return 0; } -EXPORT_SYMBOL_GPL(watchdog_register_device); /** - * watchdog_unregister_device() - unregister a watchdog device - * @wdd: watchdog device to unregister + * watchdog_register_device() - register a watchdog device + * @wdd: watchdog device * - * Unregister a watchdog device that was previously successfully - * registered with watchdog_register_device(). + * Register a watchdog device with the kernel so that the + * watchdog timer can be accessed from userspace. + * + * A zero is returned on success and a negative errno code for + * failure. */ -void watchdog_unregister_device(struct watchdog_device *wdd) + +int watchdog_register_device(struct watchdog_device *wdd) +{ + int ret; + + mutex_lock(&wtd_deferred_reg_mutex); + if (wtd_deferred_reg_done) + ret = __watchdog_register_device(wdd); + else + ret = watchdog_deferred_registration_add(wdd); + mutex_unlock(&wtd_deferred_reg_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(watchdog_register_device); + +static void __watchdog_unregister_device(struct watchdog_device *wdd) { int ret; int devno; @@ -189,8 +235,43 @@ void watchdog_unregister_device(struct watchdog_device *wdd) ida_simple_remove(&watchdog_ida, wdd->id); wdd->dev = NULL; } + +/** + * watchdog_unregister_device() - unregister a watchdog device + * @wdd: watchdog device to unregister + * + * Unregister a watchdog device that was previously successfully + * registered with watchdog_register_device(). 
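/*
 * Editorial sketch of the boot-time ordering the deferred-registration
 * code above is meant to handle (driver name is hypothetical, not part
 * of the patch):
 *
 *   arch_initcall:         example_wdt_probe()
 *                              -> watchdog_register_device(&wdd)
 *                                 misc core is not up yet, so wdd is only
 *                                 queued on wtd_deferred_reg_list
 *   subsys_initcall:       misc core initializes, miscdev becomes usable
 *   subsys_initcall_sync:  watchdog_init()
 *                              -> watchdog_deferred_registration()
 *                                 flushes the list and the char device
 *                                 finally appears
 *
 * After that point wtd_deferred_reg_done is true and any later
 * watchdog_register_device() call registers immediately.
 */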
+ */ + +void watchdog_unregister_device(struct watchdog_device *wdd) +{ + mutex_lock(&wtd_deferred_reg_mutex); + if (wtd_deferred_reg_done) + __watchdog_unregister_device(wdd); + else + watchdog_deferred_registration_del(wdd); + mutex_unlock(&wtd_deferred_reg_mutex); +} + EXPORT_SYMBOL_GPL(watchdog_unregister_device); +static int __init watchdog_deferred_registration(void) +{ + mutex_lock(&wtd_deferred_reg_mutex); + wtd_deferred_reg_done = true; + while (!list_empty(&wtd_deferred_reg_list)) { + struct watchdog_device *wdd; + + wdd = list_first_entry(&wtd_deferred_reg_list, + struct watchdog_device, deferred); + list_del(&wdd->deferred); + __watchdog_register_device(wdd); + } + mutex_unlock(&wtd_deferred_reg_mutex); + return 0; +} + static int __init watchdog_init(void) { int err; @@ -207,6 +288,7 @@ static int __init watchdog_init(void) return err; } + watchdog_deferred_registration(); return 0; } @@ -217,7 +299,7 @@ static void __exit watchdog_exit(void) ida_destroy(&watchdog_ida); } -subsys_initcall(watchdog_init); +subsys_initcall_sync(watchdog_init); module_exit(watchdog_exit); MODULE_AUTHOR("Alan Cox <[email protected]>"); diff --git a/fs/adfs/super.c b/fs/adfs/super.c index a19c31d3f369..4d4a0df8344f 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -242,7 +242,7 @@ static struct kmem_cache *adfs_inode_cachep; static struct inode *adfs_alloc_inode(struct super_block *sb) { struct adfs_inode_info *ei; - ei = (struct adfs_inode_info *)kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL); + ei = kmem_cache_alloc(adfs_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index a8f463c028ce..5fa92bc790ef 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -140,7 +140,7 @@ affs_remove_link(struct dentry *dentry) { struct inode *dir, *inode = d_inode(dentry); struct super_block *sb = inode->i_sb; - struct buffer_head *bh = NULL, *link_bh = NULL; + struct buffer_head *bh, *link_bh = NULL; u32 link_ino, ino; int retval; diff --git a/fs/affs/inode.c b/fs/affs/inode.c index a022f4accd76..17349500592d 100644 --- a/fs/affs/inode.c +++ b/fs/affs/inode.c @@ -346,7 +346,7 @@ affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s3 { struct super_block *sb = dir->i_sb; struct buffer_head *inode_bh = NULL; - struct buffer_head *bh = NULL; + struct buffer_head *bh; u32 block = 0; int retval; diff --git a/fs/affs/symlink.c b/fs/affs/symlink.c index f39b71c3981e..ea5b69a18ba9 100644 --- a/fs/affs/symlink.c +++ b/fs/affs/symlink.c @@ -16,14 +16,12 @@ static int affs_symlink_readpage(struct file *file, struct page *page) struct inode *inode = page->mapping->host; char *link = kmap(page); struct slink_front *lf; - int err; int i, j; char c; char lc; pr_debug("follow_link(ino=%lu)\n", inode->i_ino); - err = -EIO; bh = affs_bread(inode->i_sb, inode->i_ino); if (!bh) goto fail; @@ -66,7 +64,7 @@ fail: SetPageError(page); kunmap(page); unlock_page(page); - return err; + return -EIO; } const struct address_space_operations affs_symlink_aops = { diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c index 64fa248343f6..8f84646f10e9 100644 --- a/fs/ceph/acl.c +++ b/fs/ceph/acl.c @@ -187,10 +187,10 @@ int ceph_pre_init_acls(struct inode *dir, umode_t *mode, val_size2 = posix_acl_xattr_size(default_acl->a_count); err = -ENOMEM; - tmp_buf = kmalloc(max(val_size1, val_size2), GFP_NOFS); + tmp_buf = kmalloc(max(val_size1, val_size2), GFP_KERNEL); if (!tmp_buf) goto out_err; - pagelist = kmalloc(sizeof(struct ceph_pagelist), 
GFP_NOFS); + pagelist = kmalloc(sizeof(struct ceph_pagelist), GFP_KERNEL); if (!pagelist) goto out_err; ceph_pagelist_init(pagelist); diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index e162bcd105ee..890c50971a69 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -87,17 +87,21 @@ static int ceph_set_page_dirty(struct page *page) inode = mapping->host; ci = ceph_inode(inode); - /* - * Note that we're grabbing a snapc ref here without holding - * any locks! - */ - snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context); - /* dirty the head */ spin_lock(&ci->i_ceph_lock); - if (ci->i_head_snapc == NULL) - ci->i_head_snapc = ceph_get_snap_context(snapc); - ++ci->i_wrbuffer_ref_head; + BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference + if (__ceph_have_pending_cap_snap(ci)) { + struct ceph_cap_snap *capsnap = + list_last_entry(&ci->i_cap_snaps, + struct ceph_cap_snap, + ci_item); + snapc = ceph_get_snap_context(capsnap->context); + capsnap->dirty_pages++; + } else { + BUG_ON(!ci->i_head_snapc); + snapc = ceph_get_snap_context(ci->i_head_snapc); + ++ci->i_wrbuffer_ref_head; + } if (ci->i_wrbuffer_ref == 0) ihold(inode); ++ci->i_wrbuffer_ref; @@ -346,7 +350,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) /* build page vector */ nr_pages = calc_pages_for(0, len); - pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS); + pages = kmalloc(sizeof(*pages) * nr_pages, GFP_KERNEL); ret = -ENOMEM; if (!pages) goto out; @@ -358,7 +362,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) dout("start_read %p adding %p idx %lu\n", inode, page, page->index); if (add_to_page_cache_lru(page, &inode->i_data, page->index, - GFP_NOFS)) { + GFP_KERNEL)) { ceph_fscache_uncache_page(inode, page); page_cache_release(page); dout("start_read %p add_to_page_cache failed %p\n", @@ -436,7 +440,7 @@ out: * only snap context we are allowed to write back. 
*/ static struct ceph_snap_context *get_oldest_context(struct inode *inode, - u64 *snap_size) + loff_t *snap_size) { struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_snap_context *snapc = NULL; @@ -476,8 +480,9 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) struct ceph_osd_client *osdc; struct ceph_snap_context *snapc, *oldest; loff_t page_off = page_offset(page); + loff_t snap_size = -1; long writeback_stat; - u64 truncate_size, snap_size = 0; + u64 truncate_size; u32 truncate_seq; int err = 0, len = PAGE_CACHE_SIZE; @@ -512,7 +517,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) spin_lock(&ci->i_ceph_lock); truncate_seq = ci->i_truncate_seq; truncate_size = ci->i_truncate_size; - if (!snap_size) + if (snap_size == -1) snap_size = i_size_read(inode); spin_unlock(&ci->i_ceph_lock); @@ -695,7 +700,8 @@ static int ceph_writepages_start(struct address_space *mapping, unsigned wsize = 1 << inode->i_blkbits; struct ceph_osd_request *req = NULL; int do_sync = 0; - u64 truncate_size, snap_size; + loff_t snap_size, i_size; + u64 truncate_size; u32 truncate_seq; /* @@ -741,7 +747,7 @@ static int ceph_writepages_start(struct address_space *mapping, retry: /* find oldest snap context with dirty data */ ceph_put_snap_context(snapc); - snap_size = 0; + snap_size = -1; snapc = get_oldest_context(inode, &snap_size); if (!snapc) { /* hmm, why does writepages get called when there @@ -749,16 +755,13 @@ retry: dout(" no snap context with dirty data?\n"); goto out; } - if (snap_size == 0) - snap_size = i_size_read(inode); dout(" oldest snapc is %p seq %lld (%d snaps)\n", snapc, snapc->seq, snapc->num_snaps); spin_lock(&ci->i_ceph_lock); truncate_seq = ci->i_truncate_seq; truncate_size = ci->i_truncate_size; - if (!snap_size) - snap_size = i_size_read(inode); + i_size = i_size_read(inode); spin_unlock(&ci->i_ceph_lock); if (last_snapc && snapc != last_snapc) { @@ -828,8 +831,10 @@ get_more_pages: dout("waiting on writeback %p\n", page); wait_on_page_writeback(page); } - if (page_offset(page) >= snap_size) { - dout("%p page eof %llu\n", page, snap_size); + if (page_offset(page) >= + (snap_size == -1 ? i_size : snap_size)) { + dout("%p page eof %llu\n", page, + (snap_size == -1 ? 
i_size : snap_size)); done = 1; unlock_page(page); break; @@ -884,7 +889,8 @@ get_more_pages: } if (do_sync) - osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC); + osd_req_op_init(req, 1, + CEPH_OSD_OP_STARTSYNC, 0); req->r_callback = writepages_finish; req->r_inode = inode; @@ -944,10 +950,18 @@ get_more_pages: } /* Format the osd request message and submit the write */ - offset = page_offset(pages[0]); - len = min(snap_size - offset, - (u64)locked_pages << PAGE_CACHE_SHIFT); + len = (u64)locked_pages << PAGE_CACHE_SHIFT; + if (snap_size == -1) { + len = min(len, (u64)i_size_read(inode) - offset); + /* writepages_finish() clears writeback pages + * according to the data length, so make sure + * data length covers all locked pages */ + len = max(len, 1 + + ((u64)(locked_pages - 1) << PAGE_CACHE_SHIFT)); + } else { + len = min(len, snap_size - offset); + } dout("writepages got %d pages at %llu~%llu\n", locked_pages, offset, len); @@ -1032,7 +1046,6 @@ static int ceph_update_writeable_page(struct file *file, { struct inode *inode = file_inode(file); struct ceph_inode_info *ci = ceph_inode(inode); - struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; loff_t page_off = pos & PAGE_CACHE_MASK; int pos_in_page = pos & ~PAGE_CACHE_MASK; int end_in_page = pos_in_page + len; @@ -1044,10 +1057,6 @@ retry_locked: /* writepages currently holds page lock, but if we change that later, */ wait_on_page_writeback(page); - /* check snap context */ - BUG_ON(!ci->i_snap_realm); - down_read(&mdsc->snap_rwsem); - BUG_ON(!ci->i_snap_realm->cached_context); snapc = page_snap_context(page); if (snapc && snapc != ci->i_head_snapc) { /* @@ -1055,7 +1064,6 @@ retry_locked: * context! is it writeable now? */ oldest = get_oldest_context(inode, NULL); - up_read(&mdsc->snap_rwsem); if (snapc->seq > oldest->seq) { ceph_put_snap_context(oldest); @@ -1112,7 +1120,6 @@ retry_locked: } /* we need to read it. */ - up_read(&mdsc->snap_rwsem); r = readpage_nounlock(file, page); if (r < 0) goto fail_nosnap; @@ -1157,16 +1164,13 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping, /* * we don't do anything in here that simple_write_end doesn't do - * except adjust dirty page accounting and drop read lock on - * mdsc->snap_rwsem. 
+ * except adjust dirty page accounting */ static int ceph_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { struct inode *inode = file_inode(file); - struct ceph_fs_client *fsc = ceph_inode_to_client(inode); - struct ceph_mds_client *mdsc = fsc->mdsc; unsigned from = pos & (PAGE_CACHE_SIZE - 1); int check_cap = 0; @@ -1188,7 +1192,6 @@ static int ceph_write_end(struct file *file, struct address_space *mapping, set_page_dirty(page); unlock_page(page); - up_read(&mdsc->snap_rwsem); page_cache_release(page); if (check_cap) @@ -1314,13 +1317,17 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) struct inode *inode = file_inode(vma->vm_file); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_file_info *fi = vma->vm_file->private_data; - struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; + struct ceph_cap_flush *prealloc_cf; struct page *page = vmf->page; loff_t off = page_offset(page); loff_t size = i_size_read(inode); size_t len; int want, got, ret; + prealloc_cf = ceph_alloc_cap_flush(); + if (!prealloc_cf) + return VM_FAULT_SIGBUS; + if (ci->i_inline_version != CEPH_INLINE_NONE) { struct page *locked_page = NULL; if (off == 0) { @@ -1330,8 +1337,10 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) ret = ceph_uninline_data(vma->vm_file, locked_page); if (locked_page) unlock_page(locked_page); - if (ret < 0) - return VM_FAULT_SIGBUS; + if (ret < 0) { + ret = VM_FAULT_SIGBUS; + goto out_free; + } } if (off + PAGE_CACHE_SIZE <= size) @@ -1353,7 +1362,8 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) break; if (ret != -ERESTARTSYS) { WARN_ON(1); - return VM_FAULT_SIGBUS; + ret = VM_FAULT_SIGBUS; + goto out_free; } } dout("page_mkwrite %p %llu~%zd got cap refs on %s\n", @@ -1373,7 +1383,6 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) if (ret == 0) { /* success. we'll keep the page locked. 
*/ set_page_dirty(page); - up_read(&mdsc->snap_rwsem); ret = VM_FAULT_LOCKED; } else { if (ret == -ENOMEM) @@ -1389,7 +1398,8 @@ out: int dirty; spin_lock(&ci->i_ceph_lock); ci->i_inline_version = CEPH_INLINE_NONE; - dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); + dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, + &prealloc_cf); spin_unlock(&ci->i_ceph_lock); if (dirty) __mark_inode_dirty(inode, dirty); @@ -1398,6 +1408,8 @@ out: dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %d\n", inode, off, len, ceph_cap_string(got), ret); ceph_put_cap_refs(ci, got); +out_free: + ceph_free_cap_flush(prealloc_cf); return ret; } @@ -1509,8 +1521,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page) ceph_vino(inode), 0, &len, 0, 1, CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, - ci->i_snap_realm->cached_context, - 0, 0, false); + ceph_empty_snapc, 0, 0, false); if (IS_ERR(req)) { err = PTR_ERR(req); goto out; @@ -1528,7 +1539,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page) ceph_vino(inode), 0, &len, 1, 3, CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, - ci->i_snap_realm->cached_context, + ceph_empty_snapc, ci->i_truncate_seq, ci->i_truncate_size, false); if (IS_ERR(req)) { @@ -1597,3 +1608,206 @@ int ceph_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_ops = &ceph_vmops; return 0; } + +enum { + POOL_READ = 1, + POOL_WRITE = 2, +}; + +static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool) +{ + struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode); + struct ceph_mds_client *mdsc = fsc->mdsc; + struct ceph_osd_request *rd_req = NULL, *wr_req = NULL; + struct rb_node **p, *parent; + struct ceph_pool_perm *perm; + struct page **pages; + int err = 0, err2 = 0, have = 0; + + down_read(&mdsc->pool_perm_rwsem); + p = &mdsc->pool_perm_tree.rb_node; + while (*p) { + perm = rb_entry(*p, struct ceph_pool_perm, node); + if (pool < perm->pool) + p = &(*p)->rb_left; + else if (pool > perm->pool) + p = &(*p)->rb_right; + else { + have = perm->perm; + break; + } + } + up_read(&mdsc->pool_perm_rwsem); + if (*p) + goto out; + + dout("__ceph_pool_perm_get pool %u no perm cached\n", pool); + + down_write(&mdsc->pool_perm_rwsem); + parent = NULL; + while (*p) { + parent = *p; + perm = rb_entry(parent, struct ceph_pool_perm, node); + if (pool < perm->pool) + p = &(*p)->rb_left; + else if (pool > perm->pool) + p = &(*p)->rb_right; + else { + have = perm->perm; + break; + } + } + if (*p) { + up_write(&mdsc->pool_perm_rwsem); + goto out; + } + + rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, + ceph_empty_snapc, + 1, false, GFP_NOFS); + if (!rd_req) { + err = -ENOMEM; + goto out_unlock; + } + + rd_req->r_flags = CEPH_OSD_FLAG_READ; + osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0); + rd_req->r_base_oloc.pool = pool; + snprintf(rd_req->r_base_oid.name, sizeof(rd_req->r_base_oid.name), + "%llx.00000000", ci->i_vino.ino); + rd_req->r_base_oid.name_len = strlen(rd_req->r_base_oid.name); + + wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, + ceph_empty_snapc, + 1, false, GFP_NOFS); + if (!wr_req) { + err = -ENOMEM; + goto out_unlock; + } + + wr_req->r_flags = CEPH_OSD_FLAG_WRITE | + CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK; + osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL); + wr_req->r_base_oloc.pool = pool; + wr_req->r_base_oid = rd_req->r_base_oid; + + /* one page should be large enough for STAT data */ + pages = ceph_alloc_page_vector(1, GFP_KERNEL); + if 
(IS_ERR(pages)) { + err = PTR_ERR(pages); + goto out_unlock; + } + + osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE, + 0, false, true); + ceph_osdc_build_request(rd_req, 0, NULL, CEPH_NOSNAP, + &ci->vfs_inode.i_mtime); + err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false); + + ceph_osdc_build_request(wr_req, 0, NULL, CEPH_NOSNAP, + &ci->vfs_inode.i_mtime); + err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false); + + if (!err) + err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req); + if (!err2) + err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req); + + if (err >= 0 || err == -ENOENT) + have |= POOL_READ; + else if (err != -EPERM) + goto out_unlock; + + if (err2 == 0 || err2 == -EEXIST) + have |= POOL_WRITE; + else if (err2 != -EPERM) { + err = err2; + goto out_unlock; + } + + perm = kmalloc(sizeof(*perm), GFP_NOFS); + if (!perm) { + err = -ENOMEM; + goto out_unlock; + } + + perm->pool = pool; + perm->perm = have; + rb_link_node(&perm->node, parent, p); + rb_insert_color(&perm->node, &mdsc->pool_perm_tree); + err = 0; +out_unlock: + up_write(&mdsc->pool_perm_rwsem); + + if (rd_req) + ceph_osdc_put_request(rd_req); + if (wr_req) + ceph_osdc_put_request(wr_req); +out: + if (!err) + err = have; + dout("__ceph_pool_perm_get pool %u result = %d\n", pool, err); + return err; +} + +int ceph_pool_perm_check(struct ceph_inode_info *ci, int need) +{ + u32 pool; + int ret, flags; + + if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode), + NOPOOLPERM)) + return 0; + + spin_lock(&ci->i_ceph_lock); + flags = ci->i_ceph_flags; + pool = ceph_file_layout_pg_pool(ci->i_layout); + spin_unlock(&ci->i_ceph_lock); +check: + if (flags & CEPH_I_POOL_PERM) { + if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) { + dout("ceph_pool_perm_check pool %u no read perm\n", + pool); + return -EPERM; + } + if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) { + dout("ceph_pool_perm_check pool %u no write perm\n", + pool); + return -EPERM; + } + return 0; + } + + ret = __ceph_pool_perm_get(ci, pool); + if (ret < 0) + return ret; + + flags = CEPH_I_POOL_PERM; + if (ret & POOL_READ) + flags |= CEPH_I_POOL_RD; + if (ret & POOL_WRITE) + flags |= CEPH_I_POOL_WR; + + spin_lock(&ci->i_ceph_lock); + if (pool == ceph_file_layout_pg_pool(ci->i_layout)) { + ci->i_ceph_flags = flags; + } else { + pool = ceph_file_layout_pg_pool(ci->i_layout); + flags = ci->i_ceph_flags; + } + spin_unlock(&ci->i_ceph_lock); + goto check; +} + +void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc) +{ + struct ceph_pool_perm *perm; + struct rb_node *n; + + while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) { + n = rb_first(&mdsc->pool_perm_tree); + perm = rb_entry(n, struct ceph_pool_perm, node); + rb_erase(n, &mdsc->pool_perm_tree); + kfree(perm); + } +} diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index be5ea6af8366..dc10c9dd36c1 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -833,7 +833,9 @@ int __ceph_caps_used(struct ceph_inode_info *ci) used |= CEPH_CAP_PIN; if (ci->i_rd_ref) used |= CEPH_CAP_FILE_RD; - if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages) + if (ci->i_rdcache_ref || + (!S_ISDIR(ci->vfs_inode.i_mode) && /* ignore readdir cache */ + ci->vfs_inode.i_data.nrpages)) used |= CEPH_CAP_FILE_CACHE; if (ci->i_wr_ref) used |= CEPH_CAP_FILE_WR; @@ -926,16 +928,6 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) /* remove from session list */ spin_lock(&session->s_cap_lock); - /* - * s_cap_reconnect is protected by s_cap_lock. 
no one changes - * s_cap_gen while session is in the reconnect state. - */ - if (queue_release && - (!session->s_cap_reconnect || - cap->cap_gen == session->s_cap_gen)) - __queue_cap_release(session, ci->i_vino.ino, cap->cap_id, - cap->mseq, cap->issue_seq); - if (session->s_cap_iterator == cap) { /* not yet, we are iterating over this very cap */ dout("__ceph_remove_cap delaying %p removal from session %p\n", @@ -948,6 +940,25 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) } /* protect backpointer with s_cap_lock: see iterate_session_caps */ cap->ci = NULL; + + /* + * s_cap_reconnect is protected by s_cap_lock. no one changes + * s_cap_gen while session is in the reconnect state. + */ + if (queue_release && + (!session->s_cap_reconnect || cap->cap_gen == session->s_cap_gen)) { + cap->queue_release = 1; + if (removed) { + list_add_tail(&cap->session_caps, + &session->s_cap_releases); + session->s_num_cap_releases++; + removed = 0; + } + } else { + cap->queue_release = 0; + } + cap->cap_ino = ci->i_vino.ino; + spin_unlock(&session->s_cap_lock); /* remove from inode list */ @@ -977,8 +988,8 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) static int send_cap_msg(struct ceph_mds_session *session, u64 ino, u64 cid, int op, int caps, int wanted, int dirty, - u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq, - u64 size, u64 max_size, + u32 seq, u64 flush_tid, u64 oldest_flush_tid, + u32 issue_seq, u32 mseq, u64 size, u64 max_size, struct timespec *mtime, struct timespec *atime, u64 time_warp_seq, kuid_t uid, kgid_t gid, umode_t mode, @@ -992,20 +1003,23 @@ static int send_cap_msg(struct ceph_mds_session *session, size_t extra_len; dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s" - " seq %u/%u mseq %u follows %lld size %llu/%llu" + " seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu" " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op), cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted), ceph_cap_string(dirty), - seq, issue_seq, mseq, follows, size, max_size, + seq, issue_seq, flush_tid, oldest_flush_tid, + mseq, follows, size, max_size, xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0); - /* flock buffer size + inline version + inline data size */ - extra_len = 4 + 8 + 4; + /* flock buffer size + inline version + inline data size + + * osd_epoch_barrier + oldest_flush_tid */ + extra_len = 4 + 8 + 4 + 4 + 8; msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc) + extra_len, GFP_NOFS, false); if (!msg) return -ENOMEM; + msg->hdr.version = cpu_to_le16(6); msg->hdr.tid = cpu_to_le64(flush_tid); fc = msg->front.iov_base; @@ -1041,6 +1055,10 @@ static int send_cap_msg(struct ceph_mds_session *session, ceph_encode_64(&p, inline_data ? 
0 : CEPH_INLINE_NONE); /* inline data size */ ceph_encode_32(&p, 0); + /* osd_epoch_barrier */ + ceph_encode_32(&p, 0); + /* oldest_flush_tid */ + ceph_encode_64(&p, oldest_flush_tid); fc->xattr_version = cpu_to_le64(xattr_version); if (xattrs_buf) { @@ -1053,44 +1071,6 @@ static int send_cap_msg(struct ceph_mds_session *session, return 0; } -void __queue_cap_release(struct ceph_mds_session *session, - u64 ino, u64 cap_id, u32 migrate_seq, - u32 issue_seq) -{ - struct ceph_msg *msg; - struct ceph_mds_cap_release *head; - struct ceph_mds_cap_item *item; - - BUG_ON(!session->s_num_cap_releases); - msg = list_first_entry(&session->s_cap_releases, - struct ceph_msg, list_head); - - dout(" adding %llx release to mds%d msg %p (%d left)\n", - ino, session->s_mds, msg, session->s_num_cap_releases); - - BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE); - head = msg->front.iov_base; - le32_add_cpu(&head->num, 1); - item = msg->front.iov_base + msg->front.iov_len; - item->ino = cpu_to_le64(ino); - item->cap_id = cpu_to_le64(cap_id); - item->migrate_seq = cpu_to_le32(migrate_seq); - item->seq = cpu_to_le32(issue_seq); - - session->s_num_cap_releases--; - - msg->front.iov_len += sizeof(*item); - if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) { - dout(" release msg %p full\n", msg); - list_move_tail(&msg->list_head, &session->s_cap_releases_done); - } else { - dout(" release msg %p at %d/%d (%d)\n", msg, - (int)le32_to_cpu(head->num), - (int)CEPH_CAPS_PER_RELEASE, - (int)msg->front.iov_len); - } -} - /* * Queue cap releases when an inode is dropped from our cache. Since * inode is about to be destroyed, there is no need for i_ceph_lock. @@ -1127,7 +1107,7 @@ void ceph_queue_caps_release(struct inode *inode) */ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, int op, int used, int want, int retain, int flushing, - unsigned *pflush_tid) + u64 flush_tid, u64 oldest_flush_tid) __releases(cap->ci->i_ceph_lock) { struct ceph_inode_info *ci = cap->ci; @@ -1145,8 +1125,6 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, u64 xattr_version = 0; struct ceph_buffer *xattr_blob = NULL; int delayed = 0; - u64 flush_tid = 0; - int i; int ret; bool inline_data; @@ -1190,26 +1168,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, cap->implemented &= cap->issued | used; cap->mds_wanted = want; - if (flushing) { - /* - * assign a tid for flush operations so we can avoid - * flush1 -> dirty1 -> flush2 -> flushack1 -> mark - * clean type races. track latest tid for every bit - * so we can handle flush AxFw, flush Fw, and have the - * first ack clean Ax. - */ - flush_tid = ++ci->i_cap_flush_last_tid; - if (pflush_tid) - *pflush_tid = flush_tid; - dout(" cap_flush_tid %d\n", (int)flush_tid); - for (i = 0; i < CEPH_CAP_BITS; i++) - if (flushing & (1 << i)) - ci->i_cap_flush_tid[i] = flush_tid; - - follows = ci->i_head_snapc->seq; - } else { - follows = 0; - } + follows = flushing ? 
ci->i_head_snapc->seq : 0; keep = cap->implemented; seq = cap->seq; @@ -1237,7 +1196,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, spin_unlock(&ci->i_ceph_lock); ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id, - op, keep, want, flushing, seq, flush_tid, issue_seq, mseq, + op, keep, want, flushing, seq, + flush_tid, oldest_flush_tid, issue_seq, mseq, size, max_size, &mtime, &atime, time_warp_seq, uid, gid, mode, xattr_version, xattr_blob, follows, inline_data); @@ -1259,14 +1219,14 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, * asynchronously back to the MDS once sync writes complete and dirty * data is written out. * - * Unless @again is true, skip cap_snaps that were already sent to + * Unless @kick is true, skip cap_snaps that were already sent to * the MDS (i.e., during this session). * * Called under i_ceph_lock. Takes s_mutex as needed. */ void __ceph_flush_snaps(struct ceph_inode_info *ci, struct ceph_mds_session **psession, - int again) + int kick) __releases(ci->i_ceph_lock) __acquires(ci->i_ceph_lock) { @@ -1297,11 +1257,8 @@ retry: if (capsnap->dirty_pages || capsnap->writing) break; - /* - * if cap writeback already occurred, we should have dropped - * the capsnap in ceph_put_wrbuffer_cap_refs. - */ - BUG_ON(capsnap->dirty == 0); + /* should be removed by ceph_try_drop_cap_snap() */ + BUG_ON(!capsnap->need_flush); /* pick mds, take s_mutex */ if (ci->i_auth_cap == NULL) { @@ -1310,7 +1267,7 @@ retry: } /* only flush each capsnap once */ - if (!again && !list_empty(&capsnap->flushing_item)) { + if (!kick && !list_empty(&capsnap->flushing_item)) { dout("already flushed %p, skipping\n", capsnap); continue; } @@ -1320,6 +1277,9 @@ retry: if (session && session->s_mds != mds) { dout("oops, wrong session %p mutex\n", session); + if (kick) + goto out; + mutex_unlock(&session->s_mutex); ceph_put_mds_session(session); session = NULL; @@ -1343,20 +1303,22 @@ retry: goto retry; } - capsnap->flush_tid = ++ci->i_cap_flush_last_tid; + spin_lock(&mdsc->cap_dirty_lock); + capsnap->flush_tid = ++mdsc->last_cap_flush_tid; + spin_unlock(&mdsc->cap_dirty_lock); + atomic_inc(&capsnap->nref); - if (!list_empty(&capsnap->flushing_item)) - list_del_init(&capsnap->flushing_item); - list_add_tail(&capsnap->flushing_item, - &session->s_cap_snaps_flushing); + if (list_empty(&capsnap->flushing_item)) + list_add_tail(&capsnap->flushing_item, + &session->s_cap_snaps_flushing); spin_unlock(&ci->i_ceph_lock); dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n", inode, capsnap, capsnap->follows, capsnap->flush_tid); send_cap_msg(session, ceph_vino(inode).ino, 0, CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0, - capsnap->dirty, 0, capsnap->flush_tid, 0, mseq, - capsnap->size, 0, + capsnap->dirty, 0, capsnap->flush_tid, 0, + 0, mseq, capsnap->size, 0, &capsnap->mtime, &capsnap->atime, capsnap->time_warp_seq, capsnap->uid, capsnap->gid, capsnap->mode, @@ -1396,7 +1358,8 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci) * Caller is then responsible for calling __mark_inode_dirty with the * returned flags value. 
*/ -int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) +int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask, + struct ceph_cap_flush **pcf) { struct ceph_mds_client *mdsc = ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; @@ -1416,9 +1379,14 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) ceph_cap_string(was | mask)); ci->i_dirty_caps |= mask; if (was == 0) { - if (!ci->i_head_snapc) + WARN_ON_ONCE(ci->i_prealloc_cap_flush); + swap(ci->i_prealloc_cap_flush, *pcf); + + if (!ci->i_head_snapc) { + WARN_ON_ONCE(!rwsem_is_locked(&mdsc->snap_rwsem)); ci->i_head_snapc = ceph_get_snap_context( ci->i_snap_realm->cached_context); + } dout(" inode %p now dirty snapc %p auth cap %p\n", &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap); BUG_ON(!list_empty(&ci->i_dirty_item)); @@ -1429,6 +1397,8 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) ihold(inode); dirty |= I_DIRTY_SYNC; } + } else { + WARN_ON_ONCE(!ci->i_prealloc_cap_flush); } BUG_ON(list_empty(&ci->i_dirty_item)); if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) && @@ -1438,6 +1408,74 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) return dirty; } +static void __add_cap_flushing_to_inode(struct ceph_inode_info *ci, + struct ceph_cap_flush *cf) +{ + struct rb_node **p = &ci->i_cap_flush_tree.rb_node; + struct rb_node *parent = NULL; + struct ceph_cap_flush *other = NULL; + + while (*p) { + parent = *p; + other = rb_entry(parent, struct ceph_cap_flush, i_node); + + if (cf->tid < other->tid) + p = &(*p)->rb_left; + else if (cf->tid > other->tid) + p = &(*p)->rb_right; + else + BUG(); + } + + rb_link_node(&cf->i_node, parent, p); + rb_insert_color(&cf->i_node, &ci->i_cap_flush_tree); +} + +static void __add_cap_flushing_to_mdsc(struct ceph_mds_client *mdsc, + struct ceph_cap_flush *cf) +{ + struct rb_node **p = &mdsc->cap_flush_tree.rb_node; + struct rb_node *parent = NULL; + struct ceph_cap_flush *other = NULL; + + while (*p) { + parent = *p; + other = rb_entry(parent, struct ceph_cap_flush, g_node); + + if (cf->tid < other->tid) + p = &(*p)->rb_left; + else if (cf->tid > other->tid) + p = &(*p)->rb_right; + else + BUG(); + } + + rb_link_node(&cf->g_node, parent, p); + rb_insert_color(&cf->g_node, &mdsc->cap_flush_tree); +} + +struct ceph_cap_flush *ceph_alloc_cap_flush(void) +{ + return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL); +} + +void ceph_free_cap_flush(struct ceph_cap_flush *cf) +{ + if (cf) + kmem_cache_free(ceph_cap_flush_cachep, cf); +} + +static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc) +{ + struct rb_node *n = rb_first(&mdsc->cap_flush_tree); + if (n) { + struct ceph_cap_flush *cf = + rb_entry(n, struct ceph_cap_flush, g_node); + return cf->tid; + } + return 0; +} + /* * Add dirty inode to the flushing list. Assigned a seq number so we * can wait for caps to flush without starving. @@ -1445,14 +1483,17 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) * Called under i_ceph_lock. 
*/ static int __mark_caps_flushing(struct inode *inode, - struct ceph_mds_session *session) + struct ceph_mds_session *session, + u64 *flush_tid, u64 *oldest_flush_tid) { struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_cap_flush *cf = NULL; int flushing; BUG_ON(ci->i_dirty_caps == 0); BUG_ON(list_empty(&ci->i_dirty_item)); + BUG_ON(!ci->i_prealloc_cap_flush); flushing = ci->i_dirty_caps; dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n", @@ -1463,22 +1504,31 @@ static int __mark_caps_flushing(struct inode *inode, ci->i_dirty_caps = 0; dout(" inode %p now !dirty\n", inode); + swap(cf, ci->i_prealloc_cap_flush); + cf->caps = flushing; + cf->kick = false; + spin_lock(&mdsc->cap_dirty_lock); list_del_init(&ci->i_dirty_item); + cf->tid = ++mdsc->last_cap_flush_tid; + __add_cap_flushing_to_mdsc(mdsc, cf); + *oldest_flush_tid = __get_oldest_flush_tid(mdsc); + if (list_empty(&ci->i_flushing_item)) { - ci->i_cap_flush_seq = ++mdsc->cap_flush_seq; list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing); mdsc->num_cap_flushing++; - dout(" inode %p now flushing seq %lld\n", inode, - ci->i_cap_flush_seq); + dout(" inode %p now flushing tid %llu\n", inode, cf->tid); } else { list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing); - dout(" inode %p now flushing (more) seq %lld\n", inode, - ci->i_cap_flush_seq); + dout(" inode %p now flushing (more) tid %llu\n", + inode, cf->tid); } spin_unlock(&mdsc->cap_dirty_lock); + __add_cap_flushing_to_inode(ci, cf); + + *flush_tid = cf->tid; return flushing; } @@ -1524,6 +1574,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags, struct ceph_mds_client *mdsc = fsc->mdsc; struct inode *inode = &ci->vfs_inode; struct ceph_cap *cap; + u64 flush_tid, oldest_flush_tid; int file_wanted, used, cap_used; int took_snap_rwsem = 0; /* true if mdsc->snap_rwsem held */ int issued, implemented, want, retain, revoking, flushing = 0; @@ -1553,13 +1604,13 @@ retry: retry_locked: file_wanted = __ceph_caps_file_wanted(ci); used = __ceph_caps_used(ci); - want = file_wanted | used; issued = __ceph_caps_issued(ci, &implemented); revoking = implemented & ~issued; - retain = want | CEPH_CAP_PIN; + want = file_wanted; + retain = file_wanted | used | CEPH_CAP_PIN; if (!mdsc->stopping && inode->i_nlink > 0) { - if (want) { + if (file_wanted) { retain |= CEPH_CAP_ANY; /* be greedy */ } else if (S_ISDIR(inode->i_mode) && (issued & CEPH_CAP_FILE_SHARED) && @@ -1602,9 +1653,10 @@ retry_locked: * If we fail, it's because pages are locked.... try again later. */ if ((!is_delayed || mdsc->stopping) && - ci->i_wrbuffer_ref == 0 && /* no dirty pages... */ - inode->i_data.nrpages && /* have cached pages */ - (file_wanted == 0 || /* no open files */ + !S_ISDIR(inode->i_mode) && /* ignore readdir cache */ + ci->i_wrbuffer_ref == 0 && /* no dirty pages... 
*/ + inode->i_data.nrpages && /* have cached pages */ + (file_wanted == 0 || /* no open files */ (revoking & (CEPH_CAP_FILE_CACHE| CEPH_CAP_FILE_LAZYIO))) && /* or revoking cache */ !tried_invalidate) { @@ -1742,17 +1794,25 @@ ack: took_snap_rwsem = 1; } - if (cap == ci->i_auth_cap && ci->i_dirty_caps) - flushing = __mark_caps_flushing(inode, session); - else + if (cap == ci->i_auth_cap && ci->i_dirty_caps) { + flushing = __mark_caps_flushing(inode, session, + &flush_tid, + &oldest_flush_tid); + } else { flushing = 0; + flush_tid = 0; + spin_lock(&mdsc->cap_dirty_lock); + oldest_flush_tid = __get_oldest_flush_tid(mdsc); + spin_unlock(&mdsc->cap_dirty_lock); + } mds = cap->mds; /* remember mds, so we don't repeat */ sent++; /* __send_cap drops i_ceph_lock */ delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, cap_used, - want, retain, flushing, NULL); + want, retain, flushing, + flush_tid, oldest_flush_tid); goto retry; /* retake i_ceph_lock and restart our cap scan. */ } @@ -1781,12 +1841,13 @@ ack: /* * Try to flush dirty caps back to the auth mds. */ -static int try_flush_caps(struct inode *inode, unsigned *flush_tid) +static int try_flush_caps(struct inode *inode, u64 *ptid) { struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; struct ceph_inode_info *ci = ceph_inode(inode); - int flushing = 0; struct ceph_mds_session *session = NULL; + int flushing = 0; + u64 flush_tid = 0, oldest_flush_tid = 0; retry: spin_lock(&ci->i_ceph_lock); @@ -1811,42 +1872,54 @@ retry: if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) goto out; - flushing = __mark_caps_flushing(inode, session); + flushing = __mark_caps_flushing(inode, session, &flush_tid, + &oldest_flush_tid); /* __send_cap drops i_ceph_lock */ delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want, - cap->issued | cap->implemented, flushing, - flush_tid); - if (!delayed) - goto out_unlocked; + (cap->issued | cap->implemented), + flushing, flush_tid, oldest_flush_tid); - spin_lock(&ci->i_ceph_lock); - __cap_delay_requeue(mdsc, ci); + if (delayed) { + spin_lock(&ci->i_ceph_lock); + __cap_delay_requeue(mdsc, ci); + spin_unlock(&ci->i_ceph_lock); + } + } else { + struct rb_node *n = rb_last(&ci->i_cap_flush_tree); + if (n) { + struct ceph_cap_flush *cf = + rb_entry(n, struct ceph_cap_flush, i_node); + flush_tid = cf->tid; + } + flushing = ci->i_flushing_caps; + spin_unlock(&ci->i_ceph_lock); } out: - spin_unlock(&ci->i_ceph_lock); -out_unlocked: if (session) mutex_unlock(&session->s_mutex); + + *ptid = flush_tid; return flushing; } /* * Return true if we've flushed caps through the given flush_tid. 
*/ -static int caps_are_flushed(struct inode *inode, unsigned tid) +static int caps_are_flushed(struct inode *inode, u64 flush_tid) { struct ceph_inode_info *ci = ceph_inode(inode); - int i, ret = 1; + struct ceph_cap_flush *cf; + struct rb_node *n; + int ret = 1; spin_lock(&ci->i_ceph_lock); - for (i = 0; i < CEPH_CAP_BITS; i++) - if ((ci->i_flushing_caps & (1 << i)) && - ci->i_cap_flush_tid[i] <= tid) { - /* still flushing this bit */ + n = rb_first(&ci->i_cap_flush_tree); + if (n) { + cf = rb_entry(n, struct ceph_cap_flush, i_node); + if (cf->tid <= flush_tid) ret = 0; - break; - } + } spin_unlock(&ci->i_ceph_lock); return ret; } @@ -1864,13 +1937,16 @@ static void sync_write_wait(struct inode *inode) struct ceph_osd_request *req; u64 last_tid; + if (!S_ISREG(inode->i_mode)) + return; + spin_lock(&ci->i_unsafe_lock); if (list_empty(head)) goto out; /* set upper bound as _last_ entry in chain */ - req = list_entry(head->prev, struct ceph_osd_request, - r_unsafe_item); + req = list_last_entry(head, struct ceph_osd_request, + r_unsafe_item); last_tid = req->r_tid; do { @@ -1888,18 +1964,64 @@ static void sync_write_wait(struct inode *inode) */ if (list_empty(head)) break; - req = list_entry(head->next, struct ceph_osd_request, - r_unsafe_item); + req = list_first_entry(head, struct ceph_osd_request, + r_unsafe_item); } while (req->r_tid < last_tid); out: spin_unlock(&ci->i_unsafe_lock); } +/* + * wait for any uncommitted directory operations to commit. + */ +static int unsafe_dirop_wait(struct inode *inode) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + struct list_head *head = &ci->i_unsafe_dirops; + struct ceph_mds_request *req; + u64 last_tid; + int ret = 0; + + if (!S_ISDIR(inode->i_mode)) + return 0; + + spin_lock(&ci->i_unsafe_lock); + if (list_empty(head)) + goto out; + + req = list_last_entry(head, struct ceph_mds_request, + r_unsafe_dir_item); + last_tid = req->r_tid; + + do { + ceph_mdsc_get_request(req); + spin_unlock(&ci->i_unsafe_lock); + + dout("unsafe_dirop_wait %p wait on tid %llu (until %llu)\n", + inode, req->r_tid, last_tid); + ret = !wait_for_completion_timeout(&req->r_safe_completion, + ceph_timeout_jiffies(req->r_timeout)); + if (ret) + ret = -EIO; /* timed out */ + + ceph_mdsc_put_request(req); + + spin_lock(&ci->i_unsafe_lock); + if (ret || list_empty(head)) + break; + req = list_first_entry(head, struct ceph_mds_request, + r_unsafe_dir_item); + } while (req->r_tid < last_tid); +out: + spin_unlock(&ci->i_unsafe_lock); + return ret; +} + int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync) { struct inode *inode = file->f_mapping->host; struct ceph_inode_info *ci = ceph_inode(inode); - unsigned flush_tid; + u64 flush_tid; int ret; int dirty; @@ -1908,25 +2030,30 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync) ret = filemap_write_and_wait_range(inode->i_mapping, start, end); if (ret < 0) - return ret; + goto out; + + if (datasync) + goto out; + mutex_lock(&inode->i_mutex); dirty = try_flush_caps(inode, &flush_tid); dout("fsync dirty caps are %s\n", ceph_cap_string(dirty)); + ret = unsafe_dirop_wait(inode); + /* * only wait on non-file metadata writeback (the mds * can recover size and mtime, so we don't need to * wait for that) */ - if (!datasync && (dirty & ~CEPH_CAP_ANY_FILE_WR)) { - dout("fsync waiting for flush_tid %u\n", flush_tid); + if (!ret && (dirty & ~CEPH_CAP_ANY_FILE_WR)) { ret = wait_event_interruptible(ci->i_cap_wq, - caps_are_flushed(inode, flush_tid)); + caps_are_flushed(inode, flush_tid)); } - - 
dout("fsync %p%s done\n", inode, datasync ? " datasync" : ""); mutex_unlock(&inode->i_mutex); +out: + dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret); return ret; } @@ -1939,7 +2066,7 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync) int ceph_write_inode(struct inode *inode, struct writeback_control *wbc) { struct ceph_inode_info *ci = ceph_inode(inode); - unsigned flush_tid; + u64 flush_tid; int err = 0; int dirty; int wait = wbc->sync_mode == WB_SYNC_ALL; @@ -1994,6 +2121,104 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc, } } +static int __kick_flushing_caps(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session, + struct ceph_inode_info *ci, + bool kick_all) +{ + struct inode *inode = &ci->vfs_inode; + struct ceph_cap *cap; + struct ceph_cap_flush *cf; + struct rb_node *n; + int delayed = 0; + u64 first_tid = 0; + u64 oldest_flush_tid; + + spin_lock(&mdsc->cap_dirty_lock); + oldest_flush_tid = __get_oldest_flush_tid(mdsc); + spin_unlock(&mdsc->cap_dirty_lock); + + while (true) { + spin_lock(&ci->i_ceph_lock); + cap = ci->i_auth_cap; + if (!(cap && cap->session == session)) { + pr_err("%p auth cap %p not mds%d ???\n", inode, + cap, session->s_mds); + spin_unlock(&ci->i_ceph_lock); + break; + } + + for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) { + cf = rb_entry(n, struct ceph_cap_flush, i_node); + if (cf->tid < first_tid) + continue; + if (kick_all || cf->kick) + break; + } + if (!n) { + spin_unlock(&ci->i_ceph_lock); + break; + } + + cf = rb_entry(n, struct ceph_cap_flush, i_node); + cf->kick = false; + + first_tid = cf->tid + 1; + + dout("kick_flushing_caps %p cap %p tid %llu %s\n", inode, + cap, cf->tid, ceph_cap_string(cf->caps)); + delayed |= __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, + __ceph_caps_used(ci), + __ceph_caps_wanted(ci), + cap->issued | cap->implemented, + cf->caps, cf->tid, oldest_flush_tid); + } + return delayed; +} + +void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session) +{ + struct ceph_inode_info *ci; + struct ceph_cap *cap; + struct ceph_cap_flush *cf; + struct rb_node *n; + + dout("early_kick_flushing_caps mds%d\n", session->s_mds); + list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { + spin_lock(&ci->i_ceph_lock); + cap = ci->i_auth_cap; + if (!(cap && cap->session == session)) { + pr_err("%p auth cap %p not mds%d ???\n", + &ci->vfs_inode, cap, session->s_mds); + spin_unlock(&ci->i_ceph_lock); + continue; + } + + + /* + * if flushing caps were revoked, we re-send the cap flush + * in client reconnect stage. This guarantees MDS * processes + * the cap flush message before issuing the flushing caps to + * other client. 
+ */ + if ((cap->issued & ci->i_flushing_caps) != + ci->i_flushing_caps) { + spin_unlock(&ci->i_ceph_lock); + if (!__kick_flushing_caps(mdsc, session, ci, true)) + continue; + spin_lock(&ci->i_ceph_lock); + } + + for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) { + cf = rb_entry(n, struct ceph_cap_flush, i_node); + cf->kick = true; + } + + spin_unlock(&ci->i_ceph_lock); + } +} + void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, struct ceph_mds_session *session) { @@ -2003,28 +2228,10 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, dout("kick_flushing_caps mds%d\n", session->s_mds); list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { - struct inode *inode = &ci->vfs_inode; - struct ceph_cap *cap; - int delayed = 0; - - spin_lock(&ci->i_ceph_lock); - cap = ci->i_auth_cap; - if (cap && cap->session == session) { - dout("kick_flushing_caps %p cap %p %s\n", inode, - cap, ceph_cap_string(ci->i_flushing_caps)); - delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, - __ceph_caps_used(ci), - __ceph_caps_wanted(ci), - cap->issued | cap->implemented, - ci->i_flushing_caps, NULL); - if (delayed) { - spin_lock(&ci->i_ceph_lock); - __cap_delay_requeue(mdsc, ci); - spin_unlock(&ci->i_ceph_lock); - } - } else { - pr_err("%p auth cap %p not mds%d ???\n", inode, - cap, session->s_mds); + int delayed = __kick_flushing_caps(mdsc, session, ci, false); + if (delayed) { + spin_lock(&ci->i_ceph_lock); + __cap_delay_requeue(mdsc, ci); spin_unlock(&ci->i_ceph_lock); } } @@ -2036,26 +2243,25 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc, { struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_cap *cap; - int delayed = 0; spin_lock(&ci->i_ceph_lock); cap = ci->i_auth_cap; - dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode, - ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq); + dout("kick_flushing_inode_caps %p flushing %s\n", inode, + ceph_cap_string(ci->i_flushing_caps)); __ceph_flush_snaps(ci, &session, 1); if (ci->i_flushing_caps) { + int delayed; + spin_lock(&mdsc->cap_dirty_lock); list_move_tail(&ci->i_flushing_item, &cap->session->s_cap_flushing); spin_unlock(&mdsc->cap_dirty_lock); - delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, - __ceph_caps_used(ci), - __ceph_caps_wanted(ci), - cap->issued | cap->implemented, - ci->i_flushing_caps, NULL); + spin_unlock(&ci->i_ceph_lock); + + delayed = __kick_flushing_caps(mdsc, session, ci, true); if (delayed) { spin_lock(&ci->i_ceph_lock); __cap_delay_requeue(mdsc, ci); @@ -2073,7 +2279,8 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc, * * Protected by i_ceph_lock. */ -static void __take_cap_refs(struct ceph_inode_info *ci, int got) +static void __take_cap_refs(struct ceph_inode_info *ci, int got, + bool snap_rwsem_locked) { if (got & CEPH_CAP_PIN) ci->i_pin_ref++; @@ -2081,8 +2288,14 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got) ci->i_rd_ref++; if (got & CEPH_CAP_FILE_CACHE) ci->i_rdcache_ref++; - if (got & CEPH_CAP_FILE_WR) + if (got & CEPH_CAP_FILE_WR) { + if (ci->i_wr_ref == 0 && !ci->i_head_snapc) { + BUG_ON(!snap_rwsem_locked); + ci->i_head_snapc = ceph_get_snap_context( + ci->i_snap_realm->cached_context); + } ci->i_wr_ref++; + } if (got & CEPH_CAP_FILE_BUFFER) { if (ci->i_wb_ref == 0) ihold(&ci->vfs_inode); @@ -2100,16 +2313,19 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got) * requested from the MDS. 
*/ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want, - loff_t endoff, int *got, int *check_max, int *err) + loff_t endoff, bool nonblock, int *got, int *err) { struct inode *inode = &ci->vfs_inode; + struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; int ret = 0; int have, implemented; int file_wanted; + bool snap_rwsem_locked = false; dout("get_cap_refs %p need %s want %s\n", inode, ceph_cap_string(need), ceph_cap_string(want)); +again: spin_lock(&ci->i_ceph_lock); /* make sure file is actually open */ @@ -2125,6 +2341,10 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want, /* finish pending truncate */ while (ci->i_truncate_pending) { spin_unlock(&ci->i_ceph_lock); + if (snap_rwsem_locked) { + up_read(&mdsc->snap_rwsem); + snap_rwsem_locked = false; + } __ceph_do_pending_vmtruncate(inode); spin_lock(&ci->i_ceph_lock); } @@ -2136,7 +2356,7 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want, dout("get_cap_refs %p endoff %llu > maxsize %llu\n", inode, endoff, ci->i_max_size); if (endoff > ci->i_requested_max_size) { - *check_max = 1; + *err = -EAGAIN; ret = 1; } goto out_unlock; @@ -2164,8 +2384,29 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want, inode, ceph_cap_string(have), ceph_cap_string(not), ceph_cap_string(revoking)); if ((revoking & not) == 0) { + if (!snap_rwsem_locked && + !ci->i_head_snapc && + (need & CEPH_CAP_FILE_WR)) { + if (!down_read_trylock(&mdsc->snap_rwsem)) { + /* + * we can not call down_read() when + * task isn't in TASK_RUNNING state + */ + if (nonblock) { + *err = -EAGAIN; + ret = 1; + goto out_unlock; + } + + spin_unlock(&ci->i_ceph_lock); + down_read(&mdsc->snap_rwsem); + snap_rwsem_locked = true; + goto again; + } + snap_rwsem_locked = true; + } *got = need | (have & want); - __take_cap_refs(ci, *got); + __take_cap_refs(ci, *got, true); ret = 1; } } else { @@ -2189,6 +2430,8 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want, } out_unlock: spin_unlock(&ci->i_ceph_lock); + if (snap_rwsem_locked) + up_read(&mdsc->snap_rwsem); dout("get_cap_refs %p ret %d got %s\n", inode, ret, ceph_cap_string(*got)); @@ -2231,50 +2474,70 @@ static void check_max_size(struct inode *inode, loff_t endoff) int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, loff_t endoff, int *got, struct page **pinned_page) { - int _got, check_max, ret, err = 0; + int _got, ret, err = 0; -retry: - if (endoff > 0) - check_max_size(&ci->vfs_inode, endoff); - _got = 0; - check_max = 0; - ret = wait_event_interruptible(ci->i_cap_wq, - try_get_cap_refs(ci, need, want, endoff, - &_got, &check_max, &err)); - if (err) - ret = err; + ret = ceph_pool_perm_check(ci, need); if (ret < 0) return ret; - if (check_max) - goto retry; + while (true) { + if (endoff > 0) + check_max_size(&ci->vfs_inode, endoff); - if (ci->i_inline_version != CEPH_INLINE_NONE && - (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) && - i_size_read(&ci->vfs_inode) > 0) { - struct page *page = find_get_page(ci->vfs_inode.i_mapping, 0); - if (page) { - if (PageUptodate(page)) { - *pinned_page = page; - goto out; - } - page_cache_release(page); - } - /* - * drop cap refs first because getattr while holding - * caps refs can cause deadlock. 
- */ - ceph_put_cap_refs(ci, _got); + err = 0; _got = 0; + ret = try_get_cap_refs(ci, need, want, endoff, + false, &_got, &err); + if (ret) { + if (err == -EAGAIN) + continue; + if (err < 0) + return err; + } else { + ret = wait_event_interruptible(ci->i_cap_wq, + try_get_cap_refs(ci, need, want, endoff, + true, &_got, &err)); + if (err == -EAGAIN) + continue; + if (err < 0) + ret = err; + if (ret < 0) + return ret; + } - /* getattr request will bring inline data into page cache */ - ret = __ceph_do_getattr(&ci->vfs_inode, NULL, - CEPH_STAT_CAP_INLINE_DATA, true); - if (ret < 0) - return ret; - goto retry; + if (ci->i_inline_version != CEPH_INLINE_NONE && + (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) && + i_size_read(&ci->vfs_inode) > 0) { + struct page *page = + find_get_page(ci->vfs_inode.i_mapping, 0); + if (page) { + if (PageUptodate(page)) { + *pinned_page = page; + break; + } + page_cache_release(page); + } + /* + * drop cap refs first because getattr while + * holding * caps refs can cause deadlock. + */ + ceph_put_cap_refs(ci, _got); + _got = 0; + + /* + * getattr request will bring inline data into + * page cache + */ + ret = __ceph_do_getattr(&ci->vfs_inode, NULL, + CEPH_STAT_CAP_INLINE_DATA, + true); + if (ret < 0) + return ret; + continue; + } + break; } -out: + *got = _got; return 0; } @@ -2286,10 +2549,31 @@ out: void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps) { spin_lock(&ci->i_ceph_lock); - __take_cap_refs(ci, caps); + __take_cap_refs(ci, caps, false); spin_unlock(&ci->i_ceph_lock); } + +/* + * drop cap_snap that is not associated with any snapshot. + * we don't need to send FLUSHSNAP message for it. + */ +static int ceph_try_drop_cap_snap(struct ceph_cap_snap *capsnap) +{ + if (!capsnap->need_flush && + !capsnap->writing && !capsnap->dirty_pages) { + + dout("dropping cap_snap %p follows %llu\n", + capsnap, capsnap->follows); + ceph_put_snap_context(capsnap->context); + list_del(&capsnap->ci_item); + list_del(&capsnap->flushing_item); + ceph_put_cap_snap(capsnap); + return 1; + } + return 0; +} + /* * Release cap refs. 
* @@ -2303,7 +2587,6 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) { struct inode *inode = &ci->vfs_inode; int last = 0, put = 0, flushsnaps = 0, wake = 0; - struct ceph_cap_snap *capsnap; spin_lock(&ci->i_ceph_lock); if (had & CEPH_CAP_PIN) @@ -2325,17 +2608,24 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) if (had & CEPH_CAP_FILE_WR) if (--ci->i_wr_ref == 0) { last++; - if (!list_empty(&ci->i_cap_snaps)) { - capsnap = list_first_entry(&ci->i_cap_snaps, - struct ceph_cap_snap, - ci_item); - if (capsnap->writing) { - capsnap->writing = 0; - flushsnaps = - __ceph_finish_cap_snap(ci, - capsnap); - wake = 1; - } + if (__ceph_have_pending_cap_snap(ci)) { + struct ceph_cap_snap *capsnap = + list_last_entry(&ci->i_cap_snaps, + struct ceph_cap_snap, + ci_item); + capsnap->writing = 0; + if (ceph_try_drop_cap_snap(capsnap)) + put++; + else if (__ceph_finish_cap_snap(ci, capsnap)) + flushsnaps = 1; + wake = 1; + } + if (ci->i_wrbuffer_ref_head == 0 && + ci->i_dirty_caps == 0 && + ci->i_flushing_caps == 0) { + BUG_ON(!ci->i_head_snapc); + ceph_put_snap_context(ci->i_head_snapc); + ci->i_head_snapc = NULL; } /* see comment in __ceph_remove_cap() */ if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) @@ -2352,7 +2642,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) ceph_flush_snaps(ci); if (wake) wake_up_all(&ci->i_cap_wq); - if (put) + while (put-- > 0) iput(inode); } @@ -2380,7 +2670,9 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, if (ci->i_head_snapc == snapc) { ci->i_wrbuffer_ref_head -= nr; if (ci->i_wrbuffer_ref_head == 0 && - ci->i_dirty_caps == 0 && ci->i_flushing_caps == 0) { + ci->i_wr_ref == 0 && + ci->i_dirty_caps == 0 && + ci->i_flushing_caps == 0) { BUG_ON(!ci->i_head_snapc); ceph_put_snap_context(ci->i_head_snapc); ci->i_head_snapc = NULL; @@ -2401,25 +2693,15 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, capsnap->dirty_pages -= nr; if (capsnap->dirty_pages == 0) { complete_capsnap = 1; - if (capsnap->dirty == 0) - /* cap writeback completed before we created - * the cap_snap; no FLUSHSNAP is needed */ - drop_capsnap = 1; + drop_capsnap = ceph_try_drop_cap_snap(capsnap); } dout("put_wrbuffer_cap_refs on %p cap_snap %p " - " snap %lld %d/%d -> %d/%d %s%s%s\n", + " snap %lld %d/%d -> %d/%d %s%s\n", inode, capsnap, capsnap->context->seq, ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, ci->i_wrbuffer_ref, capsnap->dirty_pages, last ? " (wrbuffer last)" : "", - complete_capsnap ? " (complete capsnap)" : "", - drop_capsnap ? " (drop capsnap)" : ""); - if (drop_capsnap) { - ceph_put_snap_context(capsnap->context); - list_del(&capsnap->ci_item); - list_del(&capsnap->flushing_item); - ceph_put_cap_snap(capsnap); - } + complete_capsnap ? " (complete capsnap)" : ""); } spin_unlock(&ci->i_ceph_lock); @@ -2526,7 +2808,8 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc, * try to invalidate (once). (If there are dirty buffers, we * will invalidate _after_ writeback.) 
*/ - if (((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) && + if (!S_ISDIR(inode->i_mode) && /* don't invalidate readdir cache */ + ((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) && (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 && !ci->i_wrbuffer_ref) { if (try_nonblocking_invalidate(inode)) { @@ -2732,16 +3015,29 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid, { struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; + struct ceph_cap_flush *cf; + struct rb_node *n; + LIST_HEAD(to_remove); unsigned seq = le32_to_cpu(m->seq); int dirty = le32_to_cpu(m->dirty); int cleaned = 0; int drop = 0; - int i; - for (i = 0; i < CEPH_CAP_BITS; i++) - if ((dirty & (1 << i)) && - (u16)flush_tid == ci->i_cap_flush_tid[i]) - cleaned |= 1 << i; + n = rb_first(&ci->i_cap_flush_tree); + while (n) { + cf = rb_entry(n, struct ceph_cap_flush, i_node); + n = rb_next(&cf->i_node); + if (cf->tid == flush_tid) + cleaned = cf->caps; + if (cf->tid <= flush_tid) { + rb_erase(&cf->i_node, &ci->i_cap_flush_tree); + list_add_tail(&cf->list, &to_remove); + } else { + cleaned &= ~cf->caps; + if (!cleaned) + break; + } + } dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s," " flushing %s -> %s\n", @@ -2749,12 +3045,23 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid, ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps), ceph_cap_string(ci->i_flushing_caps & ~cleaned)); - if (ci->i_flushing_caps == (ci->i_flushing_caps & ~cleaned)) + if (list_empty(&to_remove) && !cleaned) goto out; ci->i_flushing_caps &= ~cleaned; spin_lock(&mdsc->cap_dirty_lock); + + if (!list_empty(&to_remove)) { + list_for_each_entry(cf, &to_remove, list) + rb_erase(&cf->g_node, &mdsc->cap_flush_tree); + + n = rb_first(&mdsc->cap_flush_tree); + cf = n ? 
rb_entry(n, struct ceph_cap_flush, g_node) : NULL; + if (!cf || cf->tid > flush_tid) + wake_up_all(&mdsc->cap_flushing_wq); + } + if (ci->i_flushing_caps == 0) { list_del_init(&ci->i_flushing_item); if (!list_empty(&session->s_cap_flushing)) @@ -2764,14 +3071,14 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid, struct ceph_inode_info, i_flushing_item)->vfs_inode); mdsc->num_cap_flushing--; - wake_up_all(&mdsc->cap_flushing_wq); dout(" inode %p now !flushing\n", inode); if (ci->i_dirty_caps == 0) { dout(" inode %p now clean\n", inode); BUG_ON(!list_empty(&ci->i_dirty_item)); drop = 1; - if (ci->i_wrbuffer_ref_head == 0) { + if (ci->i_wr_ref == 0 && + ci->i_wrbuffer_ref_head == 0) { BUG_ON(!ci->i_head_snapc); ceph_put_snap_context(ci->i_head_snapc); ci->i_head_snapc = NULL; @@ -2785,6 +3092,13 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid, out: spin_unlock(&ci->i_ceph_lock); + + while (!list_empty(&to_remove)) { + cf = list_first_entry(&to_remove, + struct ceph_cap_flush, list); + list_del(&cf->list); + ceph_free_cap_flush(cf); + } if (drop) iput(inode); } @@ -2800,6 +3114,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid, struct ceph_mds_session *session) { struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; u64 follows = le64_to_cpu(m->snap_follows); struct ceph_cap_snap *capsnap; int drop = 0; @@ -2823,6 +3138,7 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid, list_del(&capsnap->ci_item); list_del(&capsnap->flushing_item); ceph_put_cap_snap(capsnap); + wake_up_all(&mdsc->cap_flushing_wq); drop = 1; break; } else { @@ -2971,7 +3287,6 @@ retry: mutex_lock_nested(&session->s_mutex, SINGLE_DEPTH_NESTING); } - ceph_add_cap_releases(mdsc, tsession); new_cap = ceph_get_cap(mdsc, NULL); } else { WARN_ON(1); @@ -3167,16 +3482,20 @@ void ceph_handle_caps(struct ceph_mds_session *session, dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq, (unsigned)seq); - if (op == CEPH_CAP_OP_IMPORT) - ceph_add_cap_releases(mdsc, session); - if (!inode) { dout(" i don't have ino %llx\n", vino.ino); if (op == CEPH_CAP_OP_IMPORT) { + cap = ceph_get_cap(mdsc, NULL); + cap->cap_ino = vino.ino; + cap->queue_release = 1; + cap->cap_id = cap_id; + cap->mseq = mseq; + cap->seq = seq; spin_lock(&session->s_cap_lock); - __queue_cap_release(session, vino.ino, cap_id, - mseq, seq); + list_add_tail(&cap->session_caps, + &session->s_cap_releases); + session->s_num_cap_releases++; spin_unlock(&session->s_cap_lock); } goto flush_cap_releases; @@ -3252,11 +3571,10 @@ void ceph_handle_caps(struct ceph_mds_session *session, flush_cap_releases: /* - * send any full release message to try to move things + * send any cap release message to try to move things * along for the mds (who clearly thinks we still have this * cap). 
*/ - ceph_add_cap_releases(mdsc, session); ceph_send_cap_releases(mdsc, session); done: diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 4248307fea90..9314b4ea2375 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -38,7 +38,7 @@ int ceph_init_dentry(struct dentry *dentry) if (dentry->d_fsdata) return 0; - di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO); + di = kmem_cache_alloc(ceph_dentry_cachep, GFP_KERNEL | __GFP_ZERO); if (!di) return -ENOMEM; /* oh well */ @@ -107,6 +107,27 @@ static int fpos_cmp(loff_t l, loff_t r) } /* + * make note of the last dentry we read, so we can + * continue at the same lexicographical point, + * regardless of what dir changes take place on the + * server. + */ +static int note_last_dentry(struct ceph_file_info *fi, const char *name, + int len, unsigned next_offset) +{ + char *buf = kmalloc(len+1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + kfree(fi->last_name); + fi->last_name = buf; + memcpy(fi->last_name, name, len); + fi->last_name[len] = 0; + fi->next_offset = next_offset; + dout("note_last_dentry '%s'\n", fi->last_name); + return 0; +} + +/* * When possible, we try to satisfy a readdir by peeking at the * dcache. We make this work by carefully ordering dentries on * d_child when we initially get results back from the MDS, and @@ -123,123 +144,113 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx, struct ceph_file_info *fi = file->private_data; struct dentry *parent = file->f_path.dentry; struct inode *dir = d_inode(parent); - struct list_head *p; - struct dentry *dentry, *last; + struct dentry *dentry, *last = NULL; struct ceph_dentry_info *di; + unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry *); int err = 0; + loff_t ptr_pos = 0; + struct ceph_readdir_cache_control cache_ctl = {}; - /* claim ref on last dentry we returned */ - last = fi->dentry; - fi->dentry = NULL; - - dout("__dcache_readdir %p v%u at %llu (last %p)\n", - dir, shared_gen, ctx->pos, last); + dout("__dcache_readdir %p v%u at %llu\n", dir, shared_gen, ctx->pos); - spin_lock(&parent->d_lock); - - /* start at beginning? */ - if (ctx->pos == 2 || last == NULL || - fpos_cmp(ctx->pos, ceph_dentry(last)->offset) < 0) { - if (list_empty(&parent->d_subdirs)) - goto out_unlock; - p = parent->d_subdirs.prev; - dout(" initial p %p/%p\n", p->prev, p->next); - } else { - p = last->d_child.prev; + /* we can calculate cache index for the first dirfrag */ + if (ceph_frag_is_leftmost(fpos_frag(ctx->pos))) { + cache_ctl.index = fpos_off(ctx->pos) - 2; + BUG_ON(cache_ctl.index < 0); + ptr_pos = cache_ctl.index * sizeof(struct dentry *); } -more: - dentry = list_entry(p, struct dentry, d_child); - di = ceph_dentry(dentry); - while (1) { - dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next, - d_unhashed(dentry) ? 
"!hashed" : "hashed", - parent->d_subdirs.prev, parent->d_subdirs.next); - if (p == &parent->d_subdirs) { + while (true) { + pgoff_t pgoff; + bool emit_dentry; + + if (ptr_pos >= i_size_read(dir)) { fi->flags |= CEPH_F_ATEND; - goto out_unlock; + err = 0; + break; + } + + err = -EAGAIN; + pgoff = ptr_pos >> PAGE_CACHE_SHIFT; + if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) { + ceph_readdir_cache_release(&cache_ctl); + cache_ctl.page = find_lock_page(&dir->i_data, pgoff); + if (!cache_ctl.page) { + dout(" page %lu not found\n", pgoff); + break; + } + /* reading/filling the cache are serialized by + * i_mutex, no need to use page lock */ + unlock_page(cache_ctl.page); + cache_ctl.dentries = kmap(cache_ctl.page); } - spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); + + rcu_read_lock(); + spin_lock(&parent->d_lock); + /* check i_size again here, because empty directory can be + * marked as complete while not holding the i_mutex. */ + if (ceph_dir_is_complete_ordered(dir) && + ptr_pos < i_size_read(dir)) + dentry = cache_ctl.dentries[cache_ctl.index % nsize]; + else + dentry = NULL; + spin_unlock(&parent->d_lock); + if (dentry && !lockref_get_not_dead(&dentry->d_lockref)) + dentry = NULL; + rcu_read_unlock(); + if (!dentry) + break; + + emit_dentry = false; + di = ceph_dentry(dentry); + spin_lock(&dentry->d_lock); if (di->lease_shared_gen == shared_gen && - !d_unhashed(dentry) && d_really_is_positive(dentry) && + d_really_is_positive(dentry) && ceph_snap(d_inode(dentry)) != CEPH_SNAPDIR && ceph_ino(d_inode(dentry)) != CEPH_INO_CEPH && - fpos_cmp(ctx->pos, di->offset) <= 0) - break; - dout(" skipping %p %pd at %llu (%llu)%s%s\n", dentry, - dentry, di->offset, - ctx->pos, d_unhashed(dentry) ? " unhashed" : "", - !d_inode(dentry) ? " null" : ""); + fpos_cmp(ctx->pos, di->offset) <= 0) { + emit_dentry = true; + } spin_unlock(&dentry->d_lock); - p = p->prev; - dentry = list_entry(p, struct dentry, d_child); - di = ceph_dentry(dentry); - } - - dget_dlock(dentry); - spin_unlock(&dentry->d_lock); - spin_unlock(&parent->d_lock); - /* make sure a dentry wasn't dropped while we didn't have parent lock */ - if (!ceph_dir_is_complete_ordered(dir)) { - dout(" lost dir complete on %p; falling back to mds\n", dir); - dput(dentry); - err = -EAGAIN; - goto out; - } + if (emit_dentry) { + dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos, + dentry, dentry, d_inode(dentry)); + ctx->pos = di->offset; + if (!dir_emit(ctx, dentry->d_name.name, + dentry->d_name.len, + ceph_translate_ino(dentry->d_sb, + d_inode(dentry)->i_ino), + d_inode(dentry)->i_mode >> 12)) { + dput(dentry); + err = 0; + break; + } + ctx->pos++; - dout(" %llu (%llu) dentry %p %pd %p\n", di->offset, ctx->pos, - dentry, dentry, d_inode(dentry)); - if (!dir_emit(ctx, dentry->d_name.name, - dentry->d_name.len, - ceph_translate_ino(dentry->d_sb, d_inode(dentry)->i_ino), - d_inode(dentry)->i_mode >> 12)) { - if (last) { - /* remember our position */ - fi->dentry = last; - fi->next_offset = fpos_off(di->offset); + if (last) + dput(last); + last = dentry; + } else { + dput(dentry); } - dput(dentry); - return 0; - } - - ctx->pos = di->offset + 1; - if (last) - dput(last); - last = dentry; - - spin_lock(&parent->d_lock); - p = p->prev; /* advance to next dentry */ - goto more; - -out_unlock: - spin_unlock(&parent->d_lock); -out: - if (last) + cache_ctl.index++; + ptr_pos += sizeof(struct dentry *); + } + ceph_readdir_cache_release(&cache_ctl); + if (last) { + int ret; + di = ceph_dentry(last); + ret = note_last_dentry(fi, 
last->d_name.name, last->d_name.len, + fpos_off(di->offset) + 1); + if (ret < 0) + err = ret; dput(last); + } return err; } -/* - * make note of the last dentry we read, so we can - * continue at the same lexicographical point, - * regardless of what dir changes take place on the - * server. - */ -static int note_last_dentry(struct ceph_file_info *fi, const char *name, - int len) -{ - kfree(fi->last_name); - fi->last_name = kmalloc(len+1, GFP_NOFS); - if (!fi->last_name) - return -ENOMEM; - memcpy(fi->last_name, name, len); - fi->last_name[len] = 0; - dout("note_last_dentry '%s'\n", fi->last_name); - return 0; -} - static int ceph_readdir(struct file *file, struct dir_context *ctx) { struct ceph_file_info *fi = file->private_data; @@ -280,8 +291,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx) /* can we use the dcache? */ spin_lock(&ci->i_ceph_lock); - if ((ctx->pos == 2 || fi->dentry) && - ceph_test_mount_opt(fsc, DCACHE) && + if (ceph_test_mount_opt(fsc, DCACHE) && !ceph_test_mount_opt(fsc, NOASYNCREADDIR) && ceph_snap(inode) != CEPH_SNAPDIR && __ceph_dir_is_complete_ordered(ci) && @@ -296,24 +306,8 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx) } else { spin_unlock(&ci->i_ceph_lock); } - if (fi->dentry) { - err = note_last_dentry(fi, fi->dentry->d_name.name, - fi->dentry->d_name.len); - if (err) - return err; - dput(fi->dentry); - fi->dentry = NULL; - } /* proceed with a normal readdir */ - - if (ctx->pos == 2) { - /* note dir version at start of readdir so we can tell - * if any dentries get dropped */ - fi->dir_release_count = atomic_read(&ci->i_release_count); - fi->dir_ordered_count = ci->i_ordered_count; - } - more: /* do we have the correct frag content buffered? */ if (fi->frag != frag || fi->last_readdir == NULL) { @@ -342,12 +336,15 @@ more: req->r_direct_hash = ceph_frag_value(frag); req->r_direct_is_hash = true; if (fi->last_name) { - req->r_path2 = kstrdup(fi->last_name, GFP_NOFS); + req->r_path2 = kstrdup(fi->last_name, GFP_KERNEL); if (!req->r_path2) { ceph_mdsc_put_request(req); return -ENOMEM; } } + req->r_dir_release_cnt = fi->dir_release_count; + req->r_dir_ordered_cnt = fi->dir_ordered_count; + req->r_readdir_cache_idx = fi->readdir_cache_idx; req->r_readdir_offset = fi->next_offset; req->r_args.readdir.frag = cpu_to_le32(frag); @@ -364,26 +361,38 @@ more: (int)req->r_reply_info.dir_end, (int)req->r_reply_info.dir_complete); - if (!req->r_did_prepopulate) { - dout("readdir !did_prepopulate"); - /* preclude from marking dir complete */ - fi->dir_release_count--; - } /* note next offset and last dentry name */ rinfo = &req->r_reply_info; if (le32_to_cpu(rinfo->dir_dir->frag) != frag) { frag = le32_to_cpu(rinfo->dir_dir->frag); - if (ceph_frag_is_leftmost(frag)) - fi->next_offset = 2; - else - fi->next_offset = 0; - off = fi->next_offset; + off = req->r_readdir_offset; + fi->next_offset = off; } + fi->frag = frag; fi->offset = fi->next_offset; fi->last_readdir = req; + if (req->r_did_prepopulate) { + fi->readdir_cache_idx = req->r_readdir_cache_idx; + if (fi->readdir_cache_idx < 0) { + /* preclude from marking dir ordered */ + fi->dir_ordered_count = 0; + } else if (ceph_frag_is_leftmost(frag) && off == 2) { + /* note dir version at start of readdir so + * we can tell if any dentries get dropped */ + fi->dir_release_count = req->r_dir_release_cnt; + fi->dir_ordered_count = req->r_dir_ordered_cnt; + } + } else { + dout("readdir !did_prepopulate"); + /* disable readdir cache */ + fi->readdir_cache_idx = -1; + /* preclude from 
marking dir complete */ + fi->dir_release_count = 0; + } + if (req->r_reply_info.dir_end) { kfree(fi->last_name); fi->last_name = NULL; @@ -394,10 +403,10 @@ more: } else { err = note_last_dentry(fi, rinfo->dir_dname[rinfo->dir_nr-1], - rinfo->dir_dname_len[rinfo->dir_nr-1]); + rinfo->dir_dname_len[rinfo->dir_nr-1], + fi->next_offset + rinfo->dir_nr); if (err) return err; - fi->next_offset += rinfo->dir_nr; } } @@ -453,16 +462,22 @@ more: * were released during the whole readdir, and we should have * the complete dir contents in our cache. */ - spin_lock(&ci->i_ceph_lock); - if (atomic_read(&ci->i_release_count) == fi->dir_release_count) { - if (ci->i_ordered_count == fi->dir_ordered_count) + if (atomic64_read(&ci->i_release_count) == fi->dir_release_count) { + spin_lock(&ci->i_ceph_lock); + if (fi->dir_ordered_count == atomic64_read(&ci->i_ordered_count)) { dout(" marking %p complete and ordered\n", inode); - else + /* use i_size to track number of entries in + * readdir cache */ + BUG_ON(fi->readdir_cache_idx < 0); + i_size_write(inode, fi->readdir_cache_idx * + sizeof(struct dentry*)); + } else { dout(" marking %p complete\n", inode); + } __ceph_dir_set_complete(ci, fi->dir_release_count, fi->dir_ordered_count); + spin_unlock(&ci->i_ceph_lock); } - spin_unlock(&ci->i_ceph_lock); dout("readdir %p file %p done.\n", inode, file); return 0; @@ -476,14 +491,12 @@ static void reset_readdir(struct ceph_file_info *fi, unsigned frag) } kfree(fi->last_name); fi->last_name = NULL; + fi->dir_release_count = 0; + fi->readdir_cache_idx = -1; if (ceph_frag_is_leftmost(frag)) fi->next_offset = 2; /* compensate for . and .. */ else fi->next_offset = 0; - if (fi->dentry) { - dput(fi->dentry); - fi->dentry = NULL; - } fi->flags &= ~CEPH_F_ATEND; } @@ -497,13 +510,12 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence) mutex_lock(&inode->i_mutex); retval = -EINVAL; switch (whence) { - case SEEK_END: - offset += inode->i_size + 2; /* FIXME */ - break; case SEEK_CUR: offset += file->f_pos; case SEEK_SET: break; + case SEEK_END: + retval = -EOPNOTSUPP; default: goto out; } @@ -516,20 +528,18 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence) } retval = offset; - /* - * discard buffered readdir content on seekdir(0), or - * seek to new frag, or seek prior to current chunk. - */ if (offset == 0 || fpos_frag(offset) != fi->frag || fpos_off(offset) < fi->offset) { + /* discard buffered readdir content on seekdir(0), or + * seek to new frag, or seek prior to current chunk */ dout("dir_llseek dropping %p content\n", file); reset_readdir(fi, fpos_frag(offset)); + } else if (fpos_cmp(offset, old_offset) > 0) { + /* reset dir_release_count if we did a forward seek */ + fi->dir_release_count = 0; + fi->readdir_cache_idx = -1; } - - /* bump dir_release_count if we did a forward seek */ - if (fpos_cmp(offset, old_offset) > 0) - fi->dir_release_count--; } out: mutex_unlock(&inode->i_mutex); @@ -764,7 +774,7 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry, err = PTR_ERR(req); goto out; } - req->r_path2 = kstrdup(dest, GFP_NOFS); + req->r_path2 = kstrdup(dest, GFP_KERNEL); if (!req->r_path2) { err = -ENOMEM; ceph_mdsc_put_request(req); @@ -985,16 +995,15 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry, * to do it here. 
*/ + /* d_move screws up sibling dentries' offsets */ + ceph_dir_clear_complete(old_dir); + ceph_dir_clear_complete(new_dir); + d_move(old_dentry, new_dentry); /* ensure target dentry is invalidated, despite rehashing bug in vfs_rename_dir */ ceph_invalidate_dentry_lease(new_dentry); - - /* d_move screws up sibling dentries' offsets */ - ceph_dir_clear_complete(old_dir); - ceph_dir_clear_complete(new_dir); - } ceph_mdsc_put_request(req); return err; @@ -1189,7 +1198,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size, return -EISDIR; if (!cf->dir_info) { - cf->dir_info = kmalloc(bufsize, GFP_NOFS); + cf->dir_info = kmalloc(bufsize, GFP_KERNEL); if (!cf->dir_info) return -ENOMEM; cf->dir_info_len = @@ -1224,66 +1233,6 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size, } /* - * an fsync() on a dir will wait for any uncommitted directory - * operations to commit. - */ -static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end, - int datasync) -{ - struct inode *inode = file_inode(file); - struct ceph_inode_info *ci = ceph_inode(inode); - struct list_head *head = &ci->i_unsafe_dirops; - struct ceph_mds_request *req; - u64 last_tid; - int ret = 0; - - dout("dir_fsync %p\n", inode); - ret = filemap_write_and_wait_range(inode->i_mapping, start, end); - if (ret) - return ret; - mutex_lock(&inode->i_mutex); - - spin_lock(&ci->i_unsafe_lock); - if (list_empty(head)) - goto out; - - req = list_entry(head->prev, - struct ceph_mds_request, r_unsafe_dir_item); - last_tid = req->r_tid; - - do { - ceph_mdsc_get_request(req); - spin_unlock(&ci->i_unsafe_lock); - - dout("dir_fsync %p wait on tid %llu (until %llu)\n", - inode, req->r_tid, last_tid); - if (req->r_timeout) { - unsigned long time_left = wait_for_completion_timeout( - &req->r_safe_completion, - req->r_timeout); - if (time_left > 0) - ret = 0; - else - ret = -EIO; /* timed out */ - } else { - wait_for_completion(&req->r_safe_completion); - } - ceph_mdsc_put_request(req); - - spin_lock(&ci->i_unsafe_lock); - if (ret || list_empty(head)) - break; - req = list_entry(head->next, - struct ceph_mds_request, r_unsafe_dir_item); - } while (req->r_tid < last_tid); -out: - spin_unlock(&ci->i_unsafe_lock); - mutex_unlock(&inode->i_mutex); - - return ret; -} - -/* * We maintain a private dentry LRU. * * FIXME: this needs to be changed to a per-mds lru to be useful. 
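[Review note] The fs/ceph/dir.c and fs/ceph/inode.c hunks in this series replace the old per-file dentry pointer with a readdir cache: an array of struct dentry pointers stored page by page in the directory inode's page cache, indexed by readdir position, with the number of valid slots recorded via i_size once the directory is marked complete and ordered. The sketch below is a minimal userspace model of the index arithmetic used by fill_readdir_cache() later in this diff; the 4 KiB page size, the 8-byte stand-in for sizeof(struct dentry *), and the DENTRY_PTR_SIZE name are assumptions for illustration only, not definitions taken from the kernel.

/*
 * Minimal userspace model of the readdir-cache index arithmetic
 * (see fill_readdir_cache() in the fs/ceph/inode.c hunks below).
 * Assumptions for illustration only: 4 KiB pages, 8-byte pointers.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SIZE 4096u               /* assumed page size */
#define DENTRY_PTR_SIZE sizeof(uint64_t)    /* stand-in for sizeof(struct dentry *) */

int main(void)
{
	unsigned nsize = PAGE_CACHE_SIZE / DENTRY_PTR_SIZE; /* dentry slots per page */
	long long cache_idx;

	/* For a few sample cache indices, show which page of the directory's
	 * page cache holds the slot, and the slot offset within that page. */
	for (cache_idx = 0; cache_idx < 3 * nsize; cache_idx += 300) {
		unsigned long pgoff = cache_idx / nsize; /* page index in i_data */
		unsigned slot = cache_idx % nsize;       /* slot within that page */
		printf("cache idx %lld -> page %lu, slot %u\n",
		       cache_idx, pgoff, slot);
	}

	/* When readdir finishes and the dir is marked complete+ordered,
	 * i_size records the cached entry count:
	 * i_size = readdir_cache_idx * sizeof(struct dentry *). */
	cache_idx = 1000;
	printf("i_size for %lld cached entries = %lld bytes\n",
	       cache_idx, cache_idx * (long long)DENTRY_PTR_SIZE);
	return 0;
}

Compile with any C compiler and run: the output shows how a growing readdir_cache_idx walks across page-sized chunks of dentry slots. The hunks above use readdir_cache_idx = -1 as the "cache disabled" sentinel, and i_size_write(inode, fi->readdir_cache_idx * sizeof(struct dentry*)) is what records how many slots are valid.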
@@ -1353,7 +1302,7 @@ const struct file_operations ceph_dir_fops = { .open = ceph_open, .release = ceph_release, .unlocked_ioctl = ceph_ioctl, - .fsync = ceph_dir_fsync, + .fsync = ceph_fsync, }; const struct file_operations ceph_snapdir_fops = { diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 3b6b522b4b31..faf92095e105 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -89,13 +89,14 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode) case S_IFDIR: dout("init_file %p %p 0%o (regular)\n", inode, file, inode->i_mode); - cf = kmem_cache_alloc(ceph_file_cachep, GFP_NOFS | __GFP_ZERO); + cf = kmem_cache_alloc(ceph_file_cachep, GFP_KERNEL | __GFP_ZERO); if (cf == NULL) { ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */ return -ENOMEM; } cf->fmode = fmode; cf->next_offset = 2; + cf->readdir_cache_idx = -1; file->private_data = cf; BUG_ON(inode->i_fop->release != ceph_release); break; @@ -324,7 +325,6 @@ int ceph_release(struct inode *inode, struct file *file) ceph_mdsc_put_request(cf->last_readdir); kfree(cf->last_name); kfree(cf->dir_info); - dput(cf->dentry); kmem_cache_free(ceph_file_cachep, cf); /* wake up anyone waiting for caps on this inode */ @@ -483,7 +483,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i, } } else { num_pages = calc_pages_for(off, len); - pages = ceph_alloc_page_vector(num_pages, GFP_NOFS); + pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); if (IS_ERR(pages)) return PTR_ERR(pages); ret = striped_read(inode, off, len, pages, @@ -557,13 +557,13 @@ static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe) * objects, rollback on failure, etc.) */ static ssize_t -ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos) +ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, + struct ceph_snap_context *snapc) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_fs_client *fsc = ceph_inode_to_client(inode); - struct ceph_snap_context *snapc; struct ceph_vino vino; struct ceph_osd_request *req; struct page **pages; @@ -600,7 +600,6 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos) size_t start; ssize_t n; - snapc = ci->i_snap_realm->cached_context; vino = ceph_vino(inode); req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, pos, &len, 0, @@ -614,7 +613,7 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos) break; } - osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC); + osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); n = iov_iter_get_pages_alloc(from, &pages, len, &start); if (unlikely(n < 0)) { @@ -674,13 +673,13 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos) * objects, rollback on failure, etc.) 
*/ static ssize_t -ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos) +ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, + struct ceph_snap_context *snapc) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_fs_client *fsc = ceph_inode_to_client(inode); - struct ceph_snap_context *snapc; struct ceph_vino vino; struct ceph_osd_request *req; struct page **pages; @@ -717,7 +716,6 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos) size_t left; int n; - snapc = ci->i_snap_realm->cached_context; vino = ceph_vino(inode); req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, pos, &len, 0, 1, @@ -736,7 +734,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos) */ num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - pages = ceph_alloc_page_vector(num_pages, GFP_NOFS); + pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); if (IS_ERR(pages)) { ret = PTR_ERR(pages); goto out; @@ -860,7 +858,7 @@ again: struct page *page = NULL; loff_t i_size; if (retry_op == READ_INLINE) { - page = __page_cache_alloc(GFP_NOFS); + page = __page_cache_alloc(GFP_KERNEL); if (!page) return -ENOMEM; } @@ -941,6 +939,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->client->osdc; + struct ceph_cap_flush *prealloc_cf; ssize_t count, written = 0; int err, want, got; loff_t pos; @@ -948,6 +947,10 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) if (ceph_snap(inode) != CEPH_NOSNAP) return -EROFS; + prealloc_cf = ceph_alloc_cap_flush(); + if (!prealloc_cf) + return -ENOMEM; + mutex_lock(&inode->i_mutex); /* We can write back this queue in page reclaim */ @@ -996,14 +999,30 @@ retry_snap: if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) { + struct ceph_snap_context *snapc; struct iov_iter data; mutex_unlock(&inode->i_mutex); + + spin_lock(&ci->i_ceph_lock); + if (__ceph_have_pending_cap_snap(ci)) { + struct ceph_cap_snap *capsnap = + list_last_entry(&ci->i_cap_snaps, + struct ceph_cap_snap, + ci_item); + snapc = ceph_get_snap_context(capsnap->context); + } else { + BUG_ON(!ci->i_head_snapc); + snapc = ceph_get_snap_context(ci->i_head_snapc); + } + spin_unlock(&ci->i_ceph_lock); + /* we might need to revert back to that point */ data = *from; if (iocb->ki_flags & IOCB_DIRECT) - written = ceph_sync_direct_write(iocb, &data, pos); + written = ceph_sync_direct_write(iocb, &data, pos, + snapc); else - written = ceph_sync_write(iocb, &data, pos); + written = ceph_sync_write(iocb, &data, pos, snapc); if (written == -EOLDSNAPC) { dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n", @@ -1014,6 +1033,7 @@ retry_snap: } if (written > 0) iov_iter_advance(from, written); + ceph_put_snap_context(snapc); } else { loff_t old_size = inode->i_size; /* @@ -1035,7 +1055,8 @@ retry_snap: int dirty; spin_lock(&ci->i_ceph_lock); ci->i_inline_version = CEPH_INLINE_NONE; - dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); + dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, + &prealloc_cf); spin_unlock(&ci->i_ceph_lock); if (dirty) __mark_inode_dirty(inode, dirty); @@ -1059,6 +1080,7 @@ retry_snap: out: mutex_unlock(&inode->i_mutex); out_unlocked: + ceph_free_cap_flush(prealloc_cf); 
current->backing_dev_info = NULL; return written ? written : err; } @@ -1255,6 +1277,7 @@ static long ceph_fallocate(struct file *file, int mode, struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->client->osdc; + struct ceph_cap_flush *prealloc_cf; int want, got = 0; int dirty; int ret = 0; @@ -1267,6 +1290,10 @@ static long ceph_fallocate(struct file *file, int mode, if (!S_ISREG(inode->i_mode)) return -EOPNOTSUPP; + prealloc_cf = ceph_alloc_cap_flush(); + if (!prealloc_cf) + return -ENOMEM; + mutex_lock(&inode->i_mutex); if (ceph_snap(inode) != CEPH_NOSNAP) { @@ -1313,7 +1340,8 @@ static long ceph_fallocate(struct file *file, int mode, if (!ret) { spin_lock(&ci->i_ceph_lock); ci->i_inline_version = CEPH_INLINE_NONE; - dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); + dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, + &prealloc_cf); spin_unlock(&ci->i_ceph_lock); if (dirty) __mark_inode_dirty(inode, dirty); @@ -1322,6 +1350,7 @@ static long ceph_fallocate(struct file *file, int mode, ceph_put_cap_refs(ci, got); unlock: mutex_unlock(&inode->i_mutex); + ceph_free_cap_flush(prealloc_cf); return ret; } diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 571acd88606c..96d2bd829902 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -389,9 +389,10 @@ struct inode *ceph_alloc_inode(struct super_block *sb) ci->i_inline_version = 0; ci->i_time_warp_seq = 0; ci->i_ceph_flags = 0; - ci->i_ordered_count = 0; - atomic_set(&ci->i_release_count, 1); - atomic_set(&ci->i_complete_count, 0); + atomic64_set(&ci->i_ordered_count, 1); + atomic64_set(&ci->i_release_count, 1); + atomic64_set(&ci->i_complete_seq[0], 0); + atomic64_set(&ci->i_complete_seq[1], 0); ci->i_symlink = NULL; memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout)); @@ -415,9 +416,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb) ci->i_flushing_caps = 0; INIT_LIST_HEAD(&ci->i_dirty_item); INIT_LIST_HEAD(&ci->i_flushing_item); - ci->i_cap_flush_seq = 0; - ci->i_cap_flush_last_tid = 0; - memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid)); + ci->i_prealloc_cap_flush = NULL; + ci->i_cap_flush_tree = RB_ROOT; init_waitqueue_head(&ci->i_cap_wq); ci->i_hold_caps_min = 0; ci->i_hold_caps_max = 0; @@ -752,7 +752,10 @@ static int fill_inode(struct inode *inode, struct page *locked_page, if (new_version || (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) { + if (ci->i_layout.fl_pg_pool != info->layout.fl_pg_pool) + ci->i_ceph_flags &= ~CEPH_I_POOL_PERM; ci->i_layout = info->layout; + queue_trunc = ceph_fill_file_size(inode, issued, le32_to_cpu(info->truncate_seq), le64_to_cpu(info->truncate_size), @@ -858,9 +861,10 @@ static int fill_inode(struct inode *inode, struct page *locked_page, (issued & CEPH_CAP_FILE_EXCL) == 0 && !__ceph_dir_is_complete(ci)) { dout(" marking %p complete (empty)\n", inode); + i_size_write(inode, 0); __ceph_dir_set_complete(ci, - atomic_read(&ci->i_release_count), - ci->i_ordered_count); + atomic64_read(&ci->i_release_count), + atomic64_read(&ci->i_ordered_count)); } wake = true; @@ -1212,6 +1216,10 @@ retry_lookup: dout("fill_trace doing d_move %p -> %p\n", req->r_old_dentry, dn); + /* d_move screws up sibling dentries' offsets */ + ceph_dir_clear_ordered(dir); + ceph_dir_clear_ordered(olddir); + d_move(req->r_old_dentry, dn); dout(" src %p '%pd' dst %p '%pd'\n", req->r_old_dentry, @@ -1222,10 +1230,6 @@ retry_lookup: rehashing bug in vfs_rename_dir */ ceph_invalidate_dentry_lease(dn); - /* d_move screws up sibling 
dentries' offsets */ - ceph_dir_clear_ordered(dir); - ceph_dir_clear_ordered(olddir); - dout("dn %p gets new offset %lld\n", req->r_old_dentry, ceph_dentry(req->r_old_dentry)->offset); @@ -1333,6 +1337,49 @@ static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req, return err; } +void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl) +{ + if (ctl->page) { + kunmap(ctl->page); + page_cache_release(ctl->page); + ctl->page = NULL; + } +} + +static int fill_readdir_cache(struct inode *dir, struct dentry *dn, + struct ceph_readdir_cache_control *ctl, + struct ceph_mds_request *req) +{ + struct ceph_inode_info *ci = ceph_inode(dir); + unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry*); + unsigned idx = ctl->index % nsize; + pgoff_t pgoff = ctl->index / nsize; + + if (!ctl->page || pgoff != page_index(ctl->page)) { + ceph_readdir_cache_release(ctl); + ctl->page = grab_cache_page(&dir->i_data, pgoff); + if (!ctl->page) { + ctl->index = -1; + return -ENOMEM; + } + /* reading/filling the cache are serialized by + * i_mutex, no need to use page lock */ + unlock_page(ctl->page); + ctl->dentries = kmap(ctl->page); + } + + if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) && + req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) { + dout("readdir cache dn %p idx %d\n", dn, ctl->index); + ctl->dentries[idx] = dn; + ctl->index++; + } else { + dout("disable readdir cache\n"); + ctl->index = -1; + } + return 0; +} + int ceph_readdir_prepopulate(struct ceph_mds_request *req, struct ceph_mds_session *session) { @@ -1345,8 +1392,11 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, struct inode *snapdir = NULL; struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; struct ceph_dentry_info *di; - u64 r_readdir_offset = req->r_readdir_offset; u32 frag = le32_to_cpu(rhead->args.readdir.frag); + struct ceph_readdir_cache_control cache_ctl = {}; + + if (req->r_aborted) + return readdir_prepopulate_inodes_only(req, session); if (rinfo->dir_dir && le32_to_cpu(rinfo->dir_dir->frag) != frag) { @@ -1354,14 +1404,11 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, frag, le32_to_cpu(rinfo->dir_dir->frag)); frag = le32_to_cpu(rinfo->dir_dir->frag); if (ceph_frag_is_leftmost(frag)) - r_readdir_offset = 2; + req->r_readdir_offset = 2; else - r_readdir_offset = 0; + req->r_readdir_offset = 0; } - if (req->r_aborted) - return readdir_prepopulate_inodes_only(req, session); - if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) { snapdir = ceph_get_snapdir(d_inode(parent)); parent = d_find_alias(snapdir); @@ -1374,6 +1421,17 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req, ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir); } + if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2) { + /* note dir version at start of readdir so we can tell + * if any dentries get dropped */ + struct ceph_inode_info *ci = ceph_inode(d_inode(parent)); + req->r_dir_release_cnt = atomic64_read(&ci->i_release_count); + req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count); + req->r_readdir_cache_idx = 0; + } + + cache_ctl.index = req->r_readdir_cache_idx; + /* FIXME: release caps/leases if error occurs */ for (i = 0; i < rinfo->dir_nr; i++) { struct ceph_vino vino; @@ -1413,13 +1471,6 @@ retry_lookup: d_delete(dn); dput(dn); goto retry_lookup; - } else { - /* reorder parent's d_subdirs */ - spin_lock(&parent->d_lock); - spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED); - list_move(&dn->d_child, &parent->d_subdirs); 
- spin_unlock(&dn->d_lock); - spin_unlock(&parent->d_lock); } /* inode */ @@ -1436,13 +1487,15 @@ retry_lookup: } } - if (fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session, - req->r_request_started, -1, - &req->r_caps_reservation) < 0) { + ret = fill_inode(in, NULL, &rinfo->dir_in[i], NULL, session, + req->r_request_started, -1, + &req->r_caps_reservation); + if (ret < 0) { pr_err("fill_inode badness on %p\n", in); if (d_really_is_negative(dn)) iput(in); d_drop(dn); + err = ret; goto next_item; } @@ -1458,19 +1511,28 @@ retry_lookup: } di = dn->d_fsdata; - di->offset = ceph_make_fpos(frag, i + r_readdir_offset); + di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset); update_dentry_lease(dn, rinfo->dir_dlease[i], req->r_session, req->r_request_started); + + if (err == 0 && cache_ctl.index >= 0) { + ret = fill_readdir_cache(d_inode(parent), dn, + &cache_ctl, req); + if (ret < 0) + err = ret; + } next_item: if (dn) dput(dn); } - if (err == 0) - req->r_did_prepopulate = true; - out: + if (err == 0) { + req->r_did_prepopulate = true; + req->r_readdir_cache_idx = cache_ctl.index; + } + ceph_readdir_cache_release(&cache_ctl); if (snapdir) { iput(snapdir); dput(parent); @@ -1712,11 +1774,13 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) const unsigned int ia_valid = attr->ia_valid; struct ceph_mds_request *req; struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc; + struct ceph_cap_flush *prealloc_cf; int issued; int release = 0, dirtied = 0; int mask = 0; int err = 0; int inode_dirty_flags = 0; + bool lock_snap_rwsem = false; if (ceph_snap(inode) != CEPH_NOSNAP) return -EROFS; @@ -1725,13 +1789,31 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) if (err != 0) return err; + prealloc_cf = ceph_alloc_cap_flush(); + if (!prealloc_cf) + return -ENOMEM; + req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR, USE_AUTH_MDS); - if (IS_ERR(req)) + if (IS_ERR(req)) { + ceph_free_cap_flush(prealloc_cf); return PTR_ERR(req); + } spin_lock(&ci->i_ceph_lock); issued = __ceph_caps_issued(ci, NULL); + + if (!ci->i_head_snapc && + (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) { + lock_snap_rwsem = true; + if (!down_read_trylock(&mdsc->snap_rwsem)) { + spin_unlock(&ci->i_ceph_lock); + down_read(&mdsc->snap_rwsem); + spin_lock(&ci->i_ceph_lock); + issued = __ceph_caps_issued(ci, NULL); + } + } + dout("setattr %p issued %s\n", inode, ceph_cap_string(issued)); if (ia_valid & ATTR_UID) { @@ -1874,12 +1956,15 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) dout("setattr %p ATTR_FILE ... 
hrm!\n", inode); if (dirtied) { - inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied); + inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied, + &prealloc_cf); inode->i_ctime = CURRENT_TIME; } release &= issued; spin_unlock(&ci->i_ceph_lock); + if (lock_snap_rwsem) + up_read(&mdsc->snap_rwsem); if (inode_dirty_flags) __mark_inode_dirty(inode, inode_dirty_flags); @@ -1904,9 +1989,11 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) ceph_mdsc_put_request(req); if (mask & CEPH_SETATTR_SIZE) __ceph_do_pending_vmtruncate(inode); + ceph_free_cap_flush(prealloc_cf); return err; out_put: ceph_mdsc_put_request(req); + ceph_free_cap_flush(prealloc_cf); return err; } diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 84f37f34f9aa..6aa07af67603 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -8,6 +8,7 @@ #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/utsname.h> +#include <linux/ratelimit.h> #include "super.h" #include "mds_client.h" @@ -458,7 +459,6 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, s->s_cap_reconnect = 0; s->s_cap_iterator = NULL; INIT_LIST_HEAD(&s->s_cap_releases); - INIT_LIST_HEAD(&s->s_cap_releases_done); INIT_LIST_HEAD(&s->s_cap_flushing); INIT_LIST_HEAD(&s->s_cap_snaps_flushing); @@ -629,6 +629,9 @@ static void __register_request(struct ceph_mds_client *mdsc, req->r_uid = current_fsuid(); req->r_gid = current_fsgid(); + if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK) + mdsc->oldest_tid = req->r_tid; + if (dir) { struct ceph_inode_info *ci = ceph_inode(dir); @@ -644,6 +647,21 @@ static void __unregister_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req) { dout("__unregister_request %p tid %lld\n", req, req->r_tid); + + if (req->r_tid == mdsc->oldest_tid) { + struct rb_node *p = rb_next(&req->r_node); + mdsc->oldest_tid = 0; + while (p) { + struct ceph_mds_request *next_req = + rb_entry(p, struct ceph_mds_request, r_node); + if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) { + mdsc->oldest_tid = next_req->r_tid; + break; + } + p = rb_next(p); + } + } + rb_erase(&req->r_node, &mdsc->request_tree); RB_CLEAR_NODE(&req->r_node); @@ -998,27 +1016,25 @@ void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc, * session caps */ -/* - * Free preallocated cap messages assigned to this session - */ -static void cleanup_cap_releases(struct ceph_mds_session *session) +/* caller holds s_cap_lock, we drop it */ +static void cleanup_cap_releases(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session) + __releases(session->s_cap_lock) { - struct ceph_msg *msg; + LIST_HEAD(tmp_list); + list_splice_init(&session->s_cap_releases, &tmp_list); + session->s_num_cap_releases = 0; + spin_unlock(&session->s_cap_lock); - spin_lock(&session->s_cap_lock); - while (!list_empty(&session->s_cap_releases)) { - msg = list_first_entry(&session->s_cap_releases, - struct ceph_msg, list_head); - list_del_init(&msg->list_head); - ceph_msg_put(msg); - } - while (!list_empty(&session->s_cap_releases_done)) { - msg = list_first_entry(&session->s_cap_releases_done, - struct ceph_msg, list_head); - list_del_init(&msg->list_head); - ceph_msg_put(msg); + dout("cleanup_cap_releases mds%d\n", session->s_mds); + while (!list_empty(&tmp_list)) { + struct ceph_cap *cap; + /* zero out the in-progress message */ + cap = list_first_entry(&tmp_list, + struct ceph_cap, session_caps); + list_del(&cap->session_caps); + ceph_put_cap(mdsc, cap); } - 
spin_unlock(&session->s_cap_lock); } static void cleanup_session_requests(struct ceph_mds_client *mdsc, @@ -1033,7 +1049,8 @@ static void cleanup_session_requests(struct ceph_mds_client *mdsc, req = list_first_entry(&session->s_unsafe, struct ceph_mds_request, r_unsafe_item); list_del_init(&req->r_unsafe_item); - pr_info(" dropping unsafe request %llu\n", req->r_tid); + pr_warn_ratelimited(" dropping unsafe request %llu\n", + req->r_tid); __unregister_request(mdsc, req); } /* zero r_attempts, so kick_requests() will re-send requests */ @@ -1095,10 +1112,16 @@ static int iterate_session_caps(struct ceph_mds_session *session, dout("iterate_session_caps finishing cap %p removal\n", cap); BUG_ON(cap->session != session); + cap->session = NULL; list_del_init(&cap->session_caps); session->s_nr_caps--; - cap->session = NULL; - old_cap = cap; /* put_cap it w/o locks held */ + if (cap->queue_release) { + list_add_tail(&cap->session_caps, + &session->s_cap_releases); + session->s_num_cap_releases++; + } else { + old_cap = cap; /* put_cap it w/o locks held */ + } } if (ret < 0) goto out; @@ -1119,6 +1142,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) { struct ceph_inode_info *ci = ceph_inode(inode); + LIST_HEAD(to_remove); int drop = 0; dout("removing cap %p, ci is %p, inode is %p\n", @@ -1126,12 +1150,27 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, spin_lock(&ci->i_ceph_lock); __ceph_remove_cap(cap, false); if (!ci->i_auth_cap) { + struct ceph_cap_flush *cf; struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; + while (true) { + struct rb_node *n = rb_first(&ci->i_cap_flush_tree); + if (!n) + break; + cf = rb_entry(n, struct ceph_cap_flush, i_node); + rb_erase(&cf->i_node, &ci->i_cap_flush_tree); + list_add(&cf->list, &to_remove); + } + spin_lock(&mdsc->cap_dirty_lock); + + list_for_each_entry(cf, &to_remove, list) + rb_erase(&cf->g_node, &mdsc->cap_flush_tree); + if (!list_empty(&ci->i_dirty_item)) { - pr_info(" dropping dirty %s state for %p %lld\n", + pr_warn_ratelimited( + " dropping dirty %s state for %p %lld\n", ceph_cap_string(ci->i_dirty_caps), inode, ceph_ino(inode)); ci->i_dirty_caps = 0; @@ -1139,7 +1178,8 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, drop = 1; } if (!list_empty(&ci->i_flushing_item)) { - pr_info(" dropping dirty+flushing %s state for %p %lld\n", + pr_warn_ratelimited( + " dropping dirty+flushing %s state for %p %lld\n", ceph_cap_string(ci->i_flushing_caps), inode, ceph_ino(inode)); ci->i_flushing_caps = 0; @@ -1148,8 +1188,20 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, drop = 1; } spin_unlock(&mdsc->cap_dirty_lock); + + if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) { + list_add(&ci->i_prealloc_cap_flush->list, &to_remove); + ci->i_prealloc_cap_flush = NULL; + } } spin_unlock(&ci->i_ceph_lock); + while (!list_empty(&to_remove)) { + struct ceph_cap_flush *cf; + cf = list_first_entry(&to_remove, + struct ceph_cap_flush, list); + list_del(&cf->list); + ceph_free_cap_flush(cf); + } while (drop--) iput(inode); return 0; @@ -1191,11 +1243,12 @@ static void remove_session_caps(struct ceph_mds_session *session) spin_lock(&session->s_cap_lock); } } - spin_unlock(&session->s_cap_lock); + + // drop cap expires and unlock s_cap_lock + cleanup_cap_releases(session->s_mdsc, session); BUG_ON(session->s_nr_caps > 0); BUG_ON(!list_empty(&session->s_cap_flushing)); - cleanup_cap_releases(session); } /* @@ -1371,7 +1424,8 
@@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued), ceph_cap_string(used), ceph_cap_string(wanted)); if (cap == ci->i_auth_cap) { - if (ci->i_dirty_caps | ci->i_flushing_caps) + if (ci->i_dirty_caps || ci->i_flushing_caps || + !list_empty(&ci->i_cap_snaps)) goto out; if ((used | wanted) & CEPH_CAP_ANY_WR) goto out; @@ -1417,121 +1471,80 @@ static int trim_caps(struct ceph_mds_client *mdsc, session->s_trim_caps = 0; } - ceph_add_cap_releases(mdsc, session); ceph_send_cap_releases(mdsc, session); return 0; } -/* - * Allocate cap_release messages. If there is a partially full message - * in the queue, try to allocate enough to cover it's remainder, so that - * we can send it immediately. - * - * Called under s_mutex. - */ -int ceph_add_cap_releases(struct ceph_mds_client *mdsc, - struct ceph_mds_session *session) +static int check_capsnap_flush(struct ceph_inode_info *ci, + u64 want_snap_seq) { - struct ceph_msg *msg, *partial = NULL; - struct ceph_mds_cap_release *head; - int err = -ENOMEM; - int extra = mdsc->fsc->mount_options->cap_release_safety; - int num; - - dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds, - extra); - - spin_lock(&session->s_cap_lock); - - if (!list_empty(&session->s_cap_releases)) { - msg = list_first_entry(&session->s_cap_releases, - struct ceph_msg, - list_head); - head = msg->front.iov_base; - num = le32_to_cpu(head->num); - if (num) { - dout(" partial %p with (%d/%d)\n", msg, num, - (int)CEPH_CAPS_PER_RELEASE); - extra += CEPH_CAPS_PER_RELEASE - num; - partial = msg; - } - } - while (session->s_num_cap_releases < session->s_nr_caps + extra) { - spin_unlock(&session->s_cap_lock); - msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE, - GFP_NOFS, false); - if (!msg) - goto out_unlocked; - dout("add_cap_releases %p msg %p now %d\n", session, msg, - (int)msg->front.iov_len); - head = msg->front.iov_base; - head->num = cpu_to_le32(0); - msg->front.iov_len = sizeof(*head); - spin_lock(&session->s_cap_lock); - list_add(&msg->list_head, &session->s_cap_releases); - session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE; - } - - if (partial) { - head = partial->front.iov_base; - num = le32_to_cpu(head->num); - dout(" queueing partial %p with %d/%d\n", partial, num, - (int)CEPH_CAPS_PER_RELEASE); - list_move_tail(&partial->list_head, - &session->s_cap_releases_done); - session->s_num_cap_releases -= CEPH_CAPS_PER_RELEASE - num; + int ret = 1; + spin_lock(&ci->i_ceph_lock); + if (want_snap_seq > 0 && !list_empty(&ci->i_cap_snaps)) { + struct ceph_cap_snap *capsnap = + list_first_entry(&ci->i_cap_snaps, + struct ceph_cap_snap, ci_item); + ret = capsnap->follows >= want_snap_seq; } - err = 0; - spin_unlock(&session->s_cap_lock); -out_unlocked: - return err; + spin_unlock(&ci->i_ceph_lock); + return ret; } -static int check_cap_flush(struct inode *inode, u64 want_flush_seq) +static int check_caps_flush(struct ceph_mds_client *mdsc, + u64 want_flush_tid) { - struct ceph_inode_info *ci = ceph_inode(inode); - int ret; - spin_lock(&ci->i_ceph_lock); - if (ci->i_flushing_caps) - ret = ci->i_cap_flush_seq >= want_flush_seq; - else - ret = 1; - spin_unlock(&ci->i_ceph_lock); + struct rb_node *n; + struct ceph_cap_flush *cf; + int ret = 1; + + spin_lock(&mdsc->cap_dirty_lock); + n = rb_first(&mdsc->cap_flush_tree); + cf = n ? 
rb_entry(n, struct ceph_cap_flush, g_node) : NULL; + if (cf && cf->tid <= want_flush_tid) { + dout("check_caps_flush still flushing tid %llu <= %llu\n", + cf->tid, want_flush_tid); + ret = 0; + } + spin_unlock(&mdsc->cap_dirty_lock); return ret; } /* * flush all dirty inode data to disk. * - * returns true if we've flushed through want_flush_seq + * returns true if we've flushed through want_flush_tid */ -static void wait_caps_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq) +static void wait_caps_flush(struct ceph_mds_client *mdsc, + u64 want_flush_tid, u64 want_snap_seq) { int mds; - dout("check_cap_flush want %lld\n", want_flush_seq); + dout("check_caps_flush want %llu snap want %llu\n", + want_flush_tid, want_snap_seq); mutex_lock(&mdsc->mutex); - for (mds = 0; mds < mdsc->max_sessions; mds++) { + for (mds = 0; mds < mdsc->max_sessions; ) { struct ceph_mds_session *session = mdsc->sessions[mds]; struct inode *inode = NULL; - if (!session) + if (!session) { + mds++; continue; + } get_session(session); mutex_unlock(&mdsc->mutex); mutex_lock(&session->s_mutex); - if (!list_empty(&session->s_cap_flushing)) { - struct ceph_inode_info *ci = - list_entry(session->s_cap_flushing.next, - struct ceph_inode_info, - i_flushing_item); - - if (!check_cap_flush(&ci->vfs_inode, want_flush_seq)) { - dout("check_cap_flush still flushing %p " - "seq %lld <= %lld to mds%d\n", - &ci->vfs_inode, ci->i_cap_flush_seq, - want_flush_seq, session->s_mds); + if (!list_empty(&session->s_cap_snaps_flushing)) { + struct ceph_cap_snap *capsnap = + list_first_entry(&session->s_cap_snaps_flushing, + struct ceph_cap_snap, + flushing_item); + struct ceph_inode_info *ci = capsnap->ci; + if (!check_capsnap_flush(ci, want_snap_seq)) { + dout("check_cap_flush still flushing snap %p " + "follows %lld <= %lld to mds%d\n", + &ci->vfs_inode, capsnap->follows, + want_snap_seq, mds); inode = igrab(&ci->vfs_inode); } } @@ -1540,15 +1553,21 @@ static void wait_caps_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq) if (inode) { wait_event(mdsc->cap_flushing_wq, - check_cap_flush(inode, want_flush_seq)); + check_capsnap_flush(ceph_inode(inode), + want_snap_seq)); iput(inode); + } else { + mds++; } mutex_lock(&mdsc->mutex); } - mutex_unlock(&mdsc->mutex); - dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq); + + wait_event(mdsc->cap_flushing_wq, + check_caps_flush(mdsc, want_flush_tid)); + + dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid); } /* @@ -1557,60 +1576,74 @@ static void wait_caps_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq) void ceph_send_cap_releases(struct ceph_mds_client *mdsc, struct ceph_mds_session *session) { - struct ceph_msg *msg; + struct ceph_msg *msg = NULL; + struct ceph_mds_cap_release *head; + struct ceph_mds_cap_item *item; + struct ceph_cap *cap; + LIST_HEAD(tmp_list); + int num_cap_releases; - dout("send_cap_releases mds%d\n", session->s_mds); spin_lock(&session->s_cap_lock); - while (!list_empty(&session->s_cap_releases_done)) { - msg = list_first_entry(&session->s_cap_releases_done, - struct ceph_msg, list_head); - list_del_init(&msg->list_head); - spin_unlock(&session->s_cap_lock); - msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); - dout("send_cap_releases mds%d %p\n", session->s_mds, msg); - ceph_con_send(&session->s_con, msg); - spin_lock(&session->s_cap_lock); - } +again: + list_splice_init(&session->s_cap_releases, &tmp_list); + num_cap_releases = session->s_num_cap_releases; + session->s_num_cap_releases = 0; 
spin_unlock(&session->s_cap_lock); -} -static void discard_cap_releases(struct ceph_mds_client *mdsc, - struct ceph_mds_session *session) -{ - struct ceph_msg *msg; - struct ceph_mds_cap_release *head; - unsigned num; - - dout("discard_cap_releases mds%d\n", session->s_mds); + while (!list_empty(&tmp_list)) { + if (!msg) { + msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, + PAGE_CACHE_SIZE, GFP_NOFS, false); + if (!msg) + goto out_err; + head = msg->front.iov_base; + head->num = cpu_to_le32(0); + msg->front.iov_len = sizeof(*head); + } + cap = list_first_entry(&tmp_list, struct ceph_cap, + session_caps); + list_del(&cap->session_caps); + num_cap_releases--; - if (!list_empty(&session->s_cap_releases)) { - /* zero out the in-progress message */ - msg = list_first_entry(&session->s_cap_releases, - struct ceph_msg, list_head); head = msg->front.iov_base; - num = le32_to_cpu(head->num); - dout("discard_cap_releases mds%d %p %u\n", - session->s_mds, msg, num); - head->num = cpu_to_le32(0); - msg->front.iov_len = sizeof(*head); - session->s_num_cap_releases += num; + le32_add_cpu(&head->num, 1); + item = msg->front.iov_base + msg->front.iov_len; + item->ino = cpu_to_le64(cap->cap_ino); + item->cap_id = cpu_to_le64(cap->cap_id); + item->migrate_seq = cpu_to_le32(cap->mseq); + item->seq = cpu_to_le32(cap->issue_seq); + msg->front.iov_len += sizeof(*item); + + ceph_put_cap(mdsc, cap); + + if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) { + msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); + dout("send_cap_releases mds%d %p\n", session->s_mds, msg); + ceph_con_send(&session->s_con, msg); + msg = NULL; + } } - /* requeue completed messages */ - while (!list_empty(&session->s_cap_releases_done)) { - msg = list_first_entry(&session->s_cap_releases_done, - struct ceph_msg, list_head); - list_del_init(&msg->list_head); + BUG_ON(num_cap_releases != 0); - head = msg->front.iov_base; - num = le32_to_cpu(head->num); - dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg, - num); - session->s_num_cap_releases += num; - head->num = cpu_to_le32(0); - msg->front.iov_len = sizeof(*head); - list_add(&msg->list_head, &session->s_cap_releases); + spin_lock(&session->s_cap_lock); + if (!list_empty(&session->s_cap_releases)) + goto again; + spin_unlock(&session->s_cap_lock); + + if (msg) { + msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); + dout("send_cap_releases mds%d %p\n", session->s_mds, msg); + ceph_con_send(&session->s_con, msg); } + return; +out_err: + pr_err("send_cap_releases mds%d, failed to allocate message\n", + session->s_mds); + spin_lock(&session->s_cap_lock); + list_splice(&tmp_list, &session->s_cap_releases); + session->s_num_cap_releases += num_cap_releases; + spin_unlock(&session->s_cap_lock); } /* @@ -1635,7 +1668,8 @@ int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req, order = get_order(size * num_entries); while (order >= 0) { - rinfo->dir_in = (void*)__get_free_pages(GFP_NOFS | __GFP_NOWARN, + rinfo->dir_in = (void*)__get_free_pages(GFP_KERNEL | + __GFP_NOWARN, order); if (rinfo->dir_in) break; @@ -1697,13 +1731,9 @@ static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc) struct ceph_mds_request, r_node); } -static u64 __get_oldest_tid(struct ceph_mds_client *mdsc) +static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc) { - struct ceph_mds_request *req = __get_oldest_req(mdsc); - - if (req) - return req->r_tid; - return 0; + return mdsc->oldest_tid; } /* @@ -2267,15 +2297,18 @@ int ceph_mdsc_do_request(struct ceph_mds_client 
*mdsc, /* wait */ mutex_unlock(&mdsc->mutex); dout("do_request waiting\n"); - if (req->r_timeout) { - err = (long)wait_for_completion_killable_timeout( - &req->r_completion, req->r_timeout); - if (err == 0) - err = -EIO; - } else if (req->r_wait_for_completion) { + if (!req->r_timeout && req->r_wait_for_completion) { err = req->r_wait_for_completion(mdsc, req); } else { - err = wait_for_completion_killable(&req->r_completion); + long timeleft = wait_for_completion_killable_timeout( + &req->r_completion, + ceph_timeout_jiffies(req->r_timeout)); + if (timeleft > 0) + err = 0; + else if (!timeleft) + err = -EIO; /* timed out */ + else + err = timeleft; /* killed */ } dout("do_request waited, got %d\n", err); mutex_lock(&mdsc->mutex); @@ -2496,7 +2529,6 @@ out_err: } mutex_unlock(&mdsc->mutex); - ceph_add_cap_releases(mdsc, req->r_session); mutex_unlock(&session->s_mutex); /* kick calling process */ @@ -2888,8 +2920,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, */ session->s_cap_reconnect = 1; /* drop old cap expires; we're about to reestablish that state */ - discard_cap_releases(mdsc, session); - spin_unlock(&session->s_cap_lock); + cleanup_cap_releases(mdsc, session); /* trim unused caps to reduce MDS's cache rejoin time */ if (mdsc->fsc->sb->s_root) @@ -2956,6 +2987,9 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, reply->hdr.data_len = cpu_to_le32(pagelist->length); ceph_msg_data_add_pagelist(reply, pagelist); + + ceph_early_kick_flushing_caps(mdsc, session); + ceph_con_send(&session->s_con, reply); mutex_unlock(&session->s_mutex); @@ -3352,7 +3386,6 @@ static void delayed_work(struct work_struct *work) send_renew_caps(mdsc, s); else ceph_con_keepalive(&s->s_con); - ceph_add_cap_releases(mdsc, s); if (s->s_state == CEPH_MDS_SESSION_OPEN || s->s_state == CEPH_MDS_SESSION_HUNG) ceph_send_cap_releases(mdsc, s); @@ -3390,11 +3423,13 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) atomic_set(&mdsc->num_sessions, 0); mdsc->max_sessions = 0; mdsc->stopping = 0; + mdsc->last_snap_seq = 0; init_rwsem(&mdsc->snap_rwsem); mdsc->snap_realms = RB_ROOT; INIT_LIST_HEAD(&mdsc->snap_empty); spin_lock_init(&mdsc->snap_empty_lock); mdsc->last_tid = 0; + mdsc->oldest_tid = 0; mdsc->request_tree = RB_ROOT; INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work); mdsc->last_renew_caps = jiffies; @@ -3402,7 +3437,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) spin_lock_init(&mdsc->cap_delay_lock); INIT_LIST_HEAD(&mdsc->snap_flush_list); spin_lock_init(&mdsc->snap_flush_lock); - mdsc->cap_flush_seq = 0; + mdsc->last_cap_flush_tid = 1; + mdsc->cap_flush_tree = RB_ROOT; INIT_LIST_HEAD(&mdsc->cap_dirty); INIT_LIST_HEAD(&mdsc->cap_dirty_migrating); mdsc->num_cap_flushing = 0; @@ -3414,6 +3450,9 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) ceph_caps_init(mdsc); ceph_adjust_min_caps(mdsc, fsc->min_caps); + init_rwsem(&mdsc->pool_perm_rwsem); + mdsc->pool_perm_tree = RB_ROOT; + return 0; } @@ -3423,8 +3462,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) */ static void wait_requests(struct ceph_mds_client *mdsc) { + struct ceph_options *opts = mdsc->fsc->client->options; struct ceph_mds_request *req; - struct ceph_fs_client *fsc = mdsc->fsc; mutex_lock(&mdsc->mutex); if (__get_oldest_req(mdsc)) { @@ -3432,7 +3471,7 @@ static void wait_requests(struct ceph_mds_client *mdsc) dout("wait_requests waiting for requests\n"); wait_for_completion_timeout(&mdsc->safe_umount_waiters, - fsc->client->options->mount_timeout * HZ); + ceph_timeout_jiffies(opts->mount_timeout)); /* tear 
down remaining requests */ mutex_lock(&mdsc->mutex); @@ -3485,7 +3524,8 @@ restart: nextreq = rb_entry(n, struct ceph_mds_request, r_node); else nextreq = NULL; - if ((req->r_op & CEPH_MDS_OP_WRITE)) { + if (req->r_op != CEPH_MDS_OP_SETFILELOCK && + (req->r_op & CEPH_MDS_OP_WRITE)) { /* write op */ ceph_mdsc_get_request(req); if (nextreq) @@ -3513,7 +3553,7 @@ restart: void ceph_mdsc_sync(struct ceph_mds_client *mdsc) { - u64 want_tid, want_flush; + u64 want_tid, want_flush, want_snap; if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN) return; @@ -3525,13 +3565,18 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc) ceph_flush_dirty_caps(mdsc); spin_lock(&mdsc->cap_dirty_lock); - want_flush = mdsc->cap_flush_seq; + want_flush = mdsc->last_cap_flush_tid; spin_unlock(&mdsc->cap_dirty_lock); - dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush); + down_read(&mdsc->snap_rwsem); + want_snap = mdsc->last_snap_seq; + up_read(&mdsc->snap_rwsem); + + dout("sync want tid %lld flush_seq %lld snap_seq %lld\n", + want_tid, want_flush, want_snap); wait_unsafe_requests(mdsc, want_tid); - wait_caps_flush(mdsc, want_flush); + wait_caps_flush(mdsc, want_flush, want_snap); } /* @@ -3549,10 +3594,9 @@ static bool done_closing_sessions(struct ceph_mds_client *mdsc) */ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) { + struct ceph_options *opts = mdsc->fsc->client->options; struct ceph_mds_session *session; int i; - struct ceph_fs_client *fsc = mdsc->fsc; - unsigned long timeout = fsc->client->options->mount_timeout * HZ; dout("close_sessions\n"); @@ -3573,7 +3617,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) dout("waiting for sessions to close\n"); wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc), - timeout); + ceph_timeout_jiffies(opts->mount_timeout)); /* tear down remaining sessions */ mutex_lock(&mdsc->mutex); @@ -3607,6 +3651,7 @@ static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) ceph_mdsmap_destroy(mdsc->mdsmap); kfree(mdsc->sessions); ceph_caps_finalize(mdsc); + ceph_pool_perm_destroy(mdsc); } void ceph_mdsc_destroy(struct ceph_fs_client *fsc) diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 1875b5d985c6..762757e6cebf 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -139,7 +139,6 @@ struct ceph_mds_session { int s_cap_reconnect; int s_readonly; struct list_head s_cap_releases; /* waiting cap_release messages */ - struct list_head s_cap_releases_done; /* ready to send */ struct ceph_cap *s_cap_iterator; /* protected by mutex */ @@ -228,7 +227,7 @@ struct ceph_mds_request { int r_err; bool r_aborted; - unsigned long r_timeout; /* optional. jiffies */ + unsigned long r_timeout; /* optional. jiffies, 0 is "wait forever" */ unsigned long r_started; /* start time to measure timeout against */ unsigned long r_request_started; /* start time for mds request only, used to measure lease durations */ @@ -254,12 +253,21 @@ struct ceph_mds_request { bool r_got_unsafe, r_got_safe, r_got_result; bool r_did_prepopulate; + long long r_dir_release_cnt; + long long r_dir_ordered_cnt; + int r_readdir_cache_idx; u32 r_readdir_offset; struct ceph_cap_reservation r_caps_reservation; int r_num_caps; }; +struct ceph_pool_perm { + struct rb_node node; + u32 pool; + int perm; +}; + /* * mds client state */ @@ -284,12 +292,15 @@ struct ceph_mds_client { * references (implying they contain no inodes with caps) that * should be destroyed. 
*/ + u64 last_snap_seq; struct rw_semaphore snap_rwsem; struct rb_root snap_realms; struct list_head snap_empty; spinlock_t snap_empty_lock; /* protect snap_empty */ u64 last_tid; /* most recent mds request */ + u64 oldest_tid; /* oldest incomplete mds request, + excluding setfilelock requests */ struct rb_root request_tree; /* pending mds requests */ struct delayed_work delayed_work; /* delayed work */ unsigned long last_renew_caps; /* last time we renewed our caps */ @@ -298,7 +309,8 @@ struct ceph_mds_client { struct list_head snap_flush_list; /* cap_snaps ready to flush */ spinlock_t snap_flush_lock; - u64 cap_flush_seq; + u64 last_cap_flush_tid; + struct rb_root cap_flush_tree; struct list_head cap_dirty; /* inodes with dirty caps */ struct list_head cap_dirty_migrating; /* ...that are migration... */ int num_cap_flushing; /* # caps we are flushing */ @@ -328,6 +340,9 @@ struct ceph_mds_client { spinlock_t dentry_lru_lock; struct list_head dentry_lru; int num_dentry; + + struct rw_semaphore pool_perm_rwsem; + struct rb_root pool_perm_tree; }; extern const char *ceph_mds_op_name(int op); @@ -379,8 +394,6 @@ static inline void ceph_mdsc_put_request(struct ceph_mds_request *req) kref_put(&req->r_kref, ceph_mdsc_release_request); } -extern int ceph_add_cap_releases(struct ceph_mds_client *mdsc, - struct ceph_mds_session *session); extern void ceph_send_cap_releases(struct ceph_mds_client *mdsc, struct ceph_mds_session *session); diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index a97e39f09ba6..233d906aec02 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -296,7 +296,7 @@ static int cmpu64_rev(const void *a, const void *b) } -static struct ceph_snap_context *empty_snapc; +struct ceph_snap_context *ceph_empty_snapc; /* * build the snap context for a given realm. @@ -338,9 +338,9 @@ static int build_snap_context(struct ceph_snap_realm *realm) return 0; } - if (num == 0 && realm->seq == empty_snapc->seq) { - ceph_get_snap_context(empty_snapc); - snapc = empty_snapc; + if (num == 0 && realm->seq == ceph_empty_snapc->seq) { + ceph_get_snap_context(ceph_empty_snapc); + snapc = ceph_empty_snapc; goto done; } @@ -436,6 +436,14 @@ static int dup_array(u64 **dst, __le64 *src, u32 num) return 0; } +static bool has_new_snaps(struct ceph_snap_context *o, + struct ceph_snap_context *n) +{ + if (n->num_snaps == 0) + return false; + /* snaps are in descending order */ + return n->snaps[0] > o->seq; +} /* * When a snapshot is applied, the size/mtime inode metadata is queued @@ -455,6 +463,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci) { struct inode *inode = &ci->vfs_inode; struct ceph_cap_snap *capsnap; + struct ceph_snap_context *old_snapc, *new_snapc; int used, dirty; capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS); @@ -467,6 +476,9 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci) used = __ceph_caps_used(ci); dirty = __ceph_caps_dirty(ci); + old_snapc = ci->i_head_snapc; + new_snapc = ci->i_snap_realm->cached_context; + /* * If there is a write in progress, treat that as a dirty Fw, * even though it hasn't completed yet; by the time we finish @@ -481,76 +493,95 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci) writes in progress now were started before the previous cap_snap. lucky us. 
*/ dout("queue_cap_snap %p already pending\n", inode); - kfree(capsnap); - } else if (ci->i_snap_realm->cached_context == empty_snapc) { - dout("queue_cap_snap %p empty snapc\n", inode); - kfree(capsnap); - } else if (dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL| - CEPH_CAP_FILE_EXCL|CEPH_CAP_FILE_WR)) { - struct ceph_snap_context *snapc = ci->i_head_snapc; - - /* - * if we are a sync write, we may need to go to the snaprealm - * to get the current snapc. - */ - if (!snapc) - snapc = ci->i_snap_realm->cached_context; + goto update_snapc; + } + if (ci->i_wrbuffer_ref_head == 0 && + !(dirty & (CEPH_CAP_ANY_EXCL|CEPH_CAP_FILE_WR))) { + dout("queue_cap_snap %p nothing dirty|writing\n", inode); + goto update_snapc; + } - dout("queue_cap_snap %p cap_snap %p queuing under %p %s\n", - inode, capsnap, snapc, ceph_cap_string(dirty)); - ihold(inode); + BUG_ON(!old_snapc); - atomic_set(&capsnap->nref, 1); - capsnap->ci = ci; - INIT_LIST_HEAD(&capsnap->ci_item); - INIT_LIST_HEAD(&capsnap->flushing_item); - - capsnap->follows = snapc->seq; - capsnap->issued = __ceph_caps_issued(ci, NULL); - capsnap->dirty = dirty; - - capsnap->mode = inode->i_mode; - capsnap->uid = inode->i_uid; - capsnap->gid = inode->i_gid; - - if (dirty & CEPH_CAP_XATTR_EXCL) { - __ceph_build_xattrs_blob(ci); - capsnap->xattr_blob = - ceph_buffer_get(ci->i_xattrs.blob); - capsnap->xattr_version = ci->i_xattrs.version; - } else { - capsnap->xattr_blob = NULL; - capsnap->xattr_version = 0; + /* + * There is no need to send FLUSHSNAP message to MDS if there is + * no new snapshot. But when there is dirty pages or on-going + * writes, we still need to create cap_snap. cap_snap is needed + * by the write path and page writeback path. + * + * also see ceph_try_drop_cap_snap() + */ + if (has_new_snaps(old_snapc, new_snapc)) { + if (dirty & (CEPH_CAP_ANY_EXCL|CEPH_CAP_FILE_WR)) + capsnap->need_flush = true; + } else { + if (!(used & CEPH_CAP_FILE_WR) && + ci->i_wrbuffer_ref_head == 0) { + dout("queue_cap_snap %p " + "no new_snap|dirty_page|writing\n", inode); + goto update_snapc; } + } - capsnap->inline_data = ci->i_inline_version != CEPH_INLINE_NONE; - - /* dirty page count moved from _head to this cap_snap; - all subsequent writes page dirties occur _after_ this - snapshot. */ - capsnap->dirty_pages = ci->i_wrbuffer_ref_head; - ci->i_wrbuffer_ref_head = 0; - capsnap->context = snapc; - ci->i_head_snapc = - ceph_get_snap_context(ci->i_snap_realm->cached_context); - dout(" new snapc is %p\n", ci->i_head_snapc); - list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps); - - if (used & CEPH_CAP_FILE_WR) { - dout("queue_cap_snap %p cap_snap %p snapc %p" - " seq %llu used WR, now pending\n", inode, - capsnap, snapc, snapc->seq); - capsnap->writing = 1; - } else { - /* note mtime, size NOW. */ - __ceph_finish_cap_snap(ci, capsnap); - } + dout("queue_cap_snap %p cap_snap %p queuing under %p %s %s\n", + inode, capsnap, old_snapc, ceph_cap_string(dirty), + capsnap->need_flush ? 
"" : "no_flush"); + ihold(inode); + + atomic_set(&capsnap->nref, 1); + capsnap->ci = ci; + INIT_LIST_HEAD(&capsnap->ci_item); + INIT_LIST_HEAD(&capsnap->flushing_item); + + capsnap->follows = old_snapc->seq; + capsnap->issued = __ceph_caps_issued(ci, NULL); + capsnap->dirty = dirty; + + capsnap->mode = inode->i_mode; + capsnap->uid = inode->i_uid; + capsnap->gid = inode->i_gid; + + if (dirty & CEPH_CAP_XATTR_EXCL) { + __ceph_build_xattrs_blob(ci); + capsnap->xattr_blob = + ceph_buffer_get(ci->i_xattrs.blob); + capsnap->xattr_version = ci->i_xattrs.version; } else { - dout("queue_cap_snap %p nothing dirty|writing\n", inode); - kfree(capsnap); + capsnap->xattr_blob = NULL; + capsnap->xattr_version = 0; } + capsnap->inline_data = ci->i_inline_version != CEPH_INLINE_NONE; + + /* dirty page count moved from _head to this cap_snap; + all subsequent writes page dirties occur _after_ this + snapshot. */ + capsnap->dirty_pages = ci->i_wrbuffer_ref_head; + ci->i_wrbuffer_ref_head = 0; + capsnap->context = old_snapc; + list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps); + old_snapc = NULL; + + if (used & CEPH_CAP_FILE_WR) { + dout("queue_cap_snap %p cap_snap %p snapc %p" + " seq %llu used WR, now pending\n", inode, + capsnap, old_snapc, old_snapc->seq); + capsnap->writing = 1; + } else { + /* note mtime, size NOW. */ + __ceph_finish_cap_snap(ci, capsnap); + } + capsnap = NULL; + +update_snapc: + if (ci->i_head_snapc) { + ci->i_head_snapc = ceph_get_snap_context(new_snapc); + dout(" new snapc is %p\n", new_snapc); + } spin_unlock(&ci->i_ceph_lock); + + kfree(capsnap); + ceph_put_snap_context(old_snapc); } /* @@ -699,6 +730,8 @@ more: /* queue realm for cap_snap creation */ list_add(&realm->dirty_item, &dirty_realms); + if (realm->seq > mdsc->last_snap_seq) + mdsc->last_snap_seq = realm->seq; invalidate = 1; } else if (!realm->cached_context) { @@ -964,14 +997,14 @@ out: int __init ceph_snap_init(void) { - empty_snapc = ceph_create_snap_context(0, GFP_NOFS); - if (!empty_snapc) + ceph_empty_snapc = ceph_create_snap_context(0, GFP_NOFS); + if (!ceph_empty_snapc) return -ENOMEM; - empty_snapc->seq = 1; + ceph_empty_snapc->seq = 1; return 0; } void ceph_snap_exit(void) { - ceph_put_snap_context(empty_snapc); + ceph_put_snap_context(ceph_empty_snapc); } diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 4e9905374078..d1c833c321b9 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -134,10 +134,12 @@ enum { Opt_noino32, Opt_fscache, Opt_nofscache, + Opt_poolperm, + Opt_nopoolperm, #ifdef CONFIG_CEPH_FS_POSIX_ACL Opt_acl, #endif - Opt_noacl + Opt_noacl, }; static match_table_t fsopt_tokens = { @@ -165,6 +167,8 @@ static match_table_t fsopt_tokens = { {Opt_noino32, "noino32"}, {Opt_fscache, "fsc"}, {Opt_nofscache, "nofsc"}, + {Opt_poolperm, "poolperm"}, + {Opt_nopoolperm, "nopoolperm"}, #ifdef CONFIG_CEPH_FS_POSIX_ACL {Opt_acl, "acl"}, #endif @@ -268,6 +272,13 @@ static int parse_fsopt_token(char *c, void *private) case Opt_nofscache: fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE; break; + case Opt_poolperm: + fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM; + printk ("pool perm"); + break; + case Opt_nopoolperm: + fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM; + break; #ifdef CONFIG_CEPH_FS_POSIX_ACL case Opt_acl: fsopt->sb_flags |= MS_POSIXACL; @@ -436,6 +447,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root) seq_puts(m, ",nodcache"); if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) seq_puts(m, ",fsc"); + if (fsopt->flags & CEPH_MOUNT_OPT_NOPOOLPERM) + seq_puts(m, ",nopoolperm"); #ifdef 
CONFIG_CEPH_FS_POSIX_ACL if (fsopt->sb_flags & MS_POSIXACL) @@ -609,6 +622,7 @@ static void destroy_fs_client(struct ceph_fs_client *fsc) */ struct kmem_cache *ceph_inode_cachep; struct kmem_cache *ceph_cap_cachep; +struct kmem_cache *ceph_cap_flush_cachep; struct kmem_cache *ceph_dentry_cachep; struct kmem_cache *ceph_file_cachep; @@ -634,6 +648,10 @@ static int __init init_caches(void) SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); if (ceph_cap_cachep == NULL) goto bad_cap; + ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush, + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); + if (ceph_cap_flush_cachep == NULL) + goto bad_cap_flush; ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD); @@ -652,6 +670,8 @@ static int __init init_caches(void) bad_file: kmem_cache_destroy(ceph_dentry_cachep); bad_dentry: + kmem_cache_destroy(ceph_cap_flush_cachep); +bad_cap_flush: kmem_cache_destroy(ceph_cap_cachep); bad_cap: kmem_cache_destroy(ceph_inode_cachep); @@ -668,6 +688,7 @@ static void destroy_caches(void) kmem_cache_destroy(ceph_inode_cachep); kmem_cache_destroy(ceph_cap_cachep); + kmem_cache_destroy(ceph_cap_flush_cachep); kmem_cache_destroy(ceph_dentry_cachep); kmem_cache_destroy(ceph_file_cachep); @@ -729,7 +750,7 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc, req->r_ino1.ino = CEPH_INO_ROOT; req->r_ino1.snap = CEPH_NOSNAP; req->r_started = started; - req->r_timeout = fsc->client->options->mount_timeout * HZ; + req->r_timeout = fsc->client->options->mount_timeout; req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE); req->r_num_caps = 2; err = ceph_mdsc_do_request(mdsc, NULL, req); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index fa20e1318939..860cc016e70d 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -35,6 +35,7 @@ #define CEPH_MOUNT_OPT_INO32 (1<<8) /* 32 bit inos */ #define CEPH_MOUNT_OPT_DCACHE (1<<9) /* use dcache for readdir etc */ #define CEPH_MOUNT_OPT_FSCACHE (1<<10) /* use fscache */ +#define CEPH_MOUNT_OPT_NOPOOLPERM (1<<11) /* no pool permission check */ #define CEPH_MOUNT_OPT_DEFAULT (CEPH_MOUNT_OPT_RBYTES | \ CEPH_MOUNT_OPT_DCACHE) @@ -121,11 +122,21 @@ struct ceph_cap { struct rb_node ci_node; /* per-ci cap tree */ struct ceph_mds_session *session; struct list_head session_caps; /* per-session caplist */ - int mds; u64 cap_id; /* unique cap id (mds provided) */ - int issued; /* latest, from the mds */ - int implemented; /* implemented superset of issued (for revocation) */ - int mds_wanted; + union { + /* in-use caps */ + struct { + int issued; /* latest, from the mds */ + int implemented; /* implemented superset of + issued (for revocation) */ + int mds, mds_wanted; + }; + /* caps to release */ + struct { + u64 cap_ino; + int queue_release; + }; + }; u32 seq, issue_seq, mseq; u32 cap_gen; /* active/stale cycle */ unsigned long last_used; @@ -163,6 +174,7 @@ struct ceph_cap_snap { int writing; /* a sync write is still in progress */ int dirty_pages; /* dirty pages awaiting writeback */ bool inline_data; + bool need_flush; }; static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap) @@ -174,6 +186,17 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap) } } +struct ceph_cap_flush { + u64 tid; + int caps; + bool kick; + struct rb_node g_node; // global + union { + struct rb_node i_node; // inode + struct list_head list; + }; +}; + /* * The frag tree describes how a directory is fragmented, potentially across * multiple metadata servers. 
It is also used to indicate points where @@ -259,9 +282,9 @@ struct ceph_inode_info { u32 i_time_warp_seq; unsigned i_ceph_flags; - int i_ordered_count; - atomic_t i_release_count; - atomic_t i_complete_count; + atomic64_t i_release_count; + atomic64_t i_ordered_count; + atomic64_t i_complete_seq[2]; struct ceph_dir_layout i_dir_layout; struct ceph_file_layout i_layout; @@ -283,11 +306,11 @@ struct ceph_inode_info { struct ceph_cap *i_auth_cap; /* authoritative cap, if any */ unsigned i_dirty_caps, i_flushing_caps; /* mask of dirtied fields */ struct list_head i_dirty_item, i_flushing_item; - u64 i_cap_flush_seq; /* we need to track cap writeback on a per-cap-bit basis, to allow * overlapping, pipelined cap flushes to the mds. we can probably * reduce the tid to 8 bits if we're concerned about inode size. */ - u16 i_cap_flush_last_tid, i_cap_flush_tid[CEPH_CAP_BITS]; + struct ceph_cap_flush *i_prealloc_cap_flush; + struct rb_root i_cap_flush_tree; wait_queue_head_t i_cap_wq; /* threads waiting on a capability */ unsigned long i_hold_caps_min; /* jiffies */ unsigned long i_hold_caps_max; /* jiffies */ @@ -438,36 +461,46 @@ static inline struct inode *ceph_find_inode(struct super_block *sb, /* * Ceph inode. */ -#define CEPH_I_DIR_ORDERED 1 /* dentries in dir are ordered */ -#define CEPH_I_NODELAY 4 /* do not delay cap release */ -#define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */ -#define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */ +#define CEPH_I_DIR_ORDERED (1 << 0) /* dentries in dir are ordered */ +#define CEPH_I_NODELAY (1 << 1) /* do not delay cap release */ +#define CEPH_I_FLUSH (1 << 2) /* do not delay flush of dirty metadata */ +#define CEPH_I_NOFLUSH (1 << 3) /* do not flush dirty caps */ +#define CEPH_I_POOL_PERM (1 << 4) /* pool rd/wr bits are valid */ +#define CEPH_I_POOL_RD (1 << 5) /* can read from pool */ +#define CEPH_I_POOL_WR (1 << 6) /* can write to pool */ + static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci, - int release_count, int ordered_count) + long long release_count, + long long ordered_count) { - atomic_set(&ci->i_complete_count, release_count); - if (ci->i_ordered_count == ordered_count) - ci->i_ceph_flags |= CEPH_I_DIR_ORDERED; - else - ci->i_ceph_flags &= ~CEPH_I_DIR_ORDERED; + smp_mb__before_atomic(); + atomic64_set(&ci->i_complete_seq[0], release_count); + atomic64_set(&ci->i_complete_seq[1], ordered_count); } static inline void __ceph_dir_clear_complete(struct ceph_inode_info *ci) { - atomic_inc(&ci->i_release_count); + atomic64_inc(&ci->i_release_count); +} + +static inline void __ceph_dir_clear_ordered(struct ceph_inode_info *ci) +{ + atomic64_inc(&ci->i_ordered_count); } static inline bool __ceph_dir_is_complete(struct ceph_inode_info *ci) { - return atomic_read(&ci->i_complete_count) == - atomic_read(&ci->i_release_count); + return atomic64_read(&ci->i_complete_seq[0]) == + atomic64_read(&ci->i_release_count); } static inline bool __ceph_dir_is_complete_ordered(struct ceph_inode_info *ci) { - return __ceph_dir_is_complete(ci) && - (ci->i_ceph_flags & CEPH_I_DIR_ORDERED); + return atomic64_read(&ci->i_complete_seq[0]) == + atomic64_read(&ci->i_release_count) && + atomic64_read(&ci->i_complete_seq[1]) == + atomic64_read(&ci->i_ordered_count); } static inline void ceph_dir_clear_complete(struct inode *inode) @@ -477,20 +510,13 @@ static inline void ceph_dir_clear_complete(struct inode *inode) static inline void ceph_dir_clear_ordered(struct inode *inode) { - struct ceph_inode_info *ci = ceph_inode(inode); - 
spin_lock(&ci->i_ceph_lock); - ci->i_ordered_count++; - ci->i_ceph_flags &= ~CEPH_I_DIR_ORDERED; - spin_unlock(&ci->i_ceph_lock); + __ceph_dir_clear_ordered(ceph_inode(inode)); } static inline bool ceph_dir_is_complete_ordered(struct inode *inode) { - struct ceph_inode_info *ci = ceph_inode(inode); - bool ret; - spin_lock(&ci->i_ceph_lock); - ret = __ceph_dir_is_complete_ordered(ci); - spin_unlock(&ci->i_ceph_lock); + bool ret = __ceph_dir_is_complete_ordered(ceph_inode(inode)); + smp_rmb(); return ret; } @@ -552,7 +578,10 @@ static inline int __ceph_caps_dirty(struct ceph_inode_info *ci) { return ci->i_dirty_caps | ci->i_flushing_caps; } -extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask); +extern struct ceph_cap_flush *ceph_alloc_cap_flush(void); +extern void ceph_free_cap_flush(struct ceph_cap_flush *cf); +extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask, + struct ceph_cap_flush **pcf); extern int __ceph_caps_revoking_other(struct ceph_inode_info *ci, struct ceph_cap *ocap, int mask); @@ -606,16 +635,20 @@ struct ceph_file_info { unsigned offset; /* offset of last chunk, adjusted for . and .. */ unsigned next_offset; /* offset of next chunk (last_name's + 1) */ char *last_name; /* last entry in previous chunk */ - struct dentry *dentry; /* next dentry (for dcache readdir) */ - int dir_release_count; - int dir_ordered_count; + long long dir_release_count; + long long dir_ordered_count; + int readdir_cache_idx; /* used for -o dirstat read() on directory thing */ char *dir_info; int dir_info_len; }; - +struct ceph_readdir_cache_control { + struct page *page; + struct dentry **dentries; + int index; +}; /* * A "snap realm" describes a subset of the file hierarchy sharing @@ -687,6 +720,7 @@ static inline int default_congestion_kb(void) /* snap.c */ +extern struct ceph_snap_context *ceph_empty_snapc; struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc, u64 ino); extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc, @@ -713,8 +747,8 @@ extern void ceph_snap_exit(void); static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci) { return !list_empty(&ci->i_cap_snaps) && - list_entry(ci->i_cap_snaps.prev, struct ceph_cap_snap, - ci_item)->writing; + list_last_entry(&ci->i_cap_snaps, struct ceph_cap_snap, + ci_item)->writing; } /* inode.c */ @@ -838,12 +872,12 @@ extern void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap); extern int ceph_is_any_caps(struct inode *inode); -extern void __queue_cap_release(struct ceph_mds_session *session, u64 ino, - u64 cap_id, u32 migrate_seq, u32 issue_seq); extern void ceph_queue_caps_release(struct inode *inode); extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc); extern int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync); +extern void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session); extern void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, struct ceph_mds_session *session); extern struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, @@ -879,6 +913,9 @@ extern void ceph_put_fmode(struct ceph_inode_info *ci, int mode); /* addr.c */ extern const struct address_space_operations ceph_aops; extern int ceph_mmap(struct file *file, struct vm_area_struct *vma); +extern int ceph_uninline_data(struct file *filp, struct page *locked_page); +extern int ceph_pool_perm_check(struct ceph_inode_info *ci, int need); +extern void 
ceph_pool_perm_destroy(struct ceph_mds_client* mdsc); /* file.c */ extern const struct file_operations ceph_file_fops; @@ -890,7 +927,6 @@ extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry, extern int ceph_release(struct inode *inode, struct file *filp); extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, char *data, size_t len); -int ceph_uninline_data(struct file *filp, struct page *locked_page); /* dir.c */ extern const struct file_operations ceph_dir_fops; extern const struct file_operations ceph_snapdir_fops; @@ -911,6 +947,7 @@ extern void ceph_dentry_lru_del(struct dentry *dn); extern void ceph_invalidate_dentry_lease(struct dentry *dentry); extern unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn); extern struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry); +extern void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl); /* * our d_ops vary depending on whether the inode is live, diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index cd7ffad4041d..819163d8313b 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -911,6 +911,8 @@ int __ceph_setxattr(struct dentry *dentry, const char *name, struct inode *inode = d_inode(dentry); struct ceph_vxattr *vxattr; struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc; + struct ceph_cap_flush *prealloc_cf = NULL; int issued; int err; int dirty = 0; @@ -920,6 +922,7 @@ int __ceph_setxattr(struct dentry *dentry, const char *name, char *newval = NULL; struct ceph_inode_xattr *xattr = NULL; int required_blob_size; + bool lock_snap_rwsem = false; if (!ceph_is_valid_xattr(name)) return -EOPNOTSUPP; @@ -948,12 +951,27 @@ int __ceph_setxattr(struct dentry *dentry, const char *name, if (!xattr) goto out; + prealloc_cf = ceph_alloc_cap_flush(); + if (!prealloc_cf) + goto out; + spin_lock(&ci->i_ceph_lock); retry: issued = __ceph_caps_issued(ci, NULL); - dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued)); if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) goto do_sync; + + if (!lock_snap_rwsem && !ci->i_head_snapc) { + lock_snap_rwsem = true; + if (!down_read_trylock(&mdsc->snap_rwsem)) { + spin_unlock(&ci->i_ceph_lock); + down_read(&mdsc->snap_rwsem); + spin_lock(&ci->i_ceph_lock); + goto retry; + } + } + + dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued)); __build_xattrs(inode); required_blob_size = __get_required_blob_size(ci, name_len, val_len); @@ -966,7 +984,7 @@ retry: dout(" preaallocating new blob size=%d\n", required_blob_size); blob = ceph_buffer_new(required_blob_size, GFP_NOFS); if (!blob) - goto out; + goto do_sync_unlocked; spin_lock(&ci->i_ceph_lock); if (ci->i_xattrs.prealloc_blob) ceph_buffer_put(ci->i_xattrs.prealloc_blob); @@ -978,21 +996,28 @@ retry: flags, value ? 
1 : -1, &xattr); if (!err) { - dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); + dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL, + &prealloc_cf); ci->i_xattrs.dirty = true; inode->i_ctime = CURRENT_TIME; } spin_unlock(&ci->i_ceph_lock); + if (lock_snap_rwsem) + up_read(&mdsc->snap_rwsem); if (dirty) __mark_inode_dirty(inode, dirty); + ceph_free_cap_flush(prealloc_cf); return err; do_sync: spin_unlock(&ci->i_ceph_lock); do_sync_unlocked: + if (lock_snap_rwsem) + up_read(&mdsc->snap_rwsem); err = ceph_sync_setxattr(dentry, name, value, size, flags); out: + ceph_free_cap_flush(prealloc_cf); kfree(newname); kfree(newval); kfree(xattr); @@ -1044,10 +1069,13 @@ int __ceph_removexattr(struct dentry *dentry, const char *name) struct inode *inode = d_inode(dentry); struct ceph_vxattr *vxattr; struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc; + struct ceph_cap_flush *prealloc_cf = NULL; int issued; int err; int required_blob_size; int dirty; + bool lock_snap_rwsem = false; if (!ceph_is_valid_xattr(name)) return -EOPNOTSUPP; @@ -1060,14 +1088,29 @@ int __ceph_removexattr(struct dentry *dentry, const char *name) if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN)) goto do_sync_unlocked; + prealloc_cf = ceph_alloc_cap_flush(); + if (!prealloc_cf) + return -ENOMEM; + err = -ENOMEM; spin_lock(&ci->i_ceph_lock); retry: issued = __ceph_caps_issued(ci, NULL); - dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued)); - if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) goto do_sync; + + if (!lock_snap_rwsem && !ci->i_head_snapc) { + lock_snap_rwsem = true; + if (!down_read_trylock(&mdsc->snap_rwsem)) { + spin_unlock(&ci->i_ceph_lock); + down_read(&mdsc->snap_rwsem); + spin_lock(&ci->i_ceph_lock); + goto retry; + } + } + + dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued)); + __build_xattrs(inode); required_blob_size = __get_required_blob_size(ci, 0, 0); @@ -1080,7 +1123,7 @@ retry: dout(" preaallocating new blob size=%d\n", required_blob_size); blob = ceph_buffer_new(required_blob_size, GFP_NOFS); if (!blob) - goto out; + goto do_sync_unlocked; spin_lock(&ci->i_ceph_lock); if (ci->i_xattrs.prealloc_blob) ceph_buffer_put(ci->i_xattrs.prealloc_blob); @@ -1090,18 +1133,24 @@ retry: err = __remove_xattr_by_name(ceph_inode(inode), name); - dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL); + dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL, + &prealloc_cf); ci->i_xattrs.dirty = true; inode->i_ctime = CURRENT_TIME; spin_unlock(&ci->i_ceph_lock); + if (lock_snap_rwsem) + up_read(&mdsc->snap_rwsem); if (dirty) __mark_inode_dirty(inode, dirty); + ceph_free_cap_flush(prealloc_cf); return err; do_sync: spin_unlock(&ci->i_ceph_lock); do_sync_unlocked: + if (lock_snap_rwsem) + up_read(&mdsc->snap_rwsem); + ceph_free_cap_flush(prealloc_cf); err = ceph_send_removexattr(dentry, name); -out: return err; } diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index add566303c68..c35ffdc12bba 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -142,6 +142,8 @@ static inline struct super_block *pts_sb_from_inode(struct inode *inode) if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) return inode->i_sb; #endif + if (!devpts_mnt) + return NULL; return devpts_mnt->mnt_sb; } @@ -525,10 +527,14 @@ static struct file_system_type devpts_fs_type = { int devpts_new_index(struct inode *ptmx_inode) { struct super_block *sb = pts_sb_from_inode(ptmx_inode); - struct pts_fs_info *fsi = 
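Both __ceph_setxattr() and __ceph_removexattr() above use the same lock-ordering dance: the sleeping snap_rwsem must not be taken while i_ceph_lock (a spinlock) is held, so they first attempt a non-blocking down_read_trylock(), and only if that fails do they drop the spinlock, block on the rwsem, re-take the spinlock and jump back to retry so the cap state is re-validated. A condensed user-space sketch of the pattern, with hypothetical lock and state names:

#include <pthread.h>
#include <stdbool.h>

static pthread_spinlock_t state_lock;   /* stands in for ci->i_ceph_lock; pthread_spin_init() elsewhere */
static pthread_rwlock_t snap_rwsem = PTHREAD_RWLOCK_INITIALIZER;

static void update_under_both_locks(void)
{
        bool have_rwsem = false;

        pthread_spin_lock(&state_lock);
retry:
        if (!have_rwsem) {
                if (pthread_rwlock_tryrdlock(&snap_rwsem) == 0) {
                        have_rwsem = true;
                } else {
                        /* may not sleep under the spinlock: drop it, block, re-check */
                        pthread_spin_unlock(&state_lock);
                        pthread_rwlock_rdlock(&snap_rwsem);
                        have_rwsem = true;
                        pthread_spin_lock(&state_lock);
                        goto retry;     /* protected state may have changed meanwhile */
                }
        }
        /* ... modify state while holding both locks ... */
        pthread_spin_unlock(&state_lock);
        pthread_rwlock_unlock(&snap_rwsem);
}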
DEVPTS_SB(sb); + struct pts_fs_info *fsi; int index; int ida_ret; + if (!sb) + return -ENODEV; + + fsi = DEVPTS_SB(sb); retry: if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) return -ENOMEM; @@ -584,11 +590,18 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, struct dentry *dentry; struct super_block *sb = pts_sb_from_inode(ptmx_inode); struct inode *inode; - struct dentry *root = sb->s_root; - struct pts_fs_info *fsi = DEVPTS_SB(sb); - struct pts_mount_opts *opts = &fsi->mount_opts; + struct dentry *root; + struct pts_fs_info *fsi; + struct pts_mount_opts *opts; char s[12]; + if (!sb) + return ERR_PTR(-ENODEV); + + root = sb->s_root; + fsi = DEVPTS_SB(sb); + opts = &fsi->mount_opts; + inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); @@ -676,12 +689,16 @@ static int __init init_devpts_fs(void) struct ctl_table_header *table; if (!err) { + struct vfsmount *mnt; + table = register_sysctl_table(pty_root_table); - devpts_mnt = kern_mount(&devpts_fs_type); - if (IS_ERR(devpts_mnt)) { - err = PTR_ERR(devpts_mnt); + mnt = kern_mount(&devpts_fs_type); + if (IS_ERR(mnt)) { + err = PTR_ERR(mnt); unregister_filesystem(&devpts_fs_type); unregister_sysctl_table(table); + } else { + devpts_mnt = mnt; } } return err; diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c index e5bbf748b698..eae2c11268bc 100644 --- a/fs/fuse/cuse.c +++ b/fs/fuse/cuse.c @@ -489,6 +489,7 @@ static void cuse_fc_release(struct fuse_conn *fc) */ static int cuse_channel_open(struct inode *inode, struct file *file) { + struct fuse_dev *fud; struct cuse_conn *cc; int rc; @@ -499,17 +500,22 @@ static int cuse_channel_open(struct inode *inode, struct file *file) fuse_conn_init(&cc->fc); + fud = fuse_dev_alloc(&cc->fc); + if (!fud) { + kfree(cc); + return -ENOMEM; + } + INIT_LIST_HEAD(&cc->list); cc->fc.release = cuse_fc_release; - cc->fc.connected = 1; cc->fc.initialized = 1; rc = cuse_send_init(cc); if (rc) { - fuse_conn_put(&cc->fc); + fuse_dev_free(fud); return rc; } - file->private_data = &cc->fc; /* channel owns base reference to cc */ + file->private_data = fud; return 0; } @@ -527,7 +533,8 @@ static int cuse_channel_open(struct inode *inode, struct file *file) */ static int cuse_channel_release(struct inode *inode, struct file *file) { - struct cuse_conn *cc = fc_to_cc(file->private_data); + struct fuse_dev *fud = file->private_data; + struct cuse_conn *cc = fc_to_cc(fud->fc); int rc; /* remove from the conntbl, no more access from this point on */ diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index c8b68ab2e574..80cc1b35d460 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -25,13 +25,13 @@ MODULE_ALIAS("devname:fuse"); static struct kmem_cache *fuse_req_cachep; -static struct fuse_conn *fuse_get_conn(struct file *file) +static struct fuse_dev *fuse_get_dev(struct file *file) { /* * Lockless access is OK, because file->private data is set * once during mount and is valid until the file is released. 
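fuse_get_dev() above now reads file->private_data through ACCESS_ONCE(), presumably because with the device-clone ioctl added later in this patch a descriptor's private_data can be filled in after the fd is already open, so readers need a single, non-torn load and must tolerate NULL. A user-space C11 analogue of the idiom (illustrative only):

#include <stdatomic.h>

struct fuse_dev;

static _Atomic(struct fuse_dev *) dev_ptr;      /* stands in for file->private_data */

static struct fuse_dev *get_dev(void)
{
        /* one relaxed load: never torn, never silently re-read by the compiler */
        return atomic_load_explicit(&dev_ptr, memory_order_relaxed);
}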
*/ - return file->private_data; + return ACCESS_ONCE(file->private_data); } static void fuse_request_init(struct fuse_req *req, struct page **pages, @@ -48,6 +48,7 @@ static void fuse_request_init(struct fuse_req *req, struct page **pages, req->pages = pages; req->page_descs = page_descs; req->max_pages = npages; + __set_bit(FR_PENDING, &req->flags); } static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags) @@ -168,6 +169,10 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, if (!fc->connected) goto out; + err = -ECONNREFUSED; + if (fc->conn_error) + goto out; + req = fuse_request_alloc(npages); err = -ENOMEM; if (!req) { @@ -177,8 +182,10 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, } fuse_req_init_context(req); - req->waiting = 1; - req->background = for_background; + __set_bit(FR_WAITING, &req->flags); + if (for_background) + __set_bit(FR_BACKGROUND, &req->flags); + return req; out: @@ -268,15 +275,15 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc, req = get_reserved_req(fc, file); fuse_req_init_context(req); - req->waiting = 1; - req->background = 0; + __set_bit(FR_WAITING, &req->flags); + __clear_bit(FR_BACKGROUND, &req->flags); return req; } void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) { if (atomic_dec_and_test(&req->count)) { - if (unlikely(req->background)) { + if (test_bit(FR_BACKGROUND, &req->flags)) { /* * We get here in the unlikely case that a background * request was allocated but not sent @@ -287,8 +294,10 @@ void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) spin_unlock(&fc->lock); } - if (req->waiting) + if (test_bit(FR_WAITING, &req->flags)) { + __clear_bit(FR_WAITING, &req->flags); atomic_dec(&fc->num_waiting); + } if (req->stolen_file) put_reserved_req(fc, req); @@ -309,46 +318,38 @@ static unsigned len_args(unsigned numargs, struct fuse_arg *args) return nbytes; } -static u64 fuse_get_unique(struct fuse_conn *fc) +static u64 fuse_get_unique(struct fuse_iqueue *fiq) { - fc->reqctr++; - /* zero is special */ - if (fc->reqctr == 0) - fc->reqctr = 1; - - return fc->reqctr; + return ++fiq->reqctr; } -static void queue_request(struct fuse_conn *fc, struct fuse_req *req) +static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req) { req->in.h.len = sizeof(struct fuse_in_header) + len_args(req->in.numargs, (struct fuse_arg *) req->in.args); - list_add_tail(&req->list, &fc->pending); - req->state = FUSE_REQ_PENDING; - if (!req->waiting) { - req->waiting = 1; - atomic_inc(&fc->num_waiting); - } - wake_up(&fc->waitq); - kill_fasync(&fc->fasync, SIGIO, POLL_IN); + list_add_tail(&req->list, &fiq->pending); + wake_up_locked(&fiq->waitq); + kill_fasync(&fiq->fasync, SIGIO, POLL_IN); } void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget, u64 nodeid, u64 nlookup) { + struct fuse_iqueue *fiq = &fc->iq; + forget->forget_one.nodeid = nodeid; forget->forget_one.nlookup = nlookup; - spin_lock(&fc->lock); - if (fc->connected) { - fc->forget_list_tail->next = forget; - fc->forget_list_tail = forget; - wake_up(&fc->waitq); - kill_fasync(&fc->fasync, SIGIO, POLL_IN); + spin_lock(&fiq->waitq.lock); + if (fiq->connected) { + fiq->forget_list_tail->next = forget; + fiq->forget_list_tail = forget; + wake_up_locked(&fiq->waitq); + kill_fasync(&fiq->fasync, SIGIO, POLL_IN); } else { kfree(forget); } - spin_unlock(&fc->lock); + spin_unlock(&fiq->waitq.lock); } static void flush_bg_queue(struct fuse_conn *fc) @@ -356,12 
+357,15 @@ static void flush_bg_queue(struct fuse_conn *fc) while (fc->active_background < fc->max_background && !list_empty(&fc->bg_queue)) { struct fuse_req *req; + struct fuse_iqueue *fiq = &fc->iq; req = list_entry(fc->bg_queue.next, struct fuse_req, list); list_del(&req->list); fc->active_background++; - req->in.h.unique = fuse_get_unique(fc); - queue_request(fc, req); + spin_lock(&fiq->waitq.lock); + req->in.h.unique = fuse_get_unique(fiq); + queue_request(fiq, req); + spin_unlock(&fiq->waitq.lock); } } @@ -372,20 +376,22 @@ static void flush_bg_queue(struct fuse_conn *fc) * was closed. The requester thread is woken up (if still waiting), * the 'end' callback is called if given, else the reference to the * request is released - * - * Called with fc->lock, unlocks it */ static void request_end(struct fuse_conn *fc, struct fuse_req *req) -__releases(fc->lock) { - void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; - req->end = NULL; - list_del(&req->list); - list_del(&req->intr_entry); - req->state = FUSE_REQ_FINISHED; - if (req->background) { - req->background = 0; + struct fuse_iqueue *fiq = &fc->iq; + + if (test_and_set_bit(FR_FINISHED, &req->flags)) + return; + spin_lock(&fiq->waitq.lock); + list_del_init(&req->intr_entry); + spin_unlock(&fiq->waitq.lock); + WARN_ON(test_bit(FR_PENDING, &req->flags)); + WARN_ON(test_bit(FR_SENT, &req->flags)); + if (test_bit(FR_BACKGROUND, &req->flags)) { + spin_lock(&fc->lock); + clear_bit(FR_BACKGROUND, &req->flags); if (fc->num_background == fc->max_background) fc->blocked = 0; @@ -401,122 +407,105 @@ __releases(fc->lock) fc->num_background--; fc->active_background--; flush_bg_queue(fc); + spin_unlock(&fc->lock); } - spin_unlock(&fc->lock); wake_up(&req->waitq); - if (end) - end(fc, req); + if (req->end) + req->end(fc, req); fuse_put_request(fc, req); } -static void wait_answer_interruptible(struct fuse_conn *fc, - struct fuse_req *req) -__releases(fc->lock) -__acquires(fc->lock) -{ - if (signal_pending(current)) - return; - - spin_unlock(&fc->lock); - wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED); - spin_lock(&fc->lock); -} - -static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req) +static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) { - list_add_tail(&req->intr_entry, &fc->interrupts); - wake_up(&fc->waitq); - kill_fasync(&fc->fasync, SIGIO, POLL_IN); + spin_lock(&fiq->waitq.lock); + if (list_empty(&req->intr_entry)) { + list_add_tail(&req->intr_entry, &fiq->interrupts); + wake_up_locked(&fiq->waitq); + } + spin_unlock(&fiq->waitq.lock); + kill_fasync(&fiq->fasync, SIGIO, POLL_IN); } static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req) -__releases(fc->lock) -__acquires(fc->lock) { + struct fuse_iqueue *fiq = &fc->iq; + int err; + if (!fc->no_interrupt) { /* Any signal may interrupt this */ - wait_answer_interruptible(fc, req); - - if (req->aborted) - goto aborted; - if (req->state == FUSE_REQ_FINISHED) + err = wait_event_interruptible(req->waitq, + test_bit(FR_FINISHED, &req->flags)); + if (!err) return; - req->interrupted = 1; - if (req->state == FUSE_REQ_SENT) - queue_interrupt(fc, req); + set_bit(FR_INTERRUPTED, &req->flags); + /* matches barrier in fuse_dev_do_read() */ + smp_mb__after_atomic(); + if (test_bit(FR_SENT, &req->flags)) + queue_interrupt(fiq, req); } - if (!req->force) { + if (!test_bit(FR_FORCE, &req->flags)) { sigset_t oldset; /* Only fatal signals may interrupt this */ block_sigs(&oldset); - wait_answer_interruptible(fc, req); 
+ err = wait_event_interruptible(req->waitq, + test_bit(FR_FINISHED, &req->flags)); restore_sigs(&oldset); - if (req->aborted) - goto aborted; - if (req->state == FUSE_REQ_FINISHED) + if (!err) return; + spin_lock(&fiq->waitq.lock); /* Request is not yet in userspace, bail out */ - if (req->state == FUSE_REQ_PENDING) { + if (test_bit(FR_PENDING, &req->flags)) { list_del(&req->list); + spin_unlock(&fiq->waitq.lock); __fuse_put_request(req); req->out.h.error = -EINTR; return; } + spin_unlock(&fiq->waitq.lock); } /* * Either request is already in userspace, or it was forced. * Wait it out. */ - spin_unlock(&fc->lock); - wait_event(req->waitq, req->state == FUSE_REQ_FINISHED); - spin_lock(&fc->lock); - - if (!req->aborted) - return; - - aborted: - BUG_ON(req->state != FUSE_REQ_FINISHED); - if (req->locked) { - /* This is uninterruptible sleep, because data is - being copied to/from the buffers of req. During - locked state, there mustn't be any filesystem - operation (e.g. page fault), since that could lead - to deadlock */ - spin_unlock(&fc->lock); - wait_event(req->waitq, !req->locked); - spin_lock(&fc->lock); - } + wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags)); } static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) { - BUG_ON(req->background); - spin_lock(&fc->lock); - if (!fc->connected) + struct fuse_iqueue *fiq = &fc->iq; + + BUG_ON(test_bit(FR_BACKGROUND, &req->flags)); + spin_lock(&fiq->waitq.lock); + if (!fiq->connected) { + spin_unlock(&fiq->waitq.lock); req->out.h.error = -ENOTCONN; - else if (fc->conn_error) - req->out.h.error = -ECONNREFUSED; - else { - req->in.h.unique = fuse_get_unique(fc); - queue_request(fc, req); + } else { + req->in.h.unique = fuse_get_unique(fiq); + queue_request(fiq, req); /* acquire extra reference, since request is still needed after request_end() */ __fuse_get_request(req); + spin_unlock(&fiq->waitq.lock); request_wait_answer(fc, req); + /* Pairs with smp_wmb() in request_end() */ + smp_rmb(); } - spin_unlock(&fc->lock); } void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) { - req->isreply = 1; + __set_bit(FR_ISREPLY, &req->flags); + if (!test_bit(FR_WAITING, &req->flags)) { + __set_bit(FR_WAITING, &req->flags); + atomic_inc(&fc->num_waiting); + } __fuse_request_send(fc, req); } EXPORT_SYMBOL_GPL(fuse_request_send); @@ -586,10 +575,20 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args) return ret; } -static void fuse_request_send_nowait_locked(struct fuse_conn *fc, - struct fuse_req *req) +/* + * Called under fc->lock + * + * fc->connected must have been checked previously + */ +void fuse_request_send_background_locked(struct fuse_conn *fc, + struct fuse_req *req) { - BUG_ON(!req->background); + BUG_ON(!test_bit(FR_BACKGROUND, &req->flags)); + if (!test_bit(FR_WAITING, &req->flags)) { + __set_bit(FR_WAITING, &req->flags); + atomic_inc(&fc->num_waiting); + } + __set_bit(FR_ISREPLY, &req->flags); fc->num_background++; if (fc->num_background == fc->max_background) fc->blocked = 1; @@ -602,54 +601,40 @@ static void fuse_request_send_nowait_locked(struct fuse_conn *fc, flush_bg_queue(fc); } -static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req) +void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req) { + BUG_ON(!req->end); spin_lock(&fc->lock); if (fc->connected) { - fuse_request_send_nowait_locked(fc, req); + fuse_request_send_background_locked(fc, req); spin_unlock(&fc->lock); } else { + spin_unlock(&fc->lock); req->out.h.error = 
-ENOTCONN; - request_end(fc, req); + req->end(fc, req); + fuse_put_request(fc, req); } } - -void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req) -{ - req->isreply = 1; - fuse_request_send_nowait(fc, req); -} EXPORT_SYMBOL_GPL(fuse_request_send_background); static int fuse_request_send_notify_reply(struct fuse_conn *fc, struct fuse_req *req, u64 unique) { int err = -ENODEV; + struct fuse_iqueue *fiq = &fc->iq; - req->isreply = 0; + __clear_bit(FR_ISREPLY, &req->flags); req->in.h.unique = unique; - spin_lock(&fc->lock); - if (fc->connected) { - queue_request(fc, req); + spin_lock(&fiq->waitq.lock); + if (fiq->connected) { + queue_request(fiq, req); err = 0; } - spin_unlock(&fc->lock); + spin_unlock(&fiq->waitq.lock); return err; } -/* - * Called under fc->lock - * - * fc->connected must have been checked previously - */ -void fuse_request_send_background_locked(struct fuse_conn *fc, - struct fuse_req *req) -{ - req->isreply = 1; - fuse_request_send_nowait_locked(fc, req); -} - void fuse_force_forget(struct file *file, u64 nodeid) { struct inode *inode = file_inode(file); @@ -665,7 +650,7 @@ void fuse_force_forget(struct file *file, u64 nodeid) req->in.numargs = 1; req->in.args[0].size = sizeof(inarg); req->in.args[0].value = &inarg; - req->isreply = 0; + __clear_bit(FR_ISREPLY, &req->flags); __fuse_request_send(fc, req); /* ignore errors */ fuse_put_request(fc, req); @@ -676,38 +661,39 @@ void fuse_force_forget(struct file *file, u64 nodeid) * anything that could cause a page-fault. If the request was already * aborted bail out. */ -static int lock_request(struct fuse_conn *fc, struct fuse_req *req) +static int lock_request(struct fuse_req *req) { int err = 0; if (req) { - spin_lock(&fc->lock); - if (req->aborted) + spin_lock(&req->waitq.lock); + if (test_bit(FR_ABORTED, &req->flags)) err = -ENOENT; else - req->locked = 1; - spin_unlock(&fc->lock); + set_bit(FR_LOCKED, &req->flags); + spin_unlock(&req->waitq.lock); } return err; } /* - * Unlock request. If it was aborted during being locked, the - * requester thread is currently waiting for it to be unlocked, so - * wake it up. + * Unlock request. If it was aborted while locked, caller is responsible + * for unlocking and ending the request. 
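With the per-request flags word, request_end() above protects itself with test_and_set_bit(FR_FINISHED, ...): whichever path reaches it first performs the teardown exactly once and any later caller returns immediately, without needing fc->lock. A small C11 sketch of that run-once idiom (names illustrative):

#include <stdatomic.h>
#include <stdbool.h>

enum { FR_FINISHED };                   /* bit number, as in enum fuse_req_flag */

struct request {
        atomic_ulong flags;
};

/* true only for the single caller that actually ends the request */
static bool request_end_once(struct request *req)
{
        unsigned long old = atomic_fetch_or(&req->flags, 1UL << FR_FINISHED);

        if (old & (1UL << FR_FINISHED))
                return false;           /* someone else already finished it */
        /* ... wake waiters, run the ->end() callback, drop the reference ... */
        return true;
}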
*/ -static void unlock_request(struct fuse_conn *fc, struct fuse_req *req) +static int unlock_request(struct fuse_req *req) { + int err = 0; if (req) { - spin_lock(&fc->lock); - req->locked = 0; - if (req->aborted) - wake_up(&req->waitq); - spin_unlock(&fc->lock); + spin_lock(&req->waitq.lock); + if (test_bit(FR_ABORTED, &req->flags)) + err = -ENOENT; + else + clear_bit(FR_LOCKED, &req->flags); + spin_unlock(&req->waitq.lock); } + return err; } struct fuse_copy_state { - struct fuse_conn *fc; int write; struct fuse_req *req; struct iov_iter *iter; @@ -721,13 +707,10 @@ struct fuse_copy_state { unsigned move_pages:1; }; -static void fuse_copy_init(struct fuse_copy_state *cs, - struct fuse_conn *fc, - int write, +static void fuse_copy_init(struct fuse_copy_state *cs, int write, struct iov_iter *iter) { memset(cs, 0, sizeof(*cs)); - cs->fc = fc; cs->write = write; cs->iter = iter; } @@ -760,7 +743,10 @@ static int fuse_copy_fill(struct fuse_copy_state *cs) struct page *page; int err; - unlock_request(cs->fc, cs->req); + err = unlock_request(cs->req); + if (err) + return err; + fuse_copy_finish(cs); if (cs->pipebufs) { struct pipe_buffer *buf = cs->pipebufs; @@ -809,7 +795,7 @@ static int fuse_copy_fill(struct fuse_copy_state *cs) iov_iter_advance(cs->iter, err); } - return lock_request(cs->fc, cs->req); + return lock_request(cs->req); } /* Do as much copy to/from userspace buffer as we can */ @@ -860,7 +846,10 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) struct page *newpage; struct pipe_buffer *buf = cs->pipebufs; - unlock_request(cs->fc, cs->req); + err = unlock_request(cs->req); + if (err) + return err; + fuse_copy_finish(cs); err = buf->ops->confirm(cs->pipe, buf); @@ -914,12 +903,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) lru_cache_add_file(newpage); err = 0; - spin_lock(&cs->fc->lock); - if (cs->req->aborted) + spin_lock(&cs->req->waitq.lock); + if (test_bit(FR_ABORTED, &cs->req->flags)) err = -ENOENT; else *pagep = newpage; - spin_unlock(&cs->fc->lock); + spin_unlock(&cs->req->waitq.lock); if (err) { unlock_page(newpage); @@ -939,7 +928,7 @@ out_fallback: cs->pg = buf->page; cs->offset = buf->offset; - err = lock_request(cs->fc, cs->req); + err = lock_request(cs->req); if (err) return err; @@ -950,11 +939,15 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page, unsigned offset, unsigned count) { struct pipe_buffer *buf; + int err; if (cs->nr_segs == cs->pipe->buffers) return -EIO; - unlock_request(cs->fc, cs->req); + err = unlock_request(cs->req); + if (err) + return err; + fuse_copy_finish(cs); buf = cs->pipebufs; @@ -1065,36 +1058,15 @@ static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs, return err; } -static int forget_pending(struct fuse_conn *fc) +static int forget_pending(struct fuse_iqueue *fiq) { - return fc->forget_list_head.next != NULL; + return fiq->forget_list_head.next != NULL; } -static int request_pending(struct fuse_conn *fc) +static int request_pending(struct fuse_iqueue *fiq) { - return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) || - forget_pending(fc); -} - -/* Wait until a request is available on the pending list */ -static void request_wait(struct fuse_conn *fc) -__releases(fc->lock) -__acquires(fc->lock) -{ - DECLARE_WAITQUEUE(wait, current); - - add_wait_queue_exclusive(&fc->waitq, &wait); - while (fc->connected && !request_pending(fc)) { - set_current_state(TASK_INTERRUPTIBLE); - if (signal_pending(current)) - break; - - 
spin_unlock(&fc->lock); - schedule(); - spin_lock(&fc->lock); - } - set_current_state(TASK_RUNNING); - remove_wait_queue(&fc->waitq, &wait); + return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) || + forget_pending(fiq); } /* @@ -1103,11 +1075,12 @@ __acquires(fc->lock) * Unlike other requests this is assembled on demand, without a need * to allocate a separate fuse_req structure. * - * Called with fc->lock held, releases it + * Called with fiq->waitq.lock held, releases it */ -static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs, +static int fuse_read_interrupt(struct fuse_iqueue *fiq, + struct fuse_copy_state *cs, size_t nbytes, struct fuse_req *req) -__releases(fc->lock) +__releases(fiq->waitq.lock) { struct fuse_in_header ih; struct fuse_interrupt_in arg; @@ -1115,7 +1088,7 @@ __releases(fc->lock) int err; list_del_init(&req->intr_entry); - req->intr_unique = fuse_get_unique(fc); + req->intr_unique = fuse_get_unique(fiq); memset(&ih, 0, sizeof(ih)); memset(&arg, 0, sizeof(arg)); ih.len = reqsize; @@ -1123,7 +1096,7 @@ __releases(fc->lock) ih.unique = req->intr_unique; arg.unique = req->in.h.unique; - spin_unlock(&fc->lock); + spin_unlock(&fiq->waitq.lock); if (nbytes < reqsize) return -EINVAL; @@ -1135,21 +1108,21 @@ __releases(fc->lock) return err ? err : reqsize; } -static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc, +static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq, unsigned max, unsigned *countp) { - struct fuse_forget_link *head = fc->forget_list_head.next; + struct fuse_forget_link *head = fiq->forget_list_head.next; struct fuse_forget_link **newhead = &head; unsigned count; for (count = 0; *newhead != NULL && count < max; count++) newhead = &(*newhead)->next; - fc->forget_list_head.next = *newhead; + fiq->forget_list_head.next = *newhead; *newhead = NULL; - if (fc->forget_list_head.next == NULL) - fc->forget_list_tail = &fc->forget_list_head; + if (fiq->forget_list_head.next == NULL) + fiq->forget_list_tail = &fiq->forget_list_head; if (countp != NULL) *countp = count; @@ -1157,24 +1130,24 @@ static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc, return head; } -static int fuse_read_single_forget(struct fuse_conn *fc, +static int fuse_read_single_forget(struct fuse_iqueue *fiq, struct fuse_copy_state *cs, size_t nbytes) -__releases(fc->lock) +__releases(fiq->waitq.lock) { int err; - struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL); + struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL); struct fuse_forget_in arg = { .nlookup = forget->forget_one.nlookup, }; struct fuse_in_header ih = { .opcode = FUSE_FORGET, .nodeid = forget->forget_one.nodeid, - .unique = fuse_get_unique(fc), + .unique = fuse_get_unique(fiq), .len = sizeof(ih) + sizeof(arg), }; - spin_unlock(&fc->lock); + spin_unlock(&fiq->waitq.lock); kfree(forget); if (nbytes < ih.len) return -EINVAL; @@ -1190,9 +1163,9 @@ __releases(fc->lock) return ih.len; } -static int fuse_read_batch_forget(struct fuse_conn *fc, +static int fuse_read_batch_forget(struct fuse_iqueue *fiq, struct fuse_copy_state *cs, size_t nbytes) -__releases(fc->lock) +__releases(fiq->waitq.lock) { int err; unsigned max_forgets; @@ -1201,18 +1174,18 @@ __releases(fc->lock) struct fuse_batch_forget_in arg = { .count = 0 }; struct fuse_in_header ih = { .opcode = FUSE_BATCH_FORGET, - .unique = fuse_get_unique(fc), + .unique = fuse_get_unique(fiq), .len = sizeof(ih) + sizeof(arg), }; if (nbytes < ih.len) { - spin_unlock(&fc->lock); + 
spin_unlock(&fiq->waitq.lock); return -EINVAL; } max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one); - head = dequeue_forget(fc, max_forgets, &count); - spin_unlock(&fc->lock); + head = dequeue_forget(fiq, max_forgets, &count); + spin_unlock(&fiq->waitq.lock); arg.count = count; ih.len += count * sizeof(struct fuse_forget_one); @@ -1239,14 +1212,15 @@ __releases(fc->lock) return ih.len; } -static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs, +static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq, + struct fuse_copy_state *cs, size_t nbytes) -__releases(fc->lock) +__releases(fiq->waitq.lock) { - if (fc->minor < 16 || fc->forget_list_head.next->next == NULL) - return fuse_read_single_forget(fc, cs, nbytes); + if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL) + return fuse_read_single_forget(fiq, cs, nbytes); else - return fuse_read_batch_forget(fc, cs, nbytes); + return fuse_read_batch_forget(fiq, cs, nbytes); } /* @@ -1258,46 +1232,51 @@ __releases(fc->lock) * request_end(). Otherwise add it to the processing list, and set * the 'sent' flag. */ -static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file, +static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file, struct fuse_copy_state *cs, size_t nbytes) { - int err; + ssize_t err; + struct fuse_conn *fc = fud->fc; + struct fuse_iqueue *fiq = &fc->iq; + struct fuse_pqueue *fpq = &fud->pq; struct fuse_req *req; struct fuse_in *in; unsigned reqsize; restart: - spin_lock(&fc->lock); + spin_lock(&fiq->waitq.lock); err = -EAGAIN; - if ((file->f_flags & O_NONBLOCK) && fc->connected && - !request_pending(fc)) + if ((file->f_flags & O_NONBLOCK) && fiq->connected && + !request_pending(fiq)) goto err_unlock; - request_wait(fc); - err = -ENODEV; - if (!fc->connected) + err = wait_event_interruptible_exclusive_locked(fiq->waitq, + !fiq->connected || request_pending(fiq)); + if (err) goto err_unlock; - err = -ERESTARTSYS; - if (!request_pending(fc)) + + err = -ENODEV; + if (!fiq->connected) goto err_unlock; - if (!list_empty(&fc->interrupts)) { - req = list_entry(fc->interrupts.next, struct fuse_req, + if (!list_empty(&fiq->interrupts)) { + req = list_entry(fiq->interrupts.next, struct fuse_req, intr_entry); - return fuse_read_interrupt(fc, cs, nbytes, req); + return fuse_read_interrupt(fiq, cs, nbytes, req); } - if (forget_pending(fc)) { - if (list_empty(&fc->pending) || fc->forget_batch-- > 0) - return fuse_read_forget(fc, cs, nbytes); + if (forget_pending(fiq)) { + if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0) + return fuse_read_forget(fc, fiq, cs, nbytes); - if (fc->forget_batch <= -8) - fc->forget_batch = 16; + if (fiq->forget_batch <= -8) + fiq->forget_batch = 16; } - req = list_entry(fc->pending.next, struct fuse_req, list); - req->state = FUSE_REQ_READING; - list_move(&req->list, &fc->io); + req = list_entry(fiq->pending.next, struct fuse_req, list); + clear_bit(FR_PENDING, &req->flags); + list_del_init(&req->list); + spin_unlock(&fiq->waitq.lock); in = &req->in; reqsize = in->h.len; @@ -1310,37 +1289,48 @@ static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file, request_end(fc, req); goto restart; } - spin_unlock(&fc->lock); + spin_lock(&fpq->lock); + list_add(&req->list, &fpq->io); + spin_unlock(&fpq->lock); cs->req = req; err = fuse_copy_one(cs, &in->h, sizeof(in->h)); if (!err) err = fuse_copy_args(cs, in->numargs, in->argpages, (struct fuse_arg *) in->args, 0); fuse_copy_finish(cs); - spin_lock(&fc->lock); - 
req->locked = 0; - if (req->aborted) { - request_end(fc, req); - return -ENODEV; + spin_lock(&fpq->lock); + clear_bit(FR_LOCKED, &req->flags); + if (!fpq->connected) { + err = -ENODEV; + goto out_end; } if (err) { req->out.h.error = -EIO; - request_end(fc, req); - return err; + goto out_end; } - if (!req->isreply) - request_end(fc, req); - else { - req->state = FUSE_REQ_SENT; - list_move_tail(&req->list, &fc->processing); - if (req->interrupted) - queue_interrupt(fc, req); - spin_unlock(&fc->lock); + if (!test_bit(FR_ISREPLY, &req->flags)) { + err = reqsize; + goto out_end; } + list_move_tail(&req->list, &fpq->processing); + spin_unlock(&fpq->lock); + set_bit(FR_SENT, &req->flags); + /* matches barrier in request_wait_answer() */ + smp_mb__after_atomic(); + if (test_bit(FR_INTERRUPTED, &req->flags)) + queue_interrupt(fiq, req); + return reqsize; +out_end: + if (!test_bit(FR_PRIVATE, &req->flags)) + list_del_init(&req->list); + spin_unlock(&fpq->lock); + request_end(fc, req); + return err; + err_unlock: - spin_unlock(&fc->lock); + spin_unlock(&fiq->waitq.lock); return err; } @@ -1359,16 +1349,17 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to) { struct fuse_copy_state cs; struct file *file = iocb->ki_filp; - struct fuse_conn *fc = fuse_get_conn(file); - if (!fc) + struct fuse_dev *fud = fuse_get_dev(file); + + if (!fud) return -EPERM; if (!iter_is_iovec(to)) return -EINVAL; - fuse_copy_init(&cs, fc, 1, to); + fuse_copy_init(&cs, 1, to); - return fuse_dev_do_read(fc, file, &cs, iov_iter_count(to)); + return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to)); } static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos, @@ -1380,18 +1371,19 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos, int do_wakeup = 0; struct pipe_buffer *bufs; struct fuse_copy_state cs; - struct fuse_conn *fc = fuse_get_conn(in); - if (!fc) + struct fuse_dev *fud = fuse_get_dev(in); + + if (!fud) return -EPERM; bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL); if (!bufs) return -ENOMEM; - fuse_copy_init(&cs, fc, 1, NULL); + fuse_copy_init(&cs, 1, NULL); cs.pipebufs = bufs; cs.pipe = pipe; - ret = fuse_dev_do_read(fc, in, &cs, len); + ret = fuse_dev_do_read(fud, in, &cs, len); if (ret < 0) goto out; @@ -1830,11 +1822,11 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, } /* Look up request on processing list by unique ID */ -static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique) +static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique) { struct fuse_req *req; - list_for_each_entry(req, &fc->processing, list) { + list_for_each_entry(req, &fpq->processing, list) { if (req->in.h.unique == unique || req->intr_unique == unique) return req; } @@ -1871,10 +1863,12 @@ static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out, * it from the list and copy the rest of the buffer to the request. 
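fuse_dev_do_write(), shown below, looks the request up by the unique id echoed back in the reply header and then copies the rest of the written buffer into it. From the daemon's point of view a reply is simply a write() of struct fuse_out_header (plus any payload) on the /dev/fuse descriptor; a minimal error-only reply might look like this sketch:

#include <stdint.h>
#include <unistd.h>
#include <linux/fuse.h>

/* 'unique' is copied from the fuse_in_header of the request read() earlier */
static int fuse_reply_error(int devfd, uint64_t unique, int err)
{
        struct fuse_out_header oh = {
                .len    = sizeof(oh),
                .error  = -err,         /* the kernel expects 0 or a negative errno */
                .unique = unique,
        };

        return write(devfd, &oh, sizeof(oh)) == sizeof(oh) ? 0 : -1;
}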
* The request is finished by calling request_end() */ -static ssize_t fuse_dev_do_write(struct fuse_conn *fc, +static ssize_t fuse_dev_do_write(struct fuse_dev *fud, struct fuse_copy_state *cs, size_t nbytes) { int err; + struct fuse_conn *fc = fud->fc; + struct fuse_pqueue *fpq = &fud->pq; struct fuse_req *req; struct fuse_out_header oh; @@ -1902,63 +1896,60 @@ static ssize_t fuse_dev_do_write(struct fuse_conn *fc, if (oh.error <= -1000 || oh.error > 0) goto err_finish; - spin_lock(&fc->lock); + spin_lock(&fpq->lock); err = -ENOENT; - if (!fc->connected) - goto err_unlock; + if (!fpq->connected) + goto err_unlock_pq; - req = request_find(fc, oh.unique); + req = request_find(fpq, oh.unique); if (!req) - goto err_unlock; + goto err_unlock_pq; - if (req->aborted) { - spin_unlock(&fc->lock); - fuse_copy_finish(cs); - spin_lock(&fc->lock); - request_end(fc, req); - return -ENOENT; - } /* Is it an interrupt reply? */ if (req->intr_unique == oh.unique) { + spin_unlock(&fpq->lock); + err = -EINVAL; if (nbytes != sizeof(struct fuse_out_header)) - goto err_unlock; + goto err_finish; if (oh.error == -ENOSYS) fc->no_interrupt = 1; else if (oh.error == -EAGAIN) - queue_interrupt(fc, req); + queue_interrupt(&fc->iq, req); - spin_unlock(&fc->lock); fuse_copy_finish(cs); return nbytes; } - req->state = FUSE_REQ_WRITING; - list_move(&req->list, &fc->io); + clear_bit(FR_SENT, &req->flags); + list_move(&req->list, &fpq->io); req->out.h = oh; - req->locked = 1; + set_bit(FR_LOCKED, &req->flags); + spin_unlock(&fpq->lock); cs->req = req; if (!req->out.page_replace) cs->move_pages = 0; - spin_unlock(&fc->lock); err = copy_out_args(cs, &req->out, nbytes); fuse_copy_finish(cs); - spin_lock(&fc->lock); - req->locked = 0; - if (!err) { - if (req->aborted) - err = -ENOENT; - } else if (!req->aborted) + spin_lock(&fpq->lock); + clear_bit(FR_LOCKED, &req->flags); + if (!fpq->connected) + err = -ENOENT; + else if (err) req->out.h.error = -EIO; + if (!test_bit(FR_PRIVATE, &req->flags)) + list_del_init(&req->list); + spin_unlock(&fpq->lock); + request_end(fc, req); return err ? 
err : nbytes; - err_unlock: - spin_unlock(&fc->lock); + err_unlock_pq: + spin_unlock(&fpq->lock); err_finish: fuse_copy_finish(cs); return err; @@ -1967,16 +1958,17 @@ static ssize_t fuse_dev_do_write(struct fuse_conn *fc, static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from) { struct fuse_copy_state cs; - struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp); - if (!fc) + struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp); + + if (!fud) return -EPERM; if (!iter_is_iovec(from)) return -EINVAL; - fuse_copy_init(&cs, fc, 0, from); + fuse_copy_init(&cs, 0, from); - return fuse_dev_do_write(fc, &cs, iov_iter_count(from)); + return fuse_dev_do_write(fud, &cs, iov_iter_count(from)); } static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, @@ -1987,12 +1979,12 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, unsigned idx; struct pipe_buffer *bufs; struct fuse_copy_state cs; - struct fuse_conn *fc; + struct fuse_dev *fud; size_t rem; ssize_t ret; - fc = fuse_get_conn(out); - if (!fc) + fud = fuse_get_dev(out); + if (!fud) return -EPERM; bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL); @@ -2039,7 +2031,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, } pipe_unlock(pipe); - fuse_copy_init(&cs, fc, 0, NULL); + fuse_copy_init(&cs, 0, NULL); cs.pipebufs = bufs; cs.nr_segs = nbuf; cs.pipe = pipe; @@ -2047,7 +2039,7 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, if (flags & SPLICE_F_MOVE) cs.move_pages = 1; - ret = fuse_dev_do_write(fc, &cs, len); + ret = fuse_dev_do_write(fud, &cs, len); for (idx = 0; idx < nbuf; idx++) { struct pipe_buffer *buf = &bufs[idx]; @@ -2061,18 +2053,21 @@ out: static unsigned fuse_dev_poll(struct file *file, poll_table *wait) { unsigned mask = POLLOUT | POLLWRNORM; - struct fuse_conn *fc = fuse_get_conn(file); - if (!fc) + struct fuse_iqueue *fiq; + struct fuse_dev *fud = fuse_get_dev(file); + + if (!fud) return POLLERR; - poll_wait(file, &fc->waitq, wait); + fiq = &fud->fc->iq; + poll_wait(file, &fiq->waitq, wait); - spin_lock(&fc->lock); - if (!fc->connected) + spin_lock(&fiq->waitq.lock); + if (!fiq->connected) mask = POLLERR; - else if (request_pending(fc)) + else if (request_pending(fiq)) mask |= POLLIN | POLLRDNORM; - spin_unlock(&fc->lock); + spin_unlock(&fiq->waitq.lock); return mask; } @@ -2083,67 +2078,18 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait) * This function releases and reacquires fc->lock */ static void end_requests(struct fuse_conn *fc, struct list_head *head) -__releases(fc->lock) -__acquires(fc->lock) { while (!list_empty(head)) { struct fuse_req *req; req = list_entry(head->next, struct fuse_req, list); req->out.h.error = -ECONNABORTED; - request_end(fc, req); - spin_lock(&fc->lock); - } -} - -/* - * Abort requests under I/O - * - * The requests are set to aborted and finished, and the request - * waiter is woken up. This will make request_wait_answer() wait - * until the request is unlocked and then return. - * - * If the request is asynchronous, then the end function needs to be - * called after waiting for the request to be unlocked (if it was - * locked). 
- */ -static void end_io_requests(struct fuse_conn *fc) -__releases(fc->lock) -__acquires(fc->lock) -{ - while (!list_empty(&fc->io)) { - struct fuse_req *req = - list_entry(fc->io.next, struct fuse_req, list); - void (*end) (struct fuse_conn *, struct fuse_req *) = req->end; - - req->aborted = 1; - req->out.h.error = -ECONNABORTED; - req->state = FUSE_REQ_FINISHED; + clear_bit(FR_PENDING, &req->flags); + clear_bit(FR_SENT, &req->flags); list_del_init(&req->list); - wake_up(&req->waitq); - if (end) { - req->end = NULL; - __fuse_get_request(req); - spin_unlock(&fc->lock); - wait_event(req->waitq, !req->locked); - end(fc, req); - fuse_put_request(fc, req); - spin_lock(&fc->lock); - } + request_end(fc, req); } } -static void end_queued_requests(struct fuse_conn *fc) -__releases(fc->lock) -__acquires(fc->lock) -{ - fc->max_background = UINT_MAX; - flush_bg_queue(fc); - end_requests(fc, &fc->pending); - end_requests(fc, &fc->processing); - while (forget_pending(fc)) - kfree(dequeue_forget(fc, 1, NULL)); -} - static void end_polls(struct fuse_conn *fc) { struct rb_node *p; @@ -2162,67 +2108,156 @@ static void end_polls(struct fuse_conn *fc) /* * Abort all requests. * - * Emergency exit in case of a malicious or accidental deadlock, or - * just a hung filesystem. + * Emergency exit in case of a malicious or accidental deadlock, or just a hung + * filesystem. * - * The same effect is usually achievable through killing the - * filesystem daemon and all users of the filesystem. The exception - * is the combination of an asynchronous request and the tricky - * deadlock (see Documentation/filesystems/fuse.txt). + * The same effect is usually achievable through killing the filesystem daemon + * and all users of the filesystem. The exception is the combination of an + * asynchronous request and the tricky deadlock (see + * Documentation/filesystems/fuse.txt). * - * During the aborting, progression of requests from the pending and - * processing lists onto the io list, and progression of new requests - * onto the pending list is prevented by req->connected being false. - * - * Progression of requests under I/O to the processing list is - * prevented by the req->aborted flag being true for these requests. - * For this reason requests on the io list must be aborted first. + * Aborting requests under I/O goes as follows: 1: Separate out unlocked + * requests, they should be finished off immediately. Locked requests will be + * finished after unlock; see unlock_request(). 2: Finish off the unlocked + * requests. It is possible that some request will finish before we can. This + * is OK, the request will in that case be removed from the list before we touch + * it. 
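A condensed model of the two-pass abort described in the comment above: every in-flight request is marked aborted, unlocked requests are picked out so they can be ended immediately, and locked ones are left for whoever is currently copying data to finish off once they observe the aborted flag (locking elided; illustrative user-space C, not the kernel code):

#include <stdbool.h>
#include <stddef.h>

struct req {
        bool locked;            /* data being copied to/from this request */
        bool aborted;
        struct req *next;
};

/* pass 1: mark everything aborted and collect what can be ended right now */
static struct req *abort_split(struct req *io_list, struct req **end_now)
{
        struct req *still_locked = NULL, *next;

        for (struct req *r = io_list; r; r = next) {
                next = r->next;
                r->aborted = true;
                if (r->locked) {
                        /* ended later, by the holder, once it drops the lock */
                        r->next = still_locked;
                        still_locked = r;
                } else {
                        r->next = *end_now;
                        *end_now = r;
                }
        }
        return still_locked;    /* pass 2: the caller ends everything on *end_now */
}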
*/ void fuse_abort_conn(struct fuse_conn *fc) { + struct fuse_iqueue *fiq = &fc->iq; + spin_lock(&fc->lock); if (fc->connected) { + struct fuse_dev *fud; + struct fuse_req *req, *next; + LIST_HEAD(to_end1); + LIST_HEAD(to_end2); + fc->connected = 0; fc->blocked = 0; fuse_set_initialized(fc); - end_io_requests(fc); - end_queued_requests(fc); + list_for_each_entry(fud, &fc->devices, entry) { + struct fuse_pqueue *fpq = &fud->pq; + + spin_lock(&fpq->lock); + fpq->connected = 0; + list_for_each_entry_safe(req, next, &fpq->io, list) { + req->out.h.error = -ECONNABORTED; + spin_lock(&req->waitq.lock); + set_bit(FR_ABORTED, &req->flags); + if (!test_bit(FR_LOCKED, &req->flags)) { + set_bit(FR_PRIVATE, &req->flags); + list_move(&req->list, &to_end1); + } + spin_unlock(&req->waitq.lock); + } + list_splice_init(&fpq->processing, &to_end2); + spin_unlock(&fpq->lock); + } + fc->max_background = UINT_MAX; + flush_bg_queue(fc); + + spin_lock(&fiq->waitq.lock); + fiq->connected = 0; + list_splice_init(&fiq->pending, &to_end2); + while (forget_pending(fiq)) + kfree(dequeue_forget(fiq, 1, NULL)); + wake_up_all_locked(&fiq->waitq); + spin_unlock(&fiq->waitq.lock); + kill_fasync(&fiq->fasync, SIGIO, POLL_IN); end_polls(fc); - wake_up_all(&fc->waitq); wake_up_all(&fc->blocked_waitq); - kill_fasync(&fc->fasync, SIGIO, POLL_IN); + spin_unlock(&fc->lock); + + while (!list_empty(&to_end1)) { + req = list_first_entry(&to_end1, struct fuse_req, list); + __fuse_get_request(req); + list_del_init(&req->list); + request_end(fc, req); + } + end_requests(fc, &to_end2); + } else { + spin_unlock(&fc->lock); } - spin_unlock(&fc->lock); } EXPORT_SYMBOL_GPL(fuse_abort_conn); int fuse_dev_release(struct inode *inode, struct file *file) { - struct fuse_conn *fc = fuse_get_conn(file); - if (fc) { - spin_lock(&fc->lock); - fc->connected = 0; - fc->blocked = 0; - fuse_set_initialized(fc); - end_queued_requests(fc); - end_polls(fc); - wake_up_all(&fc->blocked_waitq); - spin_unlock(&fc->lock); - fuse_conn_put(fc); - } + struct fuse_dev *fud = fuse_get_dev(file); + if (fud) { + struct fuse_conn *fc = fud->fc; + struct fuse_pqueue *fpq = &fud->pq; + + WARN_ON(!list_empty(&fpq->io)); + end_requests(fc, &fpq->processing); + /* Are we the last open device? 
*/ + if (atomic_dec_and_test(&fc->dev_count)) { + WARN_ON(fc->iq.fasync != NULL); + fuse_abort_conn(fc); + } + fuse_dev_free(fud); + } return 0; } EXPORT_SYMBOL_GPL(fuse_dev_release); static int fuse_dev_fasync(int fd, struct file *file, int on) { - struct fuse_conn *fc = fuse_get_conn(file); - if (!fc) + struct fuse_dev *fud = fuse_get_dev(file); + + if (!fud) return -EPERM; /* No locking - fasync_helper does its own locking */ - return fasync_helper(fd, file, on, &fc->fasync); + return fasync_helper(fd, file, on, &fud->fc->iq.fasync); +} + +static int fuse_device_clone(struct fuse_conn *fc, struct file *new) +{ + struct fuse_dev *fud; + + if (new->private_data) + return -EINVAL; + + fud = fuse_dev_alloc(fc); + if (!fud) + return -ENOMEM; + + new->private_data = fud; + atomic_inc(&fc->dev_count); + + return 0; +} + +static long fuse_dev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int err = -ENOTTY; + + if (cmd == FUSE_DEV_IOC_CLONE) { + int oldfd; + + err = -EFAULT; + if (!get_user(oldfd, (__u32 __user *) arg)) { + struct file *old = fget(oldfd); + + err = -EINVAL; + if (old) { + struct fuse_dev *fud = fuse_get_dev(old); + + if (fud) { + mutex_lock(&fuse_mutex); + err = fuse_device_clone(fud->fc, file); + mutex_unlock(&fuse_mutex); + } + fput(old); + } + } + } + return err; } const struct file_operations fuse_dev_operations = { @@ -2236,6 +2271,8 @@ const struct file_operations fuse_dev_operations = { .poll = fuse_dev_poll, .release = fuse_dev_release, .fasync = fuse_dev_fasync, + .unlocked_ioctl = fuse_dev_ioctl, + .compat_ioctl = fuse_dev_ioctl, }; EXPORT_SYMBOL_GPL(fuse_dev_operations); diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 8c5e2fa68835..014fa8ba2b51 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -96,17 +96,17 @@ static void fuse_file_put(struct fuse_file *ff, bool sync) * Drop the release request when client does not * implement 'open' */ - req->background = 0; + __clear_bit(FR_BACKGROUND, &req->flags); iput(req->misc.release.inode); fuse_put_request(ff->fc, req); } else if (sync) { - req->background = 0; + __clear_bit(FR_BACKGROUND, &req->flags); fuse_request_send(ff->fc, req); iput(req->misc.release.inode); fuse_put_request(ff->fc, req); } else { req->end = fuse_release_end; - req->background = 1; + __set_bit(FR_BACKGROUND, &req->flags); fuse_request_send_background(ff->fc, req); } kfree(ff); @@ -299,8 +299,8 @@ void fuse_sync_release(struct fuse_file *ff, int flags) { WARN_ON(atomic_read(&ff->count) > 1); fuse_prepare_release(ff, flags, FUSE_RELEASE); - ff->reserved_req->force = 1; - ff->reserved_req->background = 0; + __set_bit(FR_FORCE, &ff->reserved_req->flags); + __clear_bit(FR_BACKGROUND, &ff->reserved_req->flags); fuse_request_send(ff->fc, ff->reserved_req); fuse_put_request(ff->fc, ff->reserved_req); kfree(ff); @@ -426,7 +426,7 @@ static int fuse_flush(struct file *file, fl_owner_t id) req->in.numargs = 1; req->in.args[0].size = sizeof(inarg); req->in.args[0].value = &inarg; - req->force = 1; + __set_bit(FR_FORCE, &req->flags); fuse_request_send(fc, req); err = req->out.h.error; fuse_put_request(fc, req); @@ -1611,7 +1611,8 @@ static int fuse_writepage_locked(struct page *page) if (!req) goto err; - req->background = 1; /* writeback always goes to bg_queue */ + /* writeback always goes to bg_queue */ + __set_bit(FR_BACKGROUND, &req->flags); tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); if (!tmp_page) goto err_free; @@ -1742,8 +1743,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req, } } - if (old_req->num_pages == 
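The FUSE_DEV_IOC_CLONE ioctl above lets a daemon attach additional /dev/fuse descriptors to an existing connection, each with its own struct fuse_pqueue, which is what makes multi-threaded request processing possible without all workers contending on one queue. A user-space sketch of cloning a session fd, assuming a uapi header new enough to define FUSE_DEV_IOC_CLONE:

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fuse.h>         /* FUSE_DEV_IOC_CLONE */

/* returns a second fd bound to the same fuse connection, or -1 on error */
static int fuse_clone_session_fd(int session_fd)
{
        uint32_t source = session_fd;   /* the ioctl reads the old fd as a __u32 */
        int fd = open("/dev/fuse", O_RDWR | O_CLOEXEC);

        if (fd < 0)
                return -1;
        if (ioctl(fd, FUSE_DEV_IOC_CLONE, &source) < 0) {
                close(fd);
                return -1;
        }
        return fd;      /* worker threads can read()/write() requests on this fd */
}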
1 && (old_req->state == FUSE_REQ_INIT || - old_req->state == FUSE_REQ_PENDING)) { + if (old_req->num_pages == 1 && test_bit(FR_PENDING, &old_req->flags)) { struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host); copy_highpage(old_req->pages[0], page); @@ -1830,7 +1830,7 @@ static int fuse_writepages_fill(struct page *page, req->misc.write.in.write_flags |= FUSE_WRITE_CACHE; req->misc.write.next = NULL; req->in.argpages = 1; - req->background = 1; + __set_bit(FR_BACKGROUND, &req->flags); req->num_pages = 0; req->end = fuse_writepage_end; req->inode = inode; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 7354dc142a50..405113101db8 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -241,16 +241,6 @@ struct fuse_args { #define FUSE_ARGS(args) struct fuse_args args = {} -/** The request state */ -enum fuse_req_state { - FUSE_REQ_INIT = 0, - FUSE_REQ_PENDING, - FUSE_REQ_READING, - FUSE_REQ_SENT, - FUSE_REQ_WRITING, - FUSE_REQ_FINISHED -}; - /** The request IO state (for asynchronous processing) */ struct fuse_io_priv { int async; @@ -267,7 +257,40 @@ struct fuse_io_priv { }; /** + * Request flags + * + * FR_ISREPLY: set if the request has reply + * FR_FORCE: force sending of the request even if interrupted + * FR_BACKGROUND: request is sent in the background + * FR_WAITING: request is counted as "waiting" + * FR_ABORTED: the request was aborted + * FR_INTERRUPTED: the request has been interrupted + * FR_LOCKED: data is being copied to/from the request + * FR_PENDING: request is not yet in userspace + * FR_SENT: request is in userspace, waiting for an answer + * FR_FINISHED: request is finished + * FR_PRIVATE: request is on private list + */ +enum fuse_req_flag { + FR_ISREPLY, + FR_FORCE, + FR_BACKGROUND, + FR_WAITING, + FR_ABORTED, + FR_INTERRUPTED, + FR_LOCKED, + FR_PENDING, + FR_SENT, + FR_FINISHED, + FR_PRIVATE, +}; + +/** * A request to the client + * + * .waitq.lock protects the following fields: + * - FR_ABORTED + * - FR_LOCKED (may also be modified under fc->lock, tested under both) */ struct fuse_req { /** This can be on either pending processing or io lists in @@ -283,35 +306,8 @@ struct fuse_req { /** Unique ID for the interrupt request */ u64 intr_unique; - /* - * The following bitfields are either set once before the - * request is queued or setting/clearing them is protected by - * fuse_conn->lock - */ - - /** True if the request has reply */ - unsigned isreply:1; - - /** Force sending of the request even if interrupted */ - unsigned force:1; - - /** The request was aborted */ - unsigned aborted:1; - - /** Request is sent in the background */ - unsigned background:1; - - /** The request has been interrupted */ - unsigned interrupted:1; - - /** Data is being copied to/from the request */ - unsigned locked:1; - - /** Request is counted as "waiting" */ - unsigned waiting:1; - - /** State of the request */ - enum fuse_req_state state; + /* Request flags, updated with test/set/clear_bit() */ + unsigned long flags; /** The request input */ struct fuse_in in; @@ -380,6 +376,61 @@ struct fuse_req { struct file *stolen_file; }; +struct fuse_iqueue { + /** Connection established */ + unsigned connected; + + /** Readers of the connection are waiting on this */ + wait_queue_head_t waitq; + + /** The next unique request id */ + u64 reqctr; + + /** The list of pending requests */ + struct list_head pending; + + /** Pending interrupts */ + struct list_head interrupts; + + /** Queue of pending forgets */ + struct fuse_forget_link forget_list_head; + struct fuse_forget_link 
*forget_list_tail; + + /** Batching of FORGET requests (positive indicates FORGET batch) */ + int forget_batch; + + /** O_ASYNC requests */ + struct fasync_struct *fasync; +}; + +struct fuse_pqueue { + /** Connection established */ + unsigned connected; + + /** Lock protecting accessess to members of this structure */ + spinlock_t lock; + + /** The list of requests being processed */ + struct list_head processing; + + /** The list of requests under I/O */ + struct list_head io; +}; + +/** + * Fuse device instance + */ +struct fuse_dev { + /** Fuse connection for this device */ + struct fuse_conn *fc; + + /** Processing queue */ + struct fuse_pqueue pq; + + /** list entry on fc->devices */ + struct list_head entry; +}; + /** * A Fuse connection. * @@ -394,6 +445,9 @@ struct fuse_conn { /** Refcount */ atomic_t count; + /** Number of fuse_dev's */ + atomic_t dev_count; + struct rcu_head rcu; /** The user id for this mount */ @@ -411,17 +465,8 @@ struct fuse_conn { /** Maximum write size */ unsigned max_write; - /** Readers of the connection are waiting on this */ - wait_queue_head_t waitq; - - /** The list of pending requests */ - struct list_head pending; - - /** The list of requests being processed */ - struct list_head processing; - - /** The list of requests under I/O */ - struct list_head io; + /** Input queue */ + struct fuse_iqueue iq; /** The next unique kernel file handle */ u64 khctr; @@ -444,16 +489,6 @@ struct fuse_conn { /** The list of background requests set aside for later queuing */ struct list_head bg_queue; - /** Pending interrupts */ - struct list_head interrupts; - - /** Queue of pending forgets */ - struct fuse_forget_link forget_list_head; - struct fuse_forget_link *forget_list_tail; - - /** Batching of FORGET requests (positive indicates FORGET batch) */ - int forget_batch; - /** Flag indicating that INIT reply has been received. Allocating * any fuse request will be suspended until the flag is set */ int initialized; @@ -469,9 +504,6 @@ struct fuse_conn { /** waitq for reserved requests */ wait_queue_head_t reserved_req_waitq; - /** The next unique request id */ - u64 reqctr; - /** Connection established, cleared on umount, connection abort and device release */ unsigned connected; @@ -594,9 +626,6 @@ struct fuse_conn { /** number of dentries used in the above array */ int ctl_ndents; - /** O_ASYNC requests */ - struct fasync_struct *fasync; - /** Key for lock owner ID scrambling */ u32 scramble_key[4]; @@ -614,6 +643,9 @@ struct fuse_conn { /** Read/write semaphore to hold when accessing sb. 
*/ struct rw_semaphore killsb; + + /** List of device instances belonging to this connection */ + struct list_head devices; }; static inline struct fuse_conn *get_fuse_conn_super(struct super_block *sb) @@ -826,6 +858,9 @@ void fuse_conn_init(struct fuse_conn *fc); */ void fuse_conn_put(struct fuse_conn *fc); +struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc); +void fuse_dev_free(struct fuse_dev *fud); + /** * Add connection to control filesystem */ diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 082ac1c97f39..ac81f48ab2f4 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -362,8 +362,8 @@ static void fuse_send_destroy(struct fuse_conn *fc) if (req && fc->conn_init) { fc->destroy_req = NULL; req->in.h.opcode = FUSE_DESTROY; - req->force = 1; - req->background = 0; + __set_bit(FR_FORCE, &req->flags); + __clear_bit(FR_BACKGROUND, &req->flags); fuse_request_send(fc, req); fuse_put_request(fc, req); } @@ -567,30 +567,46 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root) return 0; } +static void fuse_iqueue_init(struct fuse_iqueue *fiq) +{ + memset(fiq, 0, sizeof(struct fuse_iqueue)); + init_waitqueue_head(&fiq->waitq); + INIT_LIST_HEAD(&fiq->pending); + INIT_LIST_HEAD(&fiq->interrupts); + fiq->forget_list_tail = &fiq->forget_list_head; + fiq->connected = 1; +} + +static void fuse_pqueue_init(struct fuse_pqueue *fpq) +{ + memset(fpq, 0, sizeof(struct fuse_pqueue)); + spin_lock_init(&fpq->lock); + INIT_LIST_HEAD(&fpq->processing); + INIT_LIST_HEAD(&fpq->io); + fpq->connected = 1; +} + void fuse_conn_init(struct fuse_conn *fc) { memset(fc, 0, sizeof(*fc)); spin_lock_init(&fc->lock); init_rwsem(&fc->killsb); atomic_set(&fc->count, 1); - init_waitqueue_head(&fc->waitq); + atomic_set(&fc->dev_count, 1); init_waitqueue_head(&fc->blocked_waitq); init_waitqueue_head(&fc->reserved_req_waitq); - INIT_LIST_HEAD(&fc->pending); - INIT_LIST_HEAD(&fc->processing); - INIT_LIST_HEAD(&fc->io); - INIT_LIST_HEAD(&fc->interrupts); + fuse_iqueue_init(&fc->iq); INIT_LIST_HEAD(&fc->bg_queue); INIT_LIST_HEAD(&fc->entry); - fc->forget_list_tail = &fc->forget_list_head; + INIT_LIST_HEAD(&fc->devices); atomic_set(&fc->num_waiting, 0); fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND; fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD; fc->khctr = 0; fc->polled_files = RB_ROOT; - fc->reqctr = 0; fc->blocked = 0; fc->initialized = 0; + fc->connected = 1; fc->attr_version = 1; get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); } @@ -930,6 +946,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) static void fuse_free_conn(struct fuse_conn *fc) { + WARN_ON(!list_empty(&fc->devices)); kfree_rcu(fc, rcu); } @@ -975,8 +992,42 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb) return 0; } +struct fuse_dev *fuse_dev_alloc(struct fuse_conn *fc) +{ + struct fuse_dev *fud; + + fud = kzalloc(sizeof(struct fuse_dev), GFP_KERNEL); + if (fud) { + fud->fc = fuse_conn_get(fc); + fuse_pqueue_init(&fud->pq); + + spin_lock(&fc->lock); + list_add_tail(&fud->entry, &fc->devices); + spin_unlock(&fc->lock); + } + + return fud; +} +EXPORT_SYMBOL_GPL(fuse_dev_alloc); + +void fuse_dev_free(struct fuse_dev *fud) +{ + struct fuse_conn *fc = fud->fc; + + if (fc) { + spin_lock(&fc->lock); + list_del(&fud->entry); + spin_unlock(&fc->lock); + + fuse_conn_put(fc); + } + kfree(fud); +} +EXPORT_SYMBOL_GPL(fuse_dev_free); + static int fuse_fill_super(struct super_block *sb, void *data, int silent) { + struct fuse_dev *fud; struct fuse_conn *fc; struct inode 
*root; struct fuse_mount_data d; @@ -1026,12 +1077,17 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) goto err_fput; fuse_conn_init(fc); + fc->release = fuse_free_conn; + + fud = fuse_dev_alloc(fc); + if (!fud) + goto err_put_conn; fc->dev = sb->s_dev; fc->sb = sb; err = fuse_bdi_init(fc, sb); if (err) - goto err_put_conn; + goto err_dev_free; sb->s_bdi = &fc->bdi; @@ -1040,7 +1096,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) fc->dont_mask = 1; sb->s_flags |= MS_POSIXACL; - fc->release = fuse_free_conn; fc->flags = d.flags; fc->user_id = d.user_id; fc->group_id = d.group_id; @@ -1053,14 +1108,14 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) root = fuse_get_root_inode(sb, d.rootmode); root_dentry = d_make_root(root); if (!root_dentry) - goto err_put_conn; + goto err_dev_free; /* only now - we want root dentry with NULL ->d_op */ sb->s_d_op = &fuse_dentry_operations; init_req = fuse_request_alloc(0); if (!init_req) goto err_put_root; - init_req->background = 1; + __set_bit(FR_BACKGROUND, &init_req->flags); if (is_bdev) { fc->destroy_req = fuse_request_alloc(0); @@ -1079,8 +1134,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) list_add_tail(&fc->entry, &fuse_conn_list); sb->s_root = root_dentry; - fc->connected = 1; - file->private_data = fuse_conn_get(fc); + file->private_data = fud; mutex_unlock(&fuse_mutex); /* * atomic_dec_and_test() in fput() provides the necessary @@ -1099,6 +1153,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) fuse_request_free(init_req); err_put_root: dput(root_dentry); + err_dev_free: + fuse_dev_free(fud); err_put_conn: fuse_bdi_destroy(fc); fuse_conn_put(fc); diff --git a/fs/mount.h b/fs/mount.h index b5b8082bfa42..14db05d424f7 100644 --- a/fs/mount.h +++ b/fs/mount.h @@ -118,7 +118,6 @@ static inline void unlock_mount_hash(void) } struct proc_mounts { - struct seq_file m; struct mnt_namespace *ns; struct path root; int (*show)(struct seq_file *, struct vfsmount *); @@ -127,8 +126,6 @@ struct proc_mounts { loff_t cached_index; }; -#define proc_mounts(p) (container_of((p), struct proc_mounts, m)) - extern const struct seq_operations mounts_op; extern bool __is_local_mountpoint(struct dentry *dentry); diff --git a/fs/namespace.c b/fs/namespace.c index 9c1c43d0d4f1..e99f1f4e00cd 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1226,7 +1226,7 @@ EXPORT_SYMBOL(replace_mount_options); /* iterator; we want it to have access to namespace_sem, thus here... 
*/ static void *m_start(struct seq_file *m, loff_t *pos) { - struct proc_mounts *p = proc_mounts(m); + struct proc_mounts *p = m->private; down_read(&namespace_sem); if (p->cached_event == p->ns->event) { @@ -1247,7 +1247,7 @@ static void *m_start(struct seq_file *m, loff_t *pos) static void *m_next(struct seq_file *m, void *v, loff_t *pos) { - struct proc_mounts *p = proc_mounts(m); + struct proc_mounts *p = m->private; p->cached_mount = seq_list_next(v, &p->ns->list, pos); p->cached_index = *pos; @@ -1261,7 +1261,7 @@ static void m_stop(struct seq_file *m, void *v) static int m_show(struct seq_file *m, void *v) { - struct proc_mounts *p = proc_mounts(m); + struct proc_mounts *p = m->private; struct mount *r = list_entry(v, struct mount, mnt_list); return p->show(m, &r->mnt); } diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 8d129bb7355a..682529c00996 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -458,7 +458,7 @@ check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp) * pg_authenticate method for nfsv4 callback threads. * * The authflavor has been negotiated, so an incorrect flavor is a server - * bug. Drop packets with incorrect authflavor. + * bug. Deny packets with incorrect authflavor. * * All other checking done after NFS decoding where the nfs_client can be * found in nfs4_callback_compound @@ -468,12 +468,12 @@ static int nfs_callback_authenticate(struct svc_rqst *rqstp) switch (rqstp->rq_authop->flavour) { case RPC_AUTH_NULL: if (rqstp->rq_proc != CB_NULL) - return SVC_DROP; + return SVC_DENIED; break; case RPC_AUTH_GSS: /* No RPC_AUTH_GSS support yet in NFSv4.1 */ if (svc_is_backchannel(rqstp)) - return SVC_DROP; + return SVC_DENIED; } return SVC_OK; } diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index 197806fb87ff..29e3c1b011b7 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -327,10 +327,8 @@ validate_seqid(struct nfs4_slot_table *tbl, struct cb_sequenceargs * args) dprintk("%s slot table seqid: %u\n", __func__, slot->seq_nr); /* Normal */ - if (likely(args->csa_sequenceid == slot->seq_nr + 1)) { - slot->seq_nr++; + if (likely(args->csa_sequenceid == slot->seq_nr + 1)) goto out_ok; - } /* Replay */ if (args->csa_sequenceid == slot->seq_nr) { @@ -418,6 +416,7 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, struct cb_process_state *cps) { struct nfs4_slot_table *tbl; + struct nfs4_slot *slot; struct nfs_client *clp; int i; __be32 status = htonl(NFS4ERR_BADSESSION); @@ -429,25 +428,32 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) goto out; + tbl = &clp->cl_session->bc_slot_table; + slot = tbl->slots + args->csa_slotid; spin_lock(&tbl->slot_tbl_lock); /* state manager is resetting the session */ if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) { - spin_unlock(&tbl->slot_tbl_lock); status = htonl(NFS4ERR_DELAY); /* Return NFS4ERR_BADSESSION if we're draining the session * in order to reset it. 
*/ if (test_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state)) status = htonl(NFS4ERR_BADSESSION); - goto out; + goto out_unlock; } - status = validate_seqid(&clp->cl_session->bc_slot_table, args); - spin_unlock(&tbl->slot_tbl_lock); + memcpy(&res->csr_sessionid, &args->csa_sessionid, + sizeof(res->csr_sessionid)); + res->csr_sequenceid = args->csa_sequenceid; + res->csr_slotid = args->csa_slotid; + res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; + res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; + + status = validate_seqid(tbl, args); if (status) - goto out; + goto out_unlock; cps->slotid = args->csa_slotid; @@ -458,15 +464,17 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, */ if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) { status = htonl(NFS4ERR_DELAY); - goto out; + goto out_unlock; } - memcpy(&res->csr_sessionid, &args->csa_sessionid, - sizeof(res->csr_sessionid)); - res->csr_sequenceid = args->csa_sequenceid; - res->csr_slotid = args->csa_slotid; - res->csr_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; - res->csr_target_highestslotid = NFS41_BC_MAX_CALLBACKS - 1; + /* + * RFC5661 20.9.3 + * If CB_SEQUENCE returns an error, then the state of the slot + * (sequence ID, cached reply) MUST NOT change. + */ + slot->seq_nr++; +out_unlock: + spin_unlock(&tbl->slot_tbl_lock); out: cps->clp = clp; /* put in nfs4_callback_compound */ diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 19ca95cdfd9b..6b1697a01dde 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -909,7 +909,7 @@ static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *r xdr_init_encode(&xdr_out, &rqstp->rq_res, p); status = decode_compound_hdr_arg(&xdr_in, &hdr_arg); - if (status == __constant_htonl(NFS4ERR_RESOURCE)) + if (status == htonl(NFS4ERR_RESOURCE)) return rpc_garbage_args; if (hdr_arg.minorversion == 0) { diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 892aefff3630..ecebb406cc1a 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -825,7 +825,6 @@ error: * Load up the server record from information gained in an fsinfo record */ static void nfs_server_set_fsinfo(struct nfs_server *server, - struct nfs_fh *mntfh, struct nfs_fsinfo *fsinfo) { unsigned long max_rpc_payload; @@ -901,7 +900,7 @@ int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, struct nfs if (error < 0) goto out_error; - nfs_server_set_fsinfo(server, mntfh, &fsinfo); + nfs_server_set_fsinfo(server, &fsinfo); /* Get some general file system info */ if (server->namelen == 0) { @@ -1193,8 +1192,6 @@ void nfs_clients_init(struct net *net) } #ifdef CONFIG_PROC_FS -static struct proc_dir_entry *proc_fs_nfs; - static int nfs_server_list_open(struct inode *inode, struct file *file); static void *nfs_server_list_start(struct seq_file *p, loff_t *pos); static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos); @@ -1364,27 +1361,29 @@ static int nfs_volume_list_show(struct seq_file *m, void *v) { struct nfs_server *server; struct nfs_client *clp; - char dev[8], fsid[17]; + char dev[13]; // 8 for 2^24, 1 for ':', 3 for 2^8, 1 for '\0' + char fsid[34]; // 2 * 16 for %llx, 1 for ':', 1 for '\0' struct nfs_net *nn = net_generic(seq_file_net(m), nfs_net_id); /* display header on line 1 */ if (v == &nn->nfs_volume_list) { - seq_puts(m, "NV SERVER PORT DEV FSID FSC\n"); + seq_puts(m, "NV SERVER PORT DEV FSID" + " FSC\n"); return 0; } /* display one transport per line on subsequent lines */ server = list_entry(v, struct nfs_server, 
master_link); clp = server->nfs_client; - snprintf(dev, 8, "%u:%u", + snprintf(dev, sizeof(dev), "%u:%u", MAJOR(server->s_dev), MINOR(server->s_dev)); - snprintf(fsid, 17, "%llx:%llx", + snprintf(fsid, sizeof(fsid), "%llx:%llx", (unsigned long long) server->fsid.major, (unsigned long long) server->fsid.minor); rcu_read_lock(); - seq_printf(m, "v%u %s %s %-7s %-17s %s\n", + seq_printf(m, "v%u %s %s %-12s %-33s %s\n", clp->rpc_ops->version, rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR), rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_PORT), @@ -1434,27 +1433,20 @@ void nfs_fs_proc_net_exit(struct net *net) */ int __init nfs_fs_proc_init(void) { - struct proc_dir_entry *p; - - proc_fs_nfs = proc_mkdir("fs/nfsfs", NULL); - if (!proc_fs_nfs) + if (!proc_mkdir("fs/nfsfs", NULL)) goto error_0; /* a file of servers with which we're dealing */ - p = proc_symlink("servers", proc_fs_nfs, "../../net/nfsfs/servers"); - if (!p) + if (!proc_symlink("fs/nfsfs/servers", NULL, "../../net/nfsfs/servers")) goto error_1; /* a file of volumes that we have mounted */ - p = proc_symlink("volumes", proc_fs_nfs, "../../net/nfsfs/volumes"); - if (!p) - goto error_2; - return 0; + if (!proc_symlink("fs/nfsfs/volumes", NULL, "../../net/nfsfs/volumes")) + goto error_1; -error_2: - remove_proc_entry("servers", proc_fs_nfs); + return 0; error_1: - remove_proc_entry("fs/nfsfs", NULL); + remove_proc_subtree("fs/nfsfs", NULL); error_0: return -ENOMEM; } @@ -1464,9 +1456,7 @@ error_0: */ void nfs_fs_proc_exit(void) { - remove_proc_entry("volumes", proc_fs_nfs); - remove_proc_entry("servers", proc_fs_nfs); - remove_proc_entry("fs/nfsfs", NULL); + remove_proc_subtree("fs/nfsfs", NULL); } #endif /* CONFIG_PROC_FS */ diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index b2c8b31b2be7..21457bb0edd6 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1470,9 +1470,6 @@ static int nfs_finish_open(struct nfs_open_context *ctx, { int err; - if ((open_flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) - *opened |= FILE_CREATED; - err = finish_open(file, dentry, do_open, opened); if (err) goto out; diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 8b8d83a526ce..cc4fa1ed61fc 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -555,31 +555,22 @@ static int nfs_launder_page(struct page *page) return nfs_wb_page(inode, page); } -#ifdef CONFIG_NFS_SWAP static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file, sector_t *span) { - int ret; struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host); *span = sis->pages; - rcu_read_lock(); - ret = xs_swapper(rcu_dereference(clnt->cl_xprt), 1); - rcu_read_unlock(); - - return ret; + return rpc_clnt_swap_activate(clnt); } static void nfs_swap_deactivate(struct file *file) { struct rpc_clnt *clnt = NFS_CLIENT(file->f_mapping->host); - rcu_read_lock(); - xs_swapper(rcu_dereference(clnt->cl_xprt), 0); - rcu_read_unlock(); + rpc_clnt_swap_deactivate(clnt); } -#endif const struct address_space_operations nfs_file_aops = { .readpage = nfs_readpage, @@ -596,10 +587,8 @@ const struct address_space_operations nfs_file_aops = { .launder_page = nfs_launder_page, .is_dirty_writeback = nfs_check_dirty_writeback, .error_remove_page = generic_error_remove_page, -#ifdef CONFIG_NFS_SWAP .swap_activate = nfs_swap_activate, .swap_deactivate = nfs_swap_deactivate, -#endif }; /* diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 7d05089e52d6..c12951b9551e 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c 
@@ -20,6 +20,7 @@ #include "../nfs4trace.h" #include "../iostat.h" #include "../nfs.h" +#include "../nfs42.h" #define NFSDBG_FACILITY NFSDBG_PNFS_LD @@ -182,17 +183,14 @@ static void _ff_layout_free_lseg(struct nfs4_ff_layout_segment *fls) static void ff_layout_sort_mirrors(struct nfs4_ff_layout_segment *fls) { - struct nfs4_ff_layout_mirror *tmp; int i, j; for (i = 0; i < fls->mirror_array_cnt - 1; i++) { for (j = i + 1; j < fls->mirror_array_cnt; j++) if (fls->mirror_array[i]->efficiency < - fls->mirror_array[j]->efficiency) { - tmp = fls->mirror_array[i]; - fls->mirror_array[i] = fls->mirror_array[j]; - fls->mirror_array[j] = tmp; - } + fls->mirror_array[j]->efficiency) + swap(fls->mirror_array[i], + fls->mirror_array[j]); } } @@ -274,6 +272,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh, spin_lock_init(&fls->mirror_array[i]->lock); fls->mirror_array[i]->ds_count = ds_count; + fls->mirror_array[i]->lseg = &fls->generic_hdr; /* deviceid */ rc = decode_deviceid(&stream, &devid); @@ -344,6 +343,10 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh, fls->mirror_array[i]->gid); } + p = xdr_inline_decode(&stream, 4); + if (p) + fls->flags = be32_to_cpup(p); + ff_layout_sort_mirrors(fls); rc = ff_layout_check_layout(lgr); if (rc) @@ -415,6 +418,146 @@ ff_layout_get_lseg_count(struct nfs4_ff_layout_segment *fls) return 1; } +static void +nfs4_ff_start_busy_timer(struct nfs4_ff_busy_timer *timer) +{ + /* first IO request? */ + if (atomic_inc_return(&timer->n_ops) == 1) { + timer->start_time = ktime_get(); + } +} + +static ktime_t +nfs4_ff_end_busy_timer(struct nfs4_ff_busy_timer *timer) +{ + ktime_t start, now; + + if (atomic_dec_return(&timer->n_ops) < 0) + WARN_ON_ONCE(1); + + now = ktime_get(); + start = timer->start_time; + timer->start_time = now; + return ktime_sub(now, start); +} + +static ktime_t +nfs4_ff_layout_calc_completion_time(struct rpc_task *task) +{ + return ktime_sub(ktime_get(), task->tk_start); +} + +static bool +nfs4_ff_layoutstat_start_io(struct nfs4_ff_layout_mirror *mirror, + struct nfs4_ff_layoutstat *layoutstat) +{ + static const ktime_t notime = {0}; + ktime_t now = ktime_get(); + + nfs4_ff_start_busy_timer(&layoutstat->busy_timer); + if (ktime_equal(mirror->start_time, notime)) + mirror->start_time = now; + if (ktime_equal(mirror->last_report_time, notime)) + mirror->last_report_time = now; + if (ktime_to_ms(ktime_sub(now, mirror->last_report_time)) >= + FF_LAYOUTSTATS_REPORT_INTERVAL) { + mirror->last_report_time = now; + return true; + } + + return false; +} + +static void +nfs4_ff_layout_stat_io_update_requested(struct nfs4_ff_layoutstat *layoutstat, + __u64 requested) +{ + struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat; + + iostat->ops_requested++; + iostat->bytes_requested += requested; +} + +static void +nfs4_ff_layout_stat_io_update_completed(struct nfs4_ff_layoutstat *layoutstat, + __u64 requested, + __u64 completed, + ktime_t time_completed) +{ + struct nfs4_ff_io_stat *iostat = &layoutstat->io_stat; + ktime_t timer; + + iostat->ops_completed++; + iostat->bytes_completed += completed; + iostat->bytes_not_delivered += requested - completed; + + timer = nfs4_ff_end_busy_timer(&layoutstat->busy_timer); + iostat->total_busy_time = + ktime_add(iostat->total_busy_time, timer); + iostat->aggregate_completion_time = + ktime_add(iostat->aggregate_completion_time, time_completed); +} + +static void +nfs4_ff_layout_stat_io_start_read(struct nfs4_ff_layout_mirror *mirror, + __u64 requested) +{ + bool report; + + spin_lock(&mirror->lock); + report = 
nfs4_ff_layoutstat_start_io(mirror, &mirror->read_stat); + nfs4_ff_layout_stat_io_update_requested(&mirror->read_stat, requested); + spin_unlock(&mirror->lock); + + if (report) + pnfs_report_layoutstat(mirror->lseg->pls_layout->plh_inode); +} + +static void +nfs4_ff_layout_stat_io_end_read(struct rpc_task *task, + struct nfs4_ff_layout_mirror *mirror, + __u64 requested, + __u64 completed) +{ + spin_lock(&mirror->lock); + nfs4_ff_layout_stat_io_update_completed(&mirror->read_stat, + requested, completed, + nfs4_ff_layout_calc_completion_time(task)); + spin_unlock(&mirror->lock); +} + +static void +nfs4_ff_layout_stat_io_start_write(struct nfs4_ff_layout_mirror *mirror, + __u64 requested) +{ + bool report; + + spin_lock(&mirror->lock); + report = nfs4_ff_layoutstat_start_io(mirror , &mirror->write_stat); + nfs4_ff_layout_stat_io_update_requested(&mirror->write_stat, requested); + spin_unlock(&mirror->lock); + + if (report) + pnfs_report_layoutstat(mirror->lseg->pls_layout->plh_inode); +} + +static void +nfs4_ff_layout_stat_io_end_write(struct rpc_task *task, + struct nfs4_ff_layout_mirror *mirror, + __u64 requested, + __u64 completed, + enum nfs3_stable_how committed) +{ + if (committed == NFS_UNSTABLE) + requested = completed = 0; + + spin_lock(&mirror->lock); + nfs4_ff_layout_stat_io_update_completed(&mirror->write_stat, + requested, completed, + nfs4_ff_layout_calc_completion_time(task)); + spin_unlock(&mirror->lock); +} + static int ff_layout_alloc_commit_info(struct pnfs_layout_segment *lseg, struct nfs_commit_info *cinfo, @@ -631,7 +774,7 @@ static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs) nfs_direct_set_resched_writes(hdr->dreq); /* fake unstable write to let common nfs resend pages */ hdr->verf.committed = NFS_UNSTABLE; - hdr->good_bytes = 0; + hdr->good_bytes = hdr->args.count; } return; } @@ -879,6 +1022,12 @@ static int ff_layout_read_done_cb(struct rpc_task *task, return 0; } +static bool +ff_layout_need_layoutcommit(struct pnfs_layout_segment *lseg) +{ + return !(FF_LAYOUT_LSEG(lseg)->flags & FF_FLAGS_NO_LAYOUTCOMMIT); +} + /* * We reference the rpc_cred of the first WRITE that triggers the need for * a LAYOUTCOMMIT, and use it to send the layoutcommit compound. 
@@ -891,6 +1040,9 @@ static int ff_layout_read_done_cb(struct rpc_task *task, static void ff_layout_set_layoutcommit(struct nfs_pgio_header *hdr) { + if (!ff_layout_need_layoutcommit(hdr->lseg)) + return; + pnfs_set_layoutcommit(hdr->inode, hdr->lseg, hdr->mds_offset + hdr->res.count); dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino, @@ -909,6 +1061,10 @@ ff_layout_reset_to_mds(struct pnfs_layout_segment *lseg, int idx) static int ff_layout_read_prepare_common(struct rpc_task *task, struct nfs_pgio_header *hdr) { + nfs4_ff_layout_stat_io_start_read( + FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), + hdr->args.count); + if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) { rpc_exit(task, -EIO); return -EIO; @@ -962,15 +1118,15 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data) { struct nfs_pgio_header *hdr = data; - if (ff_layout_read_prepare_common(task, hdr)) - return; - if (ff_layout_setup_sequence(hdr->ds_clp, &hdr->args.seq_args, &hdr->res.seq_res, task)) return; + if (ff_layout_read_prepare_common(task, hdr)) + return; + if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, hdr->args.lock_context, FMODE_READ) == -EIO) rpc_exit(task, -EIO); /* lost lock, terminate I/O */ @@ -982,6 +1138,10 @@ static void ff_layout_read_call_done(struct rpc_task *task, void *data) dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status); + nfs4_ff_layout_stat_io_end_read(task, + FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), + hdr->args.count, hdr->res.count); + if (test_bit(NFS_IOHDR_REDO, &hdr->flags) && task->tk_status == 0) { nfs4_sequence_done(task, &hdr->res.seq_res); @@ -1074,7 +1234,8 @@ static int ff_layout_commit_done_cb(struct rpc_task *task, return -EAGAIN; } - if (data->verf.committed == NFS_UNSTABLE) + if (data->verf.committed == NFS_UNSTABLE + && ff_layout_need_layoutcommit(data->lseg)) pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb); return 0; @@ -1083,6 +1244,10 @@ static int ff_layout_commit_done_cb(struct rpc_task *task, static int ff_layout_write_prepare_common(struct rpc_task *task, struct nfs_pgio_header *hdr) { + nfs4_ff_layout_stat_io_start_write( + FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), + hdr->args.count); + if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) { rpc_exit(task, -EIO); return -EIO; @@ -1116,15 +1281,15 @@ static void ff_layout_write_prepare_v4(struct rpc_task *task, void *data) { struct nfs_pgio_header *hdr = data; - if (ff_layout_write_prepare_common(task, hdr)) - return; - if (ff_layout_setup_sequence(hdr->ds_clp, &hdr->args.seq_args, &hdr->res.seq_res, task)) return; + if (ff_layout_write_prepare_common(task, hdr)) + return; + if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context, hdr->args.lock_context, FMODE_WRITE) == -EIO) rpc_exit(task, -EIO); /* lost lock, terminate I/O */ @@ -1134,6 +1299,11 @@ static void ff_layout_write_call_done(struct rpc_task *task, void *data) { struct nfs_pgio_header *hdr = data; + nfs4_ff_layout_stat_io_end_write(task, + FF_LAYOUT_COMP(hdr->lseg, hdr->pgio_mirror_idx), + hdr->args.count, hdr->res.count, + hdr->res.verf->committed); + if (test_bit(NFS_IOHDR_REDO, &hdr->flags) && task->tk_status == 0) { nfs4_sequence_done(task, &hdr->res.seq_res); @@ -1152,8 +1322,17 @@ static void ff_layout_write_count_stats(struct rpc_task *task, void *data) &NFS_CLIENT(hdr->inode)->cl_metrics[NFSPROC4_CLNT_WRITE]); } +static void ff_layout_commit_prepare_common(struct rpc_task *task, + struct nfs_commit_data *cdata) +{ + 
nfs4_ff_layout_stat_io_start_write( + FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index), + 0); +} + static void ff_layout_commit_prepare_v3(struct rpc_task *task, void *data) { + ff_layout_commit_prepare_common(task, data); rpc_call_start(task); } @@ -1161,10 +1340,30 @@ static void ff_layout_commit_prepare_v4(struct rpc_task *task, void *data) { struct nfs_commit_data *wdata = data; - ff_layout_setup_sequence(wdata->ds_clp, + if (ff_layout_setup_sequence(wdata->ds_clp, &wdata->args.seq_args, &wdata->res.seq_res, - task); + task)) + return; + ff_layout_commit_prepare_common(task, data); +} + +static void ff_layout_commit_done(struct rpc_task *task, void *data) +{ + struct nfs_commit_data *cdata = data; + struct nfs_page *req; + __u64 count = 0; + + if (task->tk_status == 0) { + list_for_each_entry(req, &cdata->pages, wb_list) + count += req->wb_bytes; + } + + nfs4_ff_layout_stat_io_end_write(task, + FF_LAYOUT_COMP(cdata->lseg, cdata->ds_commit_index), + count, count, NFS_FILE_SYNC); + + pnfs_generic_write_commit_done(task, data); } static void ff_layout_commit_count_stats(struct rpc_task *task, void *data) @@ -1205,14 +1404,14 @@ static const struct rpc_call_ops ff_layout_write_call_ops_v4 = { static const struct rpc_call_ops ff_layout_commit_call_ops_v3 = { .rpc_call_prepare = ff_layout_commit_prepare_v3, - .rpc_call_done = pnfs_generic_write_commit_done, + .rpc_call_done = ff_layout_commit_done, .rpc_count_stats = ff_layout_commit_count_stats, .rpc_release = pnfs_generic_commit_release, }; static const struct rpc_call_ops ff_layout_commit_call_ops_v4 = { .rpc_call_prepare = ff_layout_commit_prepare_v4, - .rpc_call_done = pnfs_generic_write_commit_done, + .rpc_call_done = ff_layout_commit_done, .rpc_count_stats = ff_layout_commit_count_stats, .rpc_release = pnfs_generic_commit_release, }; @@ -1256,7 +1455,6 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr) fh = nfs4_ff_layout_select_ds_fh(lseg, idx); if (fh) hdr->args.fh = fh; - /* * Note that if we ever decide to split across DSes, * then we may need to handle dense-like offsets. @@ -1385,6 +1583,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how) fh = select_ds_fh_from_commit(lseg, data->ds_commit_index); if (fh) data->args.fh = fh; + return nfs_initiate_commit(ds_clnt, data, ds->ds_clp->rpc_ops, vers == 3 ? &ff_layout_commit_call_ops_v3 : &ff_layout_commit_call_ops_v4, @@ -1488,6 +1687,247 @@ out: dprintk("%s: Return\n", __func__); } +static int +ff_layout_ntop4(const struct sockaddr *sap, char *buf, const size_t buflen) +{ + const struct sockaddr_in *sin = (struct sockaddr_in *)sap; + + return snprintf(buf, buflen, "%pI4", &sin->sin_addr); +} + +static size_t +ff_layout_ntop6_noscopeid(const struct sockaddr *sap, char *buf, + const int buflen) +{ + const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap; + const struct in6_addr *addr = &sin6->sin6_addr; + + /* + * RFC 4291, Section 2.2.2 + * + * Shorthanded ANY address + */ + if (ipv6_addr_any(addr)) + return snprintf(buf, buflen, "::"); + + /* + * RFC 4291, Section 2.2.2 + * + * Shorthanded loopback address + */ + if (ipv6_addr_loopback(addr)) + return snprintf(buf, buflen, "::1"); + + /* + * RFC 4291, Section 2.2.3 + * + * Special presentation address format for mapped v4 + * addresses. 
+ */ + if (ipv6_addr_v4mapped(addr)) + return snprintf(buf, buflen, "::ffff:%pI4", + &addr->s6_addr32[3]); + + /* + * RFC 4291, Section 2.2.1 + */ + return snprintf(buf, buflen, "%pI6c", addr); +} + +/* Derived from rpc_sockaddr2uaddr */ +static void +ff_layout_encode_netaddr(struct xdr_stream *xdr, struct nfs4_pnfs_ds_addr *da) +{ + struct sockaddr *sap = (struct sockaddr *)&da->da_addr; + char portbuf[RPCBIND_MAXUADDRPLEN]; + char addrbuf[RPCBIND_MAXUADDRLEN]; + char *netid; + unsigned short port; + int len, netid_len; + __be32 *p; + + switch (sap->sa_family) { + case AF_INET: + if (ff_layout_ntop4(sap, addrbuf, sizeof(addrbuf)) == 0) + return; + port = ntohs(((struct sockaddr_in *)sap)->sin_port); + netid = "tcp"; + netid_len = 3; + break; + case AF_INET6: + if (ff_layout_ntop6_noscopeid(sap, addrbuf, sizeof(addrbuf)) == 0) + return; + port = ntohs(((struct sockaddr_in6 *)sap)->sin6_port); + netid = "tcp6"; + netid_len = 4; + break; + default: + /* we only support tcp and tcp6 */ + WARN_ON_ONCE(1); + return; + } + + snprintf(portbuf, sizeof(portbuf), ".%u.%u", port >> 8, port & 0xff); + len = strlcat(addrbuf, portbuf, sizeof(addrbuf)); + + p = xdr_reserve_space(xdr, 4 + netid_len); + xdr_encode_opaque(p, netid, netid_len); + + p = xdr_reserve_space(xdr, 4 + len); + xdr_encode_opaque(p, addrbuf, len); +} + +static void +ff_layout_encode_nfstime(struct xdr_stream *xdr, + ktime_t t) +{ + struct timespec64 ts; + __be32 *p; + + p = xdr_reserve_space(xdr, 12); + ts = ktime_to_timespec64(t); + p = xdr_encode_hyper(p, ts.tv_sec); + *p++ = cpu_to_be32(ts.tv_nsec); +} + +static void +ff_layout_encode_io_latency(struct xdr_stream *xdr, + struct nfs4_ff_io_stat *stat) +{ + __be32 *p; + + p = xdr_reserve_space(xdr, 5 * 8); + p = xdr_encode_hyper(p, stat->ops_requested); + p = xdr_encode_hyper(p, stat->bytes_requested); + p = xdr_encode_hyper(p, stat->ops_completed); + p = xdr_encode_hyper(p, stat->bytes_completed); + p = xdr_encode_hyper(p, stat->bytes_not_delivered); + ff_layout_encode_nfstime(xdr, stat->total_busy_time); + ff_layout_encode_nfstime(xdr, stat->aggregate_completion_time); +} + +static void +ff_layout_encode_layoutstats(struct xdr_stream *xdr, + struct nfs42_layoutstat_args *args, + struct nfs42_layoutstat_devinfo *devinfo) +{ + struct nfs4_ff_layout_mirror *mirror = devinfo->layout_private; + struct nfs4_pnfs_ds_addr *da; + struct nfs4_pnfs_ds *ds = mirror->mirror_ds->ds; + struct nfs_fh *fh = &mirror->fh_versions[0]; + __be32 *p, *start; + + da = list_first_entry(&ds->ds_addrs, struct nfs4_pnfs_ds_addr, da_node); + dprintk("%s: DS %s: encoding address %s\n", + __func__, ds->ds_remotestr, da->da_remotestr); + /* layoutupdate length */ + start = xdr_reserve_space(xdr, 4); + /* netaddr4 */ + ff_layout_encode_netaddr(xdr, da); + /* nfs_fh4 */ + p = xdr_reserve_space(xdr, 4 + fh->size); + xdr_encode_opaque(p, fh->data, fh->size); + /* ff_io_latency4 read */ + spin_lock(&mirror->lock); + ff_layout_encode_io_latency(xdr, &mirror->read_stat.io_stat); + /* ff_io_latency4 write */ + ff_layout_encode_io_latency(xdr, &mirror->write_stat.io_stat); + spin_unlock(&mirror->lock); + /* nfstime4 */ + ff_layout_encode_nfstime(xdr, ktime_sub(ktime_get(), mirror->start_time)); + /* bool */ + p = xdr_reserve_space(xdr, 4); + *p = cpu_to_be32(false); + + *start = cpu_to_be32((xdr->p - start - 1) * 4); +} + +static bool +ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args, + struct pnfs_layout_segment *pls, + int *dev_count, int dev_limit) +{ + struct nfs4_ff_layout_mirror *mirror; + struct 
nfs4_deviceid_node *dev; + struct nfs42_layoutstat_devinfo *devinfo; + int i; + + for (i = 0; i <= FF_LAYOUT_MIRROR_COUNT(pls); i++) { + if (*dev_count >= dev_limit) + break; + mirror = FF_LAYOUT_COMP(pls, i); + if (!mirror || !mirror->mirror_ds) + continue; + dev = FF_LAYOUT_DEVID_NODE(pls, i); + devinfo = &args->devinfo[*dev_count]; + memcpy(&devinfo->dev_id, &dev->deviceid, NFS4_DEVICEID4_SIZE); + devinfo->offset = pls->pls_range.offset; + devinfo->length = pls->pls_range.length; + /* well, we don't really know if IO is continuous or not! */ + devinfo->read_count = mirror->read_stat.io_stat.bytes_completed; + devinfo->read_bytes = mirror->read_stat.io_stat.bytes_completed; + devinfo->write_count = mirror->write_stat.io_stat.bytes_completed; + devinfo->write_bytes = mirror->write_stat.io_stat.bytes_completed; + devinfo->layout_type = LAYOUT_FLEX_FILES; + devinfo->layoutstats_encode = ff_layout_encode_layoutstats; + devinfo->layout_private = mirror; + /* lseg refcount put in cleanup_layoutstats */ + pnfs_get_lseg(pls); + + ++(*dev_count); + } + + return *dev_count < dev_limit; +} + +static int +ff_layout_prepare_layoutstats(struct nfs42_layoutstat_args *args) +{ + struct pnfs_layout_segment *pls; + int dev_count = 0; + + spin_lock(&args->inode->i_lock); + list_for_each_entry(pls, &NFS_I(args->inode)->layout->plh_segs, pls_list) { + dev_count += FF_LAYOUT_MIRROR_COUNT(pls); + } + spin_unlock(&args->inode->i_lock); + /* For now, send at most PNFS_LAYOUTSTATS_MAXDEV statistics */ + if (dev_count > PNFS_LAYOUTSTATS_MAXDEV) { + dprintk("%s: truncating devinfo to limit (%d:%d)\n", + __func__, dev_count, PNFS_LAYOUTSTATS_MAXDEV); + dev_count = PNFS_LAYOUTSTATS_MAXDEV; + } + args->devinfo = kmalloc(dev_count * sizeof(*args->devinfo), GFP_KERNEL); + if (!args->devinfo) + return -ENOMEM; + + dev_count = 0; + spin_lock(&args->inode->i_lock); + list_for_each_entry(pls, &NFS_I(args->inode)->layout->plh_segs, pls_list) { + if (!ff_layout_mirror_prepare_stats(args, pls, &dev_count, + PNFS_LAYOUTSTATS_MAXDEV)) { + break; + } + } + spin_unlock(&args->inode->i_lock); + args->num_dev = dev_count; + + return 0; +} + +static void +ff_layout_cleanup_layoutstats(struct nfs42_layoutstat_data *data) +{ + struct nfs4_ff_layout_mirror *mirror; + int i; + + for (i = 0; i < data->args.num_dev; i++) { + mirror = data->args.devinfo[i].layout_private; + data->args.devinfo[i].layout_private = NULL; + pnfs_put_lseg(mirror->lseg); + } +} + static struct pnfs_layoutdriver_type flexfilelayout_type = { .id = LAYOUT_FLEX_FILES, .name = "LAYOUT_FLEX_FILES", @@ -1510,6 +1950,8 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = { .alloc_deviceid_node = ff_layout_alloc_deviceid_node, .encode_layoutreturn = ff_layout_encode_layoutreturn, .sync = pnfs_nfs_generic_sync, + .prepare_layoutstats = ff_layout_prepare_layoutstats, + .cleanup_layoutstats = ff_layout_cleanup_layoutstats, }; static int __init nfs4flexfilelayout_init(void) diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h index 070f20445b2d..f92f9a0a856b 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.h +++ b/fs/nfs/flexfilelayout/flexfilelayout.h @@ -9,12 +9,17 @@ #ifndef FS_NFS_NFS4FLEXFILELAYOUT_H #define FS_NFS_NFS4FLEXFILELAYOUT_H +#define FF_FLAGS_NO_LAYOUTCOMMIT 1 + #include "../pnfs.h" /* XXX: Let's filter out insanely large mirror count for now to avoid oom * due to network error etc. 
*/ #define NFS4_FLEXFILE_LAYOUT_MAX_MIRROR_CNT 4096 +/* LAYOUTSTATS report interval in ms */ +#define FF_LAYOUTSTATS_REPORT_INTERVAL (60000L) + struct nfs4_ff_ds_version { u32 version; u32 minor_version; @@ -41,24 +46,48 @@ struct nfs4_ff_layout_ds_err { struct nfs4_deviceid deviceid; }; +struct nfs4_ff_io_stat { + __u64 ops_requested; + __u64 bytes_requested; + __u64 ops_completed; + __u64 bytes_completed; + __u64 bytes_not_delivered; + ktime_t total_busy_time; + ktime_t aggregate_completion_time; +}; + +struct nfs4_ff_busy_timer { + ktime_t start_time; + atomic_t n_ops; +}; + +struct nfs4_ff_layoutstat { + struct nfs4_ff_io_stat io_stat; + struct nfs4_ff_busy_timer busy_timer; +}; + struct nfs4_ff_layout_mirror { + struct pnfs_layout_segment *lseg; /* back pointer */ u32 ds_count; u32 efficiency; struct nfs4_ff_layout_ds *mirror_ds; u32 fh_versions_cnt; struct nfs_fh *fh_versions; nfs4_stateid stateid; - struct nfs4_string user_name; - struct nfs4_string group_name; u32 uid; u32 gid; struct rpc_cred *cred; spinlock_t lock; + struct nfs4_ff_layoutstat read_stat; + struct nfs4_ff_layoutstat write_stat; + ktime_t start_time; + ktime_t last_report_time; }; struct nfs4_ff_layout_segment { struct pnfs_layout_segment generic_hdr; u64 stripe_unit; + u32 flags; u32 mirror_array_cnt; struct nfs4_ff_layout_mirror **mirror_array; }; diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c index 77a2d026aa12..f13e1969eedd 100644 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c @@ -324,7 +324,8 @@ static int ff_layout_update_mirror_cred(struct nfs4_ff_layout_mirror *mirror, __func__, PTR_ERR(cred)); return PTR_ERR(cred); } else { - mirror->cred = cred; + if (cmpxchg(&mirror->cred, NULL, cred)) + put_rpccred(cred); } } return 0; @@ -386,7 +387,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx, /* matching smp_wmb() in _nfs4_pnfs_v3/4_ds_connect */ smp_rmb(); if (ds->ds_clp) - goto out; + goto out_update_creds; flavor = nfs4_ff_layout_choose_authflavor(mirror); @@ -430,7 +431,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx, } } } - +out_update_creds: if (ff_layout_update_mirror_cred(mirror, ds)) ds = NULL; out: diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index f734562c6d24..b77b328a06d7 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -678,6 +678,8 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) if (!err) { generic_fillattr(inode, stat); stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode)); + if (S_ISDIR(inode->i_mode)) + stat->blksize = NFS_SERVER(inode)->dtsize; } out: trace_nfs_getattr_exit(inode, err); @@ -2008,17 +2010,15 @@ static int __init init_nfs_fs(void) if (err) goto out1; -#ifdef CONFIG_PROC_FS rpc_proc_register(&init_net, &nfs_rpcstat); -#endif - if ((err = register_nfs_fs()) != 0) + + err = register_nfs_fs(); + if (err) goto out0; return 0; out0: -#ifdef CONFIG_PROC_FS rpc_proc_unregister(&init_net, "nfs"); -#endif nfs_destroy_directcache(); out1: nfs_destroy_writepagecache(); @@ -2049,9 +2049,7 @@ static void __exit exit_nfs_fs(void) nfs_destroy_nfspagecache(); nfs_fscache_unregister(); unregister_pernet_subsys(&nfs_net_ops); -#ifdef CONFIG_PROC_FS rpc_proc_unregister(&init_net, "nfs"); -#endif unregister_nfs_fs(); nfs_fs_proc_exit(); nfsiod_stop(); diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c index 53852a4bd88b..9b04c2e6fffc 100644 --- a/fs/nfs/nfs3xdr.c +++ b/fs/nfs/nfs3xdr.c @@ -1342,7 +1342,7 @@ 
static void nfs3_xdr_enc_setacl3args(struct rpc_rqst *req, if (args->npages != 0) xdr_write_pages(xdr, args->pages, 0, args->len); else - xdr_reserve_space(xdr, NFS_ACL_INLINE_BUFSIZE); + xdr_reserve_space(xdr, args->len); error = nfsacl_encode(xdr->buf, base, args->inode, (args->mask & NFS_ACL) ? diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h index 7afb8947dfdf..ff66ae700b89 100644 --- a/fs/nfs/nfs42.h +++ b/fs/nfs/nfs42.h @@ -5,11 +5,18 @@ #ifndef __LINUX_FS_NFS_NFS4_2_H #define __LINUX_FS_NFS_NFS4_2_H +/* + * FIXME: four LAYOUTSTATS calls per compound at most! Do we need to support + * more? Need to consider not to pre-alloc too much for a compound. + */ +#define PNFS_LAYOUTSTATS_MAXDEV (4) + /* nfs4.2proc.c */ int nfs42_proc_allocate(struct file *, loff_t, loff_t); int nfs42_proc_deallocate(struct file *, loff_t, loff_t); loff_t nfs42_proc_llseek(struct file *, loff_t, int); - +int nfs42_proc_layoutstats_generic(struct nfs_server *, + struct nfs42_layoutstat_data *); /* nfs4.2xdr.h */ extern struct rpc_procinfo nfs4_2_procedures[]; diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 3a9e75235f30..f486b80f927a 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -10,6 +10,11 @@ #include <linux/nfs_fs.h> #include "nfs4_fs.h" #include "nfs42.h" +#include "iostat.h" +#include "pnfs.h" +#include "internal.h" + +#define NFSDBG_FACILITY NFSDBG_PNFS static int nfs42_set_rw_stateid(nfs4_stateid *dst, struct file *file, fmode_t fmode) @@ -165,3 +170,85 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes); } + +static void +nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata) +{ + struct nfs42_layoutstat_data *data = calldata; + struct nfs_server *server = NFS_SERVER(data->args.inode); + + nfs41_setup_sequence(nfs4_get_session(server), &data->args.seq_args, + &data->res.seq_res, task); +} + +static void +nfs42_layoutstat_done(struct rpc_task *task, void *calldata) +{ + struct nfs42_layoutstat_data *data = calldata; + + if (!nfs4_sequence_done(task, &data->res.seq_res)) + return; + + switch (task->tk_status) { + case 0: + break; + case -ENOTSUPP: + case -EOPNOTSUPP: + NFS_SERVER(data->inode)->caps &= ~NFS_CAP_LAYOUTSTATS; + default: + dprintk("%s server returns %d\n", __func__, task->tk_status); + } +} + +static void +nfs42_layoutstat_release(void *calldata) +{ + struct nfs42_layoutstat_data *data = calldata; + struct nfs_server *nfss = NFS_SERVER(data->args.inode); + + if (nfss->pnfs_curr_ld->cleanup_layoutstats) + nfss->pnfs_curr_ld->cleanup_layoutstats(data); + + pnfs_put_layout_hdr(NFS_I(data->args.inode)->layout); + smp_mb__before_atomic(); + clear_bit(NFS_INO_LAYOUTSTATS, &NFS_I(data->args.inode)->flags); + smp_mb__after_atomic(); + nfs_iput_and_deactive(data->inode); + kfree(data->args.devinfo); + kfree(data); +} + +static const struct rpc_call_ops nfs42_layoutstat_ops = { + .rpc_call_prepare = nfs42_layoutstat_prepare, + .rpc_call_done = nfs42_layoutstat_done, + .rpc_release = nfs42_layoutstat_release, +}; + +int nfs42_proc_layoutstats_generic(struct nfs_server *server, + struct nfs42_layoutstat_data *data) +{ + struct rpc_message msg = { + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTSTATS], + .rpc_argp = &data->args, + .rpc_resp = &data->res, + }; + struct rpc_task_setup task_setup = { + .rpc_client = server->client, + .rpc_message = &msg, + .callback_ops = &nfs42_layoutstat_ops, + .callback_data = data, + .flags = RPC_TASK_ASYNC, + }; + struct rpc_task *task; + + data->inode 
= nfs_igrab_and_active(data->args.inode); + if (!data->inode) { + nfs42_layoutstat_release(data); + return -EAGAIN; + } + nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); + task = rpc_run_task(&task_setup); + if (IS_ERR(task)) + return PTR_ERR(task); + return 0; +} diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index 1a25b27248f2..a6bd27da6286 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -4,6 +4,8 @@ #ifndef __LINUX_FS_NFS_NFS4_2XDR_H #define __LINUX_FS_NFS_NFS4_2XDR_H +#include "nfs42.h" + #define encode_fallocate_maxsz (encode_stateid_maxsz + \ 2 /* offset */ + \ 2 /* length */) @@ -22,6 +24,16 @@ 1 /* whence */ + \ 2 /* offset */ + \ 2 /* length */) +#define encode_io_info_maxsz 4 +#define encode_layoutstats_maxsz (op_decode_hdr_maxsz + \ + 2 /* offset */ + \ + 2 /* length */ + \ + encode_stateid_maxsz + \ + encode_io_info_maxsz + \ + encode_io_info_maxsz + \ + 1 /* opaque devaddr4 length */ + \ + XDR_QUADLEN(PNFS_LAYOUTSTATS_MAXSIZE)) +#define decode_layoutstats_maxsz (op_decode_hdr_maxsz) #define NFS4_enc_allocate_sz (compound_encode_hdr_maxsz + \ encode_putfh_maxsz + \ @@ -45,6 +57,14 @@ #define NFS4_dec_seek_sz (compound_decode_hdr_maxsz + \ decode_putfh_maxsz + \ decode_seek_maxsz) +#define NFS4_enc_layoutstats_sz (compound_encode_hdr_maxsz + \ + encode_sequence_maxsz + \ + encode_putfh_maxsz + \ + PNFS_LAYOUTSTATS_MAXDEV * encode_layoutstats_maxsz) +#define NFS4_dec_layoutstats_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ + decode_putfh_maxsz + \ + PNFS_LAYOUTSTATS_MAXDEV * decode_layoutstats_maxsz) static void encode_fallocate(struct xdr_stream *xdr, @@ -81,6 +101,33 @@ static void encode_seek(struct xdr_stream *xdr, encode_uint32(xdr, args->sa_what); } +static void encode_layoutstats(struct xdr_stream *xdr, + struct nfs42_layoutstat_args *args, + struct nfs42_layoutstat_devinfo *devinfo, + struct compound_hdr *hdr) +{ + __be32 *p; + + encode_op_hdr(xdr, OP_LAYOUTSTATS, decode_layoutstats_maxsz, hdr); + p = reserve_space(xdr, 8 + 8); + p = xdr_encode_hyper(p, devinfo->offset); + p = xdr_encode_hyper(p, devinfo->length); + encode_nfs4_stateid(xdr, &args->stateid); + p = reserve_space(xdr, 4*8 + NFS4_DEVICEID4_SIZE + 4); + p = xdr_encode_hyper(p, devinfo->read_count); + p = xdr_encode_hyper(p, devinfo->read_bytes); + p = xdr_encode_hyper(p, devinfo->write_count); + p = xdr_encode_hyper(p, devinfo->write_bytes); + p = xdr_encode_opaque_fixed(p, devinfo->dev_id.data, + NFS4_DEVICEID4_SIZE); + /* Encode layoutupdate4 */ + *p++ = cpu_to_be32(devinfo->layout_type); + if (devinfo->layoutstats_encode != NULL) + devinfo->layoutstats_encode(xdr, args, devinfo); + else + encode_uint32(xdr, 0); +} + /* * Encode ALLOCATE request */ @@ -137,6 +184,28 @@ static void nfs4_xdr_enc_seek(struct rpc_rqst *req, encode_nops(&hdr); } +/* + * Encode LAYOUTSTATS request + */ +static void nfs4_xdr_enc_layoutstats(struct rpc_rqst *req, + struct xdr_stream *xdr, + struct nfs42_layoutstat_args *args) +{ + int i; + + struct compound_hdr hdr = { + .minorversion = nfs4_xdr_minorversion(&args->seq_args), + }; + + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + WARN_ON(args->num_dev > PNFS_LAYOUTSTATS_MAXDEV); + for (i = 0; i < args->num_dev; i++) + encode_layoutstats(xdr, args, &args->devinfo[i], &hdr); + encode_nops(&hdr); +} + static int decode_allocate(struct xdr_stream *xdr, struct nfs42_falloc_res *res) { return decode_op_hdr(xdr, OP_ALLOCATE); @@ -169,6 +238,12 @@ out_overflow: return 
-EIO; } +static int decode_layoutstats(struct xdr_stream *xdr, + struct nfs42_layoutstat_res *res) +{ + return decode_op_hdr(xdr, OP_LAYOUTSTATS); +} + /* * Decode ALLOCATE request */ @@ -246,4 +321,35 @@ static int nfs4_xdr_dec_seek(struct rpc_rqst *rqstp, out: return status; } + +/* + * Decode LAYOUTSTATS request + */ +static int nfs4_xdr_dec_layoutstats(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, + struct nfs42_layoutstat_res *res) +{ + struct compound_hdr hdr; + int status, i; + + status = decode_compound_hdr(xdr, &hdr); + if (status) + goto out; + status = decode_sequence(xdr, &res->seq_res, rqstp); + if (status) + goto out; + status = decode_putfh(xdr); + if (status) + goto out; + WARN_ON(res->num_dev > PNFS_LAYOUTSTATS_MAXDEV); + for (i = 0; i < res->num_dev; i++) { + status = decode_layoutstats(xdr, res); + if (status) + goto out; + } +out: + res->rpc_status = status; + return status; +} + #endif /* __LINUX_FS_NFS_NFS4_2XDR_H */ diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index fdef424b0cd3..ea3bee919a76 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -233,6 +233,7 @@ extern int nfs4_handle_exception(struct nfs_server *, int, struct nfs4_exception extern int nfs4_call_sync(struct rpc_clnt *, struct nfs_server *, struct rpc_message *, struct nfs4_sequence_args *, struct nfs4_sequence_res *, int); +extern void nfs4_init_sequence(struct nfs4_sequence_args *, struct nfs4_sequence_res *, int); extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *); extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *); extern int nfs4_proc_get_rootfh(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *, bool); diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index e42be52a8c18..3aa6a9ba5113 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -676,7 +676,6 @@ found: break; } - /* No matching nfs_client found. 
*/ spin_unlock(&nn->nfs_client_lock); dprintk("NFS: <-- %s status = %d\n", __func__, status); nfs_put_client(prev); diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index f58c17b3b480..dcd39d4e2efe 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -41,6 +41,10 @@ nfs4_file_open(struct inode *inode, struct file *filp) dprintk("NFS: open file(%pd2)\n", dentry); + err = nfs_check_flags(openflags); + if (err) + return err; + if ((openflags & O_ACCMODE) == 3) openflags--; diff --git a/fs/nfs/nfs4getroot.c b/fs/nfs/nfs4getroot.c index c0b3a16b4a00..039b3eb6d834 100644 --- a/fs/nfs/nfs4getroot.c +++ b/fs/nfs/nfs4getroot.c @@ -35,13 +35,6 @@ int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh, bool auth_p goto out; } - if (fsinfo.fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) { - printk(KERN_ERR "nfs4_get_rootfh:" - " getroot obtained referral\n"); - ret = -EREMOTE; - goto out; - } - memcpy(&server->fsid, &fsinfo.fattr->fsid, sizeof(server->fsid)); out: nfs_free_fattr(fsinfo.fattr); diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index 2e1737c40a29..535dfc69c628 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c @@ -494,12 +494,7 @@ nfs_idmap_delete(struct nfs_client *clp) int nfs_idmap_init(void) { - int ret; - ret = nfs_idmap_init_keyring(); - if (ret != 0) - goto out; -out: - return ret; + return nfs_idmap_init_keyring(); } void nfs_idmap_quit(void) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 55e1e3af23a3..6f228b5af819 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -356,6 +356,9 @@ int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_ case 0: return 0; case -NFS4ERR_OPENMODE: + case -NFS4ERR_DELEG_REVOKED: + case -NFS4ERR_ADMIN_REVOKED: + case -NFS4ERR_BAD_STATEID: if (inode && nfs4_have_delegation(inode, FMODE_READ)) { nfs4_inode_return_delegation(inode); exception->retry = 1; @@ -367,15 +370,6 @@ int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_ if (ret < 0) break; goto wait_on_recovery; - case -NFS4ERR_DELEG_REVOKED: - case -NFS4ERR_ADMIN_REVOKED: - case -NFS4ERR_BAD_STATEID: - if (state == NULL) - break; - ret = nfs4_schedule_stateid_recovery(server, state); - if (ret < 0) - break; - goto wait_on_recovery; case -NFS4ERR_EXPIRED: if (state != NULL) { ret = nfs4_schedule_stateid_recovery(server, state); @@ -482,8 +476,8 @@ struct nfs4_call_sync_data { struct nfs4_sequence_res *seq_res; }; -static void nfs4_init_sequence(struct nfs4_sequence_args *args, - struct nfs4_sequence_res *res, int cache_reply) +void nfs4_init_sequence(struct nfs4_sequence_args *args, + struct nfs4_sequence_res *res, int cache_reply) { args->sa_slot = NULL; args->sa_cache_this = cache_reply; @@ -1553,6 +1547,13 @@ static int nfs4_open_recover_helper(struct nfs4_opendata *opendata, fmode_t fmod struct nfs4_state *newstate; int ret; + if ((opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR || + opendata->o_arg.claim == NFS4_OPEN_CLAIM_DELEG_CUR_FH) && + (opendata->o_arg.u.delegation_type & fmode) != fmode) + /* This mode can't have been delegated, so we must have + * a valid open_stateid to cover it - not need to reclaim. 
+ */ + return 0; opendata->o_arg.open_flags = 0; opendata->o_arg.fmode = fmode; opendata->o_arg.share_access = nfs4_map_atomic_open_share( @@ -1684,6 +1685,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct "%d.\n", __func__, err); case 0: case -ENOENT: + case -EAGAIN: case -ESTALE: break; case -NFS4ERR_BADSESSION: @@ -3355,6 +3357,8 @@ static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, goto out; case -NFS4ERR_MOVED: err = nfs4_get_referral(client, dir, name, fattr, fhandle); + if (err == -NFS4ERR_MOVED) + err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); goto out; case -NFS4ERR_WRONGSEC: err = -EPERM; @@ -4955,49 +4959,128 @@ static void nfs4_init_boot_verifier(const struct nfs_client *clp, memcpy(bootverf->data, verf, sizeof(bootverf->data)); } -static unsigned int -nfs4_init_nonuniform_client_string(struct nfs_client *clp, - char *buf, size_t len) +static int +nfs4_init_nonuniform_client_string(struct nfs_client *clp) { - unsigned int result; + int result; + size_t len; + char *str; + bool retried = false; if (clp->cl_owner_id != NULL) - return strlcpy(buf, clp->cl_owner_id, len); + return 0; +retry: + rcu_read_lock(); + len = 10 + strlen(clp->cl_ipaddr) + 1 + + strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) + + 1 + + strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) + + 1; + rcu_read_unlock(); + + if (len > NFS4_OPAQUE_LIMIT + 1) + return -EINVAL; + + /* + * Since this string is allocated at mount time, and held until the + * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying + * about a memory-reclaim deadlock. + */ + str = kmalloc(len, GFP_KERNEL); + if (!str) + return -ENOMEM; rcu_read_lock(); - result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s", - clp->cl_ipaddr, - rpc_peeraddr2str(clp->cl_rpcclient, - RPC_DISPLAY_ADDR), - rpc_peeraddr2str(clp->cl_rpcclient, - RPC_DISPLAY_PROTO)); + result = scnprintf(str, len, "Linux NFSv4.0 %s/%s %s", + clp->cl_ipaddr, + rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR), + rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)); rcu_read_unlock(); - clp->cl_owner_id = kstrdup(buf, GFP_KERNEL); - return result; + + /* Did something change? */ + if (result >= len) { + kfree(str); + if (retried) + return -EINVAL; + retried = true; + goto retry; + } + clp->cl_owner_id = str; + return 0; } -static unsigned int -nfs4_init_uniform_client_string(struct nfs_client *clp, - char *buf, size_t len) +static int +nfs4_init_uniquifier_client_string(struct nfs_client *clp) +{ + int result; + size_t len; + char *str; + + len = 10 + 10 + 1 + 10 + 1 + + strlen(nfs4_client_id_uniquifier) + 1 + + strlen(clp->cl_rpcclient->cl_nodename) + 1; + + if (len > NFS4_OPAQUE_LIMIT + 1) + return -EINVAL; + + /* + * Since this string is allocated at mount time, and held until the + * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying + * about a memory-reclaim deadlock. 
+ */ + str = kmalloc(len, GFP_KERNEL); + if (!str) + return -ENOMEM; + + result = scnprintf(str, len, "Linux NFSv%u.%u %s/%s", + clp->rpc_ops->version, clp->cl_minorversion, + nfs4_client_id_uniquifier, + clp->cl_rpcclient->cl_nodename); + if (result >= len) { + kfree(str); + return -EINVAL; + } + clp->cl_owner_id = str; + return 0; +} + +static int +nfs4_init_uniform_client_string(struct nfs_client *clp) { - const char *nodename = clp->cl_rpcclient->cl_nodename; - unsigned int result; + int result; + size_t len; + char *str; if (clp->cl_owner_id != NULL) - return strlcpy(buf, clp->cl_owner_id, len); + return 0; if (nfs4_client_id_uniquifier[0] != '\0') - result = scnprintf(buf, len, "Linux NFSv%u.%u %s/%s", - clp->rpc_ops->version, - clp->cl_minorversion, - nfs4_client_id_uniquifier, - nodename); - else - result = scnprintf(buf, len, "Linux NFSv%u.%u %s", - clp->rpc_ops->version, clp->cl_minorversion, - nodename); - clp->cl_owner_id = kstrdup(buf, GFP_KERNEL); - return result; + return nfs4_init_uniquifier_client_string(clp); + + len = 10 + 10 + 1 + 10 + 1 + + strlen(clp->cl_rpcclient->cl_nodename) + 1; + + if (len > NFS4_OPAQUE_LIMIT + 1) + return -EINVAL; + + /* + * Since this string is allocated at mount time, and held until the + * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying + * about a memory-reclaim deadlock. + */ + str = kmalloc(len, GFP_KERNEL); + if (!str) + return -ENOMEM; + + result = scnprintf(str, len, "Linux NFSv%u.%u %s", + clp->rpc_ops->version, clp->cl_minorversion, + clp->cl_rpcclient->cl_nodename); + if (result >= len) { + kfree(str); + return -EINVAL; + } + clp->cl_owner_id = str; + return 0; } /* @@ -5044,7 +5127,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, struct nfs4_setclientid setclientid = { .sc_verifier = &sc_verifier, .sc_prog = program, - .sc_cb_ident = clp->cl_cb_ident, + .sc_clnt = clp, }; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID], @@ -5064,16 +5147,15 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, /* nfs_client_id4 */ nfs4_init_boot_verifier(clp, &sc_verifier); + if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags)) - setclientid.sc_name_len = - nfs4_init_uniform_client_string(clp, - setclientid.sc_name, - sizeof(setclientid.sc_name)); + status = nfs4_init_uniform_client_string(clp); else - setclientid.sc_name_len = - nfs4_init_nonuniform_client_string(clp, - setclientid.sc_name, - sizeof(setclientid.sc_name)); + status = nfs4_init_nonuniform_client_string(clp); + + if (status) + goto out; + /* cb_client4 */ setclientid.sc_netid_len = nfs4_init_callback_netid(clp, @@ -5083,9 +5165,9 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, sizeof(setclientid.sc_uaddr), "%s.%u.%u", clp->cl_ipaddr, port >> 8, port & 255); - dprintk("NFS call setclientid auth=%s, '%.*s'\n", + dprintk("NFS call setclientid auth=%s, '%s'\n", clp->cl_rpcclient->cl_auth->au_ops->au_name, - setclientid.sc_name_len, setclientid.sc_name); + clp->cl_owner_id); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) { status = PTR_ERR(task); @@ -5402,6 +5484,7 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl, atomic_inc(&lsp->ls_count); /* Ensure we don't close file until we're done freeing locks! 
*/ p->ctx = get_nfs_open_context(ctx); + get_file(fl->fl_file); memcpy(&p->fl, fl, sizeof(p->fl)); p->server = NFS_SERVER(inode); return p; @@ -5413,6 +5496,7 @@ static void nfs4_locku_release_calldata(void *data) nfs_free_seqid(calldata->arg.seqid); nfs4_put_lock_state(calldata->lsp); put_nfs_open_context(calldata->ctx); + fput(calldata->fl.fl_file); kfree(calldata); } @@ -6846,11 +6930,14 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, }; nfs4_init_boot_verifier(clp, &verifier); - args.id_len = nfs4_init_uniform_client_string(clp, args.id, - sizeof(args.id)); - dprintk("NFS call exchange_id auth=%s, '%.*s'\n", + + status = nfs4_init_uniform_client_string(clp); + if (status) + goto out; + + dprintk("NFS call exchange_id auth=%s, '%s'\n", clp->cl_rpcclient->cl_auth->au_ops->au_name, - args.id_len, args.id); + clp->cl_owner_id); res.server_owner = kzalloc(sizeof(struct nfs41_server_owner), GFP_NOFS); @@ -6885,7 +6972,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, /* unsupported! */ WARN_ON_ONCE(1); status = -EINVAL; - goto out_server_scope; + goto out_impl_id; } status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); @@ -6913,6 +7000,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, /* use the most recent implementation id */ kfree(clp->cl_implid); clp->cl_implid = res.impl_id; + res.impl_id = NULL; if (clp->cl_serverscope != NULL && !nfs41_same_server_scope(clp->cl_serverscope, @@ -6926,15 +7014,16 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, if (clp->cl_serverscope == NULL) { clp->cl_serverscope = res.server_scope; - goto out; + res.server_scope = NULL; } - } else - kfree(res.impl_id); + } -out_server_owner: - kfree(res.server_owner); +out_impl_id: + kfree(res.impl_id); out_server_scope: kfree(res.server_scope); +out_server_owner: + kfree(res.server_owner); out: if (clp->cl_implid != NULL) dprintk("NFS reply exchange_id: Server Implementation ID: " @@ -8061,9 +8150,8 @@ nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync) struct rpc_task *task; int status = 0; - dprintk("NFS: %4d initiating layoutcommit call. sync %d " - "lbw: %llu inode %lu\n", - data->task.tk_pid, sync, + dprintk("NFS: initiating layoutcommit call. 
sync %d " + "lbw: %llu inode %lu\n", sync, data->args.lastbytewritten, data->args.inode->i_ino); @@ -8557,7 +8645,8 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { | NFS_CAP_ATOMIC_OPEN_V1 | NFS_CAP_ALLOCATE | NFS_CAP_DEALLOCATE - | NFS_CAP_SEEK, + | NFS_CAP_SEEK + | NFS_CAP_LAYOUTSTATS, .init_client = nfs41_init_client, .shutdown_client = nfs41_shutdown_client, .match_stateid = nfs41_match_stateid, diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 2782cfca2265..605840dc89cf 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -309,7 +309,6 @@ int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) if (test_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state)) goto do_confirm; - nfs4_begin_drain_session(clp); status = nfs4_proc_exchange_id(clp, cred); if (status != 0) goto out; @@ -1482,6 +1481,8 @@ restart: spin_unlock(&state->state_lock); } nfs4_put_open_state(state); + clear_bit(NFS4CLNT_RECLAIM_NOGRACE, + &state->flags); spin_lock(&sp->so_lock); goto restart; } @@ -1830,6 +1831,7 @@ static int nfs4_establish_lease(struct nfs_client *clp) clp->cl_mvops->reboot_recovery_ops; int status; + nfs4_begin_drain_session(clp); cred = nfs4_get_clid_cred(clp); if (cred == NULL) return -ENOENT; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 0aea97841d30..558cd65dbdb7 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -139,7 +139,8 @@ static int nfs4_stat_to_errno(int); #define encode_setclientid_maxsz \ (op_encode_hdr_maxsz + \ XDR_QUADLEN(NFS4_VERIFIER_SIZE) + \ - XDR_QUADLEN(NFS4_SETCLIENTID_NAMELEN) + \ + /* client name */ \ + 1 + XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + \ 1 /* sc_prog */ + \ 1 + XDR_QUADLEN(RPCBIND_MAXNETIDLEN) + \ 1 + XDR_QUADLEN(RPCBIND_MAXUADDRLEN) + \ @@ -288,7 +289,8 @@ static int nfs4_stat_to_errno(int); #define encode_exchange_id_maxsz (op_encode_hdr_maxsz + \ encode_verifier_maxsz + \ 1 /* co_ownerid.len */ + \ - XDR_QUADLEN(NFS4_EXCHANGE_ID_LEN) + \ + /* eia_clientowner */ \ + 1 + XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + \ 1 /* flags */ + \ 1 /* spa_how */ + \ /* max is SP4_MACH_CRED (for now) */ + \ @@ -1667,13 +1669,14 @@ static void encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclie encode_op_hdr(xdr, OP_SETCLIENTID, decode_setclientid_maxsz, hdr); encode_nfs4_verifier(xdr, setclientid->sc_verifier); - encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name); + encode_string(xdr, strlen(setclientid->sc_clnt->cl_owner_id), + setclientid->sc_clnt->cl_owner_id); p = reserve_space(xdr, 4); *p = cpu_to_be32(setclientid->sc_prog); encode_string(xdr, setclientid->sc_netid_len, setclientid->sc_netid); encode_string(xdr, setclientid->sc_uaddr_len, setclientid->sc_uaddr); p = reserve_space(xdr, 4); - *p = cpu_to_be32(setclientid->sc_cb_ident); + *p = cpu_to_be32(setclientid->sc_clnt->cl_cb_ident); } static void encode_setclientid_confirm(struct xdr_stream *xdr, const struct nfs4_setclientid_res *arg, struct compound_hdr *hdr) @@ -1747,7 +1750,8 @@ static void encode_exchange_id(struct xdr_stream *xdr, encode_op_hdr(xdr, OP_EXCHANGE_ID, decode_exchange_id_maxsz, hdr); encode_nfs4_verifier(xdr, args->verifier); - encode_string(xdr, args->id_len, args->id); + encode_string(xdr, strlen(args->client->cl_owner_id), + args->client->cl_owner_id); encode_uint32(xdr, args->flags); encode_uint32(xdr, args->state_protect.how); @@ -7427,6 +7431,7 @@ struct rpc_procinfo nfs4_procedures[] = { PROC(SEEK, enc_seek, dec_seek), PROC(ALLOCATE, enc_allocate, dec_allocate), PROC(DEALLOCATE, enc_deallocate, dec_deallocate), + 
PROC(LAYOUTSTATS, enc_layoutstats, dec_layoutstats), #endif /* CONFIG_NFS_V4_2 */ }; diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 282b39369510..1da68d3b1eda 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -636,9 +636,8 @@ int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr, hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how); - dprintk("NFS: %5u initiated pgio call " + dprintk("NFS: initiated pgio call " "(req %s/%llu, %u bytes @ offset %llu)\n", - hdr->task.tk_pid, hdr->inode->i_sb->s_id, (unsigned long long)NFS_FILEID(hdr->inode), hdr->args.count, @@ -690,8 +689,6 @@ static int nfs_pgio_error(struct nfs_pageio_descriptor *desc, static void nfs_pgio_release(void *calldata) { struct nfs_pgio_header *hdr = calldata; - if (hdr->rw_ops->rw_release) - hdr->rw_ops->rw_release(hdr); nfs_pgio_data_destroy(hdr); hdr->completion_ops->completion(hdr); } @@ -711,7 +708,9 @@ static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror, * nfs_pageio_init - initialise a page io descriptor * @desc: pointer to descriptor * @inode: pointer to inode - * @doio: pointer to io function + * @pg_ops: pointer to pageio operations + * @compl_ops: pointer to pageio completion operations + * @rw_ops: pointer to nfs read/write operations * @bsize: io block size * @io_flags: extra parameters for the io function */ @@ -1186,6 +1185,7 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an * nfs_pageio_descriptor * @desc: pointer to io descriptor + * @mirror_idx: pointer to mirror index */ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc, u32 mirror_idx) diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 230606243be6..0ba9a02c9566 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -35,6 +35,7 @@ #include "iostat.h" #include "nfs4trace.h" #include "delegation.h" +#include "nfs42.h" #define NFSDBG_FACILITY NFSDBG_PNFS #define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ) @@ -1821,6 +1822,7 @@ int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr) /* Resend all requests through the MDS */ nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true, hdr->completion_ops); + set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags); return nfs_pageio_resend(&pgio, hdr); } EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds); @@ -1865,6 +1867,7 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc, mirror->pg_recoalesce = 1; } nfs_pgio_data_destroy(hdr); + hdr->release(hdr); } static enum pnfs_try_status @@ -1979,6 +1982,7 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc, mirror->pg_recoalesce = 1; } nfs_pgio_data_destroy(hdr); + hdr->release(hdr); } /* @@ -2247,3 +2251,63 @@ struct nfs4_threshold *pnfs_mdsthreshold_alloc(void) } return thp; } + +#if IS_ENABLED(CONFIG_NFS_V4_2) +int +pnfs_report_layoutstat(struct inode *inode) +{ + struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; + struct nfs_server *server = NFS_SERVER(inode); + struct nfs_inode *nfsi = NFS_I(inode); + struct nfs42_layoutstat_data *data; + struct pnfs_layout_hdr *hdr; + int status = 0; + + if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats) + goto out; + + if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS)) + goto out; + + if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags)) + goto out; + + spin_lock(&inode->i_lock); + if (!NFS_I(inode)->layout) { + spin_unlock(&inode->i_lock); + goto out; + } + hdr = NFS_I(inode)->layout; + 
pnfs_get_layout_hdr(hdr); + spin_unlock(&inode->i_lock); + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto out_put; + } + + data->args.fh = NFS_FH(inode); + data->args.inode = inode; + nfs4_stateid_copy(&data->args.stateid, &hdr->plh_stateid); + status = ld->prepare_layoutstats(&data->args); + if (status) + goto out_free; + + status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data); + +out: + dprintk("%s returns %d\n", __func__, status); + return status; + +out_free: + kfree(data); +out_put: + pnfs_put_layout_hdr(hdr); + smp_mb__before_atomic(); + clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags); + smp_mb__after_atomic(); + goto out; +} +EXPORT_SYMBOL_GPL(pnfs_report_layoutstat); +#endif diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 1e6308f82fc3..3e6ab7bfbabd 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -178,6 +178,8 @@ struct pnfs_layoutdriver_type { void (*encode_layoutcommit) (struct pnfs_layout_hdr *lo, struct xdr_stream *xdr, const struct nfs4_layoutcommit_args *args); + int (*prepare_layoutstats) (struct nfs42_layoutstat_args *args); + void (*cleanup_layoutstats) (struct nfs42_layoutstat_data *data); }; struct pnfs_layout_hdr { @@ -290,7 +292,6 @@ int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *); struct nfs4_threshold *pnfs_mdsthreshold_alloc(void); void pnfs_error_mark_layout_for_return(struct inode *inode, struct pnfs_layout_segment *lseg); - /* nfs4_deviceid_flags */ enum { NFS_DEVICEID_INVALID = 0, /* set when MDS clientid recalled */ @@ -689,4 +690,14 @@ static inline void nfs4_pnfs_v3_ds_connect_unload(void) #endif /* CONFIG_NFS_V4_1 */ +#if IS_ENABLED(CONFIG_NFS_V4_2) +int pnfs_report_layoutstat(struct inode *inode); +#else +static inline int +pnfs_report_layoutstat(struct inode *inode) +{ + return 0; +} +#endif + #endif /* FS_NFS_PNFS_H */ diff --git a/fs/nfs/write.c b/fs/nfs/write.c index e6c262555e08..65869ca9c851 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1290,6 +1290,7 @@ static void nfs_initiate_write(struct nfs_pgio_header *hdr, static void nfs_redirty_request(struct nfs_page *req) { nfs_mark_request_dirty(req); + set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags); nfs_unlock_request(req); nfs_end_page_writeback(req); nfs_release_request(req); @@ -1348,11 +1349,6 @@ void nfs_commit_prepare(struct rpc_task *task, void *calldata) NFS_PROTO(data->inode)->commit_rpc_prepare(task, data); } -static void nfs_writeback_release_common(struct nfs_pgio_header *hdr) -{ - /* do nothing! */ -} - /* * Special version of should_remove_suid() that ignores capabilities. */ @@ -1556,7 +1552,7 @@ int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data, /* Set up the initial task struct. 
*/ nfs_ops->commit_setup(data, &msg); - dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid); + dprintk("NFS: initiated commit call\n"); nfs4_state_protect(NFS_SERVER(data->inode)->nfs_client, NFS_SP4_MACH_CRED_COMMIT, &task_setup_data.rpc_client, &msg); @@ -2013,7 +2009,6 @@ static const struct nfs_rw_ops nfs_rw_write_ops = { .rw_mode = FMODE_WRITE, .rw_alloc_header = nfs_writehdr_alloc, .rw_free_header = nfs_writehdr_free, - .rw_release = nfs_writeback_release_common, .rw_done = nfs_writeback_done, .rw_result = nfs_writeback_result, .rw_initiate = nfs_initiate_write, diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 450648697433..5b1e2a497e51 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -26,7 +26,7 @@ #include <linux/fs.h> /* struct inode */ #include <linux/fsnotify_backend.h> #include <linux/idr.h> -#include <linux/init.h> /* module_init */ +#include <linux/init.h> /* fs_initcall */ #include <linux/inotify.h> #include <linux/kernel.h> /* roundup() */ #include <linux/namei.h> /* LOOKUP_FOLLOW */ @@ -812,4 +812,4 @@ static int __init inotify_user_setup(void) return 0; } -module_init(inotify_user_setup); +fs_initcall(inotify_user_setup); diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 907870e81a72..70e9af551600 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -23,6 +23,7 @@ struct ovl_cache_entry { u64 ino; struct list_head l_node; struct rb_node node; + struct ovl_cache_entry *next_maybe_whiteout; bool is_whiteout; char name[]; }; @@ -39,7 +40,7 @@ struct ovl_readdir_data { struct rb_root root; struct list_head *list; struct list_head middle; - struct dentry *dir; + struct ovl_cache_entry *first_maybe_whiteout; int count; int err; }; @@ -79,7 +80,7 @@ static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root, return NULL; } -static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir, +static struct ovl_cache_entry *ovl_cache_entry_new(struct ovl_readdir_data *rdd, const char *name, int len, u64 ino, unsigned int d_type) { @@ -98,29 +99,8 @@ static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir, p->is_whiteout = false; if (d_type == DT_CHR) { - struct dentry *dentry; - const struct cred *old_cred; - struct cred *override_cred; - - override_cred = prepare_creds(); - if (!override_cred) { - kfree(p); - return NULL; - } - - /* - * CAP_DAC_OVERRIDE for lookup - */ - cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); - old_cred = override_creds(override_cred); - - dentry = lookup_one_len(name, dir, len); - if (!IS_ERR(dentry)) { - p->is_whiteout = ovl_is_whiteout(dentry); - dput(dentry); - } - revert_creds(old_cred); - put_cred(override_cred); + p->next_maybe_whiteout = rdd->first_maybe_whiteout; + rdd->first_maybe_whiteout = p; } return p; } @@ -148,7 +128,7 @@ static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd, return 0; } - p = ovl_cache_entry_new(rdd->dir, name, len, ino, d_type); + p = ovl_cache_entry_new(rdd, name, len, ino, d_type); if (p == NULL) return -ENOMEM; @@ -169,7 +149,7 @@ static int ovl_fill_lower(struct ovl_readdir_data *rdd, if (p) { list_move_tail(&p->l_node, &rdd->middle); } else { - p = ovl_cache_entry_new(rdd->dir, name, namelen, ino, d_type); + p = ovl_cache_entry_new(rdd, name, namelen, ino, d_type); if (p == NULL) rdd->err = -ENOMEM; else @@ -219,6 +199,43 @@ static int ovl_fill_merge(struct dir_context *ctx, const char *name, return ovl_fill_lower(rdd, name, namelen, offset, ino, 
d_type); } +static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd) +{ + int err; + struct ovl_cache_entry *p; + struct dentry *dentry; + const struct cred *old_cred; + struct cred *override_cred; + + override_cred = prepare_creds(); + if (!override_cred) + return -ENOMEM; + + /* + * CAP_DAC_OVERRIDE for lookup + */ + cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); + old_cred = override_creds(override_cred); + + err = mutex_lock_killable(&dir->d_inode->i_mutex); + if (!err) { + while (rdd->first_maybe_whiteout) { + p = rdd->first_maybe_whiteout; + rdd->first_maybe_whiteout = p->next_maybe_whiteout; + dentry = lookup_one_len(p->name, dir, p->len); + if (!IS_ERR(dentry)) { + p->is_whiteout = ovl_is_whiteout(dentry); + dput(dentry); + } + } + mutex_unlock(&dir->d_inode->i_mutex); + } + revert_creds(old_cred); + put_cred(override_cred); + + return err; +} + static inline int ovl_dir_read(struct path *realpath, struct ovl_readdir_data *rdd) { @@ -229,7 +246,7 @@ static inline int ovl_dir_read(struct path *realpath, if (IS_ERR(realfile)) return PTR_ERR(realfile); - rdd->dir = realpath->dentry; + rdd->first_maybe_whiteout = NULL; rdd->ctx.pos = 0; do { rdd->count = 0; @@ -238,6 +255,10 @@ static inline int ovl_dir_read(struct path *realpath, if (err >= 0) err = rdd->err; } while (!err && rdd->count); + + if (!err && rdd->first_maybe_whiteout) + err = ovl_check_whiteouts(realpath->dentry, rdd); + fput(realfile); return err; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index bf8537c7f455..8a08c582bc22 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -273,10 +273,57 @@ static void ovl_dentry_release(struct dentry *dentry) } } +static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags) +{ + struct ovl_entry *oe = dentry->d_fsdata; + unsigned int i; + int ret = 1; + + for (i = 0; i < oe->numlower; i++) { + struct dentry *d = oe->lowerstack[i].dentry; + + if (d->d_flags & DCACHE_OP_REVALIDATE) { + ret = d->d_op->d_revalidate(d, flags); + if (ret < 0) + return ret; + if (!ret) { + if (!(flags & LOOKUP_RCU)) + d_invalidate(d); + return -ESTALE; + } + } + } + return 1; +} + +static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags) +{ + struct ovl_entry *oe = dentry->d_fsdata; + unsigned int i; + int ret = 1; + + for (i = 0; i < oe->numlower; i++) { + struct dentry *d = oe->lowerstack[i].dentry; + + if (d->d_flags & DCACHE_OP_WEAK_REVALIDATE) { + ret = d->d_op->d_weak_revalidate(d, flags); + if (ret <= 0) + break; + } + } + return ret; +} + static const struct dentry_operations ovl_dentry_operations = { .d_release = ovl_dentry_release, }; +static const struct dentry_operations ovl_reval_dentry_operations = { + .d_release = ovl_dentry_release, + .d_revalidate = ovl_dentry_revalidate, + .d_weak_revalidate = ovl_dentry_weak_revalidate, +}; + static struct ovl_entry *ovl_alloc_entry(unsigned int numlower) { size_t size = offsetof(struct ovl_entry, lowerstack[numlower]); @@ -288,6 +335,20 @@ static struct ovl_entry *ovl_alloc_entry(unsigned int numlower) return oe; } +static bool ovl_dentry_remote(struct dentry *dentry) +{ + return dentry->d_flags & + (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE); +} + +static bool ovl_dentry_weird(struct dentry *dentry) +{ + return dentry->d_flags & (DCACHE_NEED_AUTOMOUNT | + DCACHE_MANAGE_TRANSIT | + DCACHE_OP_HASH | + DCACHE_OP_COMPARE); +} + static inline struct dentry *ovl_lookup_real(struct dentry *dir, struct qstr *name) { @@ -303,6 +364,10 @@ static inline struct 
dentry *ovl_lookup_real(struct dentry *dir, } else if (!dentry->d_inode) { dput(dentry); dentry = NULL; + } else if (ovl_dentry_weird(dentry)) { + dput(dentry); + /* Don't support traversing automounts and other weirdness */ + dentry = ERR_PTR(-EREMOTE); } return dentry; } @@ -350,6 +415,11 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, goto out; if (this) { + if (unlikely(ovl_dentry_remote(this))) { + dput(this); + err = -EREMOTE; + goto out; + } if (ovl_is_whiteout(this)) { dput(this); this = NULL; @@ -694,25 +764,6 @@ static void ovl_unescape(char *s) } } -static bool ovl_is_allowed_fs_type(struct dentry *root) -{ - const struct dentry_operations *dop = root->d_op; - - /* - * We don't support: - * - automount filesystems - * - filesystems with revalidate (FIXME for lower layer) - * - filesystems with case insensitive names - */ - if (dop && - (dop->d_manage || dop->d_automount || - dop->d_revalidate || dop->d_weak_revalidate || - dop->d_compare || dop->d_hash)) { - return false; - } - return true; -} - static int ovl_mount_dir_noesc(const char *name, struct path *path) { int err = -EINVAL; @@ -727,7 +778,7 @@ static int ovl_mount_dir_noesc(const char *name, struct path *path) goto out; } err = -EINVAL; - if (!ovl_is_allowed_fs_type(path->dentry)) { + if (ovl_dentry_weird(path->dentry)) { pr_err("overlayfs: filesystem on '%s' not supported\n", name); goto out_put; } @@ -751,13 +802,21 @@ static int ovl_mount_dir(const char *name, struct path *path) if (tmp) { ovl_unescape(tmp); err = ovl_mount_dir_noesc(tmp, path); + + if (!err) + if (ovl_dentry_remote(path->dentry)) { + pr_err("overlayfs: filesystem on '%s' not supported as upperdir\n", + tmp); + path_put(path); + err = -EINVAL; + } kfree(tmp); } return err; } static int ovl_lower_dir(const char *name, struct path *path, long *namelen, - int *stack_depth) + int *stack_depth, bool *remote) { int err; struct kstatfs statfs; @@ -774,6 +833,9 @@ static int ovl_lower_dir(const char *name, struct path *path, long *namelen, *namelen = max(*namelen, statfs.f_namelen); *stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth); + if (ovl_dentry_remote(path->dentry)) + *remote = true; + return 0; out_put: @@ -827,6 +889,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) unsigned int numlower; unsigned int stacklen = 0; unsigned int i; + bool remote = false; int err; err = -ENOMEM; @@ -900,7 +963,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) lower = lowertmp; for (numlower = 0; numlower < stacklen; numlower++) { err = ovl_lower_dir(lower, &stack[numlower], - &ufs->lower_namelen, &sb->s_stack_depth); + &ufs->lower_namelen, &sb->s_stack_depth, + &remote); if (err) goto out_put_lowerpath; @@ -958,7 +1022,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (!ufs->upper_mnt) sb->s_flags |= MS_RDONLY; - sb->s_d_op = &ovl_dentry_operations; + if (remote) + sb->s_d_op = &ovl_reval_dentry_operations; + else + sb->s_d_op = &ovl_dentry_operations; err = -ENOMEM; oe = ovl_alloc_entry(numlower); diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c index 8db932da4009..8ebd9a334085 100644 --- a/fs/proc_namespace.c +++ b/fs/proc_namespace.c @@ -17,7 +17,8 @@ static unsigned mounts_poll(struct file *file, poll_table *wait) { - struct proc_mounts *p = proc_mounts(file->private_data); + struct seq_file *m = file->private_data; + struct proc_mounts *p = m->private; struct mnt_namespace *ns = p->ns; unsigned res = POLLIN | POLLRDNORM; int 
event; @@ -25,8 +26,8 @@ static unsigned mounts_poll(struct file *file, poll_table *wait) poll_wait(file, &p->ns->poll, wait); event = ACCESS_ONCE(ns->event); - if (p->m.poll_event != event) { - p->m.poll_event = event; + if (m->poll_event != event) { + m->poll_event = event; res |= POLLERR | POLLPRI; } @@ -92,7 +93,7 @@ static void show_type(struct seq_file *m, struct super_block *sb) static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt) { - struct proc_mounts *p = proc_mounts(m); + struct proc_mounts *p = m->private; struct mount *r = real_mount(mnt); int err = 0; struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; @@ -126,7 +127,7 @@ out: static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt) { - struct proc_mounts *p = proc_mounts(m); + struct proc_mounts *p = m->private; struct mount *r = real_mount(mnt); struct super_block *sb = mnt->mnt_sb; struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; @@ -186,7 +187,7 @@ out: static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt) { - struct proc_mounts *p = proc_mounts(m); + struct proc_mounts *p = m->private; struct mount *r = real_mount(mnt); struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt }; struct super_block *sb = mnt_path.dentry->d_sb; @@ -236,6 +237,7 @@ static int mounts_open_common(struct inode *inode, struct file *file, struct mnt_namespace *ns = NULL; struct path root; struct proc_mounts *p; + struct seq_file *m; int ret = -EINVAL; if (!task) @@ -260,26 +262,21 @@ static int mounts_open_common(struct inode *inode, struct file *file, task_unlock(task); put_task_struct(task); - ret = -ENOMEM; - p = kmalloc(sizeof(struct proc_mounts), GFP_KERNEL); - if (!p) + ret = seq_open_private(file, &mounts_op, sizeof(struct proc_mounts)); + if (ret) goto err_put_path; - file->private_data = &p->m; - ret = seq_open(file, &mounts_op); - if (ret) - goto err_free; + m = file->private_data; + m->poll_event = ns->event; + p = m->private; p->ns = ns; p->root = root; - p->m.poll_event = ns->event; p->show = show; p->cached_event = ~0ULL; return 0; - err_free: - kfree(p); err_put_path: path_put(&root); err_put_ns: @@ -290,10 +287,11 @@ static int mounts_open_common(struct inode *inode, struct file *file, static int mounts_release(struct inode *inode, struct file *file) { - struct proc_mounts *p = proc_mounts(file->private_data); + struct seq_file *m = file->private_data; + struct proc_mounts *p = m->private; path_put(&p->root); put_mnt_ns(p->ns); - return seq_release(inode, file); + return seq_release_private(inode, file); } static int mounts_open(struct inode *inode, struct file *file) diff --git a/fs/seq_file.c b/fs/seq_file.c index 52b492721603..1d9c1cbd4d0b 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -48,18 +48,21 @@ static void *seq_buf_alloc(unsigned long size) * ERR_PTR(error). In the end of sequence they return %NULL. ->show() * returns 0 in case of success and negative number in case of error. * Returning SEQ_SKIP means "discard this element and move on". + * Note: seq_open() will allocate a struct seq_file and store its + * pointer in @file->private_data. This pointer should not be modified. 
*/ int seq_open(struct file *file, const struct seq_operations *op) { - struct seq_file *p = file->private_data; + struct seq_file *p; + + WARN_ON(file->private_data); + + p = kzalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return -ENOMEM; + + file->private_data = p; - if (!p) { - p = kmalloc(sizeof(*p), GFP_KERNEL); - if (!p) - return -ENOMEM; - file->private_data = p; - } - memset(p, 0, sizeof(*p)); mutex_init(&p->lock); p->op = op; #ifdef CONFIG_USER_NS diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index b43276f339ef..83061cac719b 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -420,7 +420,7 @@ static inline bool is_acpi_node(struct fwnode_handle *fwnode) return fwnode && fwnode->type == FWNODE_ACPI; } -static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode) +static inline struct acpi_device *to_acpi_node(struct fwnode_handle *fwnode) { return is_acpi_node(fwnode) ? container_of(fwnode, struct acpi_device, fwnode) : NULL; diff --git a/include/acpi/video.h b/include/acpi/video.h index a7d7f1043e9c..e840b294c6f5 100644 --- a/include/acpi/video.h +++ b/include/acpi/video.h @@ -43,7 +43,7 @@ static inline enum acpi_backlight_type acpi_video_get_backlight_type(void) { return acpi_backlight_vendor; } -static void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type) +static inline void acpi_video_set_dmi_backlight_type(enum acpi_backlight_type type) { } #endif diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h index 19a240446fca..e42495ad8136 100644 --- a/include/drm/drm_mem_util.h +++ b/include/drm/drm_mem_util.h @@ -56,10 +56,7 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size) static __inline void drm_free_large(void *ptr) { - if (!is_vmalloc_addr(ptr)) - return kfree(ptr); - - vfree(ptr); + kvfree(ptr); } #endif diff --git a/include/dt-bindings/clock/bcm-cygnus.h b/include/dt-bindings/clock/bcm-cygnus.h new file mode 100644 index 000000000000..32fbc475087a --- /dev/null +++ b/include/dt-bindings/clock/bcm-cygnus.h @@ -0,0 +1,68 @@ +/* + * BSD LICENSE + * + * Copyright(c) 2014 Broadcom Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _CLOCK_BCM_CYGNUS_H +#define _CLOCK_BCM_CYGNUS_H + +/* GENPLL clock ID */ +#define BCM_CYGNUS_GENPLL 0 +#define BCM_CYGNUS_GENPLL_AXI21_CLK 1 +#define BCM_CYGNUS_GENPLL_250MHZ_CLK 2 +#define BCM_CYGNUS_GENPLL_IHOST_SYS_CLK 3 +#define BCM_CYGNUS_GENPLL_ENET_SW_CLK 4 +#define BCM_CYGNUS_GENPLL_AUDIO_125_CLK 5 +#define BCM_CYGNUS_GENPLL_CAN_CLK 6 + +/* LCPLL0 clock ID */ +#define BCM_CYGNUS_LCPLL0 0 +#define BCM_CYGNUS_LCPLL0_PCIE_PHY_REF_CLK 1 +#define BCM_CYGNUS_LCPLL0_DDR_PHY_CLK 2 +#define BCM_CYGNUS_LCPLL0_SDIO_CLK 3 +#define BCM_CYGNUS_LCPLL0_USB_PHY_REF_CLK 4 +#define BCM_CYGNUS_LCPLL0_SMART_CARD_CLK 5 +#define BCM_CYGNUS_LCPLL0_CH5_UNUSED 6 + +/* MIPI PLL clock ID */ +#define BCM_CYGNUS_MIPIPLL 0 +#define BCM_CYGNUS_MIPIPLL_CH0_UNUSED 1 +#define BCM_CYGNUS_MIPIPLL_CH1_LCD 2 +#define BCM_CYGNUS_MIPIPLL_CH2_V3D 3 +#define BCM_CYGNUS_MIPIPLL_CH3_UNUSED 4 +#define BCM_CYGNUS_MIPIPLL_CH4_UNUSED 5 +#define BCM_CYGNUS_MIPIPLL_CH5_UNUSED 6 + +/* ASIU clock ID */ +#define BCM_CYGNUS_ASIU_KEYPAD_CLK 0 +#define BCM_CYGNUS_ASIU_ADC_CLK 1 +#define BCM_CYGNUS_ASIU_PWM_CLK 2 + +#endif /* _CLOCK_BCM_CYGNUS_H */ diff --git a/include/dt-bindings/clock/hi6220-clock.h b/include/dt-bindings/clock/hi6220-clock.h new file mode 100644 index 000000000000..70ee3833a7a0 --- /dev/null +++ b/include/dt-bindings/clock/hi6220-clock.h @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2015 Hisilicon Limited. + * + * Author: Bintian Wang <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_HI6220_H +#define __DT_BINDINGS_CLOCK_HI6220_H + +/* clk in Hi6220 AO (always on) controller */ +#define HI6220_NONE_CLOCK 0 + +/* fixed rate clocks */ +#define HI6220_REF32K 1 +#define HI6220_CLK_TCXO 2 +#define HI6220_MMC1_PAD 3 +#define HI6220_MMC2_PAD 4 +#define HI6220_MMC0_PAD 5 +#define HI6220_PLL_BBP 6 +#define HI6220_PLL_GPU 7 +#define HI6220_PLL1_DDR 8 +#define HI6220_PLL_SYS 9 +#define HI6220_PLL_SYS_MEDIA 10 +#define HI6220_DDR_SRC 11 +#define HI6220_PLL_MEDIA 12 +#define HI6220_PLL_DDR 13 + +/* fixed factor clocks */ +#define HI6220_300M 14 +#define HI6220_150M 15 +#define HI6220_PICOPHY_SRC 16 +#define HI6220_MMC0_SRC_SEL 17 +#define HI6220_MMC1_SRC_SEL 18 +#define HI6220_MMC2_SRC_SEL 19 +#define HI6220_VPU_CODEC 20 +#define HI6220_MMC0_SMP 21 +#define HI6220_MMC1_SMP 22 +#define HI6220_MMC2_SMP 23 + +/* gate clocks */ +#define HI6220_WDT0_PCLK 24 +#define HI6220_WDT1_PCLK 25 +#define HI6220_WDT2_PCLK 26 +#define HI6220_TIMER0_PCLK 27 +#define HI6220_TIMER1_PCLK 28 +#define HI6220_TIMER2_PCLK 29 +#define HI6220_TIMER3_PCLK 30 +#define HI6220_TIMER4_PCLK 31 +#define HI6220_TIMER5_PCLK 32 +#define HI6220_TIMER6_PCLK 33 +#define HI6220_TIMER7_PCLK 34 +#define HI6220_TIMER8_PCLK 35 +#define HI6220_UART0_PCLK 36 + +#define HI6220_AO_NR_CLKS 37 + +/* clk in Hi6220 systrl */ +/* gate clock */ +#define HI6220_MMC0_CLK 1 +#define HI6220_MMC0_CIUCLK 2 +#define HI6220_MMC1_CLK 3 +#define HI6220_MMC1_CIUCLK 4 +#define HI6220_MMC2_CLK 5 +#define HI6220_MMC2_CIUCLK 6 +#define HI6220_USBOTG_HCLK 7 +#define HI6220_CLK_PICOPHY 8 +#define HI6220_HIFI 9 +#define HI6220_DACODEC_PCLK 10 +#define HI6220_EDMAC_ACLK 11 +#define HI6220_CS_ATB 12 +#define HI6220_I2C0_CLK 13 +#define HI6220_I2C1_CLK 14 +#define HI6220_I2C2_CLK 15 +#define HI6220_I2C3_CLK 16 +#define HI6220_UART1_PCLK 17 +#define HI6220_UART2_PCLK 18 +#define HI6220_UART3_PCLK 19 +#define HI6220_UART4_PCLK 20 +#define HI6220_SPI_CLK 21 +#define HI6220_TSENSOR_CLK 22 +#define HI6220_MMU_CLK 23 +#define HI6220_HIFI_SEL 24 +#define HI6220_MMC0_SYSPLL 25 +#define HI6220_MMC1_SYSPLL 26 +#define HI6220_MMC2_SYSPLL 27 +#define HI6220_MMC0_SEL 28 +#define HI6220_MMC1_SEL 29 +#define HI6220_BBPPLL_SEL 30 +#define HI6220_MEDIA_PLL_SRC 31 +#define HI6220_MMC2_SEL 32 +#define HI6220_CS_ATB_SYSPLL 33 + +/* mux clocks */ +#define HI6220_MMC0_SRC 34 +#define HI6220_MMC0_SMP_IN 35 +#define HI6220_MMC1_SRC 36 +#define HI6220_MMC1_SMP_IN 37 +#define HI6220_MMC2_SRC 38 +#define HI6220_MMC2_SMP_IN 39 +#define HI6220_HIFI_SRC 40 +#define HI6220_UART1_SRC 41 +#define HI6220_UART2_SRC 42 +#define HI6220_UART3_SRC 43 +#define HI6220_UART4_SRC 44 +#define HI6220_MMC0_MUX0 45 +#define HI6220_MMC1_MUX0 46 +#define HI6220_MMC2_MUX0 47 +#define HI6220_MMC0_MUX1 48 +#define HI6220_MMC1_MUX1 49 +#define HI6220_MMC2_MUX1 50 + +/* divider clocks */ +#define HI6220_CLK_BUS 51 +#define HI6220_MMC0_DIV 52 +#define HI6220_MMC1_DIV 53 +#define HI6220_MMC2_DIV 54 +#define HI6220_HIFI_DIV 55 +#define HI6220_BBPPLL0_DIV 56 +#define HI6220_CS_DAPB 57 +#define HI6220_CS_ATB_DIV 58 + +#define HI6220_SYS_NR_CLKS 59 + +/* clk in Hi6220 media controller */ +/* gate clocks */ +#define HI6220_DSI_PCLK 1 +#define HI6220_G3D_PCLK 2 +#define HI6220_ACLK_CODEC_VPU 3 +#define HI6220_ISP_SCLK 4 +#define HI6220_ADE_CORE 5 +#define HI6220_MED_MMU 6 +#define HI6220_CFG_CSI4PHY 7 +#define HI6220_CFG_CSI2PHY 8 +#define HI6220_ISP_SCLK_GATE 9 +#define HI6220_ISP_SCLK_GATE1 10 +#define HI6220_ADE_CORE_GATE 11 +#define HI6220_CODEC_VPU_GATE 12 +#define 
HI6220_MED_SYSPLL 13 + +/* mux clocks */ +#define HI6220_1440_1200 14 +#define HI6220_1000_1200 15 +#define HI6220_1000_1440 16 + +/* divider clocks */ +#define HI6220_CODEC_JPEG 17 +#define HI6220_ISP_SCLK_SRC 18 +#define HI6220_ISP_SCLK1 19 +#define HI6220_ADE_CORE_SRC 20 +#define HI6220_ADE_PIX_SRC 21 +#define HI6220_G3D_CLK 22 +#define HI6220_CODEC_VPU_SRC 23 + +#define HI6220_MEDIA_NR_CLKS 24 + +/* clk in Hi6220 power controller */ +/* gate clocks */ +#define HI6220_PLL_GPU_GATE 1 +#define HI6220_PLL1_DDR_GATE 2 +#define HI6220_PLL_DDR_GATE 3 +#define HI6220_PLL_MEDIA_GATE 4 +#define HI6220_PLL0_BBP_GATE 5 + +/* divider clocks */ +#define HI6220_DDRC_SRC 6 +#define HI6220_DDRC_AXI1 7 + +#define HI6220_POWER_NR_CLKS 8 +#endif diff --git a/include/dt-bindings/clock/lpc18xx-ccu.h b/include/dt-bindings/clock/lpc18xx-ccu.h new file mode 100644 index 000000000000..bbfe00b6ab7d --- /dev/null +++ b/include/dt-bindings/clock/lpc18xx-ccu.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2015 Joachim Eastwood <[email protected]> + * + * This code is released using a dual license strategy: BSD/GPL + * You can choose the licence that better fits your requirements. + * + * Released under the terms of 3-clause BSD License + * Released under the terms of GNU General Public License Version 2.0 + * + */ + +/* Clock Control Unit 1 (CCU1) clock offsets */ +#define CLK_APB3_BUS 0x100 +#define CLK_APB3_I2C1 0x108 +#define CLK_APB3_DAC 0x110 +#define CLK_APB3_ADC0 0x118 +#define CLK_APB3_ADC1 0x120 +#define CLK_APB3_CAN0 0x128 +#define CLK_APB1_BUS 0x200 +#define CLK_APB1_MOTOCON_PWM 0x208 +#define CLK_APB1_I2C0 0x210 +#define CLK_APB1_I2S 0x218 +#define CLK_APB1_CAN1 0x220 +#define CLK_SPIFI 0x300 +#define CLK_CPU_BUS 0x400 +#define CLK_CPU_SPIFI 0x408 +#define CLK_CPU_GPIO 0x410 +#define CLK_CPU_LCD 0x418 +#define CLK_CPU_ETHERNET 0x420 +#define CLK_CPU_USB0 0x428 +#define CLK_CPU_EMC 0x430 +#define CLK_CPU_SDIO 0x438 +#define CLK_CPU_DMA 0x440 +#define CLK_CPU_CORE 0x448 +#define CLK_CPU_SCT 0x468 +#define CLK_CPU_USB1 0x470 +#define CLK_CPU_EMCDIV 0x478 +#define CLK_CPU_FLASHA 0x480 +#define CLK_CPU_FLASHB 0x488 +#define CLK_CPU_M0APP 0x490 +#define CLK_CPU_ADCHS 0x498 +#define CLK_CPU_EEPROM 0x4a0 +#define CLK_CPU_WWDT 0x500 +#define CLK_CPU_UART0 0x508 +#define CLK_CPU_UART1 0x510 +#define CLK_CPU_SSP0 0x518 +#define CLK_CPU_TIMER0 0x520 +#define CLK_CPU_TIMER1 0x528 +#define CLK_CPU_SCU 0x530 +#define CLK_CPU_CREG 0x538 +#define CLK_CPU_RITIMER 0x600 +#define CLK_CPU_UART2 0x608 +#define CLK_CPU_UART3 0x610 +#define CLK_CPU_TIMER2 0x618 +#define CLK_CPU_TIMER3 0x620 +#define CLK_CPU_SSP1 0x628 +#define CLK_CPU_QEI 0x630 +#define CLK_PERIPH_BUS 0x700 +#define CLK_PERIPH_CORE 0x710 +#define CLK_PERIPH_SGPIO 0x718 +#define CLK_USB0 0x800 +#define CLK_USB1 0x900 +#define CLK_SPI 0xA00 +#define CLK_ADCHS 0xB00 + +/* Clock Control Unit 2 (CCU2) clock offsets */ +#define CLK_AUDIO 0x100 +#define CLK_APB2_UART3 0x200 +#define CLK_APB2_UART2 0x300 +#define CLK_APB0_UART1 0x400 +#define CLK_APB0_UART0 0x500 +#define CLK_APB2_SSP1 0x600 +#define CLK_APB0_SSP0 0x700 +#define CLK_SDIO 0x800 diff --git a/include/dt-bindings/clock/lpc18xx-cgu.h b/include/dt-bindings/clock/lpc18xx-cgu.h new file mode 100644 index 000000000000..6e57c6d2ca66 --- /dev/null +++ b/include/dt-bindings/clock/lpc18xx-cgu.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2015 Joachim Eastwood <[email protected]> + * + * This code is released using a dual license strategy: BSD/GPL + * You can choose the licence that better fits your requirements. 
+ * + * Released under the terms of 3-clause BSD License + * Released under the terms of GNU General Public License Version 2.0 + * + */ + +/* LPC18xx/43xx base clock ids */ +#define BASE_SAFE_CLK 0 +#define BASE_USB0_CLK 1 +#define BASE_PERIPH_CLK 2 +#define BASE_USB1_CLK 3 +#define BASE_CPU_CLK 4 +#define BASE_SPIFI_CLK 5 +#define BASE_SPI_CLK 6 +#define BASE_PHY_RX_CLK 7 +#define BASE_PHY_TX_CLK 8 +#define BASE_APB1_CLK 9 +#define BASE_APB3_CLK 10 +#define BASE_LCD_CLK 11 +#define BASE_ADCHS_CLK 12 +#define BASE_SDIO_CLK 13 +#define BASE_SSP0_CLK 14 +#define BASE_SSP1_CLK 15 +#define BASE_UART0_CLK 16 +#define BASE_UART1_CLK 17 +#define BASE_UART2_CLK 18 +#define BASE_UART3_CLK 19 +#define BASE_OUT_CLK 20 +#define BASE_RES1_CLK 21 +#define BASE_RES2_CLK 22 +#define BASE_RES3_CLK 23 +#define BASE_RES4_CLK 24 +#define BASE_AUDIO_CLK 25 +#define BASE_CGU_OUT0_CLK 26 +#define BASE_CGU_OUT1_CLK 27 +#define BASE_CLK_MAX (BASE_CGU_OUT1_CLK + 1) diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h index 591f7fba89e2..7a510384a82a 100644 --- a/include/dt-bindings/clock/marvell,mmp2.h +++ b/include/dt-bindings/clock/marvell,mmp2.h @@ -48,6 +48,7 @@ #define MMP2_CLK_SSP1 78 #define MMP2_CLK_SSP2 79 #define MMP2_CLK_SSP3 80 +#define MMP2_CLK_TIMER 81 /* axi periphrals */ #define MMP2_CLK_SDH0 101 diff --git a/include/dt-bindings/clock/marvell,pxa168.h b/include/dt-bindings/clock/marvell,pxa168.h index 79630b9d74b8..3e45bdfe1aa4 100644 --- a/include/dt-bindings/clock/marvell,pxa168.h +++ b/include/dt-bindings/clock/marvell,pxa168.h @@ -18,7 +18,9 @@ #define PXA168_CLK_PLL1_13_1_5 18 #define PXA168_CLK_PLL1_2_1_5 19 #define PXA168_CLK_PLL1_3_16 20 +#define PXA168_CLK_PLL1_192 21 #define PXA168_CLK_UART_PLL 27 +#define PXA168_CLK_USB_PLL 28 /* apb periphrals */ #define PXA168_CLK_TWSI0 60 @@ -40,6 +42,7 @@ #define PXA168_CLK_SSP2 76 #define PXA168_CLK_SSP3 77 #define PXA168_CLK_SSP4 78 +#define PXA168_CLK_TIMER 79 /* axi periphrals */ #define PXA168_CLK_DFC 100 diff --git a/include/dt-bindings/clock/marvell,pxa1928.h b/include/dt-bindings/clock/marvell,pxa1928.h new file mode 100644 index 000000000000..d4f2e18919ff --- /dev/null +++ b/include/dt-bindings/clock/marvell,pxa1928.h @@ -0,0 +1,57 @@ +#ifndef __DTS_MARVELL_PXA1928_CLOCK_H +#define __DTS_MARVELL_PXA1928_CLOCK_H + +/* + * Clock ID values here correspond to the control register offset/4. 
+ */ + +/* apb peripherals */ +#define PXA1928_CLK_RTC 0x00 +#define PXA1928_CLK_TWSI0 0x01 +#define PXA1928_CLK_TWSI1 0x02 +#define PXA1928_CLK_TWSI2 0x03 +#define PXA1928_CLK_TWSI3 0x04 +#define PXA1928_CLK_OWIRE 0x05 +#define PXA1928_CLK_KPC 0x06 +#define PXA1928_CLK_TB_ROTARY 0x07 +#define PXA1928_CLK_SW_JTAG 0x08 +#define PXA1928_CLK_TIMER1 0x09 +#define PXA1928_CLK_UART0 0x0b +#define PXA1928_CLK_UART1 0x0c +#define PXA1928_CLK_UART2 0x0d +#define PXA1928_CLK_GPIO 0x0e +#define PXA1928_CLK_PWM0 0x0f +#define PXA1928_CLK_PWM1 0x10 +#define PXA1928_CLK_PWM2 0x11 +#define PXA1928_CLK_PWM3 0x12 +#define PXA1928_CLK_SSP0 0x13 +#define PXA1928_CLK_SSP1 0x14 +#define PXA1928_CLK_SSP2 0x15 + +#define PXA1928_CLK_TWSI4 0x1f +#define PXA1928_CLK_TWSI5 0x20 +#define PXA1928_CLK_UART3 0x22 +#define PXA1928_CLK_THSENS_GLOB 0x24 +#define PXA1928_CLK_THSENS_CPU 0x26 +#define PXA1928_CLK_THSENS_VPU 0x27 +#define PXA1928_CLK_THSENS_GC 0x28 +#define PXA1928_APBC_NR_CLKS 0x30 + + +/* axi peripherals */ +#define PXA1928_CLK_SDH0 0x15 +#define PXA1928_CLK_SDH1 0x16 +#define PXA1928_CLK_USB 0x17 +#define PXA1928_CLK_NAND 0x18 +#define PXA1928_CLK_DMA 0x19 + +#define PXA1928_CLK_SDH2 0x3a +#define PXA1928_CLK_SDH3 0x3b +#define PXA1928_CLK_HSIC 0x3e +#define PXA1928_CLK_SDH4 0x57 +#define PXA1928_CLK_GC3D 0x5d +#define PXA1928_CLK_GC2D 0x5f + +#define PXA1928_APMU_NR_CLKS 0x60 + +#endif diff --git a/include/dt-bindings/clock/marvell,pxa910.h b/include/dt-bindings/clock/marvell,pxa910.h index 719cffb2bea2..135082a0b62f 100644 --- a/include/dt-bindings/clock/marvell,pxa910.h +++ b/include/dt-bindings/clock/marvell,pxa910.h @@ -18,7 +18,9 @@ #define PXA910_CLK_PLL1_13_1_5 18 #define PXA910_CLK_PLL1_2_1_5 19 #define PXA910_CLK_PLL1_3_16 20 +#define PXA910_CLK_PLL1_192 21 #define PXA910_CLK_UART_PLL 27 +#define PXA910_CLK_USB_PLL 28 /* apb periphrals */ #define PXA910_CLK_TWSI0 60 @@ -37,6 +39,8 @@ #define PXA910_CLK_UART2 73 #define PXA910_CLK_SSP0 74 #define PXA910_CLK_SSP1 75 +#define PXA910_CLK_TIMER0 76 +#define PXA910_CLK_TIMER1 77 /* axi periphrals */ #define PXA910_CLK_DFC 100 diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h new file mode 100644 index 000000000000..bd2720d58e0c --- /dev/null +++ b/include/dt-bindings/clock/meson8b-clkc.h @@ -0,0 +1,25 @@ +/* + * Meson8b clock tree IDs + */ + +#ifndef __MESON8B_CLKC_H +#define __MESON8B_CLKC_H + +#define CLKID_UNUSED 0 +#define CLKID_XTAL 1 +#define CLKID_PLL_FIXED 2 +#define CLKID_PLL_VID 3 +#define CLKID_PLL_SYS 4 +#define CLKID_FCLK_DIV2 5 +#define CLKID_FCLK_DIV3 6 +#define CLKID_FCLK_DIV4 7 +#define CLKID_FCLK_DIV5 8 +#define CLKID_FCLK_DIV7 9 +#define CLKID_CLK81 10 +#define CLKID_MALI 11 +#define CLKID_CPUCLK 12 +#define CLKID_ZERO 13 + +#define CLK_NR_CLKS (CLKID_ZERO + 1) + +#endif /* __MESON8B_CLKC_H */ diff --git a/include/dt-bindings/clock/mt8135-clk.h b/include/dt-bindings/clock/mt8135-clk.h new file mode 100644 index 000000000000..6dac6c091dd2 --- /dev/null +++ b/include/dt-bindings/clock/mt8135-clk.h @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: James Liao <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_MT8135_H +#define _DT_BINDINGS_CLK_MT8135_H + +/* TOPCKGEN */ + +#define CLK_TOP_DSI0_LNTC_DSICLK 1 +#define CLK_TOP_HDMITX_CLKDIG_CTS 2 +#define CLK_TOP_CLKPH_MCK 3 +#define CLK_TOP_CPUM_TCK_IN 4 +#define CLK_TOP_MAINPLL_806M 5 +#define CLK_TOP_MAINPLL_537P3M 6 +#define CLK_TOP_MAINPLL_322P4M 7 +#define CLK_TOP_MAINPLL_230P3M 8 +#define CLK_TOP_UNIVPLL_624M 9 +#define CLK_TOP_UNIVPLL_416M 10 +#define CLK_TOP_UNIVPLL_249P6M 11 +#define CLK_TOP_UNIVPLL_178P3M 12 +#define CLK_TOP_UNIVPLL_48M 13 +#define CLK_TOP_MMPLL_D2 14 +#define CLK_TOP_MMPLL_D3 15 +#define CLK_TOP_MMPLL_D5 16 +#define CLK_TOP_MMPLL_D7 17 +#define CLK_TOP_MMPLL_D4 18 +#define CLK_TOP_MMPLL_D6 19 +#define CLK_TOP_SYSPLL_D2 20 +#define CLK_TOP_SYSPLL_D4 21 +#define CLK_TOP_SYSPLL_D6 22 +#define CLK_TOP_SYSPLL_D8 23 +#define CLK_TOP_SYSPLL_D10 24 +#define CLK_TOP_SYSPLL_D12 25 +#define CLK_TOP_SYSPLL_D16 26 +#define CLK_TOP_SYSPLL_D24 27 +#define CLK_TOP_SYSPLL_D3 28 +#define CLK_TOP_SYSPLL_D2P5 29 +#define CLK_TOP_SYSPLL_D5 30 +#define CLK_TOP_SYSPLL_D3P5 31 +#define CLK_TOP_UNIVPLL1_D2 32 +#define CLK_TOP_UNIVPLL1_D4 33 +#define CLK_TOP_UNIVPLL1_D6 34 +#define CLK_TOP_UNIVPLL1_D8 35 +#define CLK_TOP_UNIVPLL1_D10 36 +#define CLK_TOP_UNIVPLL2_D2 37 +#define CLK_TOP_UNIVPLL2_D4 38 +#define CLK_TOP_UNIVPLL2_D6 39 +#define CLK_TOP_UNIVPLL2_D8 40 +#define CLK_TOP_UNIVPLL_D3 41 +#define CLK_TOP_UNIVPLL_D5 42 +#define CLK_TOP_UNIVPLL_D7 43 +#define CLK_TOP_UNIVPLL_D10 44 +#define CLK_TOP_UNIVPLL_D26 45 +#define CLK_TOP_APLL 46 +#define CLK_TOP_APLL_D4 47 +#define CLK_TOP_APLL_D8 48 +#define CLK_TOP_APLL_D16 49 +#define CLK_TOP_APLL_D24 50 +#define CLK_TOP_LVDSPLL_D2 51 +#define CLK_TOP_LVDSPLL_D4 52 +#define CLK_TOP_LVDSPLL_D8 53 +#define CLK_TOP_LVDSTX_CLKDIG_CT 54 +#define CLK_TOP_VPLL_DPIX 55 +#define CLK_TOP_TVHDMI_H 56 +#define CLK_TOP_HDMITX_CLKDIG_D2 57 +#define CLK_TOP_HDMITX_CLKDIG_D3 58 +#define CLK_TOP_TVHDMI_D2 59 +#define CLK_TOP_TVHDMI_D4 60 +#define CLK_TOP_MEMPLL_MCK_D4 61 +#define CLK_TOP_AXI_SEL 62 +#define CLK_TOP_SMI_SEL 63 +#define CLK_TOP_MFG_SEL 64 +#define CLK_TOP_IRDA_SEL 65 +#define CLK_TOP_CAM_SEL 66 +#define CLK_TOP_AUD_INTBUS_SEL 67 +#define CLK_TOP_JPG_SEL 68 +#define CLK_TOP_DISP_SEL 69 +#define CLK_TOP_MSDC30_1_SEL 70 +#define CLK_TOP_MSDC30_2_SEL 71 +#define CLK_TOP_MSDC30_3_SEL 72 +#define CLK_TOP_MSDC30_4_SEL 73 +#define CLK_TOP_USB20_SEL 74 +#define CLK_TOP_VENC_SEL 75 +#define CLK_TOP_SPI_SEL 76 +#define CLK_TOP_UART_SEL 77 +#define CLK_TOP_MEM_SEL 78 +#define CLK_TOP_CAMTG_SEL 79 +#define CLK_TOP_AUDIO_SEL 80 +#define CLK_TOP_FIX_SEL 81 +#define CLK_TOP_VDEC_SEL 82 +#define CLK_TOP_DDRPHYCFG_SEL 83 +#define CLK_TOP_DPILVDS_SEL 84 +#define CLK_TOP_PMICSPI_SEL 85 +#define CLK_TOP_MSDC30_0_SEL 86 +#define CLK_TOP_SMI_MFG_AS_SEL 87 +#define CLK_TOP_GCPU_SEL 88 +#define CLK_TOP_DPI1_SEL 89 +#define CLK_TOP_CCI_SEL 90 +#define CLK_TOP_APLL_SEL 91 +#define CLK_TOP_HDMIPLL_SEL 92 +#define CLK_TOP_NR_CLK 93 + +/* APMIXED_SYS */ + +#define CLK_APMIXED_ARMPLL1 1 +#define CLK_APMIXED_ARMPLL2 2 +#define CLK_APMIXED_MAINPLL 3 +#define CLK_APMIXED_UNIVPLL 4 +#define CLK_APMIXED_MMPLL 5 +#define CLK_APMIXED_MSDCPLL 6 +#define CLK_APMIXED_TVDPLL 7 +#define CLK_APMIXED_LVDSPLL 8 +#define CLK_APMIXED_AUDPLL 9 +#define CLK_APMIXED_VDECPLL 10 +#define CLK_APMIXED_NR_CLK 11 + +/* INFRA_SYS */ + +#define CLK_INFRA_PMIC_WRAP 1 +#define CLK_INFRA_PMICSPI 2 +#define CLK_INFRA_CCIF1_AP_CTRL 3 +#define CLK_INFRA_CCIF0_AP_CTRL 4 
+#define CLK_INFRA_KP 5 +#define CLK_INFRA_CPUM 6 +#define CLK_INFRA_M4U 7 +#define CLK_INFRA_MFGAXI 8 +#define CLK_INFRA_DEVAPC 9 +#define CLK_INFRA_AUDIO 10 +#define CLK_INFRA_MFG_BUS 11 +#define CLK_INFRA_SMI 12 +#define CLK_INFRA_DBGCLK 13 +#define CLK_INFRA_NR_CLK 14 + +/* PERI_SYS */ + +#define CLK_PERI_I2C5 1 +#define CLK_PERI_I2C4 2 +#define CLK_PERI_I2C3 3 +#define CLK_PERI_I2C2 4 +#define CLK_PERI_I2C1 5 +#define CLK_PERI_I2C0 6 +#define CLK_PERI_UART3 7 +#define CLK_PERI_UART2 8 +#define CLK_PERI_UART1 9 +#define CLK_PERI_UART0 10 +#define CLK_PERI_IRDA 11 +#define CLK_PERI_NLI 12 +#define CLK_PERI_MD_HIF 13 +#define CLK_PERI_AP_HIF 14 +#define CLK_PERI_MSDC30_3 15 +#define CLK_PERI_MSDC30_2 16 +#define CLK_PERI_MSDC30_1 17 +#define CLK_PERI_MSDC20_2 18 +#define CLK_PERI_MSDC20_1 19 +#define CLK_PERI_AP_DMA 20 +#define CLK_PERI_USB1 21 +#define CLK_PERI_USB0 22 +#define CLK_PERI_PWM 23 +#define CLK_PERI_PWM7 24 +#define CLK_PERI_PWM6 25 +#define CLK_PERI_PWM5 26 +#define CLK_PERI_PWM4 27 +#define CLK_PERI_PWM3 28 +#define CLK_PERI_PWM2 29 +#define CLK_PERI_PWM1 30 +#define CLK_PERI_THERM 31 +#define CLK_PERI_NFI 32 +#define CLK_PERI_USBSLV 33 +#define CLK_PERI_USB1_MCU 34 +#define CLK_PERI_USB0_MCU 35 +#define CLK_PERI_GCPU 36 +#define CLK_PERI_FHCTL 37 +#define CLK_PERI_SPI1 38 +#define CLK_PERI_AUXADC 39 +#define CLK_PERI_PERI_PWRAP 40 +#define CLK_PERI_I2C6 41 +#define CLK_PERI_UART0_SEL 42 +#define CLK_PERI_UART1_SEL 43 +#define CLK_PERI_UART2_SEL 44 +#define CLK_PERI_UART3_SEL 45 +#define CLK_PERI_NR_CLK 46 + +#endif /* _DT_BINDINGS_CLK_MT8135_H */ diff --git a/include/dt-bindings/clock/mt8173-clk.h b/include/dt-bindings/clock/mt8173-clk.h new file mode 100644 index 000000000000..4ad76ed882ad --- /dev/null +++ b/include/dt-bindings/clock/mt8173-clk.h @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: James Liao <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_MT8173_H +#define _DT_BINDINGS_CLK_MT8173_H + +/* TOPCKGEN */ + +#define CLK_TOP_CLKPH_MCK_O 1 +#define CLK_TOP_DPI 2 +#define CLK_TOP_USB_SYSPLL_125M 3 +#define CLK_TOP_HDMITX_DIG_CTS 4 +#define CLK_TOP_ARMCA7PLL_754M 5 +#define CLK_TOP_ARMCA7PLL_502M 6 +#define CLK_TOP_MAIN_H546M 7 +#define CLK_TOP_MAIN_H364M 8 +#define CLK_TOP_MAIN_H218P4M 9 +#define CLK_TOP_MAIN_H156M 10 +#define CLK_TOP_TVDPLL_445P5M 11 +#define CLK_TOP_TVDPLL_594M 12 +#define CLK_TOP_UNIV_624M 13 +#define CLK_TOP_UNIV_416M 14 +#define CLK_TOP_UNIV_249P6M 15 +#define CLK_TOP_UNIV_178P3M 16 +#define CLK_TOP_UNIV_48M 17 +#define CLK_TOP_CLKRTC_EXT 18 +#define CLK_TOP_CLKRTC_INT 19 +#define CLK_TOP_FPC 20 +#define CLK_TOP_HDMITXPLL_D2 21 +#define CLK_TOP_HDMITXPLL_D3 22 +#define CLK_TOP_ARMCA7PLL_D2 23 +#define CLK_TOP_ARMCA7PLL_D3 24 +#define CLK_TOP_APLL1 25 +#define CLK_TOP_APLL2 26 +#define CLK_TOP_DMPLL 27 +#define CLK_TOP_DMPLL_D2 28 +#define CLK_TOP_DMPLL_D4 29 +#define CLK_TOP_DMPLL_D8 30 +#define CLK_TOP_DMPLL_D16 31 +#define CLK_TOP_LVDSPLL_D2 32 +#define CLK_TOP_LVDSPLL_D4 33 +#define CLK_TOP_LVDSPLL_D8 34 +#define CLK_TOP_MMPLL 35 +#define CLK_TOP_MMPLL_D2 36 +#define CLK_TOP_MSDCPLL 37 +#define CLK_TOP_MSDCPLL_D2 38 +#define CLK_TOP_MSDCPLL_D4 39 +#define CLK_TOP_MSDCPLL2 40 +#define CLK_TOP_MSDCPLL2_D2 41 +#define CLK_TOP_MSDCPLL2_D4 42 +#define CLK_TOP_SYSPLL_D2 43 +#define CLK_TOP_SYSPLL1_D2 44 +#define CLK_TOP_SYSPLL1_D4 45 +#define CLK_TOP_SYSPLL1_D8 46 +#define CLK_TOP_SYSPLL1_D16 47 +#define CLK_TOP_SYSPLL_D3 48 +#define CLK_TOP_SYSPLL2_D2 49 +#define CLK_TOP_SYSPLL2_D4 50 +#define CLK_TOP_SYSPLL_D5 51 +#define CLK_TOP_SYSPLL3_D2 52 +#define CLK_TOP_SYSPLL3_D4 53 +#define CLK_TOP_SYSPLL_D7 54 +#define CLK_TOP_SYSPLL4_D2 55 +#define CLK_TOP_SYSPLL4_D4 56 +#define CLK_TOP_TVDPLL 57 +#define CLK_TOP_TVDPLL_D2 58 +#define CLK_TOP_TVDPLL_D4 59 +#define CLK_TOP_TVDPLL_D8 60 +#define CLK_TOP_TVDPLL_D16 61 +#define CLK_TOP_UNIVPLL_D2 62 +#define CLK_TOP_UNIVPLL1_D2 63 +#define CLK_TOP_UNIVPLL1_D4 64 +#define CLK_TOP_UNIVPLL1_D8 65 +#define CLK_TOP_UNIVPLL_D3 66 +#define CLK_TOP_UNIVPLL2_D2 67 +#define CLK_TOP_UNIVPLL2_D4 68 +#define CLK_TOP_UNIVPLL2_D8 69 +#define CLK_TOP_UNIVPLL_D5 70 +#define CLK_TOP_UNIVPLL3_D2 71 +#define CLK_TOP_UNIVPLL3_D4 72 +#define CLK_TOP_UNIVPLL3_D8 73 +#define CLK_TOP_UNIVPLL_D7 74 +#define CLK_TOP_UNIVPLL_D26 75 +#define CLK_TOP_UNIVPLL_D52 76 +#define CLK_TOP_VCODECPLL 77 +#define CLK_TOP_VCODECPLL_370P5 78 +#define CLK_TOP_VENCPLL 79 +#define CLK_TOP_VENCPLL_D2 80 +#define CLK_TOP_VENCPLL_D4 81 +#define CLK_TOP_AXI_SEL 82 +#define CLK_TOP_MEM_SEL 83 +#define CLK_TOP_DDRPHYCFG_SEL 84 +#define CLK_TOP_MM_SEL 85 +#define CLK_TOP_PWM_SEL 86 +#define CLK_TOP_VDEC_SEL 87 +#define CLK_TOP_VENC_SEL 88 +#define CLK_TOP_MFG_SEL 89 +#define CLK_TOP_CAMTG_SEL 90 +#define CLK_TOP_UART_SEL 91 +#define CLK_TOP_SPI_SEL 92 +#define CLK_TOP_USB20_SEL 93 +#define CLK_TOP_USB30_SEL 94 +#define CLK_TOP_MSDC50_0_H_SEL 95 +#define CLK_TOP_MSDC50_0_SEL 96 +#define CLK_TOP_MSDC30_1_SEL 97 +#define CLK_TOP_MSDC30_2_SEL 98 +#define CLK_TOP_MSDC30_3_SEL 99 +#define CLK_TOP_AUDIO_SEL 100 +#define CLK_TOP_AUD_INTBUS_SEL 101 +#define CLK_TOP_PMICSPI_SEL 102 +#define CLK_TOP_SCP_SEL 103 +#define CLK_TOP_ATB_SEL 104 +#define CLK_TOP_VENC_LT_SEL 105 +#define CLK_TOP_DPI0_SEL 106 +#define CLK_TOP_IRDA_SEL 107 +#define CLK_TOP_CCI400_SEL 108 +#define CLK_TOP_AUD_1_SEL 109 +#define CLK_TOP_AUD_2_SEL 110 +#define CLK_TOP_MEM_MFG_IN_SEL 111 +#define CLK_TOP_AXI_MFG_IN_SEL 112 
+#define CLK_TOP_SCAM_SEL 113 +#define CLK_TOP_SPINFI_IFR_SEL 114 +#define CLK_TOP_HDMI_SEL 115 +#define CLK_TOP_DPILVDS_SEL 116 +#define CLK_TOP_MSDC50_2_H_SEL 117 +#define CLK_TOP_HDCP_SEL 118 +#define CLK_TOP_HDCP_24M_SEL 119 +#define CLK_TOP_RTC_SEL 120 +#define CLK_TOP_APLL1_DIV0 121 +#define CLK_TOP_APLL1_DIV1 122 +#define CLK_TOP_APLL1_DIV2 123 +#define CLK_TOP_APLL1_DIV3 124 +#define CLK_TOP_APLL1_DIV4 125 +#define CLK_TOP_APLL1_DIV5 126 +#define CLK_TOP_APLL2_DIV0 127 +#define CLK_TOP_APLL2_DIV1 128 +#define CLK_TOP_APLL2_DIV2 129 +#define CLK_TOP_APLL2_DIV3 130 +#define CLK_TOP_APLL2_DIV4 131 +#define CLK_TOP_APLL2_DIV5 132 +#define CLK_TOP_I2S0_M_SEL 133 +#define CLK_TOP_I2S1_M_SEL 134 +#define CLK_TOP_I2S2_M_SEL 135 +#define CLK_TOP_I2S3_M_SEL 136 +#define CLK_TOP_I2S3_B_SEL 137 +#define CLK_TOP_NR_CLK 138 + +/* APMIXED_SYS */ + +#define CLK_APMIXED_ARMCA15PLL 1 +#define CLK_APMIXED_ARMCA7PLL 2 +#define CLK_APMIXED_MAINPLL 3 +#define CLK_APMIXED_UNIVPLL 4 +#define CLK_APMIXED_MMPLL 5 +#define CLK_APMIXED_MSDCPLL 6 +#define CLK_APMIXED_VENCPLL 7 +#define CLK_APMIXED_TVDPLL 8 +#define CLK_APMIXED_MPLL 9 +#define CLK_APMIXED_VCODECPLL 10 +#define CLK_APMIXED_APLL1 11 +#define CLK_APMIXED_APLL2 12 +#define CLK_APMIXED_LVDSPLL 13 +#define CLK_APMIXED_MSDCPLL2 14 +#define CLK_APMIXED_NR_CLK 15 + +/* INFRA_SYS */ + +#define CLK_INFRA_DBGCLK 1 +#define CLK_INFRA_SMI 2 +#define CLK_INFRA_AUDIO 3 +#define CLK_INFRA_GCE 4 +#define CLK_INFRA_L2C_SRAM 5 +#define CLK_INFRA_M4U 6 +#define CLK_INFRA_CPUM 7 +#define CLK_INFRA_KP 8 +#define CLK_INFRA_CEC 9 +#define CLK_INFRA_PMICSPI 10 +#define CLK_INFRA_PMICWRAP 11 +#define CLK_INFRA_NR_CLK 12 + +/* PERI_SYS */ + +#define CLK_PERI_NFI 1 +#define CLK_PERI_THERM 2 +#define CLK_PERI_PWM1 3 +#define CLK_PERI_PWM2 4 +#define CLK_PERI_PWM3 5 +#define CLK_PERI_PWM4 6 +#define CLK_PERI_PWM5 7 +#define CLK_PERI_PWM6 8 +#define CLK_PERI_PWM7 9 +#define CLK_PERI_PWM 10 +#define CLK_PERI_USB0 11 +#define CLK_PERI_USB1 12 +#define CLK_PERI_AP_DMA 13 +#define CLK_PERI_MSDC30_0 14 +#define CLK_PERI_MSDC30_1 15 +#define CLK_PERI_MSDC30_2 16 +#define CLK_PERI_MSDC30_3 17 +#define CLK_PERI_NLI_ARB 18 +#define CLK_PERI_IRDA 19 +#define CLK_PERI_UART0 20 +#define CLK_PERI_UART1 21 +#define CLK_PERI_UART2 22 +#define CLK_PERI_UART3 23 +#define CLK_PERI_I2C0 24 +#define CLK_PERI_I2C1 25 +#define CLK_PERI_I2C2 26 +#define CLK_PERI_I2C3 27 +#define CLK_PERI_I2C4 28 +#define CLK_PERI_AUXADC 29 +#define CLK_PERI_SPI0 30 +#define CLK_PERI_I2C5 31 +#define CLK_PERI_NFIECC 32 +#define CLK_PERI_SPI 33 +#define CLK_PERI_IRRX 34 +#define CLK_PERI_I2C6 35 +#define CLK_PERI_UART0_SEL 36 +#define CLK_PERI_UART1_SEL 37 +#define CLK_PERI_UART2_SEL 38 +#define CLK_PERI_UART3_SEL 39 +#define CLK_PERI_NR_CLK 40 + +#endif /* _DT_BINDINGS_CLK_MT8173_H */ diff --git a/include/dt-bindings/reset-controller/mt8135-resets.h b/include/dt-bindings/reset-controller/mt8135-resets.h new file mode 100644 index 000000000000..1fb629508db2 --- /dev/null +++ b/include/dt-bindings/reset-controller/mt8135-resets.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Flora Fu, MediaTek + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8135 +#define _DT_BINDINGS_RESET_CONTROLLER_MT8135 + +/* INFRACFG resets */ +#define MT8135_INFRA_EMI_REG_RST 0 +#define MT8135_INFRA_DRAMC0_A0_RST 1 +#define MT8135_INFRA_CCIF0_RST 2 +#define MT8135_INFRA_APCIRQ_EINT_RST 3 +#define MT8135_INFRA_APXGPT_RST 4 +#define MT8135_INFRA_SCPSYS_RST 5 +#define MT8135_INFRA_CCIF1_RST 6 +#define MT8135_INFRA_PMIC_WRAP_RST 7 +#define MT8135_INFRA_KP_RST 8 +#define MT8135_INFRA_EMI_RST 32 +#define MT8135_INFRA_DRAMC0_RST 34 +#define MT8135_INFRA_SMI_RST 35 +#define MT8135_INFRA_M4U_RST 36 + +/* PERICFG resets */ +#define MT8135_PERI_UART0_SW_RST 0 +#define MT8135_PERI_UART1_SW_RST 1 +#define MT8135_PERI_UART2_SW_RST 2 +#define MT8135_PERI_UART3_SW_RST 3 +#define MT8135_PERI_IRDA_SW_RST 4 +#define MT8135_PERI_PTP_SW_RST 5 +#define MT8135_PERI_AP_HIF_SW_RST 6 +#define MT8135_PERI_GPCU_SW_RST 7 +#define MT8135_PERI_MD_HIF_SW_RST 8 +#define MT8135_PERI_NLI_SW_RST 9 +#define MT8135_PERI_AUXADC_SW_RST 10 +#define MT8135_PERI_DMA_SW_RST 11 +#define MT8135_PERI_NFI_SW_RST 14 +#define MT8135_PERI_PWM_SW_RST 15 +#define MT8135_PERI_THERM_SW_RST 16 +#define MT8135_PERI_MSDC0_SW_RST 17 +#define MT8135_PERI_MSDC1_SW_RST 18 +#define MT8135_PERI_MSDC2_SW_RST 19 +#define MT8135_PERI_MSDC3_SW_RST 20 +#define MT8135_PERI_I2C0_SW_RST 22 +#define MT8135_PERI_I2C1_SW_RST 23 +#define MT8135_PERI_I2C2_SW_RST 24 +#define MT8135_PERI_I2C3_SW_RST 25 +#define MT8135_PERI_I2C4_SW_RST 26 +#define MT8135_PERI_I2C5_SW_RST 27 +#define MT8135_PERI_I2C6_SW_RST 28 +#define MT8135_PERI_USB_SW_RST 29 +#define MT8135_PERI_SPI1_SW_RST 33 +#define MT8135_PERI_PWRAP_BRIDGE_SW_RST 34 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8135 */ diff --git a/include/dt-bindings/reset-controller/mt8173-resets.h b/include/dt-bindings/reset-controller/mt8173-resets.h new file mode 100644 index 000000000000..9464b37cf68c --- /dev/null +++ b/include/dt-bindings/reset-controller/mt8173-resets.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2014 MediaTek Inc. + * Author: Flora Fu, MediaTek + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT8173 +#define _DT_BINDINGS_RESET_CONTROLLER_MT8173 + +/* INFRACFG resets */ +#define MT8173_INFRA_EMI_REG_RST 0 +#define MT8173_INFRA_DRAMC0_A0_RST 1 +#define MT8173_INFRA_APCIRQ_EINT_RST 3 +#define MT8173_INFRA_APXGPT_RST 4 +#define MT8173_INFRA_SCPSYS_RST 5 +#define MT8173_INFRA_KP_RST 6 +#define MT8173_INFRA_PMIC_WRAP_RST 7 +#define MT8173_INFRA_MPIP_RST 8 +#define MT8173_INFRA_CEC_RST 9 +#define MT8173_INFRA_EMI_RST 32 +#define MT8173_INFRA_DRAMC0_RST 34 +#define MT8173_INFRA_APMIXEDSYS_RST 35 +#define MT8173_INFRA_MIPI_DSI_RST 36 +#define MT8173_INFRA_TRNG_RST 37 +#define MT8173_INFRA_SYSIRQ_RST 38 +#define MT8173_INFRA_MIPI_CSI_RST 39 +#define MT8173_INFRA_GCE_FAXI_RST 40 +#define MT8173_INFRA_MMIOMMURST 47 + + +/* PERICFG resets */ +#define MT8173_PERI_UART0_SW_RST 0 +#define MT8173_PERI_UART1_SW_RST 1 +#define MT8173_PERI_UART2_SW_RST 2 +#define MT8173_PERI_UART3_SW_RST 3 +#define MT8173_PERI_IRRX_SW_RST 4 +#define MT8173_PERI_PWM_SW_RST 8 +#define MT8173_PERI_AUXADC_SW_RST 10 +#define MT8173_PERI_DMA_SW_RST 11 +#define MT8173_PERI_I2C6_SW_RST 13 +#define MT8173_PERI_NFI_SW_RST 14 +#define MT8173_PERI_THERM_SW_RST 16 +#define MT8173_PERI_MSDC2_SW_RST 17 +#define MT8173_PERI_MSDC3_SW_RST 18 +#define MT8173_PERI_MSDC0_SW_RST 19 +#define MT8173_PERI_MSDC1_SW_RST 20 +#define MT8173_PERI_I2C0_SW_RST 22 +#define MT8173_PERI_I2C1_SW_RST 23 +#define MT8173_PERI_I2C2_SW_RST 24 +#define MT8173_PERI_I2C3_SW_RST 25 +#define MT8173_PERI_I2C4_SW_RST 26 +#define MT8173_PERI_HDMI_SW_RST 29 +#define MT8173_PERI_SPI0_SW_RST 33 + +#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8173 */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 1618cdfb38c7..c471dfc93b71 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -53,7 +53,7 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev) return adev ? adev->handle : NULL; } -#define ACPI_COMPANION(dev) acpi_node((dev)->fwnode) +#define ACPI_COMPANION(dev) to_acpi_node((dev)->fwnode) #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? 
\ acpi_fwnode_handle(adev) : NULL) #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) @@ -454,7 +454,7 @@ static inline bool is_acpi_node(struct fwnode_handle *fwnode) return false; } -static inline struct acpi_device *acpi_node(struct fwnode_handle *fwnode) +static inline struct acpi_device *to_acpi_node(struct fwnode_handle *fwnode) { return NULL; } diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 30f92cefaa72..9ebee53d3bf5 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -43,9 +43,9 @@ struct ceph_options { int flags; struct ceph_fsid fsid; struct ceph_entity_addr my_addr; - int mount_timeout; - int osd_idle_ttl; - int osd_keepalive_timeout; + unsigned long mount_timeout; /* jiffies */ + unsigned long osd_idle_ttl; /* jiffies */ + unsigned long osd_keepalive_timeout; /* jiffies */ /* * any type that can't be simply compared or doesn't need need @@ -63,9 +63,9 @@ struct ceph_options { /* * defaults */ -#define CEPH_MOUNT_TIMEOUT_DEFAULT 60 -#define CEPH_OSD_KEEPALIVE_DEFAULT 5 -#define CEPH_OSD_IDLE_TTL_DEFAULT 60 +#define CEPH_MOUNT_TIMEOUT_DEFAULT msecs_to_jiffies(60 * 1000) +#define CEPH_OSD_KEEPALIVE_DEFAULT msecs_to_jiffies(5 * 1000) +#define CEPH_OSD_IDLE_TTL_DEFAULT msecs_to_jiffies(60 * 1000) #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) #define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) @@ -93,13 +93,9 @@ enum { CEPH_MOUNT_SHUTDOWN, }; -/* - * subtract jiffies - */ -static inline unsigned long time_sub(unsigned long a, unsigned long b) +static inline unsigned long ceph_timeout_jiffies(unsigned long timeout) { - BUG_ON(time_after(b, a)); - return (long)a - (long)b; + return timeout ?: MAX_SCHEDULE_TIMEOUT; } struct ceph_mds_client; @@ -178,6 +174,7 @@ static inline int calc_pages_for(u64 off, u64 len) extern struct kmem_cache *ceph_inode_cachep; extern struct kmem_cache *ceph_cap_cachep; +extern struct kmem_cache *ceph_cap_flush_cachep; extern struct kmem_cache *ceph_dentry_cachep; extern struct kmem_cache *ceph_file_cachep; diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 61b19c46bdb3..7506b485bb6d 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -249,7 +249,7 @@ extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg); extern void osd_req_op_init(struct ceph_osd_request *osd_req, - unsigned int which, u16 opcode); + unsigned int which, u16 opcode, u32 flags); extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *, unsigned int which, diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index df695313f975..78842f46f152 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -31,6 +31,7 @@ #define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */ #define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */ #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */ +#define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */ struct clk_hw; struct clk_core; @@ -209,7 +210,7 @@ struct clk_ops { struct clk_init_data { const char *name; const struct clk_ops *ops; - const char **parent_names; + const char * const *parent_names; u8 num_parents; unsigned long flags; }; @@ -426,12 +427,14 @@ extern const struct clk_ops clk_mux_ops; extern const struct clk_ops clk_mux_ro_ops; struct clk *clk_register_mux(struct device *dev, const char *name, - const char **parent_names, u8 num_parents, unsigned long 
flags, + const char * const *parent_names, u8 num_parents, + unsigned long flags, void __iomem *reg, u8 shift, u8 width, u8 clk_mux_flags, spinlock_t *lock); struct clk *clk_register_mux_table(struct device *dev, const char *name, - const char **parent_names, u8 num_parents, unsigned long flags, + const char * const *parent_names, u8 num_parents, + unsigned long flags, void __iomem *reg, u8 shift, u32 mask, u8 clk_mux_flags, u32 *table, spinlock_t *lock); @@ -457,7 +460,7 @@ struct clk_fixed_factor { unsigned int div; }; -extern struct clk_ops clk_fixed_factor_ops; +extern const struct clk_ops clk_fixed_factor_ops; struct clk *clk_register_fixed_factor(struct device *dev, const char *name, const char *parent_name, unsigned long flags, unsigned int mult, unsigned int div); @@ -518,7 +521,7 @@ struct clk_composite { }; struct clk *clk_register_composite(struct device *dev, const char *name, - const char **parent_names, int num_parents, + const char * const *parent_names, int num_parents, struct clk_hw *mux_hw, const struct clk_ops *mux_ops, struct clk_hw *rate_hw, const struct clk_ops *rate_ops, struct clk_hw *gate_hw, const struct clk_ops *gate_ops, @@ -589,6 +592,7 @@ long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate, unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_p); +void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent); static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src) { @@ -624,6 +628,8 @@ struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec, void *data); struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data); int of_clk_get_parent_count(struct device_node *np); +int of_clk_parent_fill(struct device_node *np, const char **parents, + unsigned int size); const char *of_clk_get_parent_name(struct device_node *np, int index); void of_clk_init(const struct of_device_id *matches); diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h index 48a1a7d100f1..48b49305716b 100644 --- a/include/linux/crush/crush.h +++ b/include/linux/crush/crush.h @@ -1,7 +1,11 @@ #ifndef CEPH_CRUSH_CRUSH_H #define CEPH_CRUSH_CRUSH_H -#include <linux/types.h> +#ifdef __KERNEL__ +# include <linux/types.h> +#else +# include "crush_compat.h" +#endif /* * CRUSH is a pseudo-random data distribution algorithm that @@ -20,7 +24,11 @@ #define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */ #define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */ +#define CRUSH_MAX_RULESET (1<<8) /* max crush ruleset number */ +#define CRUSH_MAX_RULES CRUSH_MAX_RULESET /* should be the same as max rulesets */ +#define CRUSH_MAX_DEVICE_WEIGHT (100u * 0x10000u) +#define CRUSH_MAX_BUCKET_WEIGHT (65535u * 0x10000u) #define CRUSH_ITEM_UNDEF 0x7ffffffe /* undefined result (internal use only) */ #define CRUSH_ITEM_NONE 0x7fffffff /* no result */ @@ -108,6 +116,15 @@ enum { }; extern const char *crush_bucket_alg_name(int alg); +/* + * although tree was a legacy algorithm, it has been buggy, so + * exclude it. 
+ */ +#define CRUSH_LEGACY_ALLOWED_BUCKET_ALGS ( \ + (1 << CRUSH_BUCKET_UNIFORM) | \ + (1 << CRUSH_BUCKET_LIST) | \ + (1 << CRUSH_BUCKET_STRAW)) + struct crush_bucket { __s32 id; /* this'll be negative */ __u16 type; /* non-zero; type=0 is reserved for devices */ @@ -174,7 +191,7 @@ struct crush_map { /* choose local attempts using a fallback permutation before * re-descent */ __u32 choose_local_fallback_tries; - /* choose attempts before giving up */ + /* choose attempts before giving up */ __u32 choose_total_tries; /* attempt chooseleaf inner descent once for firstn mode; on * reject retry outer descent. Note that this does *not* @@ -187,6 +204,25 @@ struct crush_map { * that want to limit reshuffling, a value of 3 or 4 will make the * mappings line up a bit better with previous mappings. */ __u8 chooseleaf_vary_r; + +#ifndef __KERNEL__ + /* + * version 0 (original) of straw_calc has various flaws. version 1 + * fixes a few of them. + */ + __u8 straw_calc_version; + + /* + * allowed bucket algs is a bitmask, here the bit positions + * are CRUSH_BUCKET_*. note that these are *bits* and + * CRUSH_BUCKET_* values are not, so we need to or together (1 + * << CRUSH_BUCKET_WHATEVER). The 0th bit is not used to + * minimize confusion (bucket type values start at 1). + */ + __u32 allowed_bucket_algs; + + __u32 *choose_tries; +#endif }; diff --git a/include/linux/crush/hash.h b/include/linux/crush/hash.h index 91e884230d5d..d1d90258242e 100644 --- a/include/linux/crush/hash.h +++ b/include/linux/crush/hash.h @@ -1,6 +1,12 @@ #ifndef CEPH_CRUSH_HASH_H #define CEPH_CRUSH_HASH_H +#ifdef __KERNEL__ +# include <linux/types.h> +#else +# include "crush_compat.h" +#endif + #define CRUSH_HASH_RJENKINS1 0 #define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1 diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h index eab367446eea..5dfd5b1125d2 100644 --- a/include/linux/crush/mapper.h +++ b/include/linux/crush/mapper.h @@ -8,7 +8,7 @@ * LGPL2 */ -#include <linux/crush/crush.h> +#include "crush.h" extern int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size); extern int crush_do_rule(const struct crush_map *map, diff --git a/include/linux/device.h b/include/linux/device.h index 00ac57c26615..5a31bf3a4024 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -1300,4 +1300,26 @@ static void __exit __driver##_exit(void) \ } \ module_exit(__driver##_exit); +/** + * builtin_driver() - Helper macro for drivers that don't do anything + * special in init and have no exit. This eliminates some boilerplate. + * Each driver may only use this macro once, and calling it replaces + * device_initcall (or in some cases, the legacy __initcall). This is + * meant to be a direct parallel of module_driver() above but without + * the __exit stuff that is not used for builtin cases. + * + * @__driver: driver name + * @__register: register function for this driver type + * @...: Additional arguments to be passed to __register + * + * Use this macro to construct bus specific macros for registering + * drivers, and do not use it on its own. + */ +#define builtin_driver(__driver, __register, ...) 
\ +static int __init __driver##_init(void) \ +{ \ + return __register(&(__driver) , ##__VA_ARGS__); \ +} \ +device_initcall(__driver##_init); + #endif /* _DEVICE_H_ */ diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 1ccaab44abcc..5383bb1394a1 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -119,16 +119,16 @@ extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, extern struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, int nid); -extern struct gen_pool *dev_get_gen_pool(struct device *dev); +extern struct gen_pool *gen_pool_get(struct device *dev); bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, size_t size); #ifdef CONFIG_OF -extern struct gen_pool *of_get_named_gen_pool(struct device_node *np, +extern struct gen_pool *of_gen_pool_get(struct device_node *np, const char *propname, int index); #else -static inline struct gen_pool *of_get_named_gen_pool(struct device_node *np, +static inline struct gen_pool *of_gen_pool_get(struct device_node *np, const char *propname, int index) { return NULL; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 6ba7cf23748f..ad35f300b9a4 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -384,6 +384,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); void drain_all_pages(struct zone *zone); void drain_local_pages(struct zone *zone); +#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT +void page_alloc_init_late(void); +#else +static inline void page_alloc_init_late(void) +{ +} +#endif + /* * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what * GFP flags are used before interrupts are enabled. Once interrupts are diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index fd098169fe87..adac255aee86 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -407,6 +407,21 @@ static inline int desc_to_gpio(const struct gpio_desc *desc) return -EINVAL; } +/* Child properties interface */ +struct fwnode_handle; + +static inline struct gpio_desc *fwnode_get_named_gpiod( + struct fwnode_handle *fwnode, const char *propname) +{ + return ERR_PTR(-ENOSYS); +} + +static inline struct gpio_desc *devm_get_gpiod_from_child( + struct device *dev, const char *con_id, struct fwnode_handle *child) +{ + return ERR_PTR(-ENOSYS); +} + #endif /* CONFIG_GPIOLIB */ /* diff --git a/include/linux/init.h b/include/linux/init.h index 21b6d768edd7..7c68c36d3fd8 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -91,14 +91,6 @@ #define __exit __section(.exit.text) __exitused __cold notrace -/* temporary, until all users are removed */ -#define __cpuinit -#define __cpuinitdata -#define __cpuinitconst -#define __cpuexit -#define __cpuexitdata -#define __cpuexitconst - /* Used for MEMORY_HOTPLUG */ #define __meminit __section(.meminit.text) __cold notrace #define __meminitdata __section(.meminit.data) @@ -116,9 +108,6 @@ #define __INITRODATA .section ".init.rodata","a",%progbits #define __FINITDATA .previous -/* temporary, until all users are removed */ -#define __CPUINIT - #define __MEMINIT .section ".meminit.text", "ax" #define __MEMINITDATA .section ".meminit.data", "aw" #define __MEMINITRODATA .section ".meminit.rodata", "a" diff --git a/include/linux/irq.h b/include/linux/irq.h index 812149160d3b..92188b0225bb 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -407,7 +407,6 @@ enum { IRQCHIP_EOI_THREADED = (1 << 6), }; -/* This include will go away 
once we isolated irq_desc usage to core code */ #include <linux/irqdesc.h> /* diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index c52d1480f272..624a668e61f1 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -3,9 +3,6 @@ /* * Core internal functions to deal with irq descriptors - * - * This include will move to kernel/irq once we cleaned up the tree. - * For now it's included from <linux/irq.h> */ struct irq_affinity_notify; @@ -103,6 +100,11 @@ static inline struct irq_desc *irq_data_to_desc(struct irq_data *data) #endif } +static inline unsigned int irq_desc_get_irq(struct irq_desc *desc) +{ + return desc->irq_data.irq; +} + static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) { return &desc->irq_data; @@ -188,6 +190,47 @@ __irq_set_chip_handler_name_locked(unsigned int irq, struct irq_chip *chip, desc->name = name; } +/** + * irq_set_handler_locked - Set irq handler from a locked region + * @data: Pointer to the irq_data structure which identifies the irq + * @handler: Flow control handler function for this interrupt + * + * Sets the handler in the irq descriptor associated to @data. + * + * Must be called with irq_desc locked and valid parameters. Typical + * call site is the irq_set_type() callback. + */ +static inline void irq_set_handler_locked(struct irq_data *data, + irq_flow_handler_t handler) +{ + struct irq_desc *desc = irq_data_to_desc(data); + + desc->handle_irq = handler; +} + +/** + * irq_set_chip_handler_name_locked - Set chip, handler and name from a locked region + * @data: Pointer to the irq_data structure for which the chip is set + * @chip: Pointer to the new irq chip + * @handler: Flow control handler function for this interrupt + * @name: Name of the interrupt + * + * Replace the irq chip at the proper hierarchy level in @data and + * sets the handler and name in the associated irq descriptor. + * + * Must be called with irq_desc locked and valid parameters. + */ +static inline void +irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip, + irq_flow_handler_t handler, const char *name) +{ + struct irq_desc *desc = irq_data_to_desc(data); + + desc->handle_irq = handler; + desc->name = name; + data->chip = chip; +} + static inline int irq_balancing_disabled(unsigned int irq) { struct irq_desc *desc; diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h index fdd5cc16c9c4..9669bf9d4f48 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h @@ -23,12 +23,6 @@ unsigned int irq_get_next_irq(unsigned int offset); ; \ else -#ifdef CONFIG_SMP -#define irq_node(irq) (irq_get_irq_data(irq)->node) -#else -#define irq_node(irq) 0 -#endif - # define for_each_active_irq(irq) \ for (irq = irq_get_next_irq(0); irq < nr_irqs; \ irq = irq_get_next_irq(irq + 1)) diff --git a/include/linux/kernel.h b/include/linux/kernel.h index cfa9351c7536..5f0be58640ea 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -439,6 +439,9 @@ extern int panic_on_unrecovered_nmi; extern int panic_on_io_nmi; extern int panic_on_warn; extern int sysctl_panic_on_stackoverflow; + +extern bool crash_kexec_post_notifiers; + /* * Only to be used by arch init code. If the user over-wrote the default * CONFIG_PANIC_TIMEOUT, honor it. 
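A minimal sketch of the typical call site named in the irq_set_handler_locked() kernel-doc above, i.e. a driver's irq_set_type() callback; the driver name and the choice of flow handlers are illustrative, not taken from this patch:

static int foo_irq_set_type(struct irq_data *data, unsigned int type)
{
	/* switch the flow handler to match the requested trigger type */
	if (type & IRQ_TYPE_LEVEL_MASK)
		irq_set_handler_locked(data, handle_level_irq);
	else
		irq_set_handler_locked(data, handle_edge_irq);

	/* a real driver would also program the trigger mode in hardware */
	return 0;
}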
diff --git a/include/linux/leds.h b/include/linux/leds.h index 9a2b000094cf..b122eeafb5dc 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -12,6 +12,7 @@ #ifndef __LINUX_LEDS_H_INCLUDED #define __LINUX_LEDS_H_INCLUDED +#include <linux/device.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/rwsem.h> @@ -222,6 +223,11 @@ struct led_trigger { struct list_head next_trig; }; +ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count); +ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr, + char *buf); + /* Registration functions for complex triggers */ extern int led_trigger_register(struct led_trigger *trigger); extern void led_trigger_unregister(struct led_trigger *trigger); @@ -238,6 +244,16 @@ extern void led_trigger_blink_oneshot(struct led_trigger *trigger, unsigned long *delay_on, unsigned long *delay_off, int invert); +extern void led_trigger_set_default(struct led_classdev *led_cdev); +extern void led_trigger_set(struct led_classdev *led_cdev, + struct led_trigger *trigger); +extern void led_trigger_remove(struct led_classdev *led_cdev); + +static inline void *led_get_trigger_data(struct led_classdev *led_cdev) +{ + return led_cdev->trigger_data; +} + /** * led_trigger_rename_static - rename a trigger * @name: the new trigger name @@ -267,6 +283,15 @@ static inline void led_trigger_register_simple(const char *name, static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {} static inline void led_trigger_event(struct led_trigger *trigger, enum led_brightness event) {} +static inline void led_trigger_set_default(struct led_classdev *led_cdev) {} +static inline void led_trigger_set(struct led_classdev *led_cdev, + struct led_trigger *trigger) {} +static inline void led_trigger_remove(struct led_classdev *led_cdev) {} +static inline void *led_get_trigger_data(struct led_classdev *led_cdev) +{ + return NULL; +} + #endif /* CONFIG_LEDS_TRIGGERS */ /* Trigger specific functions */ diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h index 01508c7b8c81..2a663c6bb428 100644 --- a/include/linux/libfdt_env.h +++ b/include/linux/libfdt_env.h @@ -5,6 +5,10 @@ #include <asm/byteorder.h> +typedef __be16 fdt16_t; +typedef __be32 fdt32_t; +typedef __be64 fdt64_t; + #define fdt32_to_cpu(x) be32_to_cpu(x) #define cpu_to_fdt32(x) cpu_to_be32(x) #define fdt64_to_cpu(x) be64_to_cpu(x) diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 0215ffd63069..cc4b01972060 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -101,6 +101,9 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags, struct memblock_type *type_b, phys_addr_t *out_start, phys_addr_t *out_end, int *out_nid); +void __next_reserved_mem_region(u64 *idx, phys_addr_t *out_start, + phys_addr_t *out_end); + /** * for_each_mem_range - iterate through memblock areas from type_a and not * included in type_b. Or just type_a if type_b is NULL. @@ -142,6 +145,21 @@ void __next_mem_range_rev(u64 *idx, int nid, ulong flags, __next_mem_range_rev(&i, nid, flags, type_a, type_b, \ p_start, p_end, p_nid)) +/** + * for_each_reserved_mem_region - iterate over all reserved memblock areas + * @i: u64 used as loop variable + * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL + * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL + * + * Walks over reserved areas of memblock. Available as soon as memblock + * is initialized. 
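 *
 * Illustrative use only (the caller shown is hypothetical):
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: %pa..%pa\n", &start, &end);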
+ */ +#define for_each_reserved_mem_region(i, p_start, p_end) \ + for (i = 0UL, \ + __next_reserved_mem_region(&i, p_start, p_end); \ + i != (u64)ULLONG_MAX; \ + __next_reserved_mem_region(&i, p_start, p_end)) + #ifdef CONFIG_MOVABLE_NODE static inline bool memblock_is_hotpluggable(struct memblock_region *m) { diff --git a/include/linux/mm.h b/include/linux/mm.h index 99959a34f4f1..2e872f92dbac 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1635,6 +1635,8 @@ extern void free_highmem_page(struct page *page); extern void adjust_managed_page_count(struct page *page, long count); extern void mem_init_print_info(const char *str); +extern void reserve_bootmem_region(unsigned long start, unsigned long end); + /* Free the reserved page into the buddy system, so it gets managed. */ static inline void __free_reserved_page(struct page *page) { @@ -1724,7 +1726,8 @@ extern void sparse_memory_present_with_active_regions(int nid); #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \ !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) -static inline int __early_pfn_to_nid(unsigned long pfn) +static inline int __early_pfn_to_nid(unsigned long pfn, + struct mminit_pfnnid_cache *state) { return 0; } @@ -1732,7 +1735,8 @@ static inline int __early_pfn_to_nid(unsigned long pfn) /* please see mm/page_alloc.c */ extern int __meminit early_pfn_to_nid(unsigned long pfn); /* there is a per-arch backend function. */ -extern int __meminit __early_pfn_to_nid(unsigned long pfn); +extern int __meminit __early_pfn_to_nid(unsigned long pfn, + struct mminit_pfnnid_cache *state); #endif extern void set_dma_reserve(unsigned long new_dma_reserve); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 54d74f6eb233..754c25966a0a 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -762,6 +762,14 @@ typedef struct pglist_data { /* Number of pages migrated during the rate limiting time interval */ unsigned long numabalancing_migrate_nr_pages; #endif + +#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT + /* + * If memory initialisation on large machines is deferred then this + * is the first PFN that needs to be initialised. + */ + unsigned long first_deferred_pfn; +#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ } pg_data_t; #define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages) @@ -1216,11 +1224,16 @@ void sparse_init(void); #define sparse_index_init(_sec, _nid) do {} while (0) #endif /* CONFIG_SPARSEMEM */ -#ifdef CONFIG_NODES_SPAN_OTHER_NODES -bool early_pfn_in_nid(unsigned long pfn, int nid); -#else -#define early_pfn_in_nid(pfn, nid) (1) -#endif +/* + * During memory init memblocks map pfns to nids. The search is expensive and + * this caches recent lookups. The implementation of __early_pfn_to_nid + * may treat start/end as pfns or sections. 
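 *
 * A cache hit in such a lookup might be tested like this sketch (the
 * surrounding memblock search is omitted):
 *
 *	if (state->last_start <= pfn && pfn < state->last_end)
 *		return state->last_nid;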
+ */ +struct mminit_pfnnid_cache { + unsigned long last_start; + unsigned long last_end; + int last_nid; +}; #ifndef early_pfn_valid #define early_pfn_valid(pfn) (1) diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 32201c269890..b8e72aad919c 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -500,6 +500,7 @@ enum { NFSPROC4_CLNT_SEEK, NFSPROC4_CLNT_ALLOCATE, NFSPROC4_CLNT_DEALLOCATE, + NFSPROC4_CLNT_LAYOUTSTATS, }; /* nfs41 types */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index b95f914ce083..f91b5ade30c9 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -219,6 +219,7 @@ struct nfs_inode { #define NFS_INO_COMMIT (7) /* inode is committing unstable writes */ #define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ #define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ +#define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */ static inline struct nfs_inode *NFS_I(const struct inode *inode) { diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 5e1273d4de14..a2ea1491d3df 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -237,5 +237,6 @@ struct nfs_server { #define NFS_CAP_SEEK (1U << 19) #define NFS_CAP_ALLOCATE (1U << 20) #define NFS_CAP_DEALLOCATE (1U << 21) +#define NFS_CAP_LAYOUTSTATS (1U << 22) #endif diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 3eb072dbce83..f2f650f136ee 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -67,7 +67,6 @@ struct nfs_rw_ops { const fmode_t rw_mode; struct nfs_pgio_header *(*rw_alloc_header)(void); void (*rw_free_header)(struct nfs_pgio_header *); - void (*rw_release)(struct nfs_pgio_header *); int (*rw_done)(struct rpc_task *, struct nfs_pgio_header *, struct inode *); void (*rw_result)(struct rpc_task *, struct nfs_pgio_header *); diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 93ab6071bbe9..7bbe50504211 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -316,6 +316,49 @@ struct nfs4_layoutreturn { int rpc_status; }; +#define PNFS_LAYOUTSTATS_MAXSIZE 256 + +struct nfs42_layoutstat_args; +struct nfs42_layoutstat_devinfo; +typedef void (*layoutstats_encode_t)(struct xdr_stream *, + struct nfs42_layoutstat_args *, + struct nfs42_layoutstat_devinfo *); + +/* Per file per deviceid layoutstats */ +struct nfs42_layoutstat_devinfo { + struct nfs4_deviceid dev_id; + __u64 offset; + __u64 length; + __u64 read_count; + __u64 read_bytes; + __u64 write_count; + __u64 write_bytes; + __u32 layout_type; + layoutstats_encode_t layoutstats_encode; + void *layout_private; +}; + +struct nfs42_layoutstat_args { + struct nfs4_sequence_args seq_args; + struct nfs_fh *fh; + struct inode *inode; + nfs4_stateid stateid; + int num_dev; + struct nfs42_layoutstat_devinfo *devinfo; +}; + +struct nfs42_layoutstat_res { + struct nfs4_sequence_res seq_res; + int num_dev; + int rpc_status; +}; + +struct nfs42_layoutstat_data { + struct inode *inode; + struct nfs42_layoutstat_args args; + struct nfs42_layoutstat_res res; +}; + struct stateowner_id { __u64 create_time; __u32 uniquifier; @@ -984,17 +1027,14 @@ struct nfs4_readlink_res { struct nfs4_sequence_res seq_res; }; -#define NFS4_SETCLIENTID_NAMELEN (127) struct nfs4_setclientid { const nfs4_verifier * sc_verifier; - unsigned int sc_name_len; - char sc_name[NFS4_SETCLIENTID_NAMELEN + 1]; u32 sc_prog; unsigned int sc_netid_len; char sc_netid[RPCBIND_MAXNETIDLEN + 1]; unsigned int sc_uaddr_len; char sc_uaddr[RPCBIND_MAXUADDRLEN 
+ 1]; - u32 sc_cb_ident; + struct nfs_client *sc_clnt; struct rpc_cred *sc_cred; }; @@ -1142,12 +1182,9 @@ struct nfs41_state_protection { struct nfs4_op_map allow; }; -#define NFS4_EXCHANGE_ID_LEN (48) struct nfs41_exchange_id_args { struct nfs_client *client; nfs4_verifier *verifier; - unsigned int id_len; - char id[NFS4_EXCHANGE_ID_LEN]; u32 flags; struct nfs41_state_protection state_protect; }; diff --git a/include/linux/of.h b/include/linux/of.h index b871ff9d81d7..edc068d19c79 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -120,6 +120,12 @@ extern struct device_node *of_aliases; extern struct device_node *of_stdout; extern raw_spinlock_t devtree_lock; +/* flag descriptions (need to be visible even when !CONFIG_OF) */ +#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */ +#define OF_DETACHED 2 /* node has been detached from the device tree */ +#define OF_POPULATED 3 /* device already created for the node */ +#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */ + #ifdef CONFIG_OF void of_core_init(void); @@ -128,7 +134,7 @@ static inline bool is_of_node(struct fwnode_handle *fwnode) return fwnode && fwnode->type == FWNODE_OF; } -static inline struct device_node *of_node(struct fwnode_handle *fwnode) +static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) { return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL; } @@ -219,12 +225,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size) #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) #endif -/* flag descriptions */ -#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */ -#define OF_DETACHED 2 /* node has been detached from the device tree */ -#define OF_POPULATED 3 /* device already created for the node */ -#define OF_POPULATED_BUS 4 /* of_platform_populate recursed to children of this node */ - #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags) #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags) @@ -387,7 +387,7 @@ static inline bool is_of_node(struct fwnode_handle *fwnode) return false; } -static inline struct device_node *of_node(struct fwnode_handle *fwnode) +static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) { return NULL; } @@ -428,6 +428,11 @@ static inline struct device_node *of_find_node_opts_by_path(const char *path, return NULL; } +static inline struct device_node *of_find_node_by_phandle(phandle handle) +{ + return NULL; +} + static inline struct device_node *of_get_parent(const struct device_node *node) { return NULL; @@ -673,7 +678,10 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag #if defined(CONFIG_OF) && defined(CONFIG_NUMA) extern int of_node_to_nid(struct device_node *np); #else -static inline int of_node_to_nid(struct device_node *device) { return 0; } +static inline int of_node_to_nid(struct device_node *device) +{ + return NUMA_NO_NODE; +} #endif static inline struct device_node *of_find_matching_node( @@ -821,7 +829,7 @@ static inline int of_property_read_string_index(struct device_node *np, * @propname: name of the property to be searched. * * Search for a property in a device node. - * Returns true if the property exist false otherwise. + * Returns true if the property exists false otherwise. 
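 *
 * For example (the property name is illustrative):
 *
 *	bool can_wake = of_property_read_bool(np, "wakeup-source");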
*/ static inline bool of_property_read_bool(const struct device_node *np, const char *propname) diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 22801b10cef5..4c508549833a 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -33,6 +33,8 @@ extern int of_device_add(struct platform_device *pdev); extern int of_device_register(struct platform_device *ofdev); extern void of_device_unregister(struct platform_device *ofdev); +extern const void *of_device_get_match_data(const struct device *dev); + extern ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len); @@ -65,6 +67,11 @@ static inline int of_driver_match_device(struct device *dev, static inline void of_device_uevent(struct device *dev, struct kobj_uevent_env *env) { } +static inline const void *of_device_get_match_data(const struct device *dev) +{ + return NULL; +} + static inline int of_device_get_modalias(struct device *dev, char *str, ssize_t len) { diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index fd627a58068f..df9ef3801812 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -37,7 +37,7 @@ extern bool of_fdt_is_big_endian(const void *blob, unsigned long node); extern int of_fdt_match(const void *blob, unsigned long node, const char *const *compat); -extern void of_fdt_unflatten_tree(unsigned long *blob, +extern void of_fdt_unflatten_tree(const unsigned long *blob, struct device_node **mynodes); /* TBD: Temporary export of fdt globals - remove when code fully merged */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 58f1e75ba105..bba08f44cc97 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -222,6 +222,15 @@ static inline void platform_set_drvdata(struct platform_device *pdev, module_driver(__platform_driver, platform_driver_register, \ platform_driver_unregister) +/* builtin_platform_driver() - Helper macro for builtin drivers that + * don't do anything special in driver init. This eliminates some + * boilerplate. Each driver may only use this macro once, and + * calling it replaces device_initcall(). Note this is meant to be + * a parallel of module_platform_driver() above, but w/o _exit stuff. + */ +#define builtin_platform_driver(__platform_driver) \ + builtin_driver(__platform_driver, platform_driver_register) + /* module_platform_driver_probe() - Helper macro for drivers that don't do * anything special in module init/exit. This eliminates a lot of * boilerplate. Each module may only use this macro once, and @@ -240,6 +249,20 @@ static void __exit __platform_driver##_exit(void) \ } \ module_exit(__platform_driver##_exit); +/* builtin_platform_driver_probe() - Helper macro for drivers that don't do + * anything special in device init. This eliminates some boilerplate. Each + * driver may only use this macro once, and using it replaces device_initcall. + * This is meant to be a parallel of module_platform_driver_probe above, but + * without the __exit parts. 
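 *
 * Typical use (the driver and probe names are illustrative):
 *
 *	builtin_platform_driver_probe(foo_driver, foo_probe);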
+ */ +#define builtin_platform_driver_probe(__platform_driver, __platform_probe) \ +static int __init __platform_driver##_init(void) \ +{ \ + return platform_driver_probe(&(__platform_driver), \ + __platform_probe); \ +} \ +device_initcall(__platform_driver##_init); \ + #define platform_create_bundle(driver, probe, res, n_res, data, size) \ __platform_create_bundle(driver, probe, res, n_res, data, size, THIS_MODULE) extern struct platform_device *__platform_create_bundle( diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 50a8486c524b..9b1ef0c820a7 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -265,13 +265,16 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, unsigned long offset, unsigned long size, gfp_t gfp_mask); +size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, + size_t buflen, off_t skip, bool to_buffer); + size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, - void *buf, size_t buflen); + const void *buf, size_t buflen); size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen); size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, - void *buf, size_t buflen, off_t skip); + const void *buf, size_t buflen, off_t skip); size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, size_t buflen, off_t skip); diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index 2ca67b55e0fe..8df43c9f11dc 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ b/include/linux/sunrpc/bc_xprt.h @@ -37,7 +37,6 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied); void xprt_free_bc_request(struct rpc_rqst *req); int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); -int bc_send(struct rpc_rqst *req); /* * Determine if a shared backchannel is in use diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 598ba80ec30c..131032f15cc1 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -56,6 +56,7 @@ struct rpc_clnt { struct rpc_rtt * cl_rtt; /* RTO estimator data */ const struct rpc_timeout *cl_timeout; /* Timeout strategy */ + atomic_t cl_swapper; /* swapfile count */ int cl_nodelen; /* nodename length */ char cl_nodename[UNX_MAXNODENAME+1]; struct rpc_pipe_dir_head cl_pipedir_objects; diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 5f1e6bd4c316..d703f0ef37d8 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -205,8 +205,7 @@ struct rpc_wait_queue { */ struct rpc_task *rpc_new_task(const struct rpc_task_setup *); struct rpc_task *rpc_run_task(const struct rpc_task_setup *); -struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, - const struct rpc_call_ops *ops); +struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req); void rpc_put_task(struct rpc_task *); void rpc_put_task_async(struct rpc_task *); void rpc_exit_task(struct rpc_task *); @@ -269,4 +268,20 @@ static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q, } #endif +#if IS_ENABLED(CONFIG_SUNRPC_SWAP) +int rpc_clnt_swap_activate(struct rpc_clnt *clnt); +void rpc_clnt_swap_deactivate(struct rpc_clnt *clnt); +#else +static inline int +rpc_clnt_swap_activate(struct rpc_clnt *clnt) +{ + return -EINVAL; +} + +static inline void +rpc_clnt_swap_deactivate(struct rpc_clnt *clnt) +{ +} +#endif /* CONFIG_SUNRPC_SWAP */ + #endif /* 
_LINUX_SUNRPC_SCHED_H_ */ diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 8b93ef53df3c..0fb9acbb4780 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -133,6 +133,9 @@ struct rpc_xprt_ops { void (*close)(struct rpc_xprt *xprt); void (*destroy)(struct rpc_xprt *xprt); void (*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq); + int (*enable_swap)(struct rpc_xprt *xprt); + void (*disable_swap)(struct rpc_xprt *xprt); + void (*inject_disconnect)(struct rpc_xprt *xprt); }; /* @@ -180,7 +183,7 @@ struct rpc_xprt { atomic_t num_reqs; /* total slots */ unsigned long state; /* transport state */ unsigned char resvport : 1; /* use a reserved port */ - unsigned int swapper; /* we're swapping over this + atomic_t swapper; /* we're swapping over this transport */ unsigned int bind_index; /* bind function index */ @@ -212,7 +215,8 @@ struct rpc_xprt { #if defined(CONFIG_SUNRPC_BACKCHANNEL) struct svc_serv *bc_serv; /* The RPC service which will */ /* process the callback */ - unsigned int bc_alloc_count; /* Total number of preallocs */ + int bc_alloc_count; /* Total number of preallocs */ + atomic_t bc_free_slots; spinlock_t bc_pa_lock; /* Protects the preallocated * items */ struct list_head bc_pa_list; /* List of preallocated @@ -241,6 +245,7 @@ struct rpc_xprt { const char *address_strings[RPC_DISPLAY_MAX]; #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct dentry *debugfs; /* debugfs directory */ + atomic_t inject_disconnect; #endif }; @@ -327,6 +332,18 @@ static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 * return p + xprt->tsh_size; } +static inline int +xprt_enable_swap(struct rpc_xprt *xprt) +{ + return xprt->ops->enable_swap(xprt); +} + +static inline void +xprt_disable_swap(struct rpc_xprt *xprt) +{ + xprt->ops->disable_swap(xprt); +} + /* * Transport switch helper functions */ @@ -345,7 +362,6 @@ void xprt_release_rqst_cong(struct rpc_task *task); void xprt_disconnect_done(struct rpc_xprt *xprt); void xprt_force_disconnect(struct rpc_xprt *xprt); void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie); -int xs_swapper(struct rpc_xprt *xprt, int enable); bool xprt_lock_connect(struct rpc_xprt *, struct rpc_task *, void *); void xprt_unlock_connect(struct rpc_xprt *, void *); @@ -431,6 +447,23 @@ static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt) return test_and_set_bit(XPRT_BINDING, &xprt->state); } +#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) +extern unsigned int rpc_inject_disconnect; +static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) +{ + if (!rpc_inject_disconnect) + return; + if (atomic_dec_return(&xprt->inject_disconnect)) + return; + atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect); + xprt->ops->inject_disconnect(xprt); +} +#else +static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) +{ +} +#endif + #endif /* __KERNEL__*/ #endif /* _LINUX_SUNRPC_XPRT_H */ diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h index c984c85981ea..b17613052cc3 100644 --- a/include/linux/sunrpc/xprtrdma.h +++ b/include/linux/sunrpc/xprtrdma.h @@ -56,7 +56,8 @@ #define RPCRDMA_INLINE_PAD_THRESH (512)/* payload threshold to pad (bytes) */ -/* memory registration strategies */ +/* Memory registration strategies, by number. + * This is part of a kernel / user space API. Do not remove. 
*/ enum rpcrdma_memreg { RPCRDMA_BOUNCEBUFFERS = 0, RPCRDMA_REGISTER, diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index a746bf5216f8..f47feada5b42 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -65,6 +65,8 @@ struct watchdog_ops { * @driver-data:Pointer to the drivers private data. * @lock: Lock for watchdog core internal use only. * @status: Field that contains the devices internal status bits. + * @deferred: entry in wtd_deferred_reg_list which is used to + * register early initialized watchdogs. * * The watchdog_device structure contains all information about a * watchdog timer device. @@ -95,6 +97,7 @@ struct watchdog_device { #define WDOG_ALLOW_RELEASE 2 /* Did we receive the magic char ? */ #define WDOG_NO_WAY_OUT 3 /* Is 'nowayout' feature set ? */ #define WDOG_UNREGISTERED 4 /* Has the device been unregistered */ + struct list_head deferred; }; #define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT) diff --git a/include/media/v4l2-flash-led-class.h b/include/media/v4l2-flash-led-class.h new file mode 100644 index 000000000000..098236c083b8 --- /dev/null +++ b/include/media/v4l2-flash-led-class.h @@ -0,0 +1,148 @@ +/* + * V4L2 flash LED sub-device registration helpers. + * + * Copyright (C) 2015 Samsung Electronics Co., Ltd + * Author: Jacek Anaszewski <[email protected]> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _V4L2_FLASH_H +#define _V4L2_FLASH_H + +#include <media/v4l2-ctrls.h> +#include <media/v4l2-subdev.h> + +struct led_classdev_flash; +struct led_classdev; +struct v4l2_flash; +enum led_brightness; + +/* + * struct v4l2_flash_ctrl_data - flash control initialization data, filled + * basing on the features declared by the LED flash + * class driver in the v4l2_flash_config + * @config: initialization data for a control + * @cid: contains v4l2 flash control id if the config + * field was initialized, 0 otherwise + */ +struct v4l2_flash_ctrl_data { + struct v4l2_ctrl_config config; + u32 cid; +}; + +struct v4l2_flash_ops { + /* setup strobing the flash by hardware pin state assertion */ + int (*external_strobe_set)(struct v4l2_flash *v4l2_flash, + bool enable); + /* convert intensity to brightness in a device specific manner */ + enum led_brightness (*intensity_to_led_brightness) + (struct v4l2_flash *v4l2_flash, s32 intensity); + /* convert brightness to intensity in a device specific manner */ + s32 (*led_brightness_to_intensity) + (struct v4l2_flash *v4l2_flash, enum led_brightness); +}; + +/** + * struct v4l2_flash_config - V4L2 Flash sub-device initialization data + * @dev_name: the name of the media entity, + unique in the system + * @torch_intensity: constraints for the LED in torch mode + * @indicator_intensity: constraints for the indicator LED + * @flash_faults: bitmask of flash faults that the LED flash class + device can report; corresponding LED_FAULT* bit + definitions are available in the header file + <linux/led-class-flash.h> + * @has_external_strobe: external strobe capability + */ +struct v4l2_flash_config { + char dev_name[32]; + struct led_flash_setting torch_intensity; + struct led_flash_setting indicator_intensity; + u32 flash_faults; + unsigned int has_external_strobe:1; +}; + +/** + * struct v4l2_flash - Flash sub-device context + * @fled_cdev: LED flash class device controlled by this sub-device + * @iled_cdev: LED class device 
representing indicator LED associated + * with the LED flash class device + * @ops: V4L2 specific flash ops + * @sd: V4L2 sub-device + * @hdl: flash controls handler + * @ctrls: array of pointers to controls, whose values define + * the sub-device state + */ +struct v4l2_flash { + struct led_classdev_flash *fled_cdev; + struct led_classdev_flash *iled_cdev; + const struct v4l2_flash_ops *ops; + + struct v4l2_subdev sd; + struct v4l2_ctrl_handler hdl; + struct v4l2_ctrl **ctrls; +}; + +static inline struct v4l2_flash *v4l2_subdev_to_v4l2_flash( + struct v4l2_subdev *sd) +{ + return container_of(sd, struct v4l2_flash, sd); +} + +static inline struct v4l2_flash *v4l2_ctrl_to_v4l2_flash(struct v4l2_ctrl *c) +{ + return container_of(c->handler, struct v4l2_flash, hdl); +} + +#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS) +/** + * v4l2_flash_init - initialize V4L2 flash led sub-device + * @dev: flash device, e.g. an I2C device + * @of_node: of_node of the LED, may be NULL if the same as device's + * @fled_cdev: LED flash class device to wrap + * @iled_cdev: LED flash class device representing indicator LED associated + * with fled_cdev, may be NULL + * @flash_ops: V4L2 Flash device ops + * @config: initialization data for V4L2 Flash sub-device + * + * Create V4L2 Flash sub-device wrapping given LED subsystem device. + * + * Returns: A valid pointer, or, when an error occurs, the return + * value is encoded using ERR_PTR(). Use IS_ERR() to check and + * PTR_ERR() to obtain the numeric return value. + */ +struct v4l2_flash *v4l2_flash_init( + struct device *dev, struct device_node *of_node, + struct led_classdev_flash *fled_cdev, + struct led_classdev_flash *iled_cdev, + const struct v4l2_flash_ops *ops, + struct v4l2_flash_config *config); + +/** + * v4l2_flash_release - release V4L2 Flash sub-device + * @flash: the V4L2 Flash sub-device to release + * + * Release V4L2 Flash sub-device. + */ +void v4l2_flash_release(struct v4l2_flash *v4l2_flash); + +#else +static inline struct v4l2_flash *v4l2_flash_init( + struct device *dev, struct device_node *of_node, + struct led_classdev_flash *fled_cdev, + struct led_classdev_flash *iled_cdev, + const struct v4l2_flash_ops *ops, + struct v4l2_flash_config *config) +{ + return NULL; +} + +static inline void v4l2_flash_release(struct v4l2_flash *v4l2_flash) +{ +} +#endif /* CONFIG_V4L2_FLASH_LED_CLASS */ + +#endif /* _V4L2_FLASH_H */ diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index dc20102ff600..4e18318eb425 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -605,6 +605,8 @@ struct v4l2_subdev { struct video_device *devnode; /* pointer to the physical device, if any */ struct device *dev; + /* The device_node of the subdev, usually the same as dev->of_node. */ + struct device_node *of_node; /* Links this subdev to a global subdev_list or @notifier->done list. */ struct list_head async_list; /* Pointer to respective struct v4l2_async_subdev. 
*/ diff --git a/include/net/ax25.h b/include/net/ax25.h index 16a923a3a43a..e602f8177ebf 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -13,6 +13,7 @@ #include <linux/slab.h> #include <linux/atomic.h> #include <net/neighbour.h> +#include <net/sock.h> #define AX25_T1CLAMPLO 1 #define AX25_T1CLAMPHI (30 * HZ) @@ -246,7 +247,20 @@ typedef struct ax25_cb { atomic_t refcount; } ax25_cb; -#define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo) +struct ax25_sock { + struct sock sk; + struct ax25_cb *cb; +}; + +static inline struct ax25_sock *ax25_sk(const struct sock *sk) +{ + return (struct ax25_sock *) sk; +} + +static inline struct ax25_cb *sk_to_ax25(const struct sock *sk) +{ + return ax25_sk(sk)->cb; +} #define ax25_for_each(__ax25, list) \ hlist_for_each_entry(__ax25, list, ax25_node) diff --git a/include/net/sock.h b/include/net/sock.h index 14d539c040d7..05a8c1aea251 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -277,7 +277,6 @@ struct cg_proto; * @sk_incoming_cpu: record cpu processing incoming packets * @sk_txhash: computed flow hash for use on transmit * @sk_filter: socket filtering instructions - * @sk_protinfo: private area, net family specific, when not using slab * @sk_timer: sock cleanup timer * @sk_stamp: time stamp of last packet received * @sk_tsflags: SO_TIMESTAMPING socket options @@ -416,7 +415,6 @@ struct sock { const struct cred *sk_peer_cred; long sk_rcvtimeo; long sk_sndtimeo; - void *sk_protinfo; struct timer_list sk_timer; ktime_t sk_stamp; u16 sk_tsflags; diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index d3f4832db289..b6fce900a833 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -313,6 +313,9 @@ struct drm_amdgpu_gem_op { #define AMDGPU_VA_OP_MAP 1 #define AMDGPU_VA_OP_UNMAP 2 +/* Delay the page table update till the next CS */ +#define AMDGPU_VM_DELAY_UPDATE (1 << 0) + /* Mapping flags */ /* readable mapping */ #define AMDGPU_VM_PAGE_READABLE (1 << 1) @@ -348,6 +351,7 @@ struct drm_amdgpu_gem_va { #define AMDGPU_CHUNK_ID_IB 0x01 #define AMDGPU_CHUNK_ID_FENCE 0x02 +#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03 struct drm_amdgpu_cs_chunk { uint32_t chunk_id; @@ -399,6 +403,14 @@ struct drm_amdgpu_cs_chunk_ib { uint32_t ring; }; +struct drm_amdgpu_cs_chunk_dep { + uint32_t ip_type; + uint32_t ip_instance; + uint32_t ring; + uint32_t ctx_id; + uint64_t handle; +}; + struct drm_amdgpu_cs_chunk_fence { uint32_t handle; uint32_t offset; diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index 25084a052a1e..c9aca042e61d 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -755,4 +755,7 @@ struct fuse_notify_retrieve_in { uint64_t dummy4; }; +/* Device ioctls: */ +#define FUSE_DEV_IOC_CLONE _IOR(229, 0, uint32_t) + #endif /* _LINUX_FUSE_H */ diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index 83d6236a2f08..eaf94919291a 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h @@ -19,8 +19,10 @@ #define _UAPI_LINUX_IN_H #include <linux/types.h> +#include <linux/libc-compat.h> #include <linux/socket.h> +#if __UAPI_DEF_IN_IPPROTO /* Standard well-defined IP protocols. */ enum { IPPROTO_IP = 0, /* Dummy protocol for TCP */ @@ -75,12 +77,14 @@ enum { #define IPPROTO_RAW IPPROTO_RAW IPPROTO_MAX }; +#endif - +#if __UAPI_DEF_IN_ADDR /* Internet address. 
*/ struct in_addr { __be32 s_addr; }; +#endif #define IP_TOS 1 #define IP_TTL 2 @@ -158,6 +162,7 @@ struct in_addr { /* Request struct for multicast socket ops */ +#if __UAPI_DEF_IP_MREQ struct ip_mreq { struct in_addr imr_multiaddr; /* IP multicast address of group */ struct in_addr imr_interface; /* local IP address of interface */ @@ -209,14 +214,18 @@ struct group_filter { #define GROUP_FILTER_SIZE(numsrc) \ (sizeof(struct group_filter) - sizeof(struct __kernel_sockaddr_storage) \ + (numsrc) * sizeof(struct __kernel_sockaddr_storage)) +#endif +#if __UAPI_DEF_IN_PKTINFO struct in_pktinfo { int ipi_ifindex; struct in_addr ipi_spec_dst; struct in_addr ipi_addr; }; +#endif /* Structure describing an Internet (IP) socket address. */ +#if __UAPI_DEF_SOCKADDR_IN #define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */ struct sockaddr_in { __kernel_sa_family_t sin_family; /* Address family */ @@ -228,8 +237,9 @@ struct sockaddr_in { sizeof(unsigned short int) - sizeof(struct in_addr)]; }; #define sin_zero __pad /* for BSD UNIX comp. -FvK */ +#endif - +#if __UAPI_DEF_IN_CLASS /* * Definitions of the bits in an Internet address integer. * On subnets, host and network parts are found according @@ -280,7 +290,7 @@ struct sockaddr_in { #define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */ #define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */ #define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */ - +#endif /* <asm/byteorder.h> contains the htonl type stuff.. */ #include <asm/byteorder.h> diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h index fa673e9cc040..7d024ceb075d 100644 --- a/include/uapi/linux/libc-compat.h +++ b/include/uapi/linux/libc-compat.h @@ -56,6 +56,13 @@ /* GLIBC headers included first so don't define anything * that would already be defined. */ +#define __UAPI_DEF_IN_ADDR 0 +#define __UAPI_DEF_IN_IPPROTO 0 +#define __UAPI_DEF_IN_PKTINFO 0 +#define __UAPI_DEF_IP_MREQ 0 +#define __UAPI_DEF_SOCKADDR_IN 0 +#define __UAPI_DEF_IN_CLASS 0 + #define __UAPI_DEF_IN6_ADDR 0 /* The exception is the in6_addr macros which must be defined * if the glibc code didn't define them. This guard matches @@ -78,6 +85,13 @@ /* Linux headers included first, and we must define everything * we need. The expectation is that glibc will check the * __UAPI_DEF_* defines and adjust appropriately. */ +#define __UAPI_DEF_IN_ADDR 1 +#define __UAPI_DEF_IN_IPPROTO 1 +#define __UAPI_DEF_IN_PKTINFO 1 +#define __UAPI_DEF_IP_MREQ 1 +#define __UAPI_DEF_SOCKADDR_IN 1 +#define __UAPI_DEF_IN_CLASS 1 + #define __UAPI_DEF_IN6_ADDR 1 /* We unconditionally define the in6_addr macros and glibc must * coordinate. */ @@ -103,6 +117,14 @@ * that we need. 
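With the new __UAPI_DEF_* guards, including the libc header first lets linux/in.h skip the types the libc already defined. A small coexistence sketch; whether a particular libc version participates in this coordination is an assumption, not something the header change by itself guarantees:

#include <sys/socket.h>
#include <netinet/in.h>         /* libc definitions first */
#include <linux/in.h>           /* UAPI header now skips the duplicate types */

int main(void)
{
        struct sockaddr_in sin = { .sin_family = AF_INET };

        return sin.sin_family == AF_INET ? 0 : 1;
}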
*/ #else /* !defined(__GLIBC__) */ +/* Definitions for in.h */ +#define __UAPI_DEF_IN_ADDR 1 +#define __UAPI_DEF_IN_IPPROTO 1 +#define __UAPI_DEF_IN_PKTINFO 1 +#define __UAPI_DEF_IP_MREQ 1 +#define __UAPI_DEF_SOCKADDR_IN 1 +#define __UAPI_DEF_IN_CLASS 1 + /* Definitions for in6.h */ #define __UAPI_DEF_IN6_ADDR 1 #define __UAPI_DEF_IN6_ADDR_ALT 1 diff --git a/init/main.c b/init/main.c index c599aea23bb1..c5d5626289ce 100644 --- a/init/main.c +++ b/init/main.c @@ -1004,6 +1004,8 @@ static noinline void __init kernel_init_freeable(void) smp_init(); sched_init_smp(); + page_alloc_init_late(); + do_basic_setup(); /* Open the /dev/console on the rootfs, this should never fail */ diff --git a/ipc/msg.c b/ipc/msg.c index 2b6fdbb9e0e9..66c4f567eb73 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -76,7 +76,7 @@ struct msg_sender { static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id) { - struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id); + struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&msg_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); @@ -196,7 +196,7 @@ static void expunge_all(struct msg_queue *msq, int res) * or dealing with -EAGAIN cases. See lockless receive part 1 * and 2 in do_msgrcv(). */ - smp_mb(); + smp_wmb(); /* barrier (B) */ msr->r_msg = ERR_PTR(res); } } @@ -580,7 +580,8 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) /* initialize pipelined send ordering */ msr->r_msg = NULL; wake_up_process(msr->r_tsk); - smp_mb(); /* see barrier comment below */ + /* barrier (B) see barrier comment below */ + smp_wmb(); msr->r_msg = ERR_PTR(-E2BIG); } else { msr->r_msg = NULL; @@ -589,11 +590,12 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) wake_up_process(msr->r_tsk); /* * Ensure that the wakeup is visible before - * setting r_msg, as the receiving end depends - * on it. See lockless receive part 1 and 2 in - * do_msgrcv(). + * setting r_msg, as the receiving can otherwise + * exit - once r_msg is set, the receiver can + * continue. See lockless receive part 1 and 2 + * in do_msgrcv(). Barrier (B). */ - smp_mb(); + smp_wmb(); msr->r_msg = msg; return 1; @@ -932,12 +934,38 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl /* Lockless receive, part 2: * Wait until pipelined_send or expunge_all are outside of * wake_up_process(). There is a race with exit(), see - * ipc/mqueue.c for the details. + * ipc/mqueue.c for the details. The correct serialization + * ensures that a receiver cannot continue without the wakeup + * being visibible _before_ setting r_msg: + * + * CPU 0 CPU 1 + * <loop receiver> + * smp_rmb(); (A) <-- pair -. <waker thread> + * <load ->r_msg> | msr->r_msg = NULL; + * | wake_up_process(); + * <continue> `------> smp_wmb(); (B) + * msr->r_msg = msg; + * + * Where (A) orders the message value read and where (B) orders + * the write to the r_msg -- done in both pipelined_send and + * expunge_all. */ - msg = (struct msg_msg *)msr_d.r_msg; - while (msg == NULL) { - cpu_relax(); + for (;;) { + /* + * Pairs with writer barrier in pipelined_send + * or expunge_all. + */ + smp_rmb(); /* barrier (A) */ msg = (struct msg_msg *)msr_d.r_msg; + if (msg) + break; + + /* + * The cpu_relax() call is a compiler barrier + * which forces everything in this loop to be + * re-loaded. 
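Reduced to its essentials, the pairing described in the comment above looks like this (a condensed restatement of the pipelined_send(), expunge_all() and do_msgrcv() hunks, not new code): the writer publishes r_msg only after the wakeup is guaranteed visible, and the reader issues a read barrier before every reload.

/* writer side (pipelined_send() / expunge_all()) */
wake_up_process(msr->r_tsk);
smp_wmb();                              /* barrier (B) */
msr->r_msg = msg;                       /* publish the result */

/* reader side (do_msgrcv()), spinning without the queue lock */
for (;;) {
        smp_rmb();                      /* barrier (A), pairs with (B) */
        msg = (struct msg_msg *)msr_d.r_msg;
        if (msg)
                break;
        cpu_relax();
}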
+ */ + cpu_relax(); } /* Lockless receive, part 3: diff --git a/ipc/sem.c b/ipc/sem.c index d1a6edd17eba..bc3d530cb23e 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -391,7 +391,7 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp; struct sem_array *sma; - ipcp = ipc_obtain_object(&sem_ids(ns), id); + ipcp = ipc_obtain_object_idr(&sem_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); @@ -410,7 +410,7 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id) { - struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id); + struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); diff --git a/ipc/shm.c b/ipc/shm.c index 6d767071c367..06e5cf2fe019 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -129,7 +129,7 @@ void __init shm_init(void) static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id) { - struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id); + struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id); if (IS_ERR(ipcp)) return ERR_CAST(ipcp); @@ -155,8 +155,11 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id) { struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id); - if (IS_ERR(ipcp)) - return (struct shmid_kernel *)ipcp; + /* + * We raced in the idr lookup or with shm_destroy(). Either way, the + * ID is busted. + */ + BUG_ON(IS_ERR(ipcp)); return container_of(ipcp, struct shmid_kernel, shm_perm); } @@ -191,7 +194,6 @@ static void shm_open(struct vm_area_struct *vma) struct shmid_kernel *shp; shp = shm_lock(sfd->ns, sfd->id); - BUG_ON(IS_ERR(shp)); shp->shm_atim = get_seconds(); shp->shm_lprid = task_tgid_vnr(current); shp->shm_nattch++; @@ -258,7 +260,6 @@ static void shm_close(struct vm_area_struct *vma) down_write(&shm_ids(ns).rwsem); /* remove from the list of attaches of the shm segment */ shp = shm_lock(ns, sfd->id); - BUG_ON(IS_ERR(shp)); shp->shm_lprid = task_tgid_vnr(current); shp->shm_dtim = get_seconds(); shp->shm_nattch--; @@ -1191,7 +1192,6 @@ out_fput: out_nattch: down_write(&shm_ids(ns).rwsem); shp = shm_lock(ns, shmid); - BUG_ON(IS_ERR(shp)); shp->shm_nattch--; if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); diff --git a/ipc/util.c b/ipc/util.c index ff3323ef8d8b..be4230020a1f 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -467,10 +467,7 @@ void ipc_rcu_free(struct rcu_head *head) { struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); - if (is_vmalloc_addr(p)) - vfree(p); - else - kfree(p); + kvfree(p); } /** @@ -558,7 +555,7 @@ void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out) * Call inside the RCU critical section. * The ipc object is *not* locked on exit. 
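The ipc_rcu_free() change above relies on kvfree() accepting either kind of pointer, matching the common allocate-with-fallback pattern (a generic sketch, not code from this patch):

static void *big_buf_alloc(size_t size)
{
        void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

        if (!buf)
                buf = vmalloc(size);    /* large requests fall back to vmalloc */
        return buf;
}

/* ... */
kvfree(buf);    /* frees kmalloc()ed and vmalloc()ed memory alike */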
*/ -struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id) +struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id) { struct kern_ipc_perm *out; int lid = ipcid_to_idx(id); @@ -584,21 +581,24 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id) struct kern_ipc_perm *out; rcu_read_lock(); - out = ipc_obtain_object(ids, id); + out = ipc_obtain_object_idr(ids, id); if (IS_ERR(out)) - goto err1; + goto err; spin_lock(&out->lock); - /* ipc_rmid() may have already freed the ID while ipc_lock - * was spinning: here verify that the structure is still valid + /* + * ipc_rmid() may have already freed the ID while ipc_lock() + * was spinning: here verify that the structure is still valid. + * Upon races with RMID, return -EIDRM, thus indicating that + * the ID points to a removed identifier. */ if (ipc_valid_object(out)) return out; spin_unlock(&out->lock); - out = ERR_PTR(-EINVAL); -err1: + out = ERR_PTR(-EIDRM); +err: rcu_read_unlock(); return out; } @@ -608,7 +608,7 @@ err1: * @ids: ipc identifier set * @id: ipc id to look for * - * Similar to ipc_obtain_object() but also checks + * Similar to ipc_obtain_object_idr() but also checks * the ipc object reference counter. * * Call inside the RCU critical section. @@ -616,13 +616,13 @@ err1: */ struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id) { - struct kern_ipc_perm *out = ipc_obtain_object(ids, id); + struct kern_ipc_perm *out = ipc_obtain_object_idr(ids, id); if (IS_ERR(out)) goto out; if (ipc_checkid(out, id)) - return ERR_PTR(-EIDRM); + return ERR_PTR(-EINVAL); out: return out; } diff --git a/ipc/util.h b/ipc/util.h index 1a5a0fcd099c..3a8a5a0eca62 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -132,7 +132,7 @@ void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head)); void ipc_rcu_free(struct rcu_head *head); struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); -struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id); +struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id); void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out); void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out); diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c index a744098e4eb7..7080ae1eb6c1 100644 --- a/kernel/gcov/base.c +++ b/kernel/gcov/base.c @@ -92,6 +92,12 @@ void __gcov_merge_time_profile(gcov_type *counters, unsigned int n_counters) } EXPORT_SYMBOL(__gcov_merge_time_profile); +void __gcov_merge_icall_topn(gcov_type *counters, unsigned int n_counters) +{ + /* Unused. */ +} +EXPORT_SYMBOL(__gcov_merge_icall_topn); + /** * gcov_enable_events - enable event reporting through gcov_event() * diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c index 826ba9fb5e32..e25e92fb44fa 100644 --- a/kernel/gcov/gcc_4_7.c +++ b/kernel/gcov/gcc_4_7.c @@ -18,7 +18,9 @@ #include <linux/vmalloc.h> #include "gcov.h" -#if __GNUC__ == 4 && __GNUC_MINOR__ >= 9 +#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1 +#define GCOV_COUNTERS 10 +#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9 #define GCOV_COUNTERS 9 #else #define GCOV_COUNTERS 8 diff --git a/kernel/kexec.c b/kernel/kexec.c index 7a36fdcca5bf..a785c1015e25 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -84,6 +84,17 @@ struct resource crashk_low_res = { int kexec_should_crash(struct task_struct *p) { + /* + * If crash_kexec_post_notifiers is enabled, don't run + * crash_kexec() here yet, which must be run after panic + * notifiers in panic(). 
+ */ + if (crash_kexec_post_notifiers) + return 0; + /* + * There are 4 panic() calls in do_exit() path, each of which + * corresponds to each of these 4 conditions. + */ if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops) return 1; return 0; diff --git a/kernel/panic.c b/kernel/panic.c index 8136ad76e5fd..04e91ff7560b 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -32,7 +32,7 @@ static unsigned long tainted_mask; static int pause_on_oops; static int pause_on_oops_flag; static DEFINE_SPINLOCK(pause_on_oops_lock); -static bool crash_kexec_post_notifiers; +bool crash_kexec_post_notifiers; int panic_on_warn __read_mostly; int panic_timeout = CONFIG_PANIC_TIMEOUT; @@ -142,7 +142,8 @@ void panic(const char *fmt, ...) * Note: since some panic_notifiers can make crashed kernel * more unstable, it can increase risks of the kdump failure too. */ - crash_kexec(NULL); + if (crash_kexec_post_notifiers) + crash_kexec(NULL); bust_spinlocks(0); diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig index 7e01f78f0417..9e302315e33d 100644 --- a/kernel/power/Kconfig +++ b/kernel/power/Kconfig @@ -187,7 +187,7 @@ config DPM_WATCHDOG config DPM_WATCHDOG_TIMEOUT int "Watchdog timeout in seconds" range 1 120 - default 12 + default 60 depends on DPM_WATCHDOG config PM_TRACE diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index 2329daae5255..690f78f210f2 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -552,7 +552,7 @@ int hibernation_platform_enter(void) error = disable_nonboot_cpus(); if (error) - goto Platform_finish; + goto Enable_cpus; local_irq_disable(); syscore_suspend(); @@ -568,6 +568,8 @@ int hibernation_platform_enter(void) Power_up: syscore_resume(); local_irq_enable(); + + Enable_cpus: enable_nonboot_cpus(); Platform_finish: diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index de553849f3ac..cf8c24203368 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -207,14 +207,14 @@ static int console_may_schedule; * need to be changed in the future, when the requirements change. * * /dev/kmsg exports the structured data in the following line format: - * "<level>,<sequnum>,<timestamp>,<contflag>;<message text>\n" + * "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n" + * + * Users of the export format should ignore possible additional values + * separated by ',', and find the message after the ';' character. * * The optional key/value pairs are attached as continuation lines starting * with a space character and terminated by a newline. All possible * non-prinatable characters are escaped in the "\xff" notation. - * - * Users of the export format should ignore possible additional values - * separated by ',', and find the message after the ';' character. 
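A hedged userspace sketch of a /dev/kmsg consumer that follows the rule documented above: treat everything before the first ';' as a comma-separated prefix, take only the fields it understands, and treat everything after the ';' as the message text. The buffer size and the stdio-based loop are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        FILE *f = fopen("/dev/kmsg", "r");
        char rec[8192];

        if (!f)
                return 1;
        while (fgets(rec, sizeof(rec), f)) {
                char *msg = strchr(rec, ';');
                unsigned long prefix;

                if (!msg)
                        continue;       /* no ';': skip the record */
                *msg++ = '\0';
                prefix = strtoul(rec, NULL, 10); /* first field: syslog prefix */
                printf("level %lu: %s", prefix & 7, msg);
        }
        fclose(f);
        return 0;
}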
*/ enum log_flags { diff --git a/kernel/relay.c b/kernel/relay.c index e9dbaeb8fd65..0b4570cfacae 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -81,10 +81,7 @@ static struct page **relay_alloc_page_array(unsigned int n_pages) */ static void relay_free_page_array(struct page **array) { - if (is_vmalloc_addr(array)) - vfree(array); - else - kfree(array); + kvfree(array); } /** diff --git a/kernel/time/Makefile b/kernel/time/Makefile index ffc4cc3dcd47..49eca0beed32 100644 --- a/kernel/time/Makefile +++ b/kernel/time/Makefile @@ -12,5 +12,3 @@ obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o tick-sched.o obj-$(CONFIG_TIMER_STATS) += timer_stats.o obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o obj-$(CONFIG_TEST_UDELAY) += test_udelay.o - -$(obj)/time.o: $(objtree)/include/config/ diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 520499dd85af..5e097fa9faf7 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1566,7 +1566,7 @@ static void migrate_timers(int cpu) BUG_ON(cpu_online(cpu)); old_base = per_cpu_ptr(&tvec_bases, cpu); - new_base = this_cpu_ptr(&tvec_bases); + new_base = get_cpu_ptr(&tvec_bases); /* * The caller is globally serialized and nobody else * takes two locks at once, deadlock is not possible. @@ -1590,6 +1590,7 @@ static void migrate_timers(int cpu) spin_unlock(&old_base->lock); spin_unlock_irq(&new_base->lock); + put_cpu_ptr(&tvec_bases); } static int timer_cpu_notify(struct notifier_block *self, diff --git a/lib/genalloc.c b/lib/genalloc.c index d214866eeea2..daf0afb6d979 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -602,12 +602,12 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, EXPORT_SYMBOL(devm_gen_pool_create); /** - * dev_get_gen_pool - Obtain the gen_pool (if any) for a device + * gen_pool_get - Obtain the gen_pool (if any) for a device * @dev: device to retrieve the gen_pool from * * Returns the gen_pool for the device if one is present, or NULL. */ -struct gen_pool *dev_get_gen_pool(struct device *dev) +struct gen_pool *gen_pool_get(struct device *dev) { struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL, NULL); @@ -616,11 +616,11 @@ struct gen_pool *dev_get_gen_pool(struct device *dev) return NULL; return *p; } -EXPORT_SYMBOL_GPL(dev_get_gen_pool); +EXPORT_SYMBOL_GPL(gen_pool_get); #ifdef CONFIG_OF /** - * of_get_named_gen_pool - find a pool by phandle property + * of_gen_pool_get - find a pool by phandle property * @np: device node * @propname: property name containing phandle(s) * @index: index into the phandle array @@ -629,7 +629,7 @@ EXPORT_SYMBOL_GPL(dev_get_gen_pool); * address of the device tree node pointed at by the phandle property, * or NULL if not found. 
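migrate_timers() above switches from this_cpu_ptr() to get_cpu_ptr(), presumably so the calling CPU cannot change between looking up the base and using it. The general shape of the two accessors (sketch):

struct tvec_base *base;

base = get_cpu_ptr(&tvec_bases);        /* disables preemption */
/* ... use this CPU's base; the task cannot migrate away ... */
put_cpu_ptr(&tvec_bases);               /* re-enables preemption */

/* this_cpu_ptr() is only safe once preemption is already disabled */
WARN_ON_ONCE(preemptible());
base = this_cpu_ptr(&tvec_bases);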
*/ -struct gen_pool *of_get_named_gen_pool(struct device_node *np, +struct gen_pool *of_gen_pool_get(struct device_node *np, const char *propname, int index) { struct platform_device *pdev; @@ -642,7 +642,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np, of_node_put(np_pool); if (!pdev) return NULL; - return dev_get_gen_pool(&pdev->dev); + return gen_pool_get(&pdev->dev); } -EXPORT_SYMBOL_GPL(of_get_named_gen_pool); +EXPORT_SYMBOL_GPL(of_gen_pool_get); #endif /* CONFIG_OF */ diff --git a/lib/list_sort.c b/lib/list_sort.c index b29015102698..3fe401067e20 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c @@ -289,5 +289,5 @@ exit: kfree(elts); return err; } -module_init(list_sort_test); +late_initcall(list_sort_test); #endif /* CONFIG_TEST_LIST_SORT */ diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 99fbc2f238c4..d105a9f56878 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -650,9 +650,8 @@ EXPORT_SYMBOL(sg_miter_stop); * Returns the number of copied bytes. * **/ -static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, - void *buf, size_t buflen, off_t skip, - bool to_buffer) +size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, + size_t buflen, off_t skip, bool to_buffer) { unsigned int offset = 0; struct sg_mapping_iter miter; @@ -689,6 +688,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, local_irq_restore(flags); return offset; } +EXPORT_SYMBOL(sg_copy_buffer); /** * sg_copy_from_buffer - Copy from a linear buffer to an SG list @@ -701,9 +701,9 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, * **/ size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, - void *buf, size_t buflen) + const void *buf, size_t buflen) { - return sg_copy_buffer(sgl, nents, buf, buflen, 0, false); + return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false); } EXPORT_SYMBOL(sg_copy_from_buffer); @@ -729,16 +729,16 @@ EXPORT_SYMBOL(sg_copy_to_buffer); * @sgl: The SG list * @nents: Number of SG entries * @buf: Where to copy from - * @skip: Number of bytes to skip before copying * @buflen: The number of bytes to copy + * @skip: Number of bytes to skip before copying * * Returns the number of copied bytes. * **/ size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, - void *buf, size_t buflen, off_t skip) + const void *buf, size_t buflen, off_t skip) { - return sg_copy_buffer(sgl, nents, buf, buflen, skip, false); + return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false); } EXPORT_SYMBOL(sg_pcopy_from_buffer); @@ -747,8 +747,8 @@ EXPORT_SYMBOL(sg_pcopy_from_buffer); * @sgl: The SG list * @nents: Number of SG entries * @buf: Where to copy to - * @skip: Number of bytes to skip before copying * @buflen: The number of bytes to copy + * @skip: Number of bytes to skip before copying * * Returns the number of copied bytes. * diff --git a/mm/Kconfig b/mm/Kconfig index c180af880ed5..e79de2bd12cd 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -636,3 +636,21 @@ config MAX_STACK_SIZE_MB changed to a smaller value in which case that is used. A sane initial value is 80 MB. 
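For the scatterlist helpers above, the kernel-doc now lists the arguments in their real order (buf, buflen, skip) and the copy-from variants take a const buffer. A usage sketch with an illustrative one-entry table:

char dst[64];
struct scatterlist sg;
const char greeting[] = "hello";
size_t copied;

sg_init_one(&sg, dst, sizeof(dst));

/* copy the whole string to the start of the SG list */
copied = sg_copy_from_buffer(&sg, 1, greeting, sizeof(greeting));

/* same, but skip the first 16 bytes of the SG list first */
copied = sg_pcopy_from_buffer(&sg, 1, greeting, sizeof(greeting), 16);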
+ +# For architectures that support deferred memory initialisation +config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT + bool + +config DEFERRED_STRUCT_PAGE_INIT + bool "Defer initialisation of struct pages to kswapd" + default n + depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT + depends on MEMORY_HOTPLUG + help + Ordinarily all struct pages are initialised during early boot in a + single thread. On very large machines this can take a considerable + amount of time. If this option is set, large machines will bring up + a subset of memmap at boot and then initialise the rest in parallel + when kswapd starts. This has a potential performance impact on + processes running early in the lifetime of the systemm until kswapd + finishes the initialisation. diff --git a/mm/bootmem.c b/mm/bootmem.c index 477be696511d..a23dd1934654 100644 --- a/mm/bootmem.c +++ b/mm/bootmem.c @@ -164,7 +164,7 @@ void __init free_bootmem_late(unsigned long physaddr, unsigned long size) end = PFN_DOWN(physaddr + size); for (; cursor < end; cursor++) { - __free_pages_bootmem(pfn_to_page(cursor), 0); + __free_pages_bootmem(pfn_to_page(cursor), cursor, 0); totalram_pages++; } } @@ -172,7 +172,7 @@ void __init free_bootmem_late(unsigned long physaddr, unsigned long size) static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) { struct page *page; - unsigned long *map, start, end, pages, count = 0; + unsigned long *map, start, end, pages, cur, count = 0; if (!bdata->node_bootmem_map) return 0; @@ -210,17 +210,17 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) { int order = ilog2(BITS_PER_LONG); - __free_pages_bootmem(pfn_to_page(start), order); + __free_pages_bootmem(pfn_to_page(start), start, order); count += BITS_PER_LONG; start += BITS_PER_LONG; } else { - unsigned long cur = start; + cur = start; start = ALIGN(start + 1, BITS_PER_LONG); while (vec && cur != start) { if (vec & 1) { page = pfn_to_page(cur); - __free_pages_bootmem(page, 0); + __free_pages_bootmem(page, cur, 0); count++; } vec >>= 1; @@ -229,12 +229,13 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata) } } + cur = bdata->node_min_pfn; page = virt_to_page(bdata->node_bootmem_map); pages = bdata->node_low_pfn - bdata->node_min_pfn; pages = bootmem_bootmap_pages(pages); count += pages; while (pages--) - __free_pages_bootmem(page++, 0); + __free_pages_bootmem(page++, cur++, 0); bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count); diff --git a/mm/internal.h b/mm/internal.h index a25e359a4039..36b23f1e2ca6 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -155,7 +155,8 @@ __find_buddy_index(unsigned long page_idx, unsigned int order) } extern int __isolate_free_page(struct page *page, unsigned int order); -extern void __free_pages_bootmem(struct page *page, unsigned int order); +extern void __free_pages_bootmem(struct page *page, unsigned long pfn, + unsigned int order); extern void prep_compound_page(struct page *page, unsigned long order); #ifdef CONFIG_MEMORY_FAILURE extern bool is_free_buddy_page(struct page *page); @@ -361,10 +362,7 @@ do { \ } while (0) extern void mminit_verify_pageflags_layout(void); -extern void mminit_verify_page_links(struct page *page, - enum zone_type zone, unsigned long nid, unsigned long pfn); extern void mminit_verify_zonelist(void); - #else static inline void mminit_dprintk(enum mminit_level level, @@ -376,11 +374,6 @@ static inline void mminit_verify_pageflags_layout(void) { } -static inline 
void mminit_verify_page_links(struct page *page, - enum zone_type zone, unsigned long nid, unsigned long pfn) -{ -} - static inline void mminit_verify_zonelist(void) { } diff --git a/mm/memblock.c b/mm/memblock.c index 1b444c730846..87108e77e476 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -820,6 +820,38 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size) /** + * __next_reserved_mem_region - next function for for_each_reserved_region() + * @idx: pointer to u64 loop variable + * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL + * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL + * + * Iterate over all reserved memory regions. + */ +void __init_memblock __next_reserved_mem_region(u64 *idx, + phys_addr_t *out_start, + phys_addr_t *out_end) +{ + struct memblock_type *rsv = &memblock.reserved; + + if (*idx >= 0 && *idx < rsv->cnt) { + struct memblock_region *r = &rsv->regions[*idx]; + phys_addr_t base = r->base; + phys_addr_t size = r->size; + + if (out_start) + *out_start = base; + if (out_end) + *out_end = base + size - 1; + + *idx += 1; + return; + } + + /* signal end of iteration */ + *idx = ULLONG_MAX; +} + +/** * __next__mem_range - next function for for_each_free_mem_range() etc. * @idx: pointer to u64 loop variable * @nid: node selector, %NUMA_NO_NODE for all nodes @@ -1387,7 +1419,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size) end = PFN_DOWN(base + size); for (; cursor < end; cursor++) { - __free_pages_bootmem(pfn_to_page(cursor), 0); + __free_pages_bootmem(pfn_to_page(cursor), cursor, 0); totalram_pages++; } } diff --git a/mm/mm_init.c b/mm/mm_init.c index 5f420f7fafa1..fdadf918de76 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -11,6 +11,7 @@ #include <linux/export.h> #include <linux/memory.h> #include <linux/notifier.h> +#include <linux/sched.h> #include "internal.h" #ifdef CONFIG_DEBUG_MEMORY_INIT @@ -130,14 +131,6 @@ void __init mminit_verify_pageflags_layout(void) BUG_ON(or_mask != add_mask); } -void __meminit mminit_verify_page_links(struct page *page, enum zone_type zone, - unsigned long nid, unsigned long pfn) -{ - BUG_ON(page_to_nid(page) != nid); - BUG_ON(page_zonenum(page) != zone); - BUG_ON(page_to_pfn(page) != pfn); -} - static __init int set_mminit_loglevel(char *str) { get_option(&str, &mminit_loglevel); diff --git a/mm/nobootmem.c b/mm/nobootmem.c index 5258386fa1be..e57cf24babd6 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c @@ -86,7 +86,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size) end = PFN_DOWN(addr + size); for (; cursor < end; cursor++) { - __free_pages_bootmem(pfn_to_page(cursor), 0); + __free_pages_bootmem(pfn_to_page(cursor), cursor, 0); totalram_pages++; } } @@ -101,7 +101,7 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end) while (start + (1UL << order) > end) order--; - __free_pages_bootmem(pfn_to_page(start), order); + __free_pages_bootmem(pfn_to_page(start), start, order); start += (1UL << order); } @@ -130,6 +130,9 @@ static unsigned long __init free_low_memory_core_early(void) memblock_clear_hotplug(0, -1); + for_each_reserved_mem_region(i, &start, &end) + reserve_bootmem_region(start, end); + for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) count += __free_memory_core(start, end); diff --git a/mm/nommu.c b/mm/nommu.c index 05e7447d960b..58ea3643b9e9 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -2085,7 +2085,7 @@ static int __meminit 
init_user_reserve(void) sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17); return 0; } -module_init(init_user_reserve) +subsys_initcall(init_user_reserve); /* * Initialise sysctl_admin_reserve_kbytes. @@ -2106,4 +2106,4 @@ static int __meminit init_admin_reserve(void) sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13); return 0; } -module_init(init_admin_reserve) +subsys_initcall(init_admin_reserve); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5e6fa06f2784..506eac8b38af 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -18,6 +18,7 @@ #include <linux/mm.h> #include <linux/swap.h> #include <linux/interrupt.h> +#include <linux/rwsem.h> #include <linux/pagemap.h> #include <linux/jiffies.h> #include <linux/bootmem.h> @@ -61,6 +62,7 @@ #include <linux/hugetlb.h> #include <linux/sched/rt.h> #include <linux/page_owner.h> +#include <linux/kthread.h> #include <asm/sections.h> #include <asm/tlbflush.h> @@ -235,6 +237,77 @@ EXPORT_SYMBOL(nr_online_nodes); int page_group_by_mobility_disabled __read_mostly; +#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT +static inline void reset_deferred_meminit(pg_data_t *pgdat) +{ + pgdat->first_deferred_pfn = ULONG_MAX; +} + +/* Returns true if the struct page for the pfn is uninitialised */ +static inline bool __meminit early_page_uninitialised(unsigned long pfn) +{ + int nid = early_pfn_to_nid(pfn); + + if (pfn >= NODE_DATA(nid)->first_deferred_pfn) + return true; + + return false; +} + +static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid) +{ + if (pfn >= NODE_DATA(nid)->first_deferred_pfn) + return true; + + return false; +} + +/* + * Returns false when the remaining initialisation should be deferred until + * later in the boot cycle when it can be parallelised. + */ +static inline bool update_defer_init(pg_data_t *pgdat, + unsigned long pfn, unsigned long zone_end, + unsigned long *nr_initialised) +{ + /* Always populate low zones for address-contrained allocations */ + if (zone_end < pgdat_end_pfn(pgdat)) + return true; + + /* Initialise at least 2G of the highest zone */ + (*nr_initialised)++; + if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) && + (pfn & (PAGES_PER_SECTION - 1)) == 0) { + pgdat->first_deferred_pfn = pfn; + return false; + } + + return true; +} +#else +static inline void reset_deferred_meminit(pg_data_t *pgdat) +{ +} + +static inline bool early_page_uninitialised(unsigned long pfn) +{ + return false; +} + +static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid) +{ + return false; +} + +static inline bool update_defer_init(pg_data_t *pgdat, + unsigned long pfn, unsigned long zone_end, + unsigned long *nr_initialised) +{ + return true; +} +#endif + + void set_pageblock_migratetype(struct page *page, int migratetype) { if (unlikely(page_group_by_mobility_disabled && @@ -764,6 +837,75 @@ static int free_tail_pages_check(struct page *head_page, struct page *page) return 0; } +static void __meminit __init_single_page(struct page *page, unsigned long pfn, + unsigned long zone, int nid) +{ + set_page_links(page, zone, nid, pfn); + init_page_count(page); + page_mapcount_reset(page); + page_cpupid_reset_last(page); + + INIT_LIST_HEAD(&page->lru); +#ifdef WANT_PAGE_VIRTUAL + /* The shift won't overflow because ZONE_NORMAL is below 4G. 
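For reference, the "at least 2G of the highest zone" bound in update_defer_init() above works out as follows when PAGE_SHIFT is 12 (4 KiB pages, an assumption made only for this arithmetic):

2UL << (30 - PAGE_SHIFT) = 2UL << 18 = 524288 pages
524288 pages * 4 KiB     = 2 GiB initialised before deferral starts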
*/ + if (!is_highmem_idx(zone)) + set_page_address(page, __va(pfn << PAGE_SHIFT)); +#endif +} + +static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone, + int nid) +{ + return __init_single_page(pfn_to_page(pfn), pfn, zone, nid); +} + +#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT +static void init_reserved_page(unsigned long pfn) +{ + pg_data_t *pgdat; + int nid, zid; + + if (!early_page_uninitialised(pfn)) + return; + + nid = early_pfn_to_nid(pfn); + pgdat = NODE_DATA(nid); + + for (zid = 0; zid < MAX_NR_ZONES; zid++) { + struct zone *zone = &pgdat->node_zones[zid]; + + if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone)) + break; + } + __init_single_pfn(pfn, zid, nid); +} +#else +static inline void init_reserved_page(unsigned long pfn) +{ +} +#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ + +/* + * Initialised pages do not have PageReserved set. This function is + * called for each range allocated by the bootmem allocator and + * marks the pages PageReserved. The remaining valid pages are later + * sent to the buddy page allocator. + */ +void __meminit reserve_bootmem_region(unsigned long start, unsigned long end) +{ + unsigned long start_pfn = PFN_DOWN(start); + unsigned long end_pfn = PFN_UP(end); + + for (; start_pfn < end_pfn; start_pfn++) { + if (pfn_valid(start_pfn)) { + struct page *page = pfn_to_page(start_pfn); + + init_reserved_page(start_pfn); + SetPageReserved(page); + } + } +} + static bool free_pages_prepare(struct page *page, unsigned int order) { bool compound = PageCompound(page); @@ -818,7 +960,8 @@ static void __free_pages_ok(struct page *page, unsigned int order) local_irq_restore(flags); } -void __init __free_pages_bootmem(struct page *page, unsigned int order) +static void __init __free_pages_boot_core(struct page *page, + unsigned long pfn, unsigned int order) { unsigned int nr_pages = 1 << order; struct page *p = page; @@ -838,6 +981,223 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order) __free_pages(page, order); } +#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \ + defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) +/* Only safe to use early in boot when initialisation is single-threaded */ +static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; + +int __meminit early_pfn_to_nid(unsigned long pfn) +{ + int nid; + + /* The system will behave unpredictably otherwise */ + BUG_ON(system_state != SYSTEM_BOOTING); + + nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); + if (nid >= 0) + return nid; + /* just returns 0 */ + return 0; +} +#endif + +#ifdef CONFIG_NODES_SPAN_OTHER_NODES +static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node, + struct mminit_pfnnid_cache *state) +{ + int nid; + + nid = __early_pfn_to_nid(pfn, state); + if (nid >= 0 && nid != node) + return false; + return true; +} + +/* Only safe to use early in boot when initialisation is single-threaded */ +static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node) +{ + return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache); +} + +#else + +static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node) +{ + return true; +} +static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node, + struct mminit_pfnnid_cache *state) +{ + return true; +} +#endif + + +void __init __free_pages_bootmem(struct page *page, unsigned long pfn, + unsigned int order) +{ + if (early_page_uninitialised(pfn)) + return; + return __free_pages_boot_core(page, pfn, order); +} + +#ifdef 
CONFIG_DEFERRED_STRUCT_PAGE_INIT +static void __init deferred_free_range(struct page *page, + unsigned long pfn, int nr_pages) +{ + int i; + + if (!page) + return; + + /* Free a large naturally-aligned chunk if possible */ + if (nr_pages == MAX_ORDER_NR_PAGES && + (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) { + set_pageblock_migratetype(page, MIGRATE_MOVABLE); + __free_pages_boot_core(page, pfn, MAX_ORDER-1); + return; + } + + for (i = 0; i < nr_pages; i++, page++, pfn++) + __free_pages_boot_core(page, pfn, 0); +} + +static __initdata DECLARE_RWSEM(pgdat_init_rwsem); + +/* Initialise remaining memory on a node */ +static int __init deferred_init_memmap(void *data) +{ + pg_data_t *pgdat = data; + int nid = pgdat->node_id; + struct mminit_pfnnid_cache nid_init_state = { }; + unsigned long start = jiffies; + unsigned long nr_pages = 0; + unsigned long walk_start, walk_end; + int i, zid; + struct zone *zone; + unsigned long first_init_pfn = pgdat->first_deferred_pfn; + const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); + + if (first_init_pfn == ULONG_MAX) { + up_read(&pgdat_init_rwsem); + return 0; + } + + /* Bind memory initialisation thread to a local node if possible */ + if (!cpumask_empty(cpumask)) + set_cpus_allowed_ptr(current, cpumask); + + /* Sanity check boundaries */ + BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn); + BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat)); + pgdat->first_deferred_pfn = ULONG_MAX; + + /* Only the highest zone is deferred so find it */ + for (zid = 0; zid < MAX_NR_ZONES; zid++) { + zone = pgdat->node_zones + zid; + if (first_init_pfn < zone_end_pfn(zone)) + break; + } + + for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) { + unsigned long pfn, end_pfn; + struct page *page = NULL; + struct page *free_base_page = NULL; + unsigned long free_base_pfn = 0; + int nr_to_free = 0; + + end_pfn = min(walk_end, zone_end_pfn(zone)); + pfn = first_init_pfn; + if (pfn < walk_start) + pfn = walk_start; + if (pfn < zone->zone_start_pfn) + pfn = zone->zone_start_pfn; + + for (; pfn < end_pfn; pfn++) { + if (!pfn_valid_within(pfn)) + goto free_range; + + /* + * Ensure pfn_valid is checked every + * MAX_ORDER_NR_PAGES for memory holes + */ + if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) { + if (!pfn_valid(pfn)) { + page = NULL; + goto free_range; + } + } + + if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) { + page = NULL; + goto free_range; + } + + /* Minimise pfn page lookups and scheduler checks */ + if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) { + page++; + } else { + nr_pages += nr_to_free; + deferred_free_range(free_base_page, + free_base_pfn, nr_to_free); + free_base_page = NULL; + free_base_pfn = nr_to_free = 0; + + page = pfn_to_page(pfn); + cond_resched(); + } + + if (page->flags) { + VM_BUG_ON(page_zone(page) != zone); + goto free_range; + } + + __init_single_page(page, pfn, zid, nid); + if (!free_base_page) { + free_base_page = page; + free_base_pfn = pfn; + nr_to_free = 0; + } + nr_to_free++; + + /* Where possible, batch up pages for a single free */ + continue; +free_range: + /* Free the current block of pages to allocator */ + nr_pages += nr_to_free; + deferred_free_range(free_base_page, free_base_pfn, + nr_to_free); + free_base_page = NULL; + free_base_pfn = nr_to_free = 0; + } + + first_init_pfn = max(end_pfn, first_init_pfn); + } + + /* Sanity check that the next zone really is unpopulated */ + WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone)); + + pr_info("node %d initialised, %lu pages in %ums\n", nid, 
nr_pages, + jiffies_to_msecs(jiffies - start)); + up_read(&pgdat_init_rwsem); + return 0; +} + +void __init page_alloc_init_late(void) +{ + int nid; + + for_each_node_state(nid, N_MEMORY) { + down_read(&pgdat_init_rwsem); + kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); + } + + /* Block until all are initialised */ + down_write(&pgdat_init_rwsem); + up_write(&pgdat_init_rwsem); +} +#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ + #ifdef CONFIG_CMA /* Free whole pageblock and set its migration type to MIGRATE_CMA. */ void __init init_cma_reserved_pageblock(struct page *page) @@ -4150,6 +4510,9 @@ static void setup_zone_migrate_reserve(struct zone *zone) zone->nr_migrate_reserve_block = reserve; for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) { + if (!early_page_nid_uninitialised(pfn, zone_to_nid(zone))) + return; + if (!pfn_valid(pfn)) continue; page = pfn_to_page(pfn); @@ -4212,15 +4575,16 @@ static void setup_zone_migrate_reserve(struct zone *zone) void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, unsigned long start_pfn, enum memmap_context context) { - struct page *page; + pg_data_t *pgdat = NODE_DATA(nid); unsigned long end_pfn = start_pfn + size; unsigned long pfn; struct zone *z; + unsigned long nr_initialised = 0; if (highest_memmap_pfn < end_pfn - 1) highest_memmap_pfn = end_pfn - 1; - z = &NODE_DATA(nid)->node_zones[zone]; + z = &pgdat->node_zones[zone]; for (pfn = start_pfn; pfn < end_pfn; pfn++) { /* * There can be holes in boot-time mem_map[]s @@ -4232,14 +4596,11 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, continue; if (!early_pfn_in_nid(pfn, nid)) continue; + if (!update_defer_init(pgdat, pfn, end_pfn, + &nr_initialised)) + break; } - page = pfn_to_page(pfn); - set_page_links(page, zone, nid, pfn); - mminit_verify_page_links(page, zone, nid, pfn); - init_page_count(page); - page_mapcount_reset(page); - page_cpupid_reset_last(page); - SetPageReserved(page); + /* * Mark the block movable so that blocks are reserved for * movable at startup. This will force kernel allocations @@ -4254,17 +4615,14 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, * check here not to call set_pageblock_migratetype() against * pfn out of zone. */ - if ((z->zone_start_pfn <= pfn) - && (pfn < zone_end_pfn(z)) - && !(pfn & (pageblock_nr_pages - 1))) - set_pageblock_migratetype(page, MIGRATE_MOVABLE); + if (!(pfn & (pageblock_nr_pages - 1))) { + struct page *page = pfn_to_page(pfn); - INIT_LIST_HEAD(&page->lru); -#ifdef WANT_PAGE_VIRTUAL - /* The shift won't overflow because ZONE_NORMAL is below 4G. */ - if (!is_highmem_idx(zone)) - set_page_address(page, __va(pfn << PAGE_SHIFT)); -#endif + __init_single_page(page, pfn, zone, nid); + set_pageblock_migratetype(page, MIGRATE_MOVABLE); + } else { + __init_single_pfn(pfn, zone, nid); + } } } @@ -4522,57 +4880,30 @@ int __meminit init_currently_empty_zone(struct zone *zone, #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID + /* * Required by SPARSEMEM. Given a PFN, return what node the PFN is on. */ -int __meminit __early_pfn_to_nid(unsigned long pfn) +int __meminit __early_pfn_to_nid(unsigned long pfn, + struct mminit_pfnnid_cache *state) { unsigned long start_pfn, end_pfn; int nid; - /* - * NOTE: The following SMP-unsafe globals are only used early in boot - * when the kernel is running single-threaded. 
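page_alloc_init_late() above uses pgdat_init_rwsem as a completion count: every worker holds the semaphore for reading and drops it when done, so a single down_write() blocks until the last worker has finished. The same idiom in isolation (the names and worker count are illustrative):

static DECLARE_RWSEM(workers_done);

static int worker_fn(void *data)
{
        /* ... do the per-node work ... */
        up_read(&workers_done);         /* signal completion */
        return 0;
}

static void run_and_wait(int nr_workers)
{
        int i;

        for (i = 0; i < nr_workers; i++) {
                down_read(&workers_done);       /* one reader per worker */
                kthread_run(worker_fn, NULL, "worker/%d", i);
        }

        down_write(&workers_done);      /* returns once all readers are gone */
        up_write(&workers_done);
}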
- */ - static unsigned long __meminitdata last_start_pfn, last_end_pfn; - static int __meminitdata last_nid; - if (last_start_pfn <= pfn && pfn < last_end_pfn) - return last_nid; + if (state->last_start <= pfn && pfn < state->last_end) + return state->last_nid; nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn); if (nid != -1) { - last_start_pfn = start_pfn; - last_end_pfn = end_pfn; - last_nid = nid; + state->last_start = start_pfn; + state->last_end = end_pfn; + state->last_nid = nid; } return nid; } #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */ -int __meminit early_pfn_to_nid(unsigned long pfn) -{ - int nid; - - nid = __early_pfn_to_nid(pfn); - if (nid >= 0) - return nid; - /* just returns 0 */ - return 0; -} - -#ifdef CONFIG_NODES_SPAN_OTHER_NODES -bool __meminit early_pfn_in_nid(unsigned long pfn, int node) -{ - int nid; - - nid = __early_pfn_to_nid(pfn); - if (nid >= 0 && nid != node) - return false; - return true; -} -#endif - /** * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed. @@ -5090,6 +5421,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, /* pg_data_t should be reset to zero when it's allocated */ WARN_ON(pgdat->nr_zones || pgdat->classzone_idx); + reset_deferred_meminit(pgdat); pgdat->node_id = nid; pgdat->node_start_pfn = node_start_pfn; #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP diff --git a/mm/page_owner.c b/mm/page_owner.c index 0993f5f36b01..bd5f842b56d2 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -310,4 +310,4 @@ static int __init pageowner_init(void) return 0; } -module_init(pageowner_init) +late_initcall(pageowner_init) diff --git a/mm/slab_common.c b/mm/slab_common.c index 983b78694c46..3e5f8f29c286 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -855,7 +855,7 @@ void __init setup_kmalloc_cache_index_table(void) } } -static void new_kmalloc_cache(int idx, unsigned long flags) +static void __init new_kmalloc_cache(int idx, unsigned long flags) { kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name, kmalloc_info[idx].size, flags); diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 9c891d0412a2..ae3a47f9d1d5 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -57,7 +57,7 @@ static const struct proto_ops ax25_proto_ops; static void ax25_free_sock(struct sock *sk) { - ax25_cb_put(ax25_sk(sk)); + ax25_cb_put(sk_to_ax25(sk)); } /* @@ -306,7 +306,7 @@ void ax25_destroy_socket(ax25_cb *ax25) while ((skb = skb_dequeue(&ax25->sk->sk_receive_queue)) != NULL) { if (skb->sk != ax25->sk) { /* A pending connection */ - ax25_cb *sax25 = ax25_sk(skb->sk); + ax25_cb *sax25 = sk_to_ax25(skb->sk); /* Queue the unaccepted socket for death */ sock_orphan(skb->sk); @@ -551,7 +551,7 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname, return -EFAULT; lock_sock(sk); - ax25 = ax25_sk(sk); + ax25 = sk_to_ax25(sk); switch (optname) { case AX25_WINDOW: @@ -697,7 +697,7 @@ static int ax25_getsockopt(struct socket *sock, int level, int optname, length = min_t(unsigned int, maxlen, sizeof(int)); lock_sock(sk); - ax25 = ax25_sk(sk); + ax25 = sk_to_ax25(sk); switch (optname) { case AX25_WINDOW: @@ -796,7 +796,7 @@ out: static struct proto ax25_proto = { .name = "AX25", .owner = THIS_MODULE, - .obj_size = sizeof(struct sock), + .obj_size = sizeof(struct ax25_sock), }; static int ax25_create(struct net *net, struct socket *sock, int protocol, @@ -858,7 +858,7 @@ static int ax25_create(struct net 
*net, struct socket *sock, int protocol, if (sk == NULL) return -ENOMEM; - ax25 = sk->sk_protinfo = ax25_create_cb(); + ax25 = ax25_sk(sk)->cb = ax25_create_cb(); if (!ax25) { sk_free(sk); return -ENOMEM; @@ -910,7 +910,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev) sk->sk_state = TCP_ESTABLISHED; sock_copy_flags(sk, osk); - oax25 = ax25_sk(osk); + oax25 = sk_to_ax25(osk); ax25->modulus = oax25->modulus; ax25->backoff = oax25->backoff; @@ -938,7 +938,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev) } } - sk->sk_protinfo = ax25; + ax25_sk(sk)->cb = ax25; sk->sk_destruct = ax25_free_sock; ax25->sk = sk; @@ -956,7 +956,7 @@ static int ax25_release(struct socket *sock) sock_hold(sk); sock_orphan(sk); lock_sock(sk); - ax25 = ax25_sk(sk); + ax25 = sk_to_ax25(sk); if (sk->sk_type == SOCK_SEQPACKET) { switch (ax25->state) { @@ -1066,7 +1066,7 @@ static int ax25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) lock_sock(sk); - ax25 = ax25_sk(sk); + ax25 = sk_to_ax25(sk); if (!sock_flag(sk, SOCK_ZAPPED)) { err = -EINVAL; goto out; @@ -1113,7 +1113,7 @@ static int __must_check ax25_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; - ax25_cb *ax25 = ax25_sk(sk), *ax25t; + ax25_cb *ax25 = sk_to_ax25(sk), *ax25t; struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)uaddr; ax25_digi *digi = NULL; int ct = 0, err = 0; @@ -1394,7 +1394,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr, memset(fsa, 0, sizeof(*fsa)); lock_sock(sk); - ax25 = ax25_sk(sk); + ax25 = sk_to_ax25(sk); if (peer != 0) { if (sk->sk_state != TCP_ESTABLISHED) { @@ -1446,7 +1446,7 @@ static int ax25_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) return -EINVAL; lock_sock(sk); - ax25 = ax25_sk(sk); + ax25 = sk_to_ax25(sk); if (sock_flag(sk, SOCK_ZAPPED)) { err = -EADDRNOTAVAIL; @@ -1621,7 +1621,7 @@ static int ax25_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, if (skb == NULL) goto out; - if (!ax25_sk(sk)->pidincl) + if (!sk_to_ax25(sk)->pidincl) skb_pull(skb, 1); /* Remove PID */ skb_reset_transport_header(skb); @@ -1762,7 +1762,7 @@ static int ax25_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) case SIOCAX25GETINFO: case SIOCAX25GETINFOOLD: { - ax25_cb *ax25 = ax25_sk(sk); + ax25_cb *ax25 = sk_to_ax25(sk); struct ax25_info_struct ax25_info; ax25_info.t1 = ax25->t1 / HZ; diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c index 29a3687237aa..bb5a0e4e98d9 100644 --- a/net/ax25/ax25_in.c +++ b/net/ax25/ax25_in.c @@ -353,7 +353,7 @@ static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, return 0; } - ax25 = ax25_sk(make); + ax25 = sk_to_ax25(make); skb_set_owner_r(skb, make); skb_queue_head(&sk->sk_receive_queue, skb); diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 79e8f71aef5b..cb7db320dd27 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -352,8 +352,8 @@ ceph_parse_options(char *options, const char *dev_name, /* start with defaults */ opt->flags = CEPH_OPT_DEFAULT; opt->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; - opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */ - opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */ + opt->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; + opt->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* get mon ip(s) */ /* ip1[:port1][,ip2[:port2]...] 
*/ @@ -439,13 +439,32 @@ ceph_parse_options(char *options, const char *dev_name, pr_warn("ignoring deprecated osdtimeout option\n"); break; case Opt_osdkeepalivetimeout: - opt->osd_keepalive_timeout = intval; + /* 0 isn't well defined right now, reject it */ + if (intval < 1 || intval > INT_MAX / 1000) { + pr_err("osdkeepalive out of range\n"); + err = -EINVAL; + goto out; + } + opt->osd_keepalive_timeout = + msecs_to_jiffies(intval * 1000); break; case Opt_osd_idle_ttl: - opt->osd_idle_ttl = intval; + /* 0 isn't well defined right now, reject it */ + if (intval < 1 || intval > INT_MAX / 1000) { + pr_err("osd_idle_ttl out of range\n"); + err = -EINVAL; + goto out; + } + opt->osd_idle_ttl = msecs_to_jiffies(intval * 1000); break; case Opt_mount_timeout: - opt->mount_timeout = intval; + /* 0 is "wait forever" (i.e. infinite timeout) */ + if (intval < 0 || intval > INT_MAX / 1000) { + pr_err("mount_timeout out of range\n"); + err = -EINVAL; + goto out; + } + opt->mount_timeout = msecs_to_jiffies(intval * 1000); break; case Opt_share: @@ -512,12 +531,14 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client) seq_puts(m, "notcp_nodelay,"); if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT) - seq_printf(m, "mount_timeout=%d,", opt->mount_timeout); + seq_printf(m, "mount_timeout=%d,", + jiffies_to_msecs(opt->mount_timeout) / 1000); if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT) - seq_printf(m, "osd_idle_ttl=%d,", opt->osd_idle_ttl); + seq_printf(m, "osd_idle_ttl=%d,", + jiffies_to_msecs(opt->osd_idle_ttl) / 1000); if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT) seq_printf(m, "osdkeepalivetimeout=%d,", - opt->osd_keepalive_timeout); + jiffies_to_msecs(opt->osd_keepalive_timeout) / 1000); /* drop redundant comma */ if (m->count != pos) @@ -626,8 +647,8 @@ static int have_mon_and_osd_map(struct ceph_client *client) */ int __ceph_open_session(struct ceph_client *client, unsigned long started) { - int err; - unsigned long timeout = client->options->mount_timeout * HZ; + unsigned long timeout = client->options->mount_timeout; + long err; /* open session, and wait for mon and osd maps */ err = ceph_monc_open_session(&client->monc); @@ -635,16 +656,15 @@ int __ceph_open_session(struct ceph_client *client, unsigned long started) return err; while (!have_mon_and_osd_map(client)) { - err = -EIO; if (timeout && time_after_eq(jiffies, started + timeout)) - return err; + return -ETIMEDOUT; /* wait */ dout("mount waiting for mon_map\n"); err = wait_event_interruptible_timeout(client->auth_wq, have_mon_and_osd_map(client) || (client->auth_err < 0), - timeout); - if (err == -EINTR || err == -ERESTARTSYS) + ceph_timeout_jiffies(timeout)); + if (err < 0) return err; if (client->auth_err < 0) return client->auth_err; @@ -721,5 +741,5 @@ module_exit(exit_ceph_lib); MODULE_AUTHOR("Sage Weil <[email protected]>"); MODULE_AUTHOR("Yehuda Sadeh <[email protected]>"); MODULE_AUTHOR("Patience Warnick <[email protected]>"); -MODULE_DESCRIPTION("Ceph filesystem for Linux"); +MODULE_DESCRIPTION("Ceph core library"); MODULE_LICENSE("GPL"); diff --git a/net/ceph/crush/crush.c b/net/ceph/crush/crush.c index 9d84ce4ea0df..80d7c3a97cb8 100644 --- a/net/ceph/crush/crush.c +++ b/net/ceph/crush/crush.c @@ -1,15 +1,11 @@ - #ifdef __KERNEL__ # include <linux/slab.h> +# include <linux/crush/crush.h> #else -# include <stdlib.h> -# include <assert.h> -# define kfree(x) do { if (x) free(x); } while (0) -# define BUG_ON(x) assert(!(x)) +# include "crush_compat.h" +# include "crush.h" #endif 
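The INT_MAX / 1000 bound in the ceph option parsing above exists so that the later intval * 1000 cannot overflow an int before it reaches msecs_to_jiffies(); stripped of context, the guard is simply:

if (intval < 1 || intval > INT_MAX / 1000)
        return -EINVAL;                 /* intval * 1000 would overflow */
timeout = msecs_to_jiffies(intval * 1000);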
-#include <linux/crush/crush.h> - const char *crush_bucket_alg_name(int alg) { switch (alg) { @@ -134,6 +130,9 @@ void crush_destroy(struct crush_map *map) kfree(map->rules); } +#ifndef __KERNEL__ + kfree(map->choose_tries); +#endif kfree(map); } diff --git a/net/ceph/crush/crush_ln_table.h b/net/ceph/crush/crush_ln_table.h index 6192c7fc958c..aae534c901a4 100644 --- a/net/ceph/crush/crush_ln_table.h +++ b/net/ceph/crush/crush_ln_table.h @@ -10,20 +10,20 @@ * */ -#if defined(__linux__) -#include <linux/types.h> -#elif defined(__FreeBSD__) -#include <sys/types.h> -#endif - #ifndef CEPH_CRUSH_LN_H #define CEPH_CRUSH_LN_H +#ifdef __KERNEL__ +# include <linux/types.h> +#else +# include "crush_compat.h" +#endif -// RH_LH_tbl[2*k] = 2^48/(1.0+k/128.0) -// RH_LH_tbl[2*k+1] = 2^48*log2(1.0+k/128.0) - -static int64_t __RH_LH_tbl[128*2+2] = { +/* + * RH_LH_tbl[2*k] = 2^48/(1.0+k/128.0) + * RH_LH_tbl[2*k+1] = 2^48*log2(1.0+k/128.0) + */ +static __s64 __RH_LH_tbl[128*2+2] = { 0x0001000000000000ll, 0x0000000000000000ll, 0x0000fe03f80fe040ll, 0x000002dfca16dde1ll, 0x0000fc0fc0fc0fc1ll, 0x000005b9e5a170b4ll, 0x0000fa232cf25214ll, 0x0000088e68ea899all, 0x0000f83e0f83e0f9ll, 0x00000b5d69bac77ell, 0x0000f6603d980f67ll, 0x00000e26fd5c8555ll, @@ -89,11 +89,12 @@ static int64_t __RH_LH_tbl[128*2+2] = { 0x0000820820820821ll, 0x0000fa2f045e7832ll, 0x000081848da8faf1ll, 0x0000fba577877d7dll, 0x0000810204081021ll, 0x0000fd1a708bbe11ll, 0x0000808080808081ll, 0x0000fe8df263f957ll, 0x0000800000000000ll, 0x0000ffff00000000ll, - }; - +}; - // LL_tbl[k] = 2^48*log2(1.0+k/2^15); -static int64_t __LL_tbl[256] = { +/* + * LL_tbl[k] = 2^48*log2(1.0+k/2^15) + */ +static __s64 __LL_tbl[256] = { 0x0000000000000000ull, 0x00000002e2a60a00ull, 0x000000070cb64ec5ull, 0x00000009ef50ce67ull, 0x0000000cd1e588fdull, 0x0000000fb4747e9cull, 0x0000001296fdaf5eull, 0x0000001579811b58ull, 0x000000185bfec2a1ull, 0x0000001b3e76a552ull, 0x0000001e20e8c380ull, 0x0000002103551d43ull, @@ -160,7 +161,4 @@ static int64_t __LL_tbl[256] = { 0x000002d4562d2ec6ull, 0x000002d73330209dull, 0x000002da102d63b0ull, 0x000002dced24f814ull, }; - - - #endif diff --git a/net/ceph/crush/hash.c b/net/ceph/crush/hash.c index 5bb63e37a8a1..ed123af49eba 100644 --- a/net/ceph/crush/hash.c +++ b/net/ceph/crush/hash.c @@ -1,6 +1,8 @@ - -#include <linux/types.h> -#include <linux/crush/hash.h> +#ifdef __KERNEL__ +# include <linux/crush/hash.h> +#else +# include "hash.h" +#endif /* * Robert Jenkins' function for mixing 32-bit values diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c index 5b47736d27d9..393bfb22d5bb 100644 --- a/net/ceph/crush/mapper.c +++ b/net/ceph/crush/mapper.c @@ -1,27 +1,31 @@ +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2015 Intel Corporation All Rights Reserved + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * + */ #ifdef __KERNEL__ # include <linux/string.h> # include <linux/slab.h> # include <linux/bug.h> # include <linux/kernel.h> -# ifndef dprintk -# define dprintk(args...) -# endif +# include <linux/crush/crush.h> +# include <linux/crush/hash.h> #else -# include <string.h> -# include <stdio.h> -# include <stdlib.h> -# include <assert.h> -# define BUG_ON(x) assert(!(x)) -# define dprintk(args...) 
/* printf(args) */ -# define kmalloc(x, f) malloc(x) -# define kfree(x) free(x) +# include "crush_compat.h" +# include "crush.h" +# include "hash.h" #endif - -#include <linux/crush/crush.h> -#include <linux/crush/hash.h> #include "crush_ln_table.h" +#define dprintk(args...) /* printf(args) */ + /* * Implement the core CRUSH mapping algorithm. */ @@ -139,7 +143,7 @@ static int bucket_list_choose(struct crush_bucket_list *bucket, int i; for (i = bucket->h.size-1; i >= 0; i--) { - __u64 w = crush_hash32_4(bucket->h.hash,x, bucket->h.items[i], + __u64 w = crush_hash32_4(bucket->h.hash, x, bucket->h.items[i], r, bucket->h.id); w &= 0xffff; dprintk("list_choose i=%d x=%d r=%d item %d weight %x " @@ -238,43 +242,46 @@ static int bucket_straw_choose(struct crush_bucket_straw *bucket, return bucket->h.items[high]; } -// compute 2^44*log2(input+1) -uint64_t crush_ln(unsigned xin) +/* compute 2^44*log2(input+1) */ +static __u64 crush_ln(unsigned int xin) { - unsigned x=xin, x1; - int iexpon, index1, index2; - uint64_t RH, LH, LL, xl64, result; + unsigned int x = xin, x1; + int iexpon, index1, index2; + __u64 RH, LH, LL, xl64, result; - x++; + x++; - // normalize input - iexpon = 15; - while(!(x&0x18000)) { x<<=1; iexpon--; } + /* normalize input */ + iexpon = 15; + while (!(x & 0x18000)) { + x <<= 1; + iexpon--; + } - index1 = (x>>8)<<1; - // RH ~ 2^56/index1 - RH = __RH_LH_tbl[index1 - 256]; - // LH ~ 2^48 * log2(index1/256) - LH = __RH_LH_tbl[index1 + 1 - 256]; + index1 = (x >> 8) << 1; + /* RH ~ 2^56/index1 */ + RH = __RH_LH_tbl[index1 - 256]; + /* LH ~ 2^48 * log2(index1/256) */ + LH = __RH_LH_tbl[index1 + 1 - 256]; - // RH*x ~ 2^48 * (2^15 + xf), xf<2^8 - xl64 = (int64_t)x * RH; - xl64 >>= 48; - x1 = xl64; + /* RH*x ~ 2^48 * (2^15 + xf), xf<2^8 */ + xl64 = (__s64)x * RH; + xl64 >>= 48; + x1 = xl64; - result = iexpon; - result <<= (12 + 32); + result = iexpon; + result <<= (12 + 32); - index2 = x1 & 0xff; - // LL ~ 2^48*log2(1.0+index2/2^15) - LL = __LL_tbl[index2]; + index2 = x1 & 0xff; + /* LL ~ 2^48*log2(1.0+index2/2^15) */ + LL = __LL_tbl[index2]; - LH = LH + LL; + LH = LH + LL; - LH >>= (48-12 - 32); - result += LH; + LH >>= (48 - 12 - 32); + result += LH; - return result; + return result; } @@ -290,9 +297,9 @@ uint64_t crush_ln(unsigned xin) static int bucket_straw2_choose(struct crush_bucket_straw2 *bucket, int x, int r) { - unsigned i, high = 0; - unsigned u; - unsigned w; + unsigned int i, high = 0; + unsigned int u; + unsigned int w; __s64 ln, draw, high_draw = 0; for (i = 0; i < bucket->h.size; i++) { @@ -567,6 +574,10 @@ reject: out[outpos] = item; outpos++; count--; +#ifndef __KERNEL__ + if (map->choose_tries && ftotal <= map->choose_total_tries) + map->choose_tries[ftotal]++; +#endif } dprintk("CHOOSE returns %d\n", outpos); @@ -610,6 +621,20 @@ static void crush_choose_indep(const struct crush_map *map, } for (ftotal = 0; left > 0 && ftotal < tries; ftotal++) { +#ifdef DEBUG_INDEP + if (out2 && ftotal) { + dprintk("%u %d a: ", ftotal, left); + for (rep = outpos; rep < endpos; rep++) { + dprintk(" %d", out[rep]); + } + dprintk("\n"); + dprintk("%u %d b: ", ftotal, left); + for (rep = outpos; rep < endpos; rep++) { + dprintk(" %d", out2[rep]); + } + dprintk("\n"); + } +#endif for (rep = outpos; rep < endpos; rep++) { if (out[rep] != CRUSH_ITEM_UNDEF) continue; @@ -726,6 +751,24 @@ static void crush_choose_indep(const struct crush_map *map, out2[rep] = CRUSH_ITEM_NONE; } } +#ifndef __KERNEL__ + if (map->choose_tries && ftotal <= map->choose_total_tries) + map->choose_tries[ftotal]++; 
+#endif +#ifdef DEBUG_INDEP + if (out2) { + dprintk("%u %d a: ", ftotal, left); + for (rep = outpos; rep < endpos; rep++) { + dprintk(" %d", out[rep]); + } + dprintk("\n"); + dprintk("%u %d b: ", ftotal, left); + for (rep = outpos; rep < endpos; rep++) { + dprintk(" %d", out2[rep]); + } + dprintk("\n"); + } +#endif } /** @@ -790,8 +833,15 @@ int crush_do_rule(const struct crush_map *map, switch (curstep->op) { case CRUSH_RULE_TAKE: - w[0] = curstep->arg1; - wsize = 1; + if ((curstep->arg1 >= 0 && + curstep->arg1 < map->max_devices) || + (-1-curstep->arg1 < map->max_buckets && + map->buckets[-1-curstep->arg1])) { + w[0] = curstep->arg1; + wsize = 1; + } else { + dprintk(" bad take value %d\n", curstep->arg1); + } break; case CRUSH_RULE_SET_CHOOSE_TRIES: @@ -877,7 +927,7 @@ int crush_do_rule(const struct crush_map *map, 0); } else { out_size = ((numrep < (result_max-osize)) ? - numrep : (result_max-osize)); + numrep : (result_max-osize)); crush_choose_indep( map, map->buckets[-1-w[i]], @@ -923,5 +973,3 @@ int crush_do_rule(const struct crush_map *map, } return result_len; } - - diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 073262fea6dd..1679f47280e2 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -278,7 +278,6 @@ static void _ceph_msgr_exit(void) ceph_msgr_slab_exit(); BUG_ON(zero_page == NULL); - kunmap(zero_page); page_cache_release(zero_page); zero_page = NULL; } @@ -1545,7 +1544,7 @@ static int write_partial_message_data(struct ceph_connection *con) page = ceph_msg_data_next(&msg->cursor, &page_offset, &length, &last_piece); ret = ceph_tcp_sendpage(con->sock, page, page_offset, - length, last_piece); + length, !last_piece); if (ret <= 0) { if (do_datacrc) msg->footer.data_crc = cpu_to_le32(crc); diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index 2b3cf05e87b0..9d6ff1215928 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -298,21 +298,28 @@ void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc) } EXPORT_SYMBOL(ceph_monc_request_next_osdmap); +/* + * Wait for an osdmap with a given epoch. 
+ * + * @epoch: epoch to wait for + * @timeout: in jiffies, 0 means "wait forever" + */ int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch, unsigned long timeout) { unsigned long started = jiffies; - int ret; + long ret; mutex_lock(&monc->mutex); while (monc->have_osdmap < epoch) { mutex_unlock(&monc->mutex); - if (timeout != 0 && time_after_eq(jiffies, started + timeout)) + if (timeout && time_after_eq(jiffies, started + timeout)) return -ETIMEDOUT; ret = wait_event_interruptible_timeout(monc->client->auth_wq, - monc->have_osdmap >= epoch, timeout); + monc->have_osdmap >= epoch, + ceph_timeout_jiffies(timeout)); if (ret < 0) return ret; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index c4ec9239249a..50033677c0fa 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -296,6 +296,9 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req, case CEPH_OSD_OP_CMPXATTR: ceph_osd_data_release(&op->xattr.osd_data); break; + case CEPH_OSD_OP_STAT: + ceph_osd_data_release(&op->raw_data_in); + break; default: break; } @@ -450,7 +453,7 @@ __CEPH_FORALL_OSD_OPS(GENERATE_CASE) */ static struct ceph_osd_req_op * _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, - u16 opcode) + u16 opcode, u32 flags) { struct ceph_osd_req_op *op; @@ -460,14 +463,15 @@ _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, op = &osd_req->r_ops[which]; memset(op, 0, sizeof (*op)); op->op = opcode; + op->flags = flags; return op; } void osd_req_op_init(struct ceph_osd_request *osd_req, - unsigned int which, u16 opcode) + unsigned int which, u16 opcode, u32 flags) { - (void)_osd_req_op_init(osd_req, which, opcode); + (void)_osd_req_op_init(osd_req, which, opcode, flags); } EXPORT_SYMBOL(osd_req_op_init); @@ -476,7 +480,8 @@ void osd_req_op_extent_init(struct ceph_osd_request *osd_req, u64 offset, u64 length, u64 truncate_size, u32 truncate_seq) { - struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); + struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, + opcode, 0); size_t payload_len = 0; BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE && @@ -515,7 +520,8 @@ EXPORT_SYMBOL(osd_req_op_extent_update); void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, const char *class, const char *method) { - struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); + struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, + opcode, 0); struct ceph_pagelist *pagelist; size_t payload_len = 0; size_t size; @@ -552,7 +558,8 @@ int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, const char *name, const void *value, size_t size, u8 cmp_op, u8 cmp_mode) { - struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); + struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, + opcode, 0); struct ceph_pagelist *pagelist; size_t payload_len; @@ -585,7 +592,8 @@ void osd_req_op_watch_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u64 cookie, u64 version, int flag) { - struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); + struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, + opcode, 0); BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH); @@ -602,7 +610,8 @@ void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req, u64 expected_write_size) { struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, - 
CEPH_OSD_OP_SETALLOCHINT); + CEPH_OSD_OP_SETALLOCHINT, + 0); op->alloc_hint.expected_object_size = expected_object_size; op->alloc_hint.expected_write_size = expected_write_size; @@ -786,7 +795,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, } if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) { - osd_req_op_init(req, which, opcode); + osd_req_op_init(req, which, opcode, 0); } else { u32 object_size = le32_to_cpu(layout->fl_object_size); u32 object_base = off - objoff; @@ -1088,7 +1097,7 @@ static void __move_osd_to_lru(struct ceph_osd_client *osdc, BUG_ON(!list_empty(&osd->o_osd_lru)); list_add_tail(&osd->o_osd_lru, &osdc->osd_lru); - osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl * HZ; + osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl; } static void maybe_move_osd_to_lru(struct ceph_osd_client *osdc, @@ -1199,7 +1208,7 @@ static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o) static void __schedule_osd_timeout(struct ceph_osd_client *osdc) { schedule_delayed_work(&osdc->timeout_work, - osdc->client->options->osd_keepalive_timeout * HZ); + osdc->client->options->osd_keepalive_timeout); } static void __cancel_osd_timeout(struct ceph_osd_client *osdc) @@ -1567,10 +1576,9 @@ static void handle_timeout(struct work_struct *work) { struct ceph_osd_client *osdc = container_of(work, struct ceph_osd_client, timeout_work.work); + struct ceph_options *opts = osdc->client->options; struct ceph_osd_request *req; struct ceph_osd *osd; - unsigned long keepalive = - osdc->client->options->osd_keepalive_timeout * HZ; struct list_head slow_osds; dout("timeout\n"); down_read(&osdc->map_sem); @@ -1586,7 +1594,8 @@ static void handle_timeout(struct work_struct *work) */ INIT_LIST_HEAD(&slow_osds); list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) { - if (time_before(jiffies, req->r_stamp + keepalive)) + if (time_before(jiffies, + req->r_stamp + opts->osd_keepalive_timeout)) break; osd = req->r_osd; @@ -1613,8 +1622,7 @@ static void handle_osds_timeout(struct work_struct *work) struct ceph_osd_client *osdc = container_of(work, struct ceph_osd_client, osds_timeout_work.work); - unsigned long delay = - osdc->client->options->osd_idle_ttl * HZ >> 2; + unsigned long delay = osdc->client->options->osd_idle_ttl / 4; dout("osds timeout\n"); down_read(&osdc->map_sem); @@ -2619,7 +2627,7 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) osdc->event_count = 0; schedule_delayed_work(&osdc->osds_timeout_work, - round_jiffies_relative(osdc->client->options->osd_idle_ttl * HZ)); + round_jiffies_relative(osdc->client->options->osd_idle_ttl)); err = -ENOMEM; osdc->req_mempool = mempool_create_kmalloc_pool(10, diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index 15796696d64e..4a3125836b64 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -89,7 +89,7 @@ static int crush_decode_tree_bucket(void **p, void *end, { int j; dout("crush_decode_tree_bucket %p to %p\n", *p, end); - ceph_decode_32_safe(p, end, b->num_nodes, bad); + ceph_decode_8_safe(p, end, b->num_nodes, bad); b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS); if (b->node_weights == NULL) return -ENOMEM; diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c index 096d91447e06..d4f5f220a8e5 100644 --- a/net/ceph/pagevec.c +++ b/net/ceph/pagevec.c @@ -51,10 +51,7 @@ void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) set_page_dirty_lock(pages[i]); put_page(pages[i]); } - if 
(is_vmalloc_addr(pages)) - vfree(pages); - else - kfree(pages); + kvfree(pages); } EXPORT_SYMBOL(ceph_put_page_vector); diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 476e5dda59e1..2a834c6179b9 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -129,7 +129,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, struct flow_dissector_key_ports *key_ports; struct flow_dissector_key_tags *key_tags; struct flow_dissector_key_keyid *key_keyid; - u8 ip_proto; + u8 ip_proto = 0; if (!data) { data = skb->data; diff --git a/net/core/sock.c b/net/core/sock.c index 1e1fe9a68d83..08f16db46070 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1454,7 +1454,7 @@ void sk_destruct(struct sock *sk) static void __sk_free(struct sock *sk) { - if (unlikely(sock_diag_has_destroy_listeners(sk))) + if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt)) sock_diag_broadcast_destroy(sk); else sk_destruct(sk); @@ -2269,7 +2269,6 @@ static void sock_def_write_space(struct sock *sk) static void sock_def_destruct(struct sock *sk) { - kfree(sk->sk_protinfo); } void sk_send_sigurg(struct sock *sk) diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 04ffad311704..0917123790ea 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -112,7 +112,7 @@ static int dsa_slave_open(struct net_device *dev) clear_promisc: if (dev->flags & IFF_PROMISC) - dev_set_promiscuity(master, 0); + dev_set_promiscuity(master, -1); clear_allmulti: if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(master, -1); diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 3bfccd83551c..c7358ea4ae93 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1045,7 +1045,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif)) goto nla_put_failure; if (fi->fib_nh->nh_flags & RTNH_F_LINKDOWN) { - in_dev = __in_dev_get_rcu(fi->fib_nh->nh_dev); + in_dev = __in_dev_get_rtnl(fi->fib_nh->nh_dev); if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev)) rtm->rtm_flags |= RTNH_F_DEAD; @@ -1074,7 +1074,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, rtnh->rtnh_flags = nh->nh_flags & 0xFF; if (nh->nh_flags & RTNH_F_LINKDOWN) { - in_dev = __in_dev_get_rcu(nh->nh_dev); + in_dev = __in_dev_get_rtnl(nh->nh_dev); if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev)) rtnh->rtnh_flags |= RTNH_F_DEAD; diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index 65de0684e22a..61eafc9b4545 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -197,11 +197,4 @@ static int __init ipv4_netfilter_init(void) { return nf_register_afinfo(&nf_ip_afinfo); } - -static void __exit ipv4_netfilter_fini(void) -{ - nf_unregister_afinfo(&nf_ip_afinfo); -} - -module_init(ipv4_netfilter_init); -module_exit(ipv4_netfilter_fini); +subsys_initcall(ipv4_netfilter_init); diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index b92d3f49c23e..9d37ccd95062 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -216,8 +216,8 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) }, [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 }, - [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 }, - [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 }, + [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 }, + [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 }, }; static void 
fl_set_key_val(struct nlattr **tb, diff --git a/net/sctp/output.c b/net/sctp/output.c index fc5e45b8a832..abe7c2db2412 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -599,7 +599,9 @@ out: return err; no_route: kfree_skb(nskb); - IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES); + + if (asoc) + IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES); /* FIXME: Returning the 'err' will effect all the associations * associated with a socket, although only one of the paths of the diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 5f6c4e61325b..1425ec2bbd5a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -2121,12 +2121,6 @@ static int sctp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, if (sp->subscribe.sctp_data_io_event) sctp_ulpevent_read_sndrcvinfo(event, msg); -#if 0 - /* FIXME: we should be calling IP/IPv6 layers. */ - if (sk->sk_protinfo.af_inet.cmsg_flags) - ip_cmsg_recv(msg, skb); -#endif - err = copied; /* If skb's length exceeds the user's buffer, update the skb and diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile index 936ad0a15371..b512fbd9d79a 100644 --- a/net/sunrpc/Makefile +++ b/net/sunrpc/Makefile @@ -14,6 +14,6 @@ sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \ sunrpc_syms.o cache.o rpc_pipe.o \ svc_xprt.o sunrpc-$(CONFIG_SUNRPC_DEBUG) += debugfs.o -sunrpc-$(CONFIG_SUNRPC_BACKCHANNEL) += backchannel_rqst.o bc_svc.o +sunrpc-$(CONFIG_SUNRPC_BACKCHANNEL) += backchannel_rqst.o sunrpc-$(CONFIG_PROC_FS) += stats.o sunrpc-$(CONFIG_SYSCTL) += sysctl.o diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index 9dd0ea8db463..9825ff0f91d6 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c @@ -37,16 +37,18 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ static inline int xprt_need_to_requeue(struct rpc_xprt *xprt) { - return xprt->bc_alloc_count > 0; + return xprt->bc_alloc_count < atomic_read(&xprt->bc_free_slots); } static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n) { + atomic_add(n, &xprt->bc_free_slots); xprt->bc_alloc_count += n; } static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n) { + atomic_sub(n, &xprt->bc_free_slots); return xprt->bc_alloc_count -= n; } @@ -60,13 +62,62 @@ static void xprt_free_allocation(struct rpc_rqst *req) dprintk("RPC: free allocations for req= %p\n", req); WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); - xbufp = &req->rq_private_buf; + xbufp = &req->rq_rcv_buf; free_page((unsigned long)xbufp->head[0].iov_base); xbufp = &req->rq_snd_buf; free_page((unsigned long)xbufp->head[0].iov_base); kfree(req); } +static int xprt_alloc_xdr_buf(struct xdr_buf *buf, gfp_t gfp_flags) +{ + struct page *page; + /* Preallocate one XDR receive buffer */ + page = alloc_page(gfp_flags); + if (page == NULL) + return -ENOMEM; + buf->head[0].iov_base = page_address(page); + buf->head[0].iov_len = PAGE_SIZE; + buf->tail[0].iov_base = NULL; + buf->tail[0].iov_len = 0; + buf->page_len = 0; + buf->len = 0; + buf->buflen = PAGE_SIZE; + return 0; +} + +static +struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags) +{ + struct rpc_rqst *req; + + /* Pre-allocate one backchannel rpc_rqst */ + req = kzalloc(sizeof(*req), gfp_flags); + if (req == NULL) + return NULL; + + req->rq_xprt = xprt; + INIT_LIST_HEAD(&req->rq_list); + INIT_LIST_HEAD(&req->rq_bc_list); + + /* Preallocate one XDR receive buffer */ + if (xprt_alloc_xdr_buf(&req->rq_rcv_buf, gfp_flags) < 0) { + printk(KERN_ERR "Failed to create bc receive xbuf\n"); + goto out_free; + } + req->rq_rcv_buf.len = PAGE_SIZE; + + /* Preallocate one XDR send buffer */ + if (xprt_alloc_xdr_buf(&req->rq_snd_buf, gfp_flags) < 0) { + printk(KERN_ERR "Failed to create bc snd xbuf\n"); + goto out_free; + } + return req; +out_free: + xprt_free_allocation(req); + return NULL; +} + /* * Preallocate up to min_reqs structures and related buffers for use * by the backchannel. 
This function can be called multiple times @@ -87,9 +138,7 @@ static void xprt_free_allocation(struct rpc_rqst *req) */ int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs) { - struct page *page_rcv = NULL, *page_snd = NULL; - struct xdr_buf *xbufp = NULL; - struct rpc_rqst *req, *tmp; + struct rpc_rqst *req; struct list_head tmp_list; int i; @@ -106,7 +155,7 @@ int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs) INIT_LIST_HEAD(&tmp_list); for (i = 0; i < min_reqs; i++) { /* Pre-allocate one backchannel rpc_rqst */ - req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL); + req = xprt_alloc_bc_req(xprt, GFP_KERNEL); if (req == NULL) { printk(KERN_ERR "Failed to create bc rpc_rqst\n"); goto out_free; @@ -115,41 +164,6 @@ int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs) /* Add the allocated buffer to the tmp list */ dprintk("RPC: adding req= %p\n", req); list_add(&req->rq_bc_pa_list, &tmp_list); - - req->rq_xprt = xprt; - INIT_LIST_HEAD(&req->rq_list); - INIT_LIST_HEAD(&req->rq_bc_list); - - /* Preallocate one XDR receive buffer */ - page_rcv = alloc_page(GFP_KERNEL); - if (page_rcv == NULL) { - printk(KERN_ERR "Failed to create bc receive xbuf\n"); - goto out_free; - } - xbufp = &req->rq_rcv_buf; - xbufp->head[0].iov_base = page_address(page_rcv); - xbufp->head[0].iov_len = PAGE_SIZE; - xbufp->tail[0].iov_base = NULL; - xbufp->tail[0].iov_len = 0; - xbufp->page_len = 0; - xbufp->len = PAGE_SIZE; - xbufp->buflen = PAGE_SIZE; - - /* Preallocate one XDR send buffer */ - page_snd = alloc_page(GFP_KERNEL); - if (page_snd == NULL) { - printk(KERN_ERR "Failed to create bc snd xbuf\n"); - goto out_free; - } - - xbufp = &req->rq_snd_buf; - xbufp->head[0].iov_base = page_address(page_snd); - xbufp->head[0].iov_len = 0; - xbufp->tail[0].iov_base = NULL; - xbufp->tail[0].iov_len = 0; - xbufp->page_len = 0; - xbufp->len = 0; - xbufp->buflen = PAGE_SIZE; } /* @@ -167,7 +181,10 @@ out_free: /* * Memory allocation failed, free the temporary list */ - list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) { + while (!list_empty(&tmp_list)) { + req = list_first_entry(&tmp_list, + struct rpc_rqst, + rq_bc_pa_list); list_del(&req->rq_bc_pa_list); xprt_free_allocation(req); } @@ -217,9 +234,15 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid) struct rpc_rqst *req = NULL; dprintk("RPC: allocate a backchannel request\n"); - if (list_empty(&xprt->bc_pa_list)) + if (atomic_read(&xprt->bc_free_slots) <= 0) goto not_found; - + if (list_empty(&xprt->bc_pa_list)) { + req = xprt_alloc_bc_req(xprt, GFP_ATOMIC); + if (!req) + goto not_found; + /* Note: this 'free' request adds it to xprt->bc_pa_list */ + xprt_free_bc_request(req); + } req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst, rq_bc_pa_list); req->rq_reply_bytes_recvd = 0; @@ -245,11 +268,21 @@ void xprt_free_bc_request(struct rpc_rqst *req) req->rq_connect_cookie = xprt->connect_cookie - 1; smp_mb__before_atomic(); - WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); smp_mb__after_atomic(); - if (!xprt_need_to_requeue(xprt)) { + /* + * Return it to the list of preallocations so that it + * may be reused by a new callback request. 
+ */ + spin_lock_bh(&xprt->bc_pa_lock); + if (xprt_need_to_requeue(xprt)) { + list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list); + xprt->bc_alloc_count++; + req = NULL; + } + spin_unlock_bh(&xprt->bc_pa_lock); + if (req != NULL) { /* * The last remaining session was destroyed while this * entry was in use. Free the entry and don't attempt @@ -260,14 +293,6 @@ void xprt_free_bc_request(struct rpc_rqst *req) xprt_free_allocation(req); return; } - - /* - * Return it to the list of preallocations so that it - * may be reused by a new callback request. - */ - spin_lock_bh(&xprt->bc_pa_lock); - list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list); - spin_unlock_bh(&xprt->bc_pa_lock); } /* @@ -311,6 +336,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied) spin_lock(&xprt->bc_pa_lock); list_del(&req->rq_bc_pa_list); + xprt->bc_alloc_count--; spin_unlock(&xprt->bc_pa_lock); req->rq_private_buf.len = copied; diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c deleted file mode 100644 index 15c7a8a1c24f..000000000000 --- a/net/sunrpc/bc_svc.c +++ /dev/null @@ -1,63 +0,0 @@ -/****************************************************************************** - -(c) 2007 Network Appliance, Inc. All Rights Reserved. -(c) 2009 NetApp. All Rights Reserved. - -NetApp provides this source code under the GPL v2 License. -The GPL v2 license is available at -http://opensource.org/licenses/gpl-license.php. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -******************************************************************************/ - -/* - * The NFSv4.1 callback service helper routines. - * They implement the transport level processing required to send the - * reply over an existing open connection previously established by the client. 
- */ - -#include <linux/module.h> - -#include <linux/sunrpc/xprt.h> -#include <linux/sunrpc/sched.h> -#include <linux/sunrpc/bc_xprt.h> - -#define RPCDBG_FACILITY RPCDBG_SVCDSP - -/* Empty callback ops */ -static const struct rpc_call_ops nfs41_callback_ops = { -}; - - -/* - * Send the callback reply - */ -int bc_send(struct rpc_rqst *req) -{ - struct rpc_task *task; - int ret; - - dprintk("RPC: bc_send req= %p\n", req); - task = rpc_run_bc_task(req, &nfs41_callback_ops); - if (IS_ERR(task)) - ret = PTR_ERR(task); - else { - WARN_ON_ONCE(atomic_read(&task->tk_count) != 1); - ret = task->tk_status; - rpc_put_task(task); - } - dprintk("RPC: bc_send ret= %d\n", ret); - return ret; -} - diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index e6ce1517367f..cbc6af923dd1 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -891,15 +891,8 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt) task->tk_flags |= RPC_TASK_SOFT; if (clnt->cl_noretranstimeo) task->tk_flags |= RPC_TASK_NO_RETRANS_TIMEOUT; - if (sk_memalloc_socks()) { - struct rpc_xprt *xprt; - - rcu_read_lock(); - xprt = rcu_dereference(clnt->cl_xprt); - if (xprt->swapper) - task->tk_flags |= RPC_TASK_SWAPPER; - rcu_read_unlock(); - } + if (atomic_read(&clnt->cl_swapper)) + task->tk_flags |= RPC_TASK_SWAPPER; /* Add to the client's list of all tasks */ spin_lock(&clnt->cl_lock); list_add_tail(&task->tk_task, &clnt->cl_tasks); @@ -1031,15 +1024,14 @@ EXPORT_SYMBOL_GPL(rpc_call_async); * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run * rpc_execute against it * @req: RPC request - * @tk_ops: RPC call ops */ -struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, - const struct rpc_call_ops *tk_ops) +struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req) { struct rpc_task *task; struct xdr_buf *xbufp = &req->rq_snd_buf; struct rpc_task_setup task_setup_data = { - .callback_ops = tk_ops, + .callback_ops = &rpc_default_ops, + .flags = RPC_TASK_SOFTCONN, }; dprintk("RPC: rpc_run_bc_task req= %p\n", req); @@ -1614,6 +1606,7 @@ call_allocate(struct rpc_task *task) req->rq_callsize + req->rq_rcvsize); if (req->rq_buffer != NULL) return; + xprt_inject_disconnect(xprt); dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid); @@ -1951,33 +1944,36 @@ call_bc_transmit(struct rpc_task *task) { struct rpc_rqst *req = task->tk_rqstp; - if (!xprt_prepare_transmit(task)) { - /* - * Could not reserve the transport. Try again after the - * transport is released. - */ - task->tk_status = 0; - task->tk_action = call_bc_transmit; - return; - } + if (!xprt_prepare_transmit(task)) + goto out_retry; - task->tk_action = rpc_exit_task; if (task->tk_status < 0) { printk(KERN_NOTICE "RPC: Could not send backchannel reply " "error: %d\n", task->tk_status); - return; + goto out_done; } + if (req->rq_connect_cookie != req->rq_xprt->connect_cookie) + req->rq_bytes_sent = 0; xprt_transmit(task); + + if (task->tk_status == -EAGAIN) + goto out_nospace; + xprt_end_transmit(task); dprint_status(task); switch (task->tk_status) { case 0: /* Success */ - break; case -EHOSTDOWN: case -EHOSTUNREACH: case -ENETUNREACH: + case -ECONNRESET: + case -ECONNREFUSED: + case -EADDRINUSE: + case -ENOTCONN: + case -EPIPE: + break; case -ETIMEDOUT: /* * Problem reaching the server. 
Disconnect and let the @@ -2002,6 +1998,13 @@ call_bc_transmit(struct rpc_task *task) break; } rpc_wake_up_queued_task(&req->rq_xprt->pending, task); +out_done: + task->tk_action = rpc_exit_task; + return; +out_nospace: + req->rq_connect_cookie = req->rq_xprt->connect_cookie; +out_retry: + task->tk_status = 0; } #endif /* CONFIG_SUNRPC_BACKCHANNEL */ @@ -2476,3 +2479,59 @@ void rpc_show_tasks(struct net *net) spin_unlock(&sn->rpc_client_lock); } #endif + +#if IS_ENABLED(CONFIG_SUNRPC_SWAP) +int +rpc_clnt_swap_activate(struct rpc_clnt *clnt) +{ + int ret = 0; + struct rpc_xprt *xprt; + + if (atomic_inc_return(&clnt->cl_swapper) == 1) { +retry: + rcu_read_lock(); + xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); + rcu_read_unlock(); + if (!xprt) { + /* + * If we didn't get a reference, then we likely are + * racing with a migration event. Wait for a grace + * period and try again. + */ + synchronize_rcu(); + goto retry; + } + + ret = xprt_enable_swap(xprt); + xprt_put(xprt); + } + return ret; +} +EXPORT_SYMBOL_GPL(rpc_clnt_swap_activate); + +void +rpc_clnt_swap_deactivate(struct rpc_clnt *clnt) +{ + struct rpc_xprt *xprt; + + if (atomic_dec_if_positive(&clnt->cl_swapper) == 0) { +retry: + rcu_read_lock(); + xprt = xprt_get(rcu_dereference(clnt->cl_xprt)); + rcu_read_unlock(); + if (!xprt) { + /* + * If we didn't get a reference, then we likely are + * racing with a migration event. Wait for a grace + * period and try again. + */ + synchronize_rcu(); + goto retry; + } + + xprt_disable_swap(xprt); + xprt_put(xprt); + } +} +EXPORT_SYMBOL_GPL(rpc_clnt_swap_deactivate); +#endif /* CONFIG_SUNRPC_SWAP */ diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c index 82962f7e6e88..e7b4d93566df 100644 --- a/net/sunrpc/debugfs.c +++ b/net/sunrpc/debugfs.c @@ -10,9 +10,12 @@ #include "netns.h" static struct dentry *topdir; +static struct dentry *rpc_fault_dir; static struct dentry *rpc_clnt_dir; static struct dentry *rpc_xprt_dir; +unsigned int rpc_inject_disconnect; + struct rpc_clnt_iter { struct rpc_clnt *clnt; loff_t pos; @@ -257,6 +260,8 @@ rpc_xprt_debugfs_register(struct rpc_xprt *xprt) debugfs_remove_recursive(xprt->debugfs); xprt->debugfs = NULL; } + + atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect); } void @@ -266,11 +271,79 @@ rpc_xprt_debugfs_unregister(struct rpc_xprt *xprt) xprt->debugfs = NULL; } +static int +fault_open(struct inode *inode, struct file *filp) +{ + filp->private_data = kmalloc(128, GFP_KERNEL); + if (!filp->private_data) + return -ENOMEM; + return 0; +} + +static int +fault_release(struct inode *inode, struct file *filp) +{ + kfree(filp->private_data); + return 0; +} + +static ssize_t +fault_disconnect_read(struct file *filp, char __user *user_buf, + size_t len, loff_t *offset) +{ + char *buffer = (char *)filp->private_data; + size_t size; + + size = sprintf(buffer, "%u\n", rpc_inject_disconnect); + return simple_read_from_buffer(user_buf, len, offset, buffer, size); +} + +static ssize_t +fault_disconnect_write(struct file *filp, const char __user *user_buf, + size_t len, loff_t *offset) +{ + char buffer[16]; + + if (len >= sizeof(buffer)) + len = sizeof(buffer) - 1; + if (copy_from_user(buffer, user_buf, len)) + return -EFAULT; + buffer[len] = '\0'; + if (kstrtouint(buffer, 10, &rpc_inject_disconnect)) + return -EINVAL; + return len; +} + +static const struct file_operations fault_disconnect_fops = { + .owner = THIS_MODULE, + .open = fault_open, + .read = fault_disconnect_read, + .write = fault_disconnect_write, + .release = fault_release, +}; + +static 
struct dentry * +inject_fault_dir(struct dentry *topdir) +{ + struct dentry *faultdir; + + faultdir = debugfs_create_dir("inject_fault", topdir); + if (!faultdir) + return NULL; + + if (!debugfs_create_file("disconnect", S_IFREG | S_IRUSR, faultdir, + NULL, &fault_disconnect_fops)) + return NULL; + + return faultdir; +} + void __exit sunrpc_debugfs_exit(void) { debugfs_remove_recursive(topdir); topdir = NULL; + rpc_fault_dir = NULL; rpc_clnt_dir = NULL; rpc_xprt_dir = NULL; } @@ -282,6 +355,10 @@ sunrpc_debugfs_init(void) if (!topdir) return; + rpc_fault_dir = inject_fault_dir(topdir); + if (!rpc_fault_dir) + goto out_remove; + rpc_clnt_dir = debugfs_create_dir("rpc_clnt", topdir); if (!rpc_clnt_dir) goto out_remove; @@ -294,5 +371,6 @@ sunrpc_debugfs_init(void) out_remove: debugfs_remove_recursive(topdir); topdir = NULL; + rpc_fault_dir = NULL; rpc_clnt_dir = NULL; } diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 852ae606b02a..5a16d8d8c831 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -1350,6 +1350,11 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, { struct kvec *argv = &rqstp->rq_arg.head[0]; struct kvec *resv = &rqstp->rq_res.head[0]; + struct rpc_task *task; + int proc_error; + int error; + + dprintk("svc: %s(%p)\n", __func__, req); /* Build the svc_rqst used by the common processing routine */ rqstp->rq_xprt = serv->sv_bc_xprt; @@ -1372,21 +1377,36 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req, /* * Skip the next two words because they've already been - * processed in the trasport + * processed in the transport */ svc_getu32(argv); /* XID */ svc_getnl(argv); /* CALLDIR */ - /* Returns 1 for send, 0 for drop */ - if (svc_process_common(rqstp, argv, resv)) { - memcpy(&req->rq_snd_buf, &rqstp->rq_res, - sizeof(req->rq_snd_buf)); - return bc_send(req); - } else { - /* drop request */ + /* Parse and execute the bc call */ + proc_error = svc_process_common(rqstp, argv, resv); + + atomic_inc(&req->rq_xprt->bc_free_slots); + if (!proc_error) { + /* Processing error: drop the request */ xprt_free_bc_request(req); return 0; } + + /* Finally, send the reply synchronously */ + memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf)); + task = rpc_run_bc_task(req); + if (IS_ERR(task)) { + error = PTR_ERR(task); + goto out; + } + + WARN_ON_ONCE(atomic_read(&task->tk_count) != 1); + error = task->tk_status; + rpc_put_task(task); + +out: + dprintk("svc: %s(), error=%d\n", __func__, error); + return error; } EXPORT_SYMBOL_GPL(bc_svc_process); #endif /* CONFIG_SUNRPC_BACKCHANNEL */ diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 1d4fe24af06a..ab5dd621ae0c 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -68,6 +68,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net); static void xprt_request_init(struct rpc_task *, struct rpc_xprt *); static void xprt_connect_status(struct rpc_task *task); static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *); +static void __xprt_put_cong(struct rpc_xprt *, struct rpc_rqst *); static void xprt_destroy(struct rpc_xprt *xprt); static DEFINE_SPINLOCK(xprt_list_lock); @@ -250,6 +251,8 @@ int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task) } xprt_clear_locked(xprt); out_sleep: + if (req) + __xprt_put_cong(xprt, req); dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt); task->tk_timeout = 0; task->tk_status = -EAGAIN; @@ -608,8 +611,8 @@ static void xprt_autoclose(struct work_struct *work) struct rpc_xprt *xprt = container_of(work, struct 
rpc_xprt, task_cleanup); - xprt->ops->close(xprt); clear_bit(XPRT_CLOSE_WAIT, &xprt->state); + xprt->ops->close(xprt); xprt_release_write(xprt, NULL); } @@ -967,6 +970,7 @@ void xprt_transmit(struct rpc_task *task) task->tk_status = status; return; } + xprt_inject_disconnect(xprt); dprintk("RPC: %5u xmit complete\n", task->tk_pid); task->tk_flags |= RPC_TASK_SENT; @@ -1285,6 +1289,7 @@ void xprt_release(struct rpc_task *task) spin_unlock_bh(&xprt->transport_lock); if (req->rq_buffer) xprt->ops->buf_free(req->rq_buffer); + xprt_inject_disconnect(xprt); if (req->rq_cred != NULL) put_rpccred(req->rq_cred); task->tk_rqstp = NULL; diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c index 302d4ebf6fbf..f1e8dafbd507 100644 --- a/net/sunrpc/xprtrdma/fmr_ops.c +++ b/net/sunrpc/xprtrdma/fmr_ops.c @@ -11,6 +11,21 @@ * can take tens of usecs to complete. */ +/* Normal operation + * + * A Memory Region is prepared for RDMA READ or WRITE using the + * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is + * finished, the Memory Region is unmapped using the ib_unmap_fmr + * verb (fmr_op_unmap). + */ + +/* Transport recovery + * + * After a transport reconnect, fmr_op_map re-uses the MR already + * allocated for the RPC, but generates a fresh rkey then maps the + * MR again. This process is synchronous. + */ + #include "xprt_rdma.h" #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define RPCDBG_FACILITY RPCDBG_TRANS #endif @@ -50,19 +65,28 @@ fmr_op_init(struct rpcrdma_xprt *r_xprt) struct rpcrdma_mw *r; int i, rc; + spin_lock_init(&buf->rb_mwlock); INIT_LIST_HEAD(&buf->rb_mws); INIT_LIST_HEAD(&buf->rb_all); - i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS; - dprintk("RPC: %s: initializing %d FMRs\n", __func__, i); + i = max_t(int, RPCRDMA_MAX_DATA_SEGS / RPCRDMA_MAX_FMR_SGES, 1); + i += 2; /* head + tail */ + i *= buf->rb_max_requests; /* one set for each RPC slot */ + dprintk("RPC: %s: initializing %d FMRs\n", __func__, i); + rc = -ENOMEM; while (i--) { r = kzalloc(sizeof(*r), GFP_KERNEL); if (!r) - return -ENOMEM; + goto out; - r->r.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr); - if (IS_ERR(r->r.fmr)) + r->r.fmr.physaddrs = kmalloc(RPCRDMA_MAX_FMR_SGES * + sizeof(u64), GFP_KERNEL); + if (!r->r.fmr.physaddrs) + goto out_free; + + r->r.fmr.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr); + if (IS_ERR(r->r.fmr.fmr)) goto out_fmr_err; list_add(&r->mw_list, &buf->rb_mws); @@ -71,12 +95,24 @@ fmr_op_init(struct rpcrdma_xprt *r_xprt) return 0; out_fmr_err: - rc = PTR_ERR(r->r.fmr); + rc = PTR_ERR(r->r.fmr.fmr); dprintk("RPC: %s: ib_alloc_fmr status %i\n", __func__, rc); + kfree(r->r.fmr.physaddrs); +out_free: kfree(r); +out: return rc; } +static int +__fmr_unmap(struct rpcrdma_mw *r) +{ + LIST_HEAD(l); + + list_add(&r->r.fmr.fmr->list, &l); + return ib_unmap_fmr(&l); +} + /* Use the ib_map_phys_fmr() verb to register a memory region * for remote access via RDMA READ or RDMA WRITE. 
*/ @@ -85,12 +121,24 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, int nsegs, bool writing) { struct rpcrdma_ia *ia = &r_xprt->rx_ia; - struct ib_device *device = ia->ri_id->device; + struct ib_device *device = ia->ri_device; enum dma_data_direction direction = rpcrdma_data_dir(writing); struct rpcrdma_mr_seg *seg1 = seg; - struct rpcrdma_mw *mw = seg1->rl_mw; - u64 physaddrs[RPCRDMA_MAX_DATA_SEGS]; int len, pageoff, i, rc; + struct rpcrdma_mw *mw; + + mw = seg1->rl_mw; + seg1->rl_mw = NULL; + if (!mw) { + mw = rpcrdma_get_mw(r_xprt); + if (!mw) + return -ENOMEM; + } else { + /* this is a retransmit; generate a fresh rkey */ + rc = __fmr_unmap(mw); + if (rc) + return rc; + } pageoff = offset_in_page(seg1->mr_offset); seg1->mr_offset -= pageoff; /* start of page */ @@ -100,7 +148,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, nsegs = RPCRDMA_MAX_FMR_SGES; for (i = 0; i < nsegs;) { rpcrdma_map_one(device, seg, direction); - physaddrs[i] = seg->mr_dma; + mw->r.fmr.physaddrs[i] = seg->mr_dma; len += seg->mr_len; ++seg; ++i; @@ -110,11 +158,13 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, break; } - rc = ib_map_phys_fmr(mw->r.fmr, physaddrs, i, seg1->mr_dma); + rc = ib_map_phys_fmr(mw->r.fmr.fmr, mw->r.fmr.physaddrs, + i, seg1->mr_dma); if (rc) goto out_maperr; - seg1->mr_rkey = mw->r.fmr->rkey; + seg1->rl_mw = mw; + seg1->mr_rkey = mw->r.fmr.fmr->rkey; seg1->mr_base = seg1->mr_dma + pageoff; seg1->mr_nsegs = i; seg1->mr_len = len; @@ -137,48 +187,28 @@ fmr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg) { struct rpcrdma_ia *ia = &r_xprt->rx_ia; struct rpcrdma_mr_seg *seg1 = seg; - struct ib_device *device; + struct rpcrdma_mw *mw = seg1->rl_mw; int rc, nsegs = seg->mr_nsegs; - LIST_HEAD(l); - list_add(&seg1->rl_mw->r.fmr->list, &l); - rc = ib_unmap_fmr(&l); - read_lock(&ia->ri_qplock); - device = ia->ri_id->device; + dprintk("RPC: %s: FMR %p\n", __func__, mw); + + seg1->rl_mw = NULL; while (seg1->mr_nsegs--) - rpcrdma_unmap_one(device, seg++); - read_unlock(&ia->ri_qplock); + rpcrdma_unmap_one(ia->ri_device, seg++); + rc = __fmr_unmap(mw); if (rc) goto out_err; + rpcrdma_put_mw(r_xprt, mw); return nsegs; out_err: + /* The FMR is abandoned, but remains in rb_all. fmr_op_destroy + * will attempt to release it when the transport is destroyed. + */ dprintk("RPC: %s: ib_unmap_fmr status %i\n", __func__, rc); return nsegs; } -/* After a disconnect, unmap all FMRs. - * - * This is invoked only in the transport connect worker in order - * to serialize with rpcrdma_register_fmr_external(). 
- */ -static void -fmr_op_reset(struct rpcrdma_xprt *r_xprt) -{ - struct rpcrdma_buffer *buf = &r_xprt->rx_buf; - struct rpcrdma_mw *r; - LIST_HEAD(list); - int rc; - - list_for_each_entry(r, &buf->rb_all, mw_all) - list_add(&r->r.fmr->list, &list); - - rc = ib_unmap_fmr(&list); - if (rc) - dprintk("RPC: %s: ib_unmap_fmr failed %i\n", - __func__, rc); -} - static void fmr_op_destroy(struct rpcrdma_buffer *buf) { @@ -188,10 +218,13 @@ fmr_op_destroy(struct rpcrdma_buffer *buf) while (!list_empty(&buf->rb_all)) { r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all); list_del(&r->mw_all); - rc = ib_dealloc_fmr(r->r.fmr); + kfree(r->r.fmr.physaddrs); + + rc = ib_dealloc_fmr(r->r.fmr.fmr); if (rc) dprintk("RPC: %s: ib_dealloc_fmr failed %i\n", __func__, rc); + kfree(r); } } @@ -202,7 +235,6 @@ const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = { .ro_open = fmr_op_open, .ro_maxpages = fmr_op_maxpages, .ro_init = fmr_op_init, - .ro_reset = fmr_op_reset, .ro_destroy = fmr_op_destroy, .ro_displayname = "fmr", }; diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c index d234521320a4..04ea914201b2 100644 --- a/net/sunrpc/xprtrdma/frwr_ops.c +++ b/net/sunrpc/xprtrdma/frwr_ops.c @@ -11,12 +11,136 @@ * but most complex memory registration mode. */ +/* Normal operation + * + * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG + * Work Request (frmr_op_map). When the RDMA operation is finished, this + * Memory Region is invalidated using a LOCAL_INV Work Request + * (frmr_op_unmap). + * + * Typically these Work Requests are not signaled, and neither are RDMA + * SEND Work Requests (with the exception of signaling occasionally to + * prevent provider work queue overflows). This greatly reduces HCA + * interrupt workload. + * + * As an optimization, frwr_op_unmap marks MRs INVALID before the + * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on + * rb_mws immediately so that no work (like managing a linked list + * under a spinlock) is needed in the completion upcall. + * + * But this means that frwr_op_map() can occasionally encounter an MR + * that is INVALID but the LOCAL_INV WR has not completed. Work Queue + * ordering prevents a subsequent FAST_REG WR from executing against + * that MR while it is still being invalidated. + */ + +/* Transport recovery + * + * ->op_map and the transport connect worker cannot run at the same + * time, but ->op_unmap can fire while the transport connect worker + * is running. Thus MR recovery is handled in ->op_map, to guarantee + * that recovered MRs are owned by a sending RPC, and not one where + * ->op_unmap could fire at the same time transport reconnect is + * being done. + * + * When the underlying transport disconnects, MRs are left in one of + * three states: + * + * INVALID: The MR was not in use before the QP entered ERROR state. + * (Or, the LOCAL_INV WR has not completed or flushed yet). + * + * STALE: The MR was being registered or unregistered when the QP + * entered ERROR state, and the pending WR was flushed. + * + * VALID: The MR was registered before the QP entered ERROR state. + * + * When frwr_op_map encounters STALE and VALID MRs, they are recovered + * with ib_dereg_mr and then are re-initialized. Because MR recovery + * allocates fresh resources, it is deferred to a workqueue, and the + * recovered MRs are placed back on the rb_mws list when recovery is + * complete. frwr_op_map allocates another MR for the current RPC while + * the broken MR is reset. 
+ * + * To ensure that frwr_op_map doesn't encounter an MR that is marked + * INVALID but that is about to be flushed due to a previous transport + * disconnect, the transport connect worker attempts to drain all + * pending send queue WRs before the transport is reconnected. + */ + #include "xprt_rdma.h" #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) # define RPCDBG_FACILITY RPCDBG_TRANS #endif +static struct workqueue_struct *frwr_recovery_wq; + +#define FRWR_RECOVERY_WQ_FLAGS (WQ_UNBOUND | WQ_MEM_RECLAIM) + +int +frwr_alloc_recovery_wq(void) +{ + frwr_recovery_wq = alloc_workqueue("frwr_recovery", + FRWR_RECOVERY_WQ_FLAGS, 0); + return !frwr_recovery_wq ? -ENOMEM : 0; +} + +void +frwr_destroy_recovery_wq(void) +{ + struct workqueue_struct *wq; + + if (!frwr_recovery_wq) + return; + + wq = frwr_recovery_wq; + frwr_recovery_wq = NULL; + destroy_workqueue(wq); +} + +/* Deferred reset of a single FRMR. Generate a fresh rkey by + * replacing the MR. + * + * There's no recovery if this fails. The FRMR is abandoned, but + * remains in rb_all. It will be cleaned up when the transport is + * destroyed. + */ +static void +__frwr_recovery_worker(struct work_struct *work) +{ + struct rpcrdma_mw *r = container_of(work, struct rpcrdma_mw, + r.frmr.fr_work); + struct rpcrdma_xprt *r_xprt = r->r.frmr.fr_xprt; + unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth; + struct ib_pd *pd = r_xprt->rx_ia.ri_pd; + + if (ib_dereg_mr(r->r.frmr.fr_mr)) + goto out_fail; + + r->r.frmr.fr_mr = ib_alloc_fast_reg_mr(pd, depth); + if (IS_ERR(r->r.frmr.fr_mr)) + goto out_fail; + + dprintk("RPC: %s: recovered FRMR %p\n", __func__, r); + r->r.frmr.fr_state = FRMR_IS_INVALID; + rpcrdma_put_mw(r_xprt, r); + return; + +out_fail: + pr_warn("RPC: %s: FRMR %p unrecovered\n", + __func__, r); +} + +/* A broken MR was discovered in a context that can't sleep. + * Defer recovery to the recovery worker. 
+ */ +static void +__frwr_queue_recovery(struct rpcrdma_mw *r) +{ + INIT_WORK(&r->r.frmr.fr_work, __frwr_recovery_worker); + queue_work(frwr_recovery_wq, &r->r.frmr.fr_work); +} + static int __frwr_init(struct rpcrdma_mw *r, struct ib_pd *pd, struct ib_device *device, unsigned int depth) @@ -128,7 +252,7 @@ frwr_sendcompletion(struct ib_wc *wc) /* WARNING: Only wr_id and status are reliable at this point */ r = (struct rpcrdma_mw *)(unsigned long)wc->wr_id; - dprintk("RPC: %s: frmr %p (stale), status %s (%d)\n", + pr_warn("RPC: %s: frmr %p flushed, status %s (%d)\n", __func__, r, ib_wc_status_msg(wc->status), wc->status); r->r.frmr.fr_state = FRMR_IS_STALE; } @@ -137,16 +261,19 @@ static int frwr_op_init(struct rpcrdma_xprt *r_xprt) { struct rpcrdma_buffer *buf = &r_xprt->rx_buf; - struct ib_device *device = r_xprt->rx_ia.ri_id->device; + struct ib_device *device = r_xprt->rx_ia.ri_device; unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth; struct ib_pd *pd = r_xprt->rx_ia.ri_pd; int i; + spin_lock_init(&buf->rb_mwlock); INIT_LIST_HEAD(&buf->rb_mws); INIT_LIST_HEAD(&buf->rb_all); - i = (buf->rb_max_requests + 1) * RPCRDMA_MAX_SEGS; - dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i); + i = max_t(int, RPCRDMA_MAX_DATA_SEGS / depth, 1); + i += 2; /* head + tail */ + i *= buf->rb_max_requests; /* one set for each RPC slot */ + dprintk("RPC: %s: initializing %d FRMRs\n", __func__, i); while (i--) { struct rpcrdma_mw *r; @@ -165,6 +292,7 @@ frwr_op_init(struct rpcrdma_xprt *r_xprt) list_add(&r->mw_list, &buf->rb_mws); list_add(&r->mw_all, &buf->rb_all); r->mw_sendcompletion = frwr_sendcompletion; + r->r.frmr.fr_xprt = r_xprt; } return 0; @@ -178,12 +306,12 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, int nsegs, bool writing) { struct rpcrdma_ia *ia = &r_xprt->rx_ia; - struct ib_device *device = ia->ri_id->device; + struct ib_device *device = ia->ri_device; enum dma_data_direction direction = rpcrdma_data_dir(writing); struct rpcrdma_mr_seg *seg1 = seg; - struct rpcrdma_mw *mw = seg1->rl_mw; - struct rpcrdma_frmr *frmr = &mw->r.frmr; - struct ib_mr *mr = frmr->fr_mr; + struct rpcrdma_mw *mw; + struct rpcrdma_frmr *frmr; + struct ib_mr *mr; struct ib_send_wr fastreg_wr, *bad_wr; u8 key; int len, pageoff; @@ -192,12 +320,25 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, u64 pa; int page_no; + mw = seg1->rl_mw; + seg1->rl_mw = NULL; + do { + if (mw) + __frwr_queue_recovery(mw); + mw = rpcrdma_get_mw(r_xprt); + if (!mw) + return -ENOMEM; + } while (mw->r.frmr.fr_state != FRMR_IS_INVALID); + frmr = &mw->r.frmr; + frmr->fr_state = FRMR_IS_VALID; + pageoff = offset_in_page(seg1->mr_offset); seg1->mr_offset -= pageoff; /* start of page */ seg1->mr_len += pageoff; len = -pageoff; if (nsegs > ia->ri_max_frmr_depth) nsegs = ia->ri_max_frmr_depth; + for (page_no = i = 0; i < nsegs;) { rpcrdma_map_one(device, seg, direction); pa = seg->mr_dma; @@ -216,8 +357,6 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, dprintk("RPC: %s: Using frmr %p to map %d segments (%d bytes)\n", __func__, mw, i, len); - frmr->fr_state = FRMR_IS_VALID; - memset(&fastreg_wr, 0, sizeof(fastreg_wr)); fastreg_wr.wr_id = (unsigned long)(void *)mw; fastreg_wr.opcode = IB_WR_FAST_REG_MR; @@ -229,6 +368,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, fastreg_wr.wr.fast_reg.access_flags = writing ? 
IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE : IB_ACCESS_REMOTE_READ; + mr = frmr->fr_mr; key = (u8)(mr->rkey & 0x000000FF); ib_update_fast_reg_key(mr, ++key); fastreg_wr.wr.fast_reg.rkey = mr->rkey; @@ -238,6 +378,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, if (rc) goto out_senderr; + seg1->rl_mw = mw; seg1->mr_rkey = mr->rkey; seg1->mr_base = seg1->mr_dma + pageoff; seg1->mr_nsegs = i; @@ -246,10 +387,9 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, out_senderr: dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc); - ib_update_fast_reg_key(mr, --key); - frmr->fr_state = FRMR_IS_INVALID; while (i--) rpcrdma_unmap_one(device, --seg); + __frwr_queue_recovery(mw); return rc; } @@ -261,78 +401,46 @@ frwr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg) { struct rpcrdma_mr_seg *seg1 = seg; struct rpcrdma_ia *ia = &r_xprt->rx_ia; + struct rpcrdma_mw *mw = seg1->rl_mw; struct ib_send_wr invalidate_wr, *bad_wr; int rc, nsegs = seg->mr_nsegs; - struct ib_device *device; - seg1->rl_mw->r.frmr.fr_state = FRMR_IS_INVALID; + dprintk("RPC: %s: FRMR %p\n", __func__, mw); + + seg1->rl_mw = NULL; + mw->r.frmr.fr_state = FRMR_IS_INVALID; memset(&invalidate_wr, 0, sizeof(invalidate_wr)); - invalidate_wr.wr_id = (unsigned long)(void *)seg1->rl_mw; + invalidate_wr.wr_id = (unsigned long)(void *)mw; invalidate_wr.opcode = IB_WR_LOCAL_INV; - invalidate_wr.ex.invalidate_rkey = seg1->rl_mw->r.frmr.fr_mr->rkey; + invalidate_wr.ex.invalidate_rkey = mw->r.frmr.fr_mr->rkey; DECR_CQCOUNT(&r_xprt->rx_ep); - read_lock(&ia->ri_qplock); - device = ia->ri_id->device; while (seg1->mr_nsegs--) - rpcrdma_unmap_one(device, seg++); + rpcrdma_unmap_one(ia->ri_device, seg++); + read_lock(&ia->ri_qplock); rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr); read_unlock(&ia->ri_qplock); if (rc) goto out_err; + + rpcrdma_put_mw(r_xprt, mw); return nsegs; out_err: - /* Force rpcrdma_buffer_get() to retry */ - seg1->rl_mw->r.frmr.fr_state = FRMR_IS_STALE; dprintk("RPC: %s: ib_post_send status %i\n", __func__, rc); + __frwr_queue_recovery(mw); return nsegs; } -/* After a disconnect, a flushed FAST_REG_MR can leave an FRMR in - * an unusable state. Find FRMRs in this state and dereg / reg - * each. FRMRs that are VALID and attached to an rpcrdma_req are - * also torn down. - * - * This gives all in-use FRMRs a fresh rkey and leaves them INVALID. - * - * This is invoked only in the transport connect worker in order - * to serialize with rpcrdma_register_frmr_external(). - */ -static void -frwr_op_reset(struct rpcrdma_xprt *r_xprt) -{ - struct rpcrdma_buffer *buf = &r_xprt->rx_buf; - struct ib_device *device = r_xprt->rx_ia.ri_id->device; - unsigned int depth = r_xprt->rx_ia.ri_max_frmr_depth; - struct ib_pd *pd = r_xprt->rx_ia.ri_pd; - struct rpcrdma_mw *r; - int rc; - - list_for_each_entry(r, &buf->rb_all, mw_all) { - if (r->r.frmr.fr_state == FRMR_IS_INVALID) - continue; - - __frwr_release(r); - rc = __frwr_init(r, pd, device, depth); - if (rc) { - dprintk("RPC: %s: mw %p left %s\n", - __func__, r, - (r->r.frmr.fr_state == FRMR_IS_STALE ? 
- "stale" : "valid")); - continue; - } - - r->r.frmr.fr_state = FRMR_IS_INVALID; - } -} - static void frwr_op_destroy(struct rpcrdma_buffer *buf) { struct rpcrdma_mw *r; + /* Ensure stale MWs for "buf" are no longer in flight */ + flush_workqueue(frwr_recovery_wq); + while (!list_empty(&buf->rb_all)) { r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all); list_del(&r->mw_all); @@ -347,7 +455,6 @@ const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = { .ro_open = frwr_op_open, .ro_maxpages = frwr_op_maxpages, .ro_init = frwr_op_init, - .ro_reset = frwr_op_reset, .ro_destroy = frwr_op_destroy, .ro_displayname = "frwr", }; diff --git a/net/sunrpc/xprtrdma/physical_ops.c b/net/sunrpc/xprtrdma/physical_ops.c index ba518af16787..41985d07fdb7 100644 --- a/net/sunrpc/xprtrdma/physical_ops.c +++ b/net/sunrpc/xprtrdma/physical_ops.c @@ -50,8 +50,7 @@ physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg, { struct rpcrdma_ia *ia = &r_xprt->rx_ia; - rpcrdma_map_one(ia->ri_id->device, seg, - rpcrdma_data_dir(writing)); + rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing)); seg->mr_rkey = ia->ri_bind_mem->rkey; seg->mr_base = seg->mr_dma; seg->mr_nsegs = 1; @@ -65,19 +64,11 @@ physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg) { struct rpcrdma_ia *ia = &r_xprt->rx_ia; - read_lock(&ia->ri_qplock); - rpcrdma_unmap_one(ia->ri_id->device, seg); - read_unlock(&ia->ri_qplock); - + rpcrdma_unmap_one(ia->ri_device, seg); return 1; } static void -physical_op_reset(struct rpcrdma_xprt *r_xprt) -{ -} - -static void physical_op_destroy(struct rpcrdma_buffer *buf) { } @@ -88,7 +79,6 @@ const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = { .ro_open = physical_op_open, .ro_maxpages = physical_op_maxpages, .ro_init = physical_op_init, - .ro_reset = physical_op_reset, .ro_destroy = physical_op_destroy, .ro_displayname = "physical", }; diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index 2c53ea9e1b83..84ea37daef36 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c @@ -284,9 +284,6 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target, return (unsigned char *)iptr - (unsigned char *)headerp; out: - if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR) - return n; - for (pos = 0; nchunks--;) pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt, &req->rl_segments[pos]); @@ -732,8 +729,8 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep) struct rpcrdma_msg *headerp; struct rpcrdma_req *req; struct rpc_rqst *rqst; - struct rpc_xprt *xprt = rep->rr_xprt; - struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt); + struct rpcrdma_xprt *r_xprt = rep->rr_rxprt; + struct rpc_xprt *xprt = &r_xprt->rx_xprt; __be32 *iptr; int rdmalen, status; unsigned long cwnd; @@ -770,7 +767,6 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep) rep->rr_len); repost: r_xprt->rx_stats.bad_reply_count++; - rep->rr_func = rpcrdma_reply_handler; if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep)) rpcrdma_recv_buffer_put(rep); diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 436da2caec95..680f888a9ddd 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c @@ -240,6 +240,16 @@ xprt_rdma_connect_worker(struct work_struct *work) xprt_clear_connecting(xprt); } +static void +xprt_rdma_inject_disconnect(struct rpc_xprt *xprt) +{ + struct rpcrdma_xprt *r_xprt = container_of(xprt, struct rpcrdma_xprt, + rx_xprt); + + pr_info("rpcrdma: injecting transport 
disconnect on xprt=%p\n", xprt); + rdma_disconnect(r_xprt->rx_ia.ri_id); +} + /* * xprt_rdma_destroy * @@ -612,12 +622,6 @@ xprt_rdma_send_request(struct rpc_task *task) if (req->rl_reply == NULL) /* e.g. reconnection */ rpcrdma_recv_buffer_get(req); - if (req->rl_reply) { - req->rl_reply->rr_func = rpcrdma_reply_handler; - /* this need only be done once, but... */ - req->rl_reply->rr_xprt = xprt; - } - /* Must suppress retransmit to maintain credits */ if (req->rl_connect_cookie == xprt->connect_cookie) goto drop_connection; @@ -676,6 +680,17 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) r_xprt->rx_stats.bad_reply_count); } +static int +xprt_rdma_enable_swap(struct rpc_xprt *xprt) +{ + return -EINVAL; +} + +static void +xprt_rdma_disable_swap(struct rpc_xprt *xprt) +{ +} + /* * Plumbing for rpc transport switch and kernel module */ @@ -694,7 +709,10 @@ static struct rpc_xprt_ops xprt_rdma_procs = { .send_request = xprt_rdma_send_request, .close = xprt_rdma_close, .destroy = xprt_rdma_destroy, - .print_stats = xprt_rdma_print_stats + .print_stats = xprt_rdma_print_stats, + .enable_swap = xprt_rdma_enable_swap, + .disable_swap = xprt_rdma_disable_swap, + .inject_disconnect = xprt_rdma_inject_disconnect }; static struct xprt_class xprt_rdma = { @@ -720,17 +738,24 @@ void xprt_rdma_cleanup(void) if (rc) dprintk("RPC: %s: xprt_unregister returned %i\n", __func__, rc); + + frwr_destroy_recovery_wq(); } int xprt_rdma_init(void) { int rc; - rc = xprt_register_transport(&xprt_rdma); - + rc = frwr_alloc_recovery_wq(); if (rc) return rc; + rc = xprt_register_transport(&xprt_rdma); + if (rc) { + frwr_destroy_recovery_wq(); + return rc; + } + dprintk("RPCRDMA Module Init, register RPC RDMA transport\n"); dprintk("Defaults:\n"); diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 52df265b472a..891c4ede2c20 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -80,7 +80,6 @@ static void rpcrdma_run_tasklet(unsigned long data) { struct rpcrdma_rep *rep; - void (*func)(struct rpcrdma_rep *); unsigned long flags; data = data; @@ -89,14 +88,9 @@ rpcrdma_run_tasklet(unsigned long data) rep = list_entry(rpcrdma_tasklets_g.next, struct rpcrdma_rep, rr_list); list_del(&rep->rr_list); - func = rep->rr_func; - rep->rr_func = NULL; spin_unlock_irqrestore(&rpcrdma_tk_lock_g, flags); - if (func) - func(rep); - else - rpcrdma_recv_buffer_put(rep); + rpcrdma_reply_handler(rep); spin_lock_irqsave(&rpcrdma_tk_lock_g, flags); } @@ -236,7 +230,7 @@ rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list) __func__, rep, wc->byte_len); rep->rr_len = wc->byte_len; - ib_dma_sync_single_for_cpu(rdmab_to_ia(rep->rr_buffer)->ri_id->device, + ib_dma_sync_single_for_cpu(rep->rr_device, rdmab_addr(rep->rr_rdmabuf), rep->rr_len, DMA_FROM_DEVICE); prefetch(rdmab_to_msg(rep->rr_rdmabuf)); @@ -407,7 +401,7 @@ connected: pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n", sap, rpc_get_port(sap), - ia->ri_id->device->name, + ia->ri_device->name, ia->ri_ops->ro_displayname, xprt->rx_buf.rb_max_requests, ird, ird < 4 && ird < tird / 2 ? 
" (low!)" : ""); @@ -508,8 +502,9 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) rc = PTR_ERR(ia->ri_id); goto out1; } + ia->ri_device = ia->ri_id->device; - ia->ri_pd = ib_alloc_pd(ia->ri_id->device); + ia->ri_pd = ib_alloc_pd(ia->ri_device); if (IS_ERR(ia->ri_pd)) { rc = PTR_ERR(ia->ri_pd); dprintk("RPC: %s: ib_alloc_pd() failed %i\n", @@ -517,7 +512,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) goto out2; } - rc = ib_query_device(ia->ri_id->device, devattr); + rc = ib_query_device(ia->ri_device, devattr); if (rc) { dprintk("RPC: %s: ib_query_device failed %d\n", __func__, rc); @@ -526,7 +521,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) if (devattr->device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) { ia->ri_have_dma_lkey = 1; - ia->ri_dma_lkey = ia->ri_id->device->local_dma_lkey; + ia->ri_dma_lkey = ia->ri_device->local_dma_lkey; } if (memreg == RPCRDMA_FRMR) { @@ -541,7 +536,7 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) } } if (memreg == RPCRDMA_MTHCAFMR) { - if (!ia->ri_id->device->alloc_fmr) { + if (!ia->ri_device->alloc_fmr) { dprintk("RPC: %s: MTHCAFMR registration " "not supported by HCA\n", __func__); memreg = RPCRDMA_ALLPHYSICAL; @@ -590,9 +585,6 @@ rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg) dprintk("RPC: %s: memory registration strategy is '%s'\n", __func__, ia->ri_ops->ro_displayname); - /* Else will do memory reg/dereg for each chunk */ - ia->ri_memreg_strategy = memreg; - rwlock_init(&ia->ri_qplock); return 0; @@ -622,17 +614,17 @@ rpcrdma_ia_close(struct rpcrdma_ia *ia) dprintk("RPC: %s: ib_dereg_mr returned %i\n", __func__, rc); } + if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) { if (ia->ri_id->qp) rdma_destroy_qp(ia->ri_id); rdma_destroy_id(ia->ri_id); ia->ri_id = NULL; } - if (ia->ri_pd != NULL && !IS_ERR(ia->ri_pd)) { - rc = ib_dealloc_pd(ia->ri_pd); - dprintk("RPC: %s: ib_dealloc_pd returned %i\n", - __func__, rc); - } + + /* If the pd is still busy, xprtrdma missed freeing a resource */ + if (ia->ri_pd && !IS_ERR(ia->ri_pd)) + WARN_ON(ib_dealloc_pd(ia->ri_pd)); } /* @@ -693,8 +685,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker); cq_attr.cqe = ep->rep_attr.cap.max_send_wr + 1; - sendcq = ib_create_cq(ia->ri_id->device, rpcrdma_sendcq_upcall, - rpcrdma_cq_async_error_upcall, ep, &cq_attr); + sendcq = ib_create_cq(ia->ri_device, rpcrdma_sendcq_upcall, + rpcrdma_cq_async_error_upcall, ep, &cq_attr); if (IS_ERR(sendcq)) { rc = PTR_ERR(sendcq); dprintk("RPC: %s: failed to create send CQ: %i\n", @@ -710,8 +702,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, } cq_attr.cqe = ep->rep_attr.cap.max_recv_wr + 1; - recvcq = ib_create_cq(ia->ri_id->device, rpcrdma_recvcq_upcall, - rpcrdma_cq_async_error_upcall, ep, &cq_attr); + recvcq = ib_create_cq(ia->ri_device, rpcrdma_recvcq_upcall, + rpcrdma_cq_async_error_upcall, ep, &cq_attr); if (IS_ERR(recvcq)) { rc = PTR_ERR(recvcq); dprintk("RPC: %s: failed to create recv CQ: %i\n", @@ -817,8 +809,6 @@ retry: rpcrdma_flush_cqs(ep); xprt = container_of(ia, struct rpcrdma_xprt, rx_ia); - ia->ri_ops->ro_reset(xprt); - id = rpcrdma_create_id(xprt, ia, (struct sockaddr *)&xprt->rx_data.addr); if (IS_ERR(id)) { @@ -832,7 +822,7 @@ retry: * More stuff I haven't thought of! * Rrrgh! 
*/ - if (ia->ri_id->device != id->device) { + if (ia->ri_device != id->device) { printk("RPC: %s: can't reconnect on " "different device!\n", __func__); rdma_destroy_id(id); @@ -974,7 +964,8 @@ rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt) goto out_free; } - rep->rr_buffer = &r_xprt->rx_buf; + rep->rr_device = ia->ri_device; + rep->rr_rxprt = r_xprt; return rep; out_free: @@ -1098,31 +1089,33 @@ rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf) kfree(buf->rb_pool); } -/* "*mw" can be NULL when rpcrdma_buffer_get_mrs() fails, leaving - * some req segments uninitialized. - */ -static void -rpcrdma_buffer_put_mr(struct rpcrdma_mw **mw, struct rpcrdma_buffer *buf) +struct rpcrdma_mw * +rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt) { - if (*mw) { - list_add_tail(&(*mw)->mw_list, &buf->rb_mws); - *mw = NULL; + struct rpcrdma_buffer *buf = &r_xprt->rx_buf; + struct rpcrdma_mw *mw = NULL; + + spin_lock(&buf->rb_mwlock); + if (!list_empty(&buf->rb_mws)) { + mw = list_first_entry(&buf->rb_mws, + struct rpcrdma_mw, mw_list); + list_del_init(&mw->mw_list); } + spin_unlock(&buf->rb_mwlock); + + if (!mw) + pr_err("RPC: %s: no MWs available\n", __func__); + return mw; } -/* Cycle mw's back in reverse order, and "spin" them. - * This delays and scrambles reuse as much as possible. - */ -static void -rpcrdma_buffer_put_mrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf) +void +rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw) { - struct rpcrdma_mr_seg *seg = req->rl_segments; - struct rpcrdma_mr_seg *seg1 = seg; - int i; + struct rpcrdma_buffer *buf = &r_xprt->rx_buf; - for (i = 1, seg++; i < RPCRDMA_MAX_SEGS; seg++, i++) - rpcrdma_buffer_put_mr(&seg->rl_mw, buf); - rpcrdma_buffer_put_mr(&seg1->rl_mw, buf); + spin_lock(&buf->rb_mwlock); + list_add_tail(&mw->mw_list, &buf->rb_mws); + spin_unlock(&buf->rb_mwlock); } static void @@ -1132,115 +1125,10 @@ rpcrdma_buffer_put_sendbuf(struct rpcrdma_req *req, struct rpcrdma_buffer *buf) req->rl_niovs = 0; if (req->rl_reply) { buf->rb_recv_bufs[--buf->rb_recv_index] = req->rl_reply; - req->rl_reply->rr_func = NULL; req->rl_reply = NULL; } } -/* rpcrdma_unmap_one() was already done during deregistration. - * Redo only the ib_post_send(). 
- */ -static void -rpcrdma_retry_local_inv(struct rpcrdma_mw *r, struct rpcrdma_ia *ia) -{ - struct rpcrdma_xprt *r_xprt = - container_of(ia, struct rpcrdma_xprt, rx_ia); - struct ib_send_wr invalidate_wr, *bad_wr; - int rc; - - dprintk("RPC: %s: FRMR %p is stale\n", __func__, r); - - /* When this FRMR is re-inserted into rb_mws, it is no longer stale */ - r->r.frmr.fr_state = FRMR_IS_INVALID; - - memset(&invalidate_wr, 0, sizeof(invalidate_wr)); - invalidate_wr.wr_id = (unsigned long)(void *)r; - invalidate_wr.opcode = IB_WR_LOCAL_INV; - invalidate_wr.ex.invalidate_rkey = r->r.frmr.fr_mr->rkey; - DECR_CQCOUNT(&r_xprt->rx_ep); - - dprintk("RPC: %s: frmr %p invalidating rkey %08x\n", - __func__, r, r->r.frmr.fr_mr->rkey); - - read_lock(&ia->ri_qplock); - rc = ib_post_send(ia->ri_id->qp, &invalidate_wr, &bad_wr); - read_unlock(&ia->ri_qplock); - if (rc) { - /* Force rpcrdma_buffer_get() to retry */ - r->r.frmr.fr_state = FRMR_IS_STALE; - dprintk("RPC: %s: ib_post_send failed, %i\n", - __func__, rc); - } -} - -static void -rpcrdma_retry_flushed_linv(struct list_head *stale, - struct rpcrdma_buffer *buf) -{ - struct rpcrdma_ia *ia = rdmab_to_ia(buf); - struct list_head *pos; - struct rpcrdma_mw *r; - unsigned long flags; - - list_for_each(pos, stale) { - r = list_entry(pos, struct rpcrdma_mw, mw_list); - rpcrdma_retry_local_inv(r, ia); - } - - spin_lock_irqsave(&buf->rb_lock, flags); - list_splice_tail(stale, &buf->rb_mws); - spin_unlock_irqrestore(&buf->rb_lock, flags); -} - -static struct rpcrdma_req * -rpcrdma_buffer_get_frmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf, - struct list_head *stale) -{ - struct rpcrdma_mw *r; - int i; - - i = RPCRDMA_MAX_SEGS - 1; - while (!list_empty(&buf->rb_mws)) { - r = list_entry(buf->rb_mws.next, - struct rpcrdma_mw, mw_list); - list_del(&r->mw_list); - if (r->r.frmr.fr_state == FRMR_IS_STALE) { - list_add(&r->mw_list, stale); - continue; - } - req->rl_segments[i].rl_mw = r; - if (unlikely(i-- == 0)) - return req; /* Success */ - } - - /* Not enough entries on rb_mws for this req */ - rpcrdma_buffer_put_sendbuf(req, buf); - rpcrdma_buffer_put_mrs(req, buf); - return NULL; -} - -static struct rpcrdma_req * -rpcrdma_buffer_get_fmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf) -{ - struct rpcrdma_mw *r; - int i; - - i = RPCRDMA_MAX_SEGS - 1; - while (!list_empty(&buf->rb_mws)) { - r = list_entry(buf->rb_mws.next, - struct rpcrdma_mw, mw_list); - list_del(&r->mw_list); - req->rl_segments[i].rl_mw = r; - if (unlikely(i-- == 0)) - return req; /* Success */ - } - - /* Not enough entries on rb_mws for this req */ - rpcrdma_buffer_put_sendbuf(req, buf); - rpcrdma_buffer_put_mrs(req, buf); - return NULL; -} - /* * Get a set of request/reply buffers. 
* @@ -1253,12 +1141,11 @@ rpcrdma_buffer_get_fmrs(struct rpcrdma_req *req, struct rpcrdma_buffer *buf) struct rpcrdma_req * rpcrdma_buffer_get(struct rpcrdma_buffer *buffers) { - struct rpcrdma_ia *ia = rdmab_to_ia(buffers); - struct list_head stale; struct rpcrdma_req *req; unsigned long flags; spin_lock_irqsave(&buffers->rb_lock, flags); + if (buffers->rb_send_index == buffers->rb_max_requests) { spin_unlock_irqrestore(&buffers->rb_lock, flags); dprintk("RPC: %s: out of request buffers\n", __func__); @@ -1277,20 +1164,7 @@ rpcrdma_buffer_get(struct rpcrdma_buffer *buffers) } buffers->rb_send_bufs[buffers->rb_send_index++] = NULL; - INIT_LIST_HEAD(&stale); - switch (ia->ri_memreg_strategy) { - case RPCRDMA_FRMR: - req = rpcrdma_buffer_get_frmrs(req, buffers, &stale); - break; - case RPCRDMA_MTHCAFMR: - req = rpcrdma_buffer_get_fmrs(req, buffers); - break; - default: - break; - } spin_unlock_irqrestore(&buffers->rb_lock, flags); - if (!list_empty(&stale)) - rpcrdma_retry_flushed_linv(&stale, buffers); return req; } @@ -1302,19 +1176,10 @@ void rpcrdma_buffer_put(struct rpcrdma_req *req) { struct rpcrdma_buffer *buffers = req->rl_buffer; - struct rpcrdma_ia *ia = rdmab_to_ia(buffers); unsigned long flags; spin_lock_irqsave(&buffers->rb_lock, flags); rpcrdma_buffer_put_sendbuf(req, buffers); - switch (ia->ri_memreg_strategy) { - case RPCRDMA_FRMR: - case RPCRDMA_MTHCAFMR: - rpcrdma_buffer_put_mrs(req, buffers); - break; - default: - break; - } spin_unlock_irqrestore(&buffers->rb_lock, flags); } @@ -1344,10 +1209,9 @@ rpcrdma_recv_buffer_get(struct rpcrdma_req *req) void rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep) { - struct rpcrdma_buffer *buffers = rep->rr_buffer; + struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf; unsigned long flags; - rep->rr_func = NULL; spin_lock_irqsave(&buffers->rb_lock, flags); buffers->rb_recv_bufs[--buffers->rb_recv_index] = rep; spin_unlock_irqrestore(&buffers->rb_lock, flags); @@ -1376,9 +1240,9 @@ rpcrdma_register_internal(struct rpcrdma_ia *ia, void *va, int len, /* * All memory passed here was kmalloc'ed, therefore phys-contiguous. 
*/ - iov->addr = ib_dma_map_single(ia->ri_id->device, + iov->addr = ib_dma_map_single(ia->ri_device, va, len, DMA_BIDIRECTIONAL); - if (ib_dma_mapping_error(ia->ri_id->device, iov->addr)) + if (ib_dma_mapping_error(ia->ri_device, iov->addr)) return -ENOMEM; iov->length = len; @@ -1422,8 +1286,8 @@ rpcrdma_deregister_internal(struct rpcrdma_ia *ia, { int rc; - ib_dma_unmap_single(ia->ri_id->device, - iov->addr, iov->length, DMA_BIDIRECTIONAL); + ib_dma_unmap_single(ia->ri_device, + iov->addr, iov->length, DMA_BIDIRECTIONAL); if (NULL == mr) return 0; @@ -1516,15 +1380,18 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia, send_wr.num_sge = req->rl_niovs; send_wr.opcode = IB_WR_SEND; if (send_wr.num_sge == 4) /* no need to sync any pad (constant) */ - ib_dma_sync_single_for_device(ia->ri_id->device, - req->rl_send_iov[3].addr, req->rl_send_iov[3].length, - DMA_TO_DEVICE); - ib_dma_sync_single_for_device(ia->ri_id->device, - req->rl_send_iov[1].addr, req->rl_send_iov[1].length, - DMA_TO_DEVICE); - ib_dma_sync_single_for_device(ia->ri_id->device, - req->rl_send_iov[0].addr, req->rl_send_iov[0].length, - DMA_TO_DEVICE); + ib_dma_sync_single_for_device(ia->ri_device, + req->rl_send_iov[3].addr, + req->rl_send_iov[3].length, + DMA_TO_DEVICE); + ib_dma_sync_single_for_device(ia->ri_device, + req->rl_send_iov[1].addr, + req->rl_send_iov[1].length, + DMA_TO_DEVICE); + ib_dma_sync_single_for_device(ia->ri_device, + req->rl_send_iov[0].addr, + req->rl_send_iov[0].length, + DMA_TO_DEVICE); if (DECR_CQCOUNT(ep) > 0) send_wr.send_flags = 0; @@ -1557,7 +1424,7 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia, recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov; recv_wr.num_sge = 1; - ib_dma_sync_single_for_cpu(ia->ri_id->device, + ib_dma_sync_single_for_cpu(ia->ri_device, rdmab_addr(rep->rr_rdmabuf), rdmab_length(rep->rr_rdmabuf), DMA_BIDIRECTIONAL); diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 58163b88738c..f49dd8b38122 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h @@ -62,6 +62,7 @@ struct rpcrdma_ia { const struct rpcrdma_memreg_ops *ri_ops; rwlock_t ri_qplock; + struct ib_device *ri_device; struct rdma_cm_id *ri_id; struct ib_pd *ri_pd; struct ib_mr *ri_bind_mem; @@ -69,7 +70,6 @@ struct rpcrdma_ia { int ri_have_dma_lkey; struct completion ri_done; int ri_async_rc; - enum rpcrdma_memreg ri_memreg_strategy; unsigned int ri_max_frmr_depth; struct ib_device_attr ri_devattr; struct ib_qp_attr ri_qp_attr; @@ -173,9 +173,8 @@ struct rpcrdma_buffer; struct rpcrdma_rep { unsigned int rr_len; - struct rpcrdma_buffer *rr_buffer; - struct rpc_xprt *rr_xprt; - void (*rr_func)(struct rpcrdma_rep *); + struct ib_device *rr_device; + struct rpcrdma_xprt *rr_rxprt; struct list_head rr_list; struct rpcrdma_regbuf *rr_rdmabuf; }; @@ -203,11 +202,18 @@ struct rpcrdma_frmr { struct ib_fast_reg_page_list *fr_pgl; struct ib_mr *fr_mr; enum rpcrdma_frmr_state fr_state; + struct work_struct fr_work; + struct rpcrdma_xprt *fr_xprt; +}; + +struct rpcrdma_fmr { + struct ib_fmr *fmr; + u64 *physaddrs; }; struct rpcrdma_mw { union { - struct ib_fmr *fmr; + struct rpcrdma_fmr fmr; struct rpcrdma_frmr frmr; } r; void (*mw_sendcompletion)(struct ib_wc *); @@ -281,15 +287,17 @@ rpcr_to_rdmar(struct rpc_rqst *rqst) * One of these is associated with a transport instance */ struct rpcrdma_buffer { - spinlock_t rb_lock; /* protects indexes */ - u32 rb_max_requests;/* client max requests */ - struct list_head rb_mws; /* optional memory windows/fmrs/frmrs */ - struct list_head rb_all; - 
int rb_send_index; + spinlock_t rb_mwlock; /* protect rb_mws list */ + struct list_head rb_mws; + struct list_head rb_all; + char *rb_pool; + + spinlock_t rb_lock; /* protect buf arrays */ + u32 rb_max_requests; + int rb_send_index; + int rb_recv_index; struct rpcrdma_req **rb_send_bufs; - int rb_recv_index; struct rpcrdma_rep **rb_recv_bufs; - char *rb_pool; }; #define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia) @@ -350,7 +358,6 @@ struct rpcrdma_memreg_ops { struct rpcrdma_create_data_internal *); size_t (*ro_maxpages)(struct rpcrdma_xprt *); int (*ro_init)(struct rpcrdma_xprt *); - void (*ro_reset)(struct rpcrdma_xprt *); void (*ro_destroy)(struct rpcrdma_buffer *); const char *ro_displayname; }; @@ -413,6 +420,8 @@ int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_ep *, int rpcrdma_buffer_create(struct rpcrdma_xprt *); void rpcrdma_buffer_destroy(struct rpcrdma_buffer *); +struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *); +void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *); struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *); void rpcrdma_buffer_put(struct rpcrdma_req *); void rpcrdma_recv_buffer_get(struct rpcrdma_req *); @@ -425,6 +434,9 @@ void rpcrdma_free_regbuf(struct rpcrdma_ia *, unsigned int rpcrdma_max_segments(struct rpcrdma_xprt *); +int frwr_alloc_recovery_wq(void); +void frwr_destroy_recovery_wq(void); + /* * Wrappers for chunk registration, shared by read/write chunk code. */ diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index b0517287075b..e193c2b5476b 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -623,24 +623,6 @@ process_status: } /** - * xs_tcp_shutdown - gracefully shut down a TCP socket - * @xprt: transport - * - * Initiates a graceful shutdown of the TCP socket by calling the - * equivalent of shutdown(SHUT_RDWR); - */ -static void xs_tcp_shutdown(struct rpc_xprt *xprt) -{ - struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); - struct socket *sock = transport->sock; - - if (sock != NULL) { - kernel_sock_shutdown(sock, SHUT_RDWR); - trace_rpc_socket_shutdown(xprt, sock); - } -} - -/** * xs_tcp_send_request - write an RPC request to a TCP socket * @task: address of RPC task that manages the state of an RPC request * @@ -786,6 +768,7 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt) xs_sock_reset_connection_flags(xprt); /* Mark transport as closed and wake up all pending tasks */ xprt_disconnect_done(xprt); + xprt_force_disconnect(xprt); } /** @@ -827,6 +810,9 @@ static void xs_reset_transport(struct sock_xprt *transport) if (sk == NULL) return; + if (atomic_read(&transport->xprt.swapper)) + sk_clear_memalloc(sk); + write_lock_bh(&sk->sk_callback_lock); transport->inet = NULL; transport->sock = NULL; @@ -863,6 +849,13 @@ static void xs_close(struct rpc_xprt *xprt) xprt_disconnect_done(xprt); } +static void xs_inject_disconnect(struct rpc_xprt *xprt) +{ + dprintk("RPC: injecting transport disconnect on xprt=%p\n", + xprt); + xprt_disconnect_done(xprt); +} + static void xs_xprt_free(struct rpc_xprt *xprt) { xs_free_peer_addresses(xprt); @@ -901,7 +894,6 @@ static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) /** * xs_local_data_ready - "data ready" callback for AF_LOCAL sockets * @sk: socket with data to read - * @len: how much data to read * * Currently this assumes we can read the whole reply in a single gulp. 
*/ @@ -965,7 +957,6 @@ static void xs_local_data_ready(struct sock *sk) /** * xs_udp_data_ready - "data ready" callback for UDP sockets * @sk: socket with data to read - * @len: how much data to read * */ static void xs_udp_data_ready(struct sock *sk) @@ -1389,7 +1380,6 @@ static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, uns /** * xs_tcp_data_ready - "data ready" callback for TCP sockets * @sk: socket with data to read - * @bytes: how much data to read * */ static void xs_tcp_data_ready(struct sock *sk) @@ -1886,9 +1876,7 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt, /** * xs_local_setup_socket - create AF_LOCAL socket, connect to a local endpoint - * @xprt: RPC transport to connect * @transport: socket transport to connect - * @create_sock: function to create a socket of the correct type */ static int xs_local_setup_socket(struct sock_xprt *transport) { @@ -1960,43 +1948,84 @@ static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task) msleep_interruptible(15000); } -#ifdef CONFIG_SUNRPC_SWAP +#if IS_ENABLED(CONFIG_SUNRPC_SWAP) +/* + * Note that this should be called with XPRT_LOCKED held (or when we otherwise + * know that we have exclusive access to the socket), to guard against + * races with xs_reset_transport. + */ static void xs_set_memalloc(struct rpc_xprt *xprt) { struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); - if (xprt->swapper) + /* + * If there's no sock, then we have nothing to set. The + * reconnecting process will get it for us. + */ + if (!transport->inet) + return; + if (atomic_read(&xprt->swapper)) sk_set_memalloc(transport->inet); } /** - * xs_swapper - Tag this transport as being used for swap. + * xs_enable_swap - Tag this transport as being used for swap. * @xprt: transport to tag - * @enable: enable/disable * + * Take a reference to this transport on behalf of the rpc_clnt, and + * optionally mark it for swapping if it wasn't already. */ -int xs_swapper(struct rpc_xprt *xprt, int enable) +static int +xs_enable_swap(struct rpc_xprt *xprt) { - struct sock_xprt *transport = container_of(xprt, struct sock_xprt, - xprt); - int err = 0; + struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt); - if (enable) { - xprt->swapper++; - xs_set_memalloc(xprt); - } else if (xprt->swapper) { - xprt->swapper--; - sk_clear_memalloc(transport->inet); - } + if (atomic_inc_return(&xprt->swapper) != 1) + return 0; + if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) + return -ERESTARTSYS; + if (xs->inet) + sk_set_memalloc(xs->inet); + xprt_release_xprt(xprt, NULL); + return 0; +} - return err; +/** + * xs_disable_swap - Untag this transport as being used for swap. + * @xprt: transport to tag + * + * Drop a "swapper" reference to this xprt on behalf of the rpc_clnt. If the + * swapper refcount goes to 0, untag the socket as a memalloc socket. 
+ */ +static void +xs_disable_swap(struct rpc_xprt *xprt) +{ + struct sock_xprt *xs = container_of(xprt, struct sock_xprt, xprt); + + if (!atomic_dec_and_test(&xprt->swapper)) + return; + if (wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_KILLABLE)) + return; + if (xs->inet) + sk_clear_memalloc(xs->inet); + xprt_release_xprt(xprt, NULL); } -EXPORT_SYMBOL_GPL(xs_swapper); #else static void xs_set_memalloc(struct rpc_xprt *xprt) { } + +static int +xs_enable_swap(struct rpc_xprt *xprt) +{ + return -EINVAL; +} + +static void +xs_disable_swap(struct rpc_xprt *xprt) +{ +} #endif static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) @@ -2057,6 +2086,27 @@ out: xprt_wake_pending_tasks(xprt, status); } +/** + * xs_tcp_shutdown - gracefully shut down a TCP socket + * @xprt: transport + * + * Initiates a graceful shutdown of the TCP socket by calling the + * equivalent of shutdown(SHUT_RDWR); + */ +static void xs_tcp_shutdown(struct rpc_xprt *xprt) +{ + struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); + struct socket *sock = transport->sock; + + if (sock == NULL) + return; + if (xprt_connected(xprt)) { + kernel_sock_shutdown(sock, SHUT_RDWR); + trace_rpc_socket_shutdown(xprt, sock); + } else + xs_reset_transport(transport); +} + static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) { struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); @@ -2067,6 +2117,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) unsigned int keepidle = xprt->timeout->to_initval / HZ; unsigned int keepcnt = xprt->timeout->to_retries + 1; unsigned int opt_on = 1; + unsigned int timeo; /* TCP Keepalive options */ kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, @@ -2078,6 +2129,12 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) kernel_setsockopt(sock, SOL_TCP, TCP_KEEPCNT, (char *)&keepcnt, sizeof(keepcnt)); + /* TCP user timeout (see RFC5482) */ + timeo = jiffies_to_msecs(xprt->timeout->to_initval) * + (xprt->timeout->to_retries + 1); + kernel_setsockopt(sock, SOL_TCP, TCP_USER_TIMEOUT, + (char *)&timeo, sizeof(timeo)); + write_lock_bh(&sk->sk_callback_lock); xs_save_old_callbacks(transport, sk); @@ -2125,9 +2182,6 @@ out: /** * xs_tcp_setup_socket - create a TCP socket and connect to a remote endpoint - * @xprt: RPC transport to connect - * @transport: socket transport to connect - * @create_sock: function to create a socket of the correct type * * Invoked by a work queue tasklet. 
*/ @@ -2463,6 +2517,8 @@ static struct rpc_xprt_ops xs_local_ops = { .close = xs_close, .destroy = xs_destroy, .print_stats = xs_local_print_stats, + .enable_swap = xs_enable_swap, + .disable_swap = xs_disable_swap, }; static struct rpc_xprt_ops xs_udp_ops = { @@ -2482,6 +2538,9 @@ static struct rpc_xprt_ops xs_udp_ops = { .close = xs_close, .destroy = xs_destroy, .print_stats = xs_udp_print_stats, + .enable_swap = xs_enable_swap, + .disable_swap = xs_disable_swap, + .inject_disconnect = xs_inject_disconnect, }; static struct rpc_xprt_ops xs_tcp_ops = { @@ -2498,6 +2557,9 @@ static struct rpc_xprt_ops xs_tcp_ops = { .close = xs_tcp_shutdown, .destroy = xs_destroy, .print_stats = xs_tcp_print_stats, + .enable_swap = xs_enable_swap, + .disable_swap = xs_disable_swap, + .inject_disconnect = xs_inject_disconnect, }; /* @@ -2515,6 +2577,9 @@ static struct rpc_xprt_ops bc_tcp_ops = { .close = bc_close, .destroy = bc_destroy, .print_stats = xs_tcp_print_stats, + .enable_swap = xs_enable_swap, + .disable_swap = xs_disable_swap, + .inject_disconnect = xs_inject_disconnect, }; static int xs_init_anyaddr(const int family, struct sockaddr *sap) diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 4906ca3c0f3a..a816382fc8af 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -108,6 +108,11 @@ void tipc_bclink_remove_node(struct net *net, u32 addr) tipc_bclink_lock(net); tipc_nmap_remove(&tn->bclink->bcast_nodes, addr); + + /* Last node? => reset backlog queue */ + if (!tn->bclink->bcast_nodes.count) + tipc_link_purge_backlog(&tn->bclink->link); + tipc_bclink_unlock(net); } diff --git a/net/tipc/link.c b/net/tipc/link.c index ca8b8e0f49b5..eaa9fe54b4ae 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -404,7 +404,7 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr) l_ptr->reasm_buf = NULL; } -static void tipc_link_purge_backlog(struct tipc_link *l) +void tipc_link_purge_backlog(struct tipc_link *l) { __skb_queue_purge(&l->backlogq); l->backlog[TIPC_LOW_IMPORTANCE].len = 0; diff --git a/net/tipc/link.h b/net/tipc/link.h index 0c02c973e985..ae0a0ea572f2 100644 --- a/net/tipc/link.h +++ b/net/tipc/link.h @@ -218,6 +218,7 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr); int tipc_link_is_up(struct tipc_link *l_ptr); int tipc_link_is_active(struct tipc_link *l_ptr); void tipc_link_purge_queues(struct tipc_link *l_ptr); +void tipc_link_purge_backlog(struct tipc_link *l); void tipc_link_reset_all(struct tipc_node *node); void tipc_link_reset(struct tipc_link *l_ptr); int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest, diff --git a/scripts/dtc/checks.c b/scripts/dtc/checks.c index ee96a2519eff..e81a8c74b8d2 100644 --- a/scripts/dtc/checks.c +++ b/scripts/dtc/checks.c @@ -53,7 +53,7 @@ struct check { void *data; bool warn, error; enum checkstatus status; - int inprogress; + bool inprogress; int num_prereqs; struct check **prereq; }; @@ -113,6 +113,7 @@ static inline void check_msg(struct check *c, const char *fmt, ...) vfprintf(stderr, fmt, ap); fprintf(stderr, "\n"); } + va_end(ap); } #define FAIL(c, ...) 
\ @@ -141,9 +142,9 @@ static void check_nodes_props(struct check *c, struct node *dt, struct node *nod check_nodes_props(c, dt, child); } -static int run_check(struct check *c, struct node *dt) +static bool run_check(struct check *c, struct node *dt) { - int error = 0; + bool error = false; int i; assert(!c->inprogress); @@ -151,11 +152,11 @@ static int run_check(struct check *c, struct node *dt) if (c->status != UNCHECKED) goto out; - c->inprogress = 1; + c->inprogress = true; for (i = 0; i < c->num_prereqs; i++) { struct check *prq = c->prereq[i]; - error |= run_check(prq, dt); + error = error || run_check(prq, dt); if (prq->status != PASSED) { c->status = PREREQ; check_msg(c, "Failed prerequisite '%s'", @@ -177,9 +178,9 @@ static int run_check(struct check *c, struct node *dt) TRACE(c, "\tCompleted, status %d", c->status); out: - c->inprogress = 0; + c->inprogress = false; if ((c->status != PASSED) && (c->error)) - error = 1; + error = true; return error; } @@ -624,11 +625,11 @@ static void check_avoid_default_addr_size(struct check *c, struct node *dt, if (!reg && !ranges) return; - if ((node->parent->addr_cells == -1)) + if (node->parent->addr_cells == -1) FAIL(c, "Relying on default #address-cells value for %s", node->fullpath); - if ((node->parent->size_cells == -1)) + if (node->parent->size_cells == -1) FAIL(c, "Relying on default #size-cells value for %s", node->fullpath); } @@ -706,15 +707,15 @@ static void disable_warning_error(struct check *c, bool warn, bool error) c->error = c->error && !error; } -void parse_checks_option(bool warn, bool error, const char *optarg) +void parse_checks_option(bool warn, bool error, const char *arg) { int i; - const char *name = optarg; + const char *name = arg; bool enable = true; - if ((strncmp(optarg, "no-", 3) == 0) - || (strncmp(optarg, "no_", 3) == 0)) { - name = optarg + 3; + if ((strncmp(arg, "no-", 3) == 0) + || (strncmp(arg, "no_", 3) == 0)) { + name = arg + 3; enable = false; } @@ -733,7 +734,7 @@ void parse_checks_option(bool warn, bool error, const char *optarg) die("Unrecognized check name \"%s\"\n", name); } -void process_checks(int force, struct boot_info *bi) +void process_checks(bool force, struct boot_info *bi) { struct node *dt = bi->dt; int i; diff --git a/scripts/dtc/data.c b/scripts/dtc/data.c index 4a40c5b92474..8cae23746882 100644 --- a/scripts/dtc/data.c +++ b/scripts/dtc/data.c @@ -74,7 +74,7 @@ struct data data_copy_escape_string(const char *s, int len) struct data d; char *q; - d = data_grow_for(empty_data, strlen(s)+1); + d = data_grow_for(empty_data, len + 1); q = d.val; while (i < len) { @@ -250,20 +250,20 @@ struct data data_add_marker(struct data d, enum markertype type, char *ref) return data_append_markers(d, m); } -int data_is_one_string(struct data d) +bool data_is_one_string(struct data d) { int i; int len = d.len; if (len == 0) - return 0; + return false; for (i = 0; i < len-1; i++) if (d.val[i] == '\0') - return 0; + return false; if (d.val[len-1] != '\0') - return 0; + return false; - return 1; + return true; } diff --git a/scripts/dtc/dtc-lexer.l b/scripts/dtc/dtc-lexer.l index 3b41bfca636c..0ee1caf03dd0 100644 --- a/scripts/dtc/dtc-lexer.l +++ b/scripts/dtc/dtc-lexer.l @@ -20,7 +20,6 @@ %option noyywrap nounput noinput never-interactive -%x INCLUDE %x BYTESTRING %x PROPNODENAME %s V1 @@ -40,6 +39,7 @@ LINECOMMENT "//".*\n #include "dtc-parser.tab.h" YYLTYPE yylloc; +extern bool treesource_error; /* CAUTION: this will stop working if we ever use yyless() or yyunput() */ #define YY_USER_ACTION \ @@ -61,7 
+61,8 @@ static int dts_version = 1; BEGIN(V1); \ static void push_input_file(const char *filename); -static int pop_input_file(void); +static bool pop_input_file(void); +static void lexical_error(const char *fmt, ...); %} %% @@ -75,11 +76,11 @@ static int pop_input_file(void); char *line, *tmp, *fn; /* skip text before line # */ line = yytext; - while (!isdigit(*line)) + while (!isdigit((unsigned char)*line)) line++; /* skip digits in line # */ tmp = line; - while (!isspace(*tmp)) + while (!isspace((unsigned char)*tmp)) tmp++; /* "NULL"-terminate line # */ *tmp = '\0'; @@ -146,15 +147,42 @@ static int pop_input_file(void); } <V1>([0-9]+|0[xX][0-9a-fA-F]+)(U|L|UL|LL|ULL)? { - yylval.literal = xstrdup(yytext); - DPRINT("Literal: '%s'\n", yylval.literal); + char *e; + DPRINT("Integer Literal: '%s'\n", yytext); + + errno = 0; + yylval.integer = strtoull(yytext, &e, 0); + + assert(!(*e) || !e[strspn(e, "UL")]); + + if (errno == ERANGE) + lexical_error("Integer literal '%s' out of range", + yytext); + else + /* ERANGE is the only strtoull error triggerable + * by strings matching the pattern */ + assert(errno == 0); return DT_LITERAL; } <*>{CHAR_LITERAL} { - yytext[yyleng-1] = '\0'; - yylval.literal = xstrdup(yytext+1); - DPRINT("Character literal: %s\n", yylval.literal); + struct data d; + DPRINT("Character literal: %s\n", yytext); + + d = data_copy_escape_string(yytext+1, yyleng-2); + if (d.len == 1) { + lexical_error("Empty character literal"); + yylval.integer = 0; + return DT_CHAR_LITERAL; + } + + yylval.integer = (unsigned char)d.val[0]; + + if (d.len > 2) + lexical_error("Character literal has %d" + " characters instead of 1", + d.len - 1); + return DT_CHAR_LITERAL; } @@ -164,7 +192,7 @@ static int pop_input_file(void); return DT_REF; } -<*>"&{/"{PATHCHAR}+\} { /* new-style path reference */ +<*>"&{/"{PATHCHAR}*\} { /* new-style path reference */ yytext[yyleng-1] = '\0'; DPRINT("Ref: %s\n", yytext+2); yylval.labelref = xstrdup(yytext+2); @@ -238,13 +266,24 @@ static void push_input_file(const char *filename) } -static int pop_input_file(void) +static bool pop_input_file(void) { if (srcfile_pop() == 0) - return 0; + return false; yypop_buffer_state(); yyin = current_srcfile->f; - return 1; + return true; +} + +static void lexical_error(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + srcpos_verror(&yylloc, "Lexical error", fmt, ap); + va_end(ap); + + treesource_error = true; } diff --git a/scripts/dtc/dtc-lexer.lex.c_shipped b/scripts/dtc/dtc-lexer.lex.c_shipped index 2d30f41778b7..11cd78e72305 100644 --- a/scripts/dtc/dtc-lexer.lex.c_shipped +++ b/scripts/dtc/dtc-lexer.lex.c_shipped @@ -9,7 +9,7 @@ #define FLEX_SCANNER #define YY_FLEX_MAJOR_VERSION 2 #define YY_FLEX_MINOR_VERSION 5 -#define YY_FLEX_SUBMINOR_VERSION 35 +#define YY_FLEX_SUBMINOR_VERSION 39 #if YY_FLEX_SUBMINOR_VERSION > 0 #define FLEX_BETA #endif @@ -162,7 +162,12 @@ typedef unsigned int flex_uint32_t; typedef struct yy_buffer_state *YY_BUFFER_STATE; #endif -extern int yyleng; +#ifndef YY_TYPEDEF_YY_SIZE_T +#define YY_TYPEDEF_YY_SIZE_T +typedef size_t yy_size_t; +#endif + +extern yy_size_t yyleng; extern FILE *yyin, *yyout; @@ -171,6 +176,7 @@ extern FILE *yyin, *yyout; #define EOB_ACT_LAST_MATCH 2 #define YY_LESS_LINENO(n) + #define YY_LINENO_REWIND_TO(ptr) /* Return all but the first "n" matched characters back to the input stream. 
*/ #define yyless(n) \ @@ -188,11 +194,6 @@ extern FILE *yyin, *yyout; #define unput(c) yyunput( c, (yytext_ptr) ) -#ifndef YY_TYPEDEF_YY_SIZE_T -#define YY_TYPEDEF_YY_SIZE_T -typedef size_t yy_size_t; -#endif - #ifndef YY_STRUCT_YY_BUFFER_STATE #define YY_STRUCT_YY_BUFFER_STATE struct yy_buffer_state @@ -210,7 +211,7 @@ struct yy_buffer_state /* Number of characters read into yy_ch_buf, not including EOB * characters. */ - int yy_n_chars; + yy_size_t yy_n_chars; /* Whether we "own" the buffer - i.e., we know we created it, * and can realloc() it to grow it, and should free() it to @@ -280,8 +281,8 @@ static YY_BUFFER_STATE * yy_buffer_stack = 0; /**< Stack as an array. */ /* yy_hold_char holds the character lost when yytext is formed. */ static char yy_hold_char; -static int yy_n_chars; /* number of characters read into yy_ch_buf */ -int yyleng; +static yy_size_t yy_n_chars; /* number of characters read into yy_ch_buf */ +yy_size_t yyleng; /* Points to current character in buffer. */ static char *yy_c_buf_p = (char *) 0; @@ -309,7 +310,7 @@ static void yy_init_buffer (YY_BUFFER_STATE b,FILE *file ); YY_BUFFER_STATE yy_scan_buffer (char *base,yy_size_t size ); YY_BUFFER_STATE yy_scan_string (yyconst char *yy_str ); -YY_BUFFER_STATE yy_scan_bytes (yyconst char *bytes,int len ); +YY_BUFFER_STATE yy_scan_bytes (yyconst char *bytes,yy_size_t len ); void *yyalloc (yy_size_t ); void *yyrealloc (void *,yy_size_t ); @@ -341,7 +342,7 @@ void yyfree (void * ); /* Begin user sect3 */ -#define yywrap(n) 1 +#define yywrap() 1 #define YY_SKIP_YYWRAP typedef unsigned char YY_CHAR; @@ -381,25 +382,25 @@ struct yy_trans_info flex_int32_t yy_verify; flex_int32_t yy_nxt; }; -static yyconst flex_int16_t yy_accept[161] = +static yyconst flex_int16_t yy_accept[159] = { 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 31, 29, 18, 18, 29, 29, 29, 29, 29, 29, - 29, 29, 29, 29, 29, 29, 29, 29, 15, 16, - 16, 29, 16, 10, 10, 18, 26, 0, 3, 0, - 27, 12, 0, 0, 11, 0, 0, 0, 0, 0, - 0, 0, 21, 23, 25, 24, 22, 0, 9, 28, - 0, 0, 0, 14, 14, 16, 16, 16, 10, 10, - 10, 0, 12, 0, 11, 0, 0, 0, 20, 0, - 0, 0, 0, 0, 0, 0, 0, 16, 10, 10, - 10, 0, 19, 0, 0, 0, 0, 0, 0, 0, - - 0, 0, 16, 13, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 16, 6, 0, 0, 0, 0, 0, - 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, - 4, 17, 0, 0, 2, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, - 0, 0, 5, 8, 0, 0, 0, 0, 7, 0 + 0, 0, 0, 0, 0, 0, 0, 0, 31, 29, + 18, 18, 29, 29, 29, 29, 29, 29, 29, 29, + 29, 29, 29, 29, 29, 29, 15, 16, 16, 29, + 16, 10, 10, 18, 26, 0, 3, 0, 27, 12, + 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, + 21, 23, 25, 24, 22, 0, 9, 28, 0, 0, + 0, 14, 14, 16, 16, 16, 10, 10, 10, 0, + 12, 0, 11, 0, 0, 0, 20, 0, 0, 0, + 0, 0, 0, 0, 0, 16, 10, 10, 10, 0, + 13, 19, 0, 0, 0, 0, 0, 0, 0, 0, + + 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 16, 6, 0, 0, 0, 0, 0, 0, 2, + 0, 0, 0, 0, 0, 0, 0, 0, 4, 17, + 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, + 5, 8, 0, 0, 0, 0, 7, 0 } ; static yyconst flex_int32_t yy_ec[256] = @@ -440,157 +441,157 @@ static yyconst flex_int32_t yy_meta[47] = 2, 2, 4, 5, 5, 5, 6, 1, 1, 1, 7, 8, 8, 8, 8, 1, 1, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, - 8, 8, 8, 3, 1, 1 + 8, 8, 8, 3, 1, 4 } ; -static yyconst flex_int16_t yy_base[175] = +static yyconst flex_int16_t yy_base[173] = { 0, - 0, 385, 378, 40, 41, 383, 72, 382, 34, 44, - 388, 393, 61, 117, 368, 116, 115, 115, 115, 48, - 367, 107, 368, 339, 127, 120, 0, 147, 393, 0, - 127, 0, 133, 156, 168, 153, 393, 125, 393, 380, - 393, 0, 369, 127, 393, 160, 371, 377, 347, 21, - 343, 346, 393, 393, 393, 393, 393, 359, 393, 393, - 183, 343, 
339, 393, 356, 0, 183, 340, 187, 348, - 347, 0, 0, 0, 178, 359, 195, 365, 354, 326, - 332, 325, 334, 328, 204, 326, 331, 324, 393, 335, - 150, 311, 343, 342, 315, 322, 340, 179, 313, 207, - - 319, 316, 317, 393, 337, 333, 305, 302, 311, 301, - 310, 190, 338, 337, 393, 307, 322, 301, 305, 277, - 208, 311, 307, 278, 271, 270, 248, 246, 213, 130, - 393, 393, 263, 235, 207, 221, 218, 229, 213, 213, - 206, 234, 218, 210, 208, 193, 219, 393, 223, 204, - 176, 157, 393, 393, 120, 106, 97, 119, 393, 393, - 245, 251, 259, 263, 267, 273, 280, 284, 292, 300, - 304, 310, 318, 326 + 0, 383, 34, 382, 65, 381, 37, 105, 387, 391, + 54, 111, 367, 110, 109, 109, 112, 41, 366, 104, + 367, 338, 124, 117, 0, 144, 391, 0, 121, 0, + 135, 155, 140, 179, 391, 160, 391, 379, 391, 0, + 368, 141, 391, 167, 370, 376, 346, 103, 342, 345, + 391, 391, 391, 391, 391, 358, 391, 391, 175, 342, + 338, 391, 355, 0, 185, 339, 184, 347, 346, 0, + 0, 322, 175, 357, 175, 363, 352, 324, 330, 323, + 332, 326, 201, 324, 329, 322, 391, 333, 181, 309, + 391, 341, 340, 313, 320, 338, 178, 311, 146, 317, + + 314, 315, 335, 331, 303, 300, 309, 299, 308, 188, + 336, 335, 391, 305, 320, 281, 283, 271, 203, 288, + 281, 271, 266, 264, 245, 242, 208, 104, 391, 391, + 244, 218, 204, 219, 206, 224, 201, 212, 204, 229, + 215, 208, 207, 200, 219, 391, 233, 221, 200, 181, + 391, 391, 149, 122, 86, 41, 391, 391, 245, 251, + 259, 263, 267, 273, 280, 284, 292, 300, 304, 310, + 318, 326 } ; -static yyconst flex_int16_t yy_def[175] = +static yyconst flex_int16_t yy_def[173] = { 0, - 160, 1, 1, 1, 1, 5, 160, 7, 1, 1, - 160, 160, 160, 160, 160, 161, 162, 163, 160, 160, - 160, 160, 164, 160, 160, 160, 165, 164, 160, 166, - 167, 166, 166, 160, 160, 160, 160, 161, 160, 161, - 160, 168, 160, 163, 160, 163, 169, 170, 160, 160, - 160, 160, 160, 160, 160, 160, 160, 164, 160, 160, - 160, 160, 160, 160, 164, 166, 167, 166, 160, 160, - 160, 171, 168, 172, 163, 169, 169, 170, 160, 160, - 160, 160, 160, 160, 160, 160, 160, 166, 160, 160, - 171, 172, 160, 160, 160, 160, 160, 160, 160, 160, - - 160, 160, 166, 160, 160, 160, 160, 160, 160, 160, - 160, 173, 160, 166, 160, 160, 160, 160, 160, 160, - 173, 160, 173, 160, 160, 160, 160, 160, 160, 160, - 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, - 160, 160, 174, 160, 160, 160, 174, 160, 174, 160, - 160, 160, 160, 160, 160, 160, 160, 160, 160, 0, - 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, - 160, 160, 160, 160 + 158, 1, 1, 3, 158, 5, 1, 1, 158, 158, + 158, 158, 158, 159, 160, 161, 158, 158, 158, 158, + 162, 158, 158, 158, 163, 162, 158, 164, 165, 164, + 164, 158, 158, 158, 158, 159, 158, 159, 158, 166, + 158, 161, 158, 161, 167, 168, 158, 158, 158, 158, + 158, 158, 158, 158, 158, 162, 158, 158, 158, 158, + 158, 158, 162, 164, 165, 164, 158, 158, 158, 169, + 166, 170, 161, 167, 167, 168, 158, 158, 158, 158, + 158, 158, 158, 158, 158, 164, 158, 158, 169, 170, + 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, + + 158, 164, 158, 158, 158, 158, 158, 158, 158, 171, + 158, 164, 158, 158, 158, 158, 158, 158, 171, 158, + 171, 158, 158, 158, 158, 158, 158, 158, 158, 158, + 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, + 172, 158, 158, 158, 172, 158, 172, 158, 158, 158, + 158, 158, 158, 158, 158, 158, 158, 0, 158, 158, + 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, + 158, 158 } ; -static yyconst flex_int16_t yy_nxt[440] = +static yyconst flex_int16_t yy_nxt[438] = { 0, - 12, 13, 14, 13, 15, 16, 12, 17, 18, 12, - 12, 12, 19, 12, 12, 12, 12, 20, 21, 22, - 23, 23, 23, 23, 23, 12, 12, 23, 23, 23, - 23, 23, 23, 23, 
23, 23, 23, 23, 23, 23, - 23, 23, 23, 12, 24, 12, 25, 34, 35, 35, - 25, 81, 26, 26, 27, 27, 27, 34, 35, 35, - 82, 28, 36, 36, 36, 53, 54, 29, 28, 28, - 28, 28, 12, 13, 14, 13, 15, 16, 30, 17, - 18, 30, 30, 30, 26, 30, 30, 30, 12, 20, - 21, 22, 31, 31, 31, 31, 31, 32, 12, 31, - - 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, - 31, 31, 31, 31, 31, 12, 24, 12, 36, 36, - 36, 39, 41, 45, 47, 56, 57, 48, 61, 47, - 39, 159, 48, 66, 61, 45, 66, 66, 66, 158, - 46, 40, 49, 59, 50, 157, 51, 49, 52, 50, - 40, 63, 46, 52, 36, 36, 36, 156, 43, 62, - 65, 65, 65, 59, 136, 68, 137, 65, 75, 69, - 69, 69, 70, 71, 65, 65, 65, 65, 70, 71, - 72, 69, 69, 69, 61, 46, 45, 155, 154, 66, - 70, 71, 66, 66, 66, 122, 85, 85, 85, 59, - - 69, 69, 69, 46, 77, 100, 109, 93, 100, 70, - 71, 110, 112, 122, 129, 123, 153, 85, 85, 85, - 135, 135, 135, 148, 148, 160, 135, 135, 135, 152, - 142, 142, 142, 123, 143, 142, 142, 142, 151, 143, - 150, 146, 145, 149, 149, 38, 38, 38, 38, 38, - 38, 38, 38, 42, 144, 141, 140, 42, 42, 44, - 44, 44, 44, 44, 44, 44, 44, 58, 58, 58, - 58, 64, 139, 64, 66, 138, 134, 66, 133, 66, - 66, 67, 132, 131, 67, 67, 67, 67, 73, 130, - 73, 73, 76, 76, 76, 76, 76, 76, 76, 76, - - 78, 78, 78, 78, 78, 78, 78, 78, 91, 160, - 91, 92, 129, 92, 92, 128, 92, 92, 121, 121, - 121, 121, 121, 121, 121, 121, 147, 147, 147, 147, - 147, 147, 147, 147, 127, 126, 125, 124, 61, 61, - 120, 119, 118, 117, 116, 115, 47, 114, 110, 113, - 111, 108, 107, 106, 48, 105, 104, 89, 103, 102, - 101, 99, 98, 97, 96, 95, 94, 79, 77, 90, - 89, 88, 59, 87, 86, 59, 84, 83, 80, 79, - 77, 74, 160, 60, 59, 55, 37, 160, 33, 25, - 26, 25, 11, 160, 160, 160, 160, 160, 160, 160, - - 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, - 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, - 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, - 160, 160, 160, 160, 160, 160, 160, 160, 160 + 10, 11, 12, 11, 13, 14, 10, 15, 16, 10, + 10, 10, 17, 10, 10, 10, 10, 18, 19, 20, + 21, 21, 21, 21, 21, 10, 10, 21, 21, 21, + 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, + 21, 21, 21, 10, 22, 10, 24, 25, 25, 25, + 32, 33, 33, 157, 26, 34, 34, 34, 51, 52, + 27, 26, 26, 26, 26, 10, 11, 12, 11, 13, + 14, 28, 15, 16, 28, 28, 28, 24, 28, 28, + 28, 10, 18, 19, 20, 29, 29, 29, 29, 29, + 30, 10, 29, 29, 29, 29, 29, 29, 29, 29, + + 29, 29, 29, 29, 29, 29, 29, 29, 10, 22, + 10, 23, 34, 34, 34, 37, 39, 43, 32, 33, + 33, 45, 54, 55, 46, 59, 45, 64, 156, 46, + 64, 64, 64, 79, 44, 38, 59, 57, 134, 47, + 135, 48, 80, 49, 47, 50, 48, 99, 61, 43, + 50, 110, 41, 67, 67, 67, 60, 63, 63, 63, + 57, 155, 68, 69, 63, 37, 44, 66, 67, 67, + 67, 63, 63, 63, 63, 73, 59, 68, 69, 70, + 34, 34, 34, 43, 75, 38, 154, 92, 83, 83, + 83, 64, 44, 120, 64, 64, 64, 67, 67, 67, + + 44, 57, 99, 68, 69, 107, 68, 69, 120, 127, + 108, 153, 152, 121, 83, 83, 83, 133, 133, 133, + 146, 133, 133, 133, 146, 140, 140, 140, 121, 141, + 140, 140, 140, 151, 141, 158, 150, 149, 148, 144, + 147, 143, 142, 139, 147, 36, 36, 36, 36, 36, + 36, 36, 36, 40, 138, 137, 136, 40, 40, 42, + 42, 42, 42, 42, 42, 42, 42, 56, 56, 56, + 56, 62, 132, 62, 64, 131, 130, 64, 129, 64, + 64, 65, 128, 158, 65, 65, 65, 65, 71, 127, + 71, 71, 74, 74, 74, 74, 74, 74, 74, 74, + + 76, 76, 76, 76, 76, 76, 76, 76, 89, 126, + 89, 90, 125, 90, 90, 124, 90, 90, 119, 119, + 119, 119, 119, 119, 119, 119, 145, 145, 145, 145, + 145, 145, 145, 145, 123, 122, 59, 59, 118, 117, + 116, 115, 114, 113, 45, 112, 108, 111, 109, 106, + 105, 104, 46, 103, 91, 87, 102, 101, 100, 98, + 97, 96, 95, 94, 93, 77, 75, 91, 88, 87, + 86, 57, 85, 84, 57, 82, 81, 78, 77, 75, + 72, 158, 
58, 57, 53, 35, 158, 31, 23, 23, + 9, 158, 158, 158, 158, 158, 158, 158, 158, 158, + + 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, + 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, + 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, + 158, 158, 158, 158, 158, 158, 158 } ; -static yyconst flex_int16_t yy_chk[440] = +static yyconst flex_int16_t yy_chk[438] = { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 4, 9, 9, 9, - 10, 50, 4, 5, 5, 5, 5, 10, 10, 10, - 50, 5, 13, 13, 13, 20, 20, 5, 5, 5, - 5, 5, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - - 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 14, 14, - 14, 16, 17, 18, 19, 22, 22, 19, 25, 26, - 38, 158, 26, 31, 33, 44, 31, 31, 31, 157, - 18, 16, 19, 31, 19, 156, 19, 26, 19, 26, - 38, 26, 44, 26, 36, 36, 36, 155, 17, 25, - 28, 28, 28, 28, 130, 33, 130, 28, 46, 34, - 34, 34, 91, 91, 28, 28, 28, 28, 34, 34, - 34, 35, 35, 35, 61, 46, 75, 152, 151, 67, - 35, 35, 67, 67, 67, 112, 61, 61, 61, 67, - - 69, 69, 69, 75, 77, 85, 98, 77, 100, 69, - 69, 98, 100, 121, 129, 112, 150, 85, 85, 85, - 135, 135, 135, 143, 147, 149, 129, 129, 129, 146, - 138, 138, 138, 121, 138, 142, 142, 142, 145, 142, - 144, 141, 140, 143, 147, 161, 161, 161, 161, 161, - 161, 161, 161, 162, 139, 137, 136, 162, 162, 163, - 163, 163, 163, 163, 163, 163, 163, 164, 164, 164, - 164, 165, 134, 165, 166, 133, 128, 166, 127, 166, - 166, 167, 126, 125, 167, 167, 167, 167, 168, 124, - 168, 168, 169, 169, 169, 169, 169, 169, 169, 169, - - 170, 170, 170, 170, 170, 170, 170, 170, 171, 123, - 171, 172, 122, 172, 172, 120, 172, 172, 173, 173, - 173, 173, 173, 173, 173, 173, 174, 174, 174, 174, - 174, 174, 174, 174, 119, 118, 117, 116, 114, 113, - 111, 110, 109, 108, 107, 106, 105, 103, 102, 101, - 99, 97, 96, 95, 94, 93, 92, 90, 88, 87, - 86, 84, 83, 82, 81, 80, 79, 78, 76, 71, - 70, 68, 65, 63, 62, 58, 52, 51, 49, 48, - 47, 43, 40, 24, 23, 21, 15, 11, 8, 6, - 3, 2, 160, 160, 160, 160, 160, 160, 160, 160, - - 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, - 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, - 160, 160, 160, 160, 160, 160, 160, 160, 160, 160, - 160, 160, 160, 160, 160, 160, 160, 160, 160 + 1, 1, 1, 1, 1, 1, 3, 3, 3, 3, + 7, 7, 7, 156, 3, 11, 11, 11, 18, 18, + 3, 3, 3, 3, 3, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 8, 12, 12, 12, 14, 15, 16, 8, 8, + 8, 17, 20, 20, 17, 23, 24, 29, 155, 24, + 29, 29, 29, 48, 16, 14, 31, 29, 128, 17, + 128, 17, 48, 17, 24, 17, 24, 99, 24, 42, + 24, 99, 15, 33, 33, 33, 23, 26, 26, 26, + 26, 154, 33, 33, 26, 36, 42, 31, 32, 32, + 32, 26, 26, 26, 26, 44, 59, 32, 32, 32, + 34, 34, 34, 73, 75, 36, 153, 75, 59, 59, + 59, 65, 44, 110, 65, 65, 65, 67, 67, 67, + + 73, 65, 83, 89, 89, 97, 67, 67, 119, 127, + 97, 150, 149, 110, 83, 83, 83, 133, 133, 133, + 141, 127, 127, 127, 145, 136, 136, 136, 119, 136, + 140, 140, 140, 148, 140, 147, 144, 143, 142, 139, + 141, 138, 137, 135, 145, 159, 159, 159, 159, 159, + 159, 159, 159, 160, 134, 132, 131, 160, 160, 161, + 161, 161, 161, 161, 161, 161, 161, 162, 162, 162, + 162, 163, 126, 163, 164, 125, 124, 164, 123, 164, + 164, 165, 122, 121, 165, 165, 165, 165, 166, 120, + 166, 166, 167, 167, 167, 167, 167, 167, 167, 167, + + 168, 168, 168, 168, 168, 168, 168, 168, 169, 118, + 169, 170, 117, 170, 170, 116, 170, 170, 171, 171, + 171, 171, 171, 171, 171, 
171, 172, 172, 172, 172, + 172, 172, 172, 172, 115, 114, 112, 111, 109, 108, + 107, 106, 105, 104, 103, 102, 101, 100, 98, 96, + 95, 94, 93, 92, 90, 88, 86, 85, 84, 82, + 81, 80, 79, 78, 77, 76, 74, 72, 69, 68, + 66, 63, 61, 60, 56, 50, 49, 47, 46, 45, + 41, 38, 22, 21, 19, 13, 9, 6, 4, 2, + 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, + + 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, + 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, + 158, 158, 158, 158, 158, 158, 158, 158, 158, 158, + 158, 158, 158, 158, 158, 158, 158 } ; static yy_state_type yy_last_accepting_state; @@ -631,13 +632,13 @@ char *yytext; - -#line 38 "dtc-lexer.l" +#line 37 "dtc-lexer.l" #include "dtc.h" #include "srcpos.h" #include "dtc-parser.tab.h" YYLTYPE yylloc; +extern bool treesource_error; /* CAUTION: this will stop working if we ever use yyless() or yyunput() */ #define YY_USER_ACTION \ @@ -659,14 +660,14 @@ static int dts_version = 1; BEGIN(V1); \ static void push_input_file(const char *filename); -static int pop_input_file(void); -#line 664 "dtc-lexer.lex.c" +static bool pop_input_file(void); +static void lexical_error(const char *fmt, ...); +#line 666 "dtc-lexer.lex.c" #define INITIAL 0 -#define INCLUDE 1 -#define BYTESTRING 2 -#define PROPNODENAME 3 -#define V1 4 +#define BYTESTRING 1 +#define PROPNODENAME 2 +#define V1 3 #ifndef YY_NO_UNISTD_H /* Special case for "unistd.h", since it is non-ANSI. We include it way @@ -703,7 +704,7 @@ FILE *yyget_out (void ); void yyset_out (FILE * out_str ); -int yyget_leng (void ); +yy_size_t yyget_leng (void ); char *yyget_text (void ); @@ -852,10 +853,6 @@ YY_DECL register char *yy_cp, *yy_bp; register int yy_act; -#line 67 "dtc-lexer.l" - -#line 858 "dtc-lexer.lex.c" - if ( !(yy_init) ) { (yy_init) = 1; @@ -882,6 +879,11 @@ YY_DECL yy_load_buffer_state( ); } + { +#line 68 "dtc-lexer.l" + +#line 886 "dtc-lexer.lex.c" + while ( 1 ) /* loops until end-of-file is reached */ { yy_cp = (yy_c_buf_p); @@ -899,7 +901,7 @@ YY_DECL yy_match: do { - register YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)]; + register YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)] ; if ( yy_accept[yy_current_state] ) { (yy_last_accepting_state) = yy_current_state; @@ -908,13 +910,13 @@ yy_match: while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) { yy_current_state = (int) yy_def[yy_current_state]; - if ( yy_current_state >= 161 ) + if ( yy_current_state >= 159 ) yy_c = yy_meta[(unsigned int) yy_c]; } yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; ++yy_cp; } - while ( yy_current_state != 160 ); + while ( yy_current_state != 158 ); yy_cp = (yy_last_accepting_cpos); yy_current_state = (yy_last_accepting_state); @@ -937,7 +939,7 @@ do_action: /* This label is used only to access EOF actions. 
*/ case 1: /* rule 1 can match eol */ YY_RULE_SETUP -#line 68 "dtc-lexer.l" +#line 69 "dtc-lexer.l" { char *name = strchr(yytext, '\"') + 1; yytext[yyleng-1] = '\0'; @@ -947,16 +949,16 @@ YY_RULE_SETUP case 2: /* rule 2 can match eol */ YY_RULE_SETUP -#line 74 "dtc-lexer.l" +#line 75 "dtc-lexer.l" { char *line, *tmp, *fn; /* skip text before line # */ line = yytext; - while (!isdigit(*line)) + while (!isdigit((unsigned char)*line)) line++; /* skip digits in line # */ tmp = line; - while (!isspace(*tmp)) + while (!isspace((unsigned char)*tmp)) tmp++; /* "NULL"-terminate line # */ *tmp = '\0'; @@ -970,11 +972,10 @@ YY_RULE_SETUP } YY_BREAK case YY_STATE_EOF(INITIAL): -case YY_STATE_EOF(INCLUDE): case YY_STATE_EOF(BYTESTRING): case YY_STATE_EOF(PROPNODENAME): case YY_STATE_EOF(V1): -#line 95 "dtc-lexer.l" +#line 96 "dtc-lexer.l" { if (!pop_input_file()) { yyterminate(); @@ -984,7 +985,7 @@ case YY_STATE_EOF(V1): case 3: /* rule 3 can match eol */ YY_RULE_SETUP -#line 101 "dtc-lexer.l" +#line 102 "dtc-lexer.l" { DPRINT("String: %s\n", yytext); yylval.data = data_copy_escape_string(yytext+1, @@ -994,7 +995,7 @@ YY_RULE_SETUP YY_BREAK case 4: YY_RULE_SETUP -#line 108 "dtc-lexer.l" +#line 109 "dtc-lexer.l" { DPRINT("Keyword: /dts-v1/\n"); dts_version = 1; @@ -1004,7 +1005,7 @@ YY_RULE_SETUP YY_BREAK case 5: YY_RULE_SETUP -#line 115 "dtc-lexer.l" +#line 116 "dtc-lexer.l" { DPRINT("Keyword: /memreserve/\n"); BEGIN_DEFAULT(); @@ -1013,7 +1014,7 @@ YY_RULE_SETUP YY_BREAK case 6: YY_RULE_SETUP -#line 121 "dtc-lexer.l" +#line 122 "dtc-lexer.l" { DPRINT("Keyword: /bits/\n"); BEGIN_DEFAULT(); @@ -1022,7 +1023,7 @@ YY_RULE_SETUP YY_BREAK case 7: YY_RULE_SETUP -#line 127 "dtc-lexer.l" +#line 128 "dtc-lexer.l" { DPRINT("Keyword: /delete-property/\n"); DPRINT("<PROPNODENAME>\n"); @@ -1032,7 +1033,7 @@ YY_RULE_SETUP YY_BREAK case 8: YY_RULE_SETUP -#line 134 "dtc-lexer.l" +#line 135 "dtc-lexer.l" { DPRINT("Keyword: /delete-node/\n"); DPRINT("<PROPNODENAME>\n"); @@ -1042,7 +1043,7 @@ YY_RULE_SETUP YY_BREAK case 9: YY_RULE_SETUP -#line 141 "dtc-lexer.l" +#line 142 "dtc-lexer.l" { DPRINT("Label: %s\n", yytext); yylval.labelref = xstrdup(yytext); @@ -1052,27 +1053,54 @@ YY_RULE_SETUP YY_BREAK case 10: YY_RULE_SETUP -#line 148 "dtc-lexer.l" +#line 149 "dtc-lexer.l" { - yylval.literal = xstrdup(yytext); - DPRINT("Literal: '%s'\n", yylval.literal); + char *e; + DPRINT("Integer Literal: '%s'\n", yytext); + + errno = 0; + yylval.integer = strtoull(yytext, &e, 0); + + assert(!(*e) || !e[strspn(e, "UL")]); + + if (errno == ERANGE) + lexical_error("Integer literal '%s' out of range", + yytext); + else + /* ERANGE is the only strtoull error triggerable + * by strings matching the pattern */ + assert(errno == 0); return DT_LITERAL; } YY_BREAK case 11: /* rule 11 can match eol */ YY_RULE_SETUP -#line 154 "dtc-lexer.l" +#line 168 "dtc-lexer.l" { - yytext[yyleng-1] = '\0'; - yylval.literal = xstrdup(yytext+1); - DPRINT("Character literal: %s\n", yylval.literal); + struct data d; + DPRINT("Character literal: %s\n", yytext); + + d = data_copy_escape_string(yytext+1, yyleng-2); + if (d.len == 1) { + lexical_error("Empty character literal"); + yylval.integer = 0; + return DT_CHAR_LITERAL; + } + + yylval.integer = (unsigned char)d.val[0]; + + if (d.len > 2) + lexical_error("Character literal has %d" + " characters instead of 1", + d.len - 1); + return DT_CHAR_LITERAL; } YY_BREAK case 12: YY_RULE_SETUP -#line 161 "dtc-lexer.l" +#line 189 "dtc-lexer.l" { /* label reference */ DPRINT("Ref: %s\n", yytext+1); yylval.labelref = 
xstrdup(yytext+1); @@ -1081,7 +1109,7 @@ YY_RULE_SETUP YY_BREAK case 13: YY_RULE_SETUP -#line 167 "dtc-lexer.l" +#line 195 "dtc-lexer.l" { /* new-style path reference */ yytext[yyleng-1] = '\0'; DPRINT("Ref: %s\n", yytext+2); @@ -1091,7 +1119,7 @@ YY_RULE_SETUP YY_BREAK case 14: YY_RULE_SETUP -#line 174 "dtc-lexer.l" +#line 202 "dtc-lexer.l" { yylval.byte = strtol(yytext, NULL, 16); DPRINT("Byte: %02x\n", (int)yylval.byte); @@ -1100,7 +1128,7 @@ YY_RULE_SETUP YY_BREAK case 15: YY_RULE_SETUP -#line 180 "dtc-lexer.l" +#line 208 "dtc-lexer.l" { DPRINT("/BYTESTRING\n"); BEGIN_DEFAULT(); @@ -1109,7 +1137,7 @@ YY_RULE_SETUP YY_BREAK case 16: YY_RULE_SETUP -#line 186 "dtc-lexer.l" +#line 214 "dtc-lexer.l" { DPRINT("PropNodeName: %s\n", yytext); yylval.propnodename = xstrdup((yytext[0] == '\\') ? @@ -1120,7 +1148,7 @@ YY_RULE_SETUP YY_BREAK case 17: YY_RULE_SETUP -#line 194 "dtc-lexer.l" +#line 222 "dtc-lexer.l" { DPRINT("Binary Include\n"); return DT_INCBIN; @@ -1129,64 +1157,64 @@ YY_RULE_SETUP case 18: /* rule 18 can match eol */ YY_RULE_SETUP -#line 199 "dtc-lexer.l" +#line 227 "dtc-lexer.l" /* eat whitespace */ YY_BREAK case 19: /* rule 19 can match eol */ YY_RULE_SETUP -#line 200 "dtc-lexer.l" +#line 228 "dtc-lexer.l" /* eat C-style comments */ YY_BREAK case 20: /* rule 20 can match eol */ YY_RULE_SETUP -#line 201 "dtc-lexer.l" +#line 229 "dtc-lexer.l" /* eat C++-style comments */ YY_BREAK case 21: YY_RULE_SETUP -#line 203 "dtc-lexer.l" +#line 231 "dtc-lexer.l" { return DT_LSHIFT; }; YY_BREAK case 22: YY_RULE_SETUP -#line 204 "dtc-lexer.l" +#line 232 "dtc-lexer.l" { return DT_RSHIFT; }; YY_BREAK case 23: YY_RULE_SETUP -#line 205 "dtc-lexer.l" +#line 233 "dtc-lexer.l" { return DT_LE; }; YY_BREAK case 24: YY_RULE_SETUP -#line 206 "dtc-lexer.l" +#line 234 "dtc-lexer.l" { return DT_GE; }; YY_BREAK case 25: YY_RULE_SETUP -#line 207 "dtc-lexer.l" +#line 235 "dtc-lexer.l" { return DT_EQ; }; YY_BREAK case 26: YY_RULE_SETUP -#line 208 "dtc-lexer.l" +#line 236 "dtc-lexer.l" { return DT_NE; }; YY_BREAK case 27: YY_RULE_SETUP -#line 209 "dtc-lexer.l" +#line 237 "dtc-lexer.l" { return DT_AND; }; YY_BREAK case 28: YY_RULE_SETUP -#line 210 "dtc-lexer.l" +#line 238 "dtc-lexer.l" { return DT_OR; }; YY_BREAK case 29: YY_RULE_SETUP -#line 212 "dtc-lexer.l" +#line 240 "dtc-lexer.l" { DPRINT("Char: %c (\\x%02x)\n", yytext[0], (unsigned)yytext[0]); @@ -1204,10 +1232,10 @@ YY_RULE_SETUP YY_BREAK case 30: YY_RULE_SETUP -#line 227 "dtc-lexer.l" +#line 255 "dtc-lexer.l" ECHO; YY_BREAK -#line 1211 "dtc-lexer.lex.c" +#line 1239 "dtc-lexer.lex.c" case YY_END_OF_BUFFER: { @@ -1337,6 +1365,7 @@ ECHO; "fatal flex scanner internal error--no action found" ); } /* end of action switch */ } /* end of scanning one token */ + } /* end of user's declarations */ } /* end of yylex */ /* yy_get_next_buffer - try to read in a new buffer @@ -1392,21 +1421,21 @@ static int yy_get_next_buffer (void) else { - int num_to_read = + yy_size_t num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1; while ( num_to_read <= 0 ) { /* Not enough room in the buffer - grow it. */ /* just a shorter name for the current buffer */ - YY_BUFFER_STATE b = YY_CURRENT_BUFFER; + YY_BUFFER_STATE b = YY_CURRENT_BUFFER_LVALUE; int yy_c_buf_p_offset = (int) ((yy_c_buf_p) - b->yy_ch_buf); if ( b->yy_is_our_buffer ) { - int new_size = b->yy_buf_size * 2; + yy_size_t new_size = b->yy_buf_size * 2; if ( new_size <= 0 ) b->yy_buf_size += b->yy_buf_size / 8; @@ -1437,7 +1466,7 @@ static int yy_get_next_buffer (void) /* Read in more data. 
*/ YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]), - (yy_n_chars), (size_t) num_to_read ); + (yy_n_chars), num_to_read ); YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars); } @@ -1499,7 +1528,7 @@ static int yy_get_next_buffer (void) while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) { yy_current_state = (int) yy_def[yy_current_state]; - if ( yy_current_state >= 161 ) + if ( yy_current_state >= 159 ) yy_c = yy_meta[(unsigned int) yy_c]; } yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; @@ -1527,13 +1556,13 @@ static int yy_get_next_buffer (void) while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state ) { yy_current_state = (int) yy_def[yy_current_state]; - if ( yy_current_state >= 161 ) + if ( yy_current_state >= 159 ) yy_c = yy_meta[(unsigned int) yy_c]; } yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c]; - yy_is_jam = (yy_current_state == 160); + yy_is_jam = (yy_current_state == 158); - return yy_is_jam ? 0 : yy_current_state; + return yy_is_jam ? 0 : yy_current_state; } #ifndef YY_NO_INPUT @@ -1560,7 +1589,7 @@ static int yy_get_next_buffer (void) else { /* need more input */ - int offset = (yy_c_buf_p) - (yytext_ptr); + yy_size_t offset = (yy_c_buf_p) - (yytext_ptr); ++(yy_c_buf_p); switch ( yy_get_next_buffer( ) ) @@ -1834,7 +1863,7 @@ void yypop_buffer_state (void) */ static void yyensure_buffer_stack (void) { - int num_to_alloc; + yy_size_t num_to_alloc; if (!(yy_buffer_stack)) { @@ -1931,12 +1960,12 @@ YY_BUFFER_STATE yy_scan_string (yyconst char * yystr ) * * @return the newly allocated buffer state object. */ -YY_BUFFER_STATE yy_scan_bytes (yyconst char * yybytes, int _yybytes_len ) +YY_BUFFER_STATE yy_scan_bytes (yyconst char * yybytes, yy_size_t _yybytes_len ) { YY_BUFFER_STATE b; char *buf; yy_size_t n; - int i; + yy_size_t i; /* Get memory for full buffer, including space for trailing EOB's. */ n = _yybytes_len + 2; @@ -2018,7 +2047,7 @@ FILE *yyget_out (void) /** Get the length of the current token. * */ -int yyget_leng (void) +yy_size_t yyget_leng (void) { return yyleng; } @@ -2166,7 +2195,7 @@ void yyfree (void * ptr ) #define YYTABLES_NAME "yytables" -#line 227 "dtc-lexer.l" +#line 254 "dtc-lexer.l" @@ -2182,14 +2211,25 @@ static void push_input_file(const char *filename) } -static int pop_input_file(void) +static bool pop_input_file(void) { if (srcfile_pop() == 0) - return 0; + return false; yypop_buffer_state(); yyin = current_srcfile->f; - return 1; + return true; +} + +static void lexical_error(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + srcpos_verror(&yylloc, "Lexical error", fmt, ap); + va_end(ap); + + treesource_error = true; } diff --git a/scripts/dtc/dtc-parser.tab.c_shipped b/scripts/dtc/dtc-parser.tab.c_shipped index c8769d550cfb..116458c8dfc4 100644 --- a/scripts/dtc/dtc-parser.tab.c_shipped +++ b/scripts/dtc/dtc-parser.tab.c_shipped @@ -1,19 +1,19 @@ -/* A Bison parser, made by GNU Bison 2.7.12-4996. */ +/* A Bison parser, made by GNU Bison 3.0.2. */ /* Bison implementation for Yacc-like parsers in C - - Copyright (C) 1984, 1989-1990, 2000-2013 Free Software Foundation, Inc. - + + Copyright (C) 1984, 1989-1990, 2000-2013 Free Software Foundation, Inc. + This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. 
- + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - + You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ @@ -26,7 +26,7 @@ special exception, which will cause the skeleton and the resulting Bison output files to be licensed under the GNU General Public License without this special exception. - + This special exception was added by the Free Software Foundation in version 2.2 of Bison. */ @@ -44,7 +44,7 @@ #define YYBISON 1 /* Bison version. */ -#define YYBISON_VERSION "2.7.12-4996" +#define YYBISON_VERSION "3.0.2" /* Skeleton name. */ #define YYSKELETON_NAME "yacc.c" @@ -62,34 +62,31 @@ /* Copy the first part of user declarations. */ -/* Line 371 of yacc.c */ -#line 21 "dtc-parser.y" +#line 20 "dtc-parser.y" /* yacc.c:339 */ #include <stdio.h> #include "dtc.h" #include "srcpos.h" -YYLTYPE yylloc; - extern int yylex(void); -extern void print_error(char const *fmt, ...); extern void yyerror(char const *s); +#define ERROR(loc, ...) \ + do { \ + srcpos_error((loc), "Error", __VA_ARGS__); \ + treesource_error = true; \ + } while (0) extern struct boot_info *the_boot_info; -extern int treesource_error; +extern bool treesource_error; -static unsigned long long eval_literal(const char *s, int base, int bits); -static unsigned char eval_char_literal(const char *s); +#line 84 "dtc-parser.tab.c" /* yacc.c:339 */ -/* Line 371 of yacc.c */ -#line 87 "dtc-parser.tab.c" - -# ifndef YY_NULL +# ifndef YY_NULLPTR # if defined __cplusplus && 201103L <= __cplusplus -# define YY_NULL nullptr +# define YY_NULLPTR nullptr # else -# define YY_NULL 0 +# define YY_NULLPTR 0 # endif # endif @@ -105,7 +102,7 @@ static unsigned char eval_char_literal(const char *s); by #include "dtc-parser.tab.h". */ #ifndef YY_YY_DTC_PARSER_TAB_H_INCLUDED # define YY_YY_DTC_PARSER_TAB_H_INCLUDED -/* Enabling traces. */ +/* Debug traces. */ #ifndef YYDEBUG # define YYDEBUG 0 #endif @@ -113,48 +110,44 @@ static unsigned char eval_char_literal(const char *s); extern int yydebug; #endif -/* Tokens. */ +/* Token type. */ #ifndef YYTOKENTYPE # define YYTOKENTYPE - /* Put the tokens into the symbol table, so that GDB and other debuggers - know about them. */ - enum yytokentype { - DT_V1 = 258, - DT_MEMRESERVE = 259, - DT_LSHIFT = 260, - DT_RSHIFT = 261, - DT_LE = 262, - DT_GE = 263, - DT_EQ = 264, - DT_NE = 265, - DT_AND = 266, - DT_OR = 267, - DT_BITS = 268, - DT_DEL_PROP = 269, - DT_DEL_NODE = 270, - DT_PROPNODENAME = 271, - DT_LITERAL = 272, - DT_CHAR_LITERAL = 273, - DT_BASE = 274, - DT_BYTE = 275, - DT_STRING = 276, - DT_LABEL = 277, - DT_REF = 278, - DT_INCBIN = 279 - }; + enum yytokentype + { + DT_V1 = 258, + DT_MEMRESERVE = 259, + DT_LSHIFT = 260, + DT_RSHIFT = 261, + DT_LE = 262, + DT_GE = 263, + DT_EQ = 264, + DT_NE = 265, + DT_AND = 266, + DT_OR = 267, + DT_BITS = 268, + DT_DEL_PROP = 269, + DT_DEL_NODE = 270, + DT_PROPNODENAME = 271, + DT_LITERAL = 272, + DT_CHAR_LITERAL = 273, + DT_BYTE = 274, + DT_STRING = 275, + DT_LABEL = 276, + DT_REF = 277, + DT_INCBIN = 278 + }; #endif - +/* Value type. */ #if ! defined YYSTYPE && ! 
defined YYSTYPE_IS_DECLARED -typedef union YYSTYPE +typedef union YYSTYPE YYSTYPE; +union YYSTYPE { -/* Line 387 of yacc.c */ -#line 40 "dtc-parser.y" +#line 38 "dtc-parser.y" /* yacc.c:355 */ char *propnodename; - char *literal; char *labelref; - unsigned int cbase; uint8_t byte; struct data data; @@ -170,37 +163,36 @@ typedef union YYSTYPE struct reserve_info *re; uint64_t integer; - -/* Line 387 of yacc.c */ -#line 176 "dtc-parser.tab.c" -} YYSTYPE; +#line 167 "dtc-parser.tab.c" /* yacc.c:355 */ +}; # define YYSTYPE_IS_TRIVIAL 1 -# define yystype YYSTYPE /* obsolescent; will be withdrawn */ # define YYSTYPE_IS_DECLARED 1 #endif -extern YYSTYPE yylval; - -#ifdef YYPARSE_PARAM -#if defined __STDC__ || defined __cplusplus -int yyparse (void *YYPARSE_PARAM); -#else -int yyparse (); +/* Location type. */ +#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED +typedef struct YYLTYPE YYLTYPE; +struct YYLTYPE +{ + int first_line; + int first_column; + int last_line; + int last_column; +}; +# define YYLTYPE_IS_DECLARED 1 +# define YYLTYPE_IS_TRIVIAL 1 #endif -#else /* ! YYPARSE_PARAM */ -#if defined __STDC__ || defined __cplusplus + + +extern YYSTYPE yylval; +extern YYLTYPE yylloc; int yyparse (void); -#else -int yyparse (); -#endif -#endif /* ! YYPARSE_PARAM */ #endif /* !YY_YY_DTC_PARSER_TAB_H_INCLUDED */ /* Copy the second part of user declarations. */ -/* Line 390 of yacc.c */ -#line 204 "dtc-parser.tab.c" +#line 196 "dtc-parser.tab.c" /* yacc.c:358 */ #ifdef short # undef short @@ -214,11 +206,8 @@ typedef unsigned char yytype_uint8; #ifdef YYTYPE_INT8 typedef YYTYPE_INT8 yytype_int8; -#elif (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) -typedef signed char yytype_int8; #else -typedef short int yytype_int8; +typedef signed char yytype_int8; #endif #ifdef YYTYPE_UINT16 @@ -238,8 +227,7 @@ typedef short int yytype_int16; # define YYSIZE_T __SIZE_TYPE__ # elif defined size_t # define YYSIZE_T size_t -# elif ! defined YYSIZE_T && (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) +# elif ! defined YYSIZE_T # include <stddef.h> /* INFRINGES ON USER NAME SPACE */ # define YYSIZE_T size_t # else @@ -261,11 +249,30 @@ typedef short int yytype_int16; # endif #endif -#ifndef __attribute__ -/* This feature is available in gcc versions 2.5 and later. */ -# if (! defined __GNUC__ || __GNUC__ < 2 \ - || (__GNUC__ == 2 && __GNUC_MINOR__ < 5)) -# define __attribute__(Spec) /* empty */ +#ifndef YY_ATTRIBUTE +# if (defined __GNUC__ \ + && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \ + || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C +# define YY_ATTRIBUTE(Spec) __attribute__(Spec) +# else +# define YY_ATTRIBUTE(Spec) /* empty */ +# endif +#endif + +#ifndef YY_ATTRIBUTE_PURE +# define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__)) +#endif + +#ifndef YY_ATTRIBUTE_UNUSED +# define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__)) +#endif + +#if !defined _Noreturn \ + && (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112) +# if defined _MSC_VER && 1200 <= _MSC_VER +# define _Noreturn __declspec (noreturn) +# else +# define _Noreturn YY_ATTRIBUTE ((__noreturn__)) # endif #endif @@ -276,25 +283,26 @@ typedef short int yytype_int16; # define YYUSE(E) /* empty */ #endif - -/* Identity function, used to suppress warnings about constant conditions. 
*/ -#ifndef lint -# define YYID(N) (N) -#else -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) -static int -YYID (int yyi) +#if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__ +/* Suppress an incorrect diagnostic about yylval being uninitialized. */ +# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \ + _Pragma ("GCC diagnostic push") \ + _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\ + _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") +# define YY_IGNORE_MAYBE_UNINITIALIZED_END \ + _Pragma ("GCC diagnostic pop") #else -static int -YYID (yyi) - int yyi; +# define YY_INITIAL_VALUE(Value) Value #endif -{ - return yyi; -} +#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN +# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN +# define YY_IGNORE_MAYBE_UNINITIALIZED_END +#endif +#ifndef YY_INITIAL_VALUE +# define YY_INITIAL_VALUE(Value) /* Nothing. */ #endif + #if ! defined yyoverflow || YYERROR_VERBOSE /* The parser invokes alloca or malloc; define the necessary symbols. */ @@ -312,8 +320,7 @@ YYID (yyi) # define alloca _alloca # else # define YYSTACK_ALLOC alloca -# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) +# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS # include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ /* Use EXIT_SUCCESS as a witness for stdlib.h. */ # ifndef EXIT_SUCCESS @@ -325,8 +332,8 @@ YYID (yyi) # endif # ifdef YYSTACK_ALLOC - /* Pacify GCC's `empty if-body' warning. */ -# define YYSTACK_FREE(Ptr) do { /* empty */; } while (YYID (0)) + /* Pacify GCC's 'empty if-body' warning. */ +# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0) # ifndef YYSTACK_ALLOC_MAXIMUM /* The OS might guarantee only one guard page at the bottom of the stack, and a page size can be as small as 4096 bytes. So we cannot safely @@ -342,7 +349,7 @@ YYID (yyi) # endif # if (defined __cplusplus && ! defined EXIT_SUCCESS \ && ! ((defined YYMALLOC || defined malloc) \ - && (defined YYFREE || defined free))) + && (defined YYFREE || defined free))) # include <stdlib.h> /* INFRINGES ON USER NAME SPACE */ # ifndef EXIT_SUCCESS # define EXIT_SUCCESS 0 @@ -350,15 +357,13 @@ YYID (yyi) # endif # ifndef YYMALLOC # define YYMALLOC malloc -# if ! defined malloc && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) +# if ! defined malloc && ! defined EXIT_SUCCESS void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */ # endif # endif # ifndef YYFREE # define YYFREE free -# if ! defined free && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) +# if ! defined free && ! defined EXIT_SUCCESS void free (void *); /* INFRINGES ON USER NAME SPACE */ # endif # endif @@ -368,13 +373,15 @@ void free (void *); /* INFRINGES ON USER NAME SPACE */ #if (! defined yyoverflow \ && (! defined __cplusplus \ - || (defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL))) + || (defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL \ + && defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL))) /* A type that is properly aligned for any stack member. */ union yyalloc { yytype_int16 yyss_alloc; YYSTYPE yyvs_alloc; + YYLTYPE yyls_alloc; }; /* The size of the maximum gap between one aligned stack and the next. */ @@ -383,8 +390,8 @@ union yyalloc /* The size of an array large to enough to hold all stacks, each with N elements. 
*/ # define YYSTACK_BYTES(N) \ - ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \ - + YYSTACK_GAP_MAXIMUM) + ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE) + sizeof (YYLTYPE)) \ + + 2 * YYSTACK_GAP_MAXIMUM) # define YYCOPY_NEEDED 1 @@ -393,16 +400,16 @@ union yyalloc elements in the stack, and YYPTR gives the new location of the stack. Advance YYPTR to a properly aligned location for the next stack. */ -# define YYSTACK_RELOCATE(Stack_alloc, Stack) \ - do \ - { \ - YYSIZE_T yynewbytes; \ - YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \ - Stack = &yyptr->Stack_alloc; \ - yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ - yyptr += yynewbytes / sizeof (*yyptr); \ - } \ - while (YYID (0)) +# define YYSTACK_RELOCATE(Stack_alloc, Stack) \ + do \ + { \ + YYSIZE_T yynewbytes; \ + YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \ + Stack = &yyptr->Stack_alloc; \ + yynewbytes = yystacksize * sizeof (*Stack) + YYSTACK_GAP_MAXIMUM; \ + yyptr += yynewbytes / sizeof (*yyptr); \ + } \ + while (0) #endif @@ -421,7 +428,7 @@ union yyalloc for (yyi = 0; yyi < (Count); yyi++) \ (Dst)[yyi] = (Src)[yyi]; \ } \ - while (YYID (0)) + while (0) # endif # endif #endif /* !YYCOPY_NEEDED */ @@ -429,40 +436,42 @@ union yyalloc /* YYFINAL -- State number of the termination state. */ #define YYFINAL 4 /* YYLAST -- Last index in YYTABLE. */ -#define YYLAST 133 +#define YYLAST 136 /* YYNTOKENS -- Number of terminals. */ -#define YYNTOKENS 48 +#define YYNTOKENS 47 /* YYNNTS -- Number of nonterminals. */ #define YYNNTS 28 /* YYNRULES -- Number of rules. */ -#define YYNRULES 79 -/* YYNRULES -- Number of states. */ -#define YYNSTATES 141 +#define YYNRULES 80 +/* YYNSTATES -- Number of states. */ +#define YYNSTATES 144 -/* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */ +/* YYTRANSLATE[YYX] -- Symbol number corresponding to YYX as returned + by yylex, with out-of-bounds checking. */ #define YYUNDEFTOK 2 -#define YYMAXUTOK 279 +#define YYMAXUTOK 278 -#define YYTRANSLATE(YYX) \ +#define YYTRANSLATE(YYX) \ ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK) -/* YYTRANSLATE[YYLEX] -- Bison symbol number corresponding to YYLEX. */ +/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM + as returned by yylex, without out-of-bounds checking. */ static const yytype_uint8 yytranslate[] = { 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 47, 2, 2, 2, 45, 41, 2, - 33, 35, 44, 42, 34, 43, 2, 26, 2, 2, - 2, 2, 2, 2, 2, 2, 2, 2, 38, 25, - 36, 29, 30, 37, 2, 2, 2, 2, 2, 2, + 2, 2, 2, 46, 2, 2, 2, 44, 40, 2, + 32, 34, 43, 41, 33, 42, 2, 25, 2, 2, + 2, 2, 2, 2, 2, 2, 2, 2, 37, 24, + 35, 28, 29, 36, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 31, 2, 32, 40, 2, 2, 2, 2, 2, + 2, 30, 2, 31, 39, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 27, 39, 28, 46, 2, 2, 2, + 2, 2, 2, 26, 38, 27, 45, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, @@ -477,67 +486,22 @@ static const yytype_uint8 yytranslate[] = 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, - 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 + 15, 16, 17, 18, 19, 20, 21, 22, 23 }; #if YYDEBUG -/* YYPRHS[YYN] -- Index of the first RHS symbol of rule number YYN in - YYRHS. 
*/ -static const yytype_uint16 yyprhs[] = -{ - 0, 0, 3, 8, 9, 12, 17, 20, 23, 27, - 31, 36, 42, 43, 46, 51, 54, 58, 61, 64, - 68, 73, 76, 86, 92, 95, 96, 99, 102, 106, - 108, 111, 114, 117, 119, 121, 125, 127, 129, 135, - 137, 141, 143, 147, 149, 153, 155, 159, 161, 165, - 167, 171, 175, 177, 181, 185, 189, 193, 197, 201, - 203, 207, 211, 213, 217, 221, 225, 227, 229, 232, - 235, 238, 239, 242, 245, 246, 249, 252, 255, 259 -}; - -/* YYRHS -- A `-1'-separated list of the rules' RHS. */ -static const yytype_int8 yyrhs[] = -{ - 49, 0, -1, 3, 25, 50, 52, -1, -1, 51, - 50, -1, 4, 59, 59, 25, -1, 22, 51, -1, - 26, 53, -1, 52, 26, 53, -1, 52, 23, 53, - -1, 52, 15, 23, 25, -1, 27, 54, 74, 28, - 25, -1, -1, 54, 55, -1, 16, 29, 56, 25, - -1, 16, 25, -1, 14, 16, 25, -1, 22, 55, - -1, 57, 21, -1, 57, 58, 30, -1, 57, 31, - 73, 32, -1, 57, 23, -1, 57, 24, 33, 21, - 34, 59, 34, 59, 35, -1, 57, 24, 33, 21, - 35, -1, 56, 22, -1, -1, 56, 34, -1, 57, - 22, -1, 13, 17, 36, -1, 36, -1, 58, 59, - -1, 58, 23, -1, 58, 22, -1, 17, -1, 18, - -1, 33, 60, 35, -1, 61, -1, 62, -1, 62, - 37, 60, 38, 61, -1, 63, -1, 62, 12, 63, - -1, 64, -1, 63, 11, 64, -1, 65, -1, 64, - 39, 65, -1, 66, -1, 65, 40, 66, -1, 67, - -1, 66, 41, 67, -1, 68, -1, 67, 9, 68, - -1, 67, 10, 68, -1, 69, -1, 68, 36, 69, - -1, 68, 30, 69, -1, 68, 7, 69, -1, 68, - 8, 69, -1, 69, 5, 70, -1, 69, 6, 70, - -1, 70, -1, 70, 42, 71, -1, 70, 43, 71, - -1, 71, -1, 71, 44, 72, -1, 71, 26, 72, - -1, 71, 45, 72, -1, 72, -1, 59, -1, 43, - 72, -1, 46, 72, -1, 47, 72, -1, -1, 73, - 20, -1, 73, 22, -1, -1, 75, 74, -1, 75, - 55, -1, 16, 53, -1, 15, 16, 25, -1, 22, - 75, -1 -}; - -/* YYRLINE[YYN] -- source line where rule number YYN was defined. */ + /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */ static const yytype_uint16 yyrline[] = { - 0, 109, 109, 118, 121, 128, 132, 140, 144, 148, - 158, 172, 180, 183, 190, 194, 198, 202, 210, 214, - 218, 222, 226, 243, 253, 261, 264, 268, 275, 290, - 295, 315, 329, 336, 340, 344, 351, 355, 356, 360, - 361, 365, 366, 370, 371, 375, 376, 380, 381, 385, - 386, 387, 391, 392, 393, 394, 395, 399, 400, 401, - 405, 406, 407, 411, 412, 413, 414, 418, 419, 420, - 421, 426, 429, 433, 441, 444, 448, 456, 460, 464 + 0, 104, 104, 113, 116, 123, 127, 135, 139, 144, + 155, 165, 180, 188, 191, 198, 202, 206, 210, 218, + 222, 226, 230, 234, 250, 260, 268, 271, 275, 282, + 298, 303, 322, 336, 343, 344, 345, 352, 356, 357, + 361, 362, 366, 367, 371, 372, 376, 377, 381, 382, + 386, 387, 388, 392, 393, 394, 395, 396, 400, 401, + 402, 406, 407, 408, 412, 413, 414, 415, 419, 420, + 421, 422, 427, 430, 434, 442, 445, 449, 457, 461, + 465 }; #endif @@ -549,209 +513,199 @@ static const char *const yytname[] = "$end", "error", "$undefined", "DT_V1", "DT_MEMRESERVE", "DT_LSHIFT", "DT_RSHIFT", "DT_LE", "DT_GE", "DT_EQ", "DT_NE", "DT_AND", "DT_OR", "DT_BITS", "DT_DEL_PROP", "DT_DEL_NODE", "DT_PROPNODENAME", "DT_LITERAL", - "DT_CHAR_LITERAL", "DT_BASE", "DT_BYTE", "DT_STRING", "DT_LABEL", - "DT_REF", "DT_INCBIN", "';'", "'/'", "'{'", "'}'", "'='", "'>'", "'['", - "']'", "'('", "','", "')'", "'<'", "'?'", "':'", "'|'", "'^'", "'&'", - "'+'", "'-'", "'*'", "'%'", "'~'", "'!'", "$accept", "sourcefile", + "DT_CHAR_LITERAL", "DT_BYTE", "DT_STRING", "DT_LABEL", "DT_REF", + "DT_INCBIN", "';'", "'/'", "'{'", "'}'", "'='", "'>'", "'['", "']'", + "'('", "','", "')'", "'<'", "'?'", "':'", "'|'", "'^'", "'&'", "'+'", + "'-'", "'*'", "'%'", "'~'", "'!'", "$accept", "sourcefile", "memreserves", "memreserve", "devicetree", "nodedef", "proplist", 
"propdef", "propdata", "propdataprefix", "arrayprefix", "integer_prim", "integer_expr", "integer_trinary", "integer_or", "integer_and", "integer_bitor", "integer_bitxor", "integer_bitand", "integer_eq", "integer_rela", "integer_shift", "integer_add", "integer_mul", - "integer_unary", "bytestring", "subnodes", "subnode", YY_NULL + "integer_unary", "bytestring", "subnodes", "subnode", YY_NULLPTR }; #endif # ifdef YYPRINT -/* YYTOKNUM[YYLEX-NUM] -- Internal token number corresponding to - token YYLEX-NUM. */ +/* YYTOKNUM[NUM] -- (External) token number corresponding to the + (internal) symbol number NUM (which must be that of a token). */ static const yytype_uint16 yytoknum[] = { 0, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273, 274, - 275, 276, 277, 278, 279, 59, 47, 123, 125, 61, - 62, 91, 93, 40, 44, 41, 60, 63, 58, 124, - 94, 38, 43, 45, 42, 37, 126, 33 + 275, 276, 277, 278, 59, 47, 123, 125, 61, 62, + 91, 93, 40, 44, 41, 60, 63, 58, 124, 94, + 38, 43, 45, 42, 37, 126, 33 }; # endif -/* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ -static const yytype_uint8 yyr1[] = -{ - 0, 48, 49, 50, 50, 51, 51, 52, 52, 52, - 52, 53, 54, 54, 55, 55, 55, 55, 56, 56, - 56, 56, 56, 56, 56, 57, 57, 57, 58, 58, - 58, 58, 58, 59, 59, 59, 60, 61, 61, 62, - 62, 63, 63, 64, 64, 65, 65, 66, 66, 67, - 67, 67, 68, 68, 68, 68, 68, 69, 69, 69, - 70, 70, 70, 71, 71, 71, 71, 72, 72, 72, - 72, 73, 73, 73, 74, 74, 74, 75, 75, 75 -}; +#define YYPACT_NINF -81 -/* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */ -static const yytype_uint8 yyr2[] = +#define yypact_value_is_default(Yystate) \ + (!!((Yystate) == (-81))) + +#define YYTABLE_NINF -1 + +#define yytable_value_is_error(Yytable_value) \ + 0 + + /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing + STATE-NUM. */ +static const yytype_int8 yypact[] = { - 0, 2, 4, 0, 2, 4, 2, 2, 3, 3, - 4, 5, 0, 2, 4, 2, 3, 2, 2, 3, - 4, 2, 9, 5, 2, 0, 2, 2, 3, 1, - 2, 2, 2, 1, 1, 3, 1, 1, 5, 1, - 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, - 3, 3, 1, 3, 3, 3, 3, 3, 3, 1, - 3, 3, 1, 3, 3, 3, 1, 1, 2, 2, - 2, 0, 2, 2, 0, 2, 2, 2, 3, 2 + 16, -11, 21, 10, -81, 25, 10, 19, 10, -81, + -81, -9, 25, -81, 2, 51, -81, -9, -9, -9, + -81, 1, -81, -6, 50, 14, 28, 29, 36, 3, + 58, 44, -3, -81, 47, -81, -81, 65, 68, 2, + 2, -81, -81, -81, -81, -9, -9, -9, -9, -9, + -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, + -9, -9, -9, -9, -81, 63, 69, 2, -81, -81, + 50, 57, 14, 28, 29, 36, 3, 3, 58, 58, + 58, 58, 44, 44, -3, -3, -81, -81, -81, 79, + 80, -8, 63, -81, 72, 63, -81, -81, -9, 76, + 77, -81, -81, -81, -81, -81, 78, -81, -81, -81, + -81, -81, 35, 4, -81, -81, -81, -81, 86, -81, + -81, -81, 73, -81, -81, 33, 71, 84, 39, -81, + -81, -81, -81, -81, 41, -81, -81, -81, 25, -81, + 74, 25, 75, -81 }; -/* YYDEFACT[STATE-NAME] -- Default reduction number in state STATE-NUM. - Performed when YYTABLE doesn't specify something else to do. Zero - means the default is an error. */ + /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM. + Performed when YYTABLE does not specify something else to do. Zero + means the default is an error. 
*/ static const yytype_uint8 yydefact[] = { - 0, 0, 0, 3, 1, 0, 0, 0, 3, 33, - 34, 0, 0, 6, 0, 2, 4, 0, 0, 0, - 67, 0, 36, 37, 39, 41, 43, 45, 47, 49, - 52, 59, 62, 66, 0, 12, 7, 0, 0, 0, - 68, 69, 70, 35, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 3, 1, 0, 0, 0, 3, 34, + 35, 0, 0, 6, 0, 2, 4, 0, 0, 0, + 68, 0, 37, 38, 40, 42, 44, 46, 48, 50, + 53, 60, 63, 67, 0, 13, 7, 0, 0, 0, + 0, 69, 70, 71, 36, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 5, 74, 0, 9, 8, 40, 0, - 42, 44, 46, 48, 50, 51, 55, 56, 54, 53, - 57, 58, 60, 61, 64, 63, 65, 0, 0, 0, - 0, 13, 0, 74, 10, 0, 0, 0, 15, 25, - 77, 17, 79, 0, 76, 75, 38, 16, 78, 0, - 0, 11, 24, 14, 26, 0, 18, 27, 21, 0, - 71, 29, 0, 0, 0, 0, 32, 31, 19, 30, - 28, 0, 72, 73, 20, 0, 23, 0, 0, 0, - 22 + 0, 0, 0, 0, 5, 75, 0, 0, 10, 8, + 41, 0, 43, 45, 47, 49, 51, 52, 56, 57, + 55, 54, 58, 59, 61, 62, 65, 64, 66, 0, + 0, 0, 0, 14, 0, 75, 11, 9, 0, 0, + 0, 16, 26, 78, 18, 80, 0, 77, 76, 39, + 17, 79, 0, 0, 12, 25, 15, 27, 0, 19, + 28, 22, 0, 72, 30, 0, 0, 0, 0, 33, + 32, 20, 31, 29, 0, 73, 74, 21, 0, 24, + 0, 0, 0, 23 }; -/* YYDEFGOTO[NTERM-NUM]. */ -static const yytype_int8 yydefgoto[] = + /* YYPGOTO[NTERM-NUM]. */ +static const yytype_int8 yypgoto[] = { - -1, 2, 7, 8, 15, 36, 64, 91, 109, 110, - 122, 20, 21, 22, 23, 24, 25, 26, 27, 28, - 29, 30, 31, 32, 33, 125, 92, 93 + -81, -81, 100, 104, -81, -38, -81, -80, -81, -81, + -81, -5, 66, 13, -81, 70, 67, 81, 64, 82, + 37, 27, 34, 38, -14, -81, 22, 24 }; -/* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing - STATE-NUM. */ -#define YYPACT_NINF -78 -static const yytype_int8 yypact[] = + /* YYDEFGOTO[NTERM-NUM]. */ +static const yytype_int16 yydefgoto[] = { - 22, 11, 51, 10, -78, 23, 10, 2, 10, -78, - -78, -9, 23, -78, 30, 38, -78, -9, -9, -9, - -78, 35, -78, -6, 52, 29, 48, 49, 33, 3, - 71, 36, 0, -78, 64, -78, -78, 68, 30, 30, - -78, -78, -78, -78, -9, -9, -9, -9, -9, -9, - -9, -9, -9, -9, -9, -9, -9, -9, -9, -9, - -9, -9, -9, -78, 44, 67, -78, -78, 52, 55, - 29, 48, 49, 33, 3, 3, 71, 71, 71, 71, - 36, 36, 0, 0, -78, -78, -78, 78, 79, 42, - 44, -78, 69, 44, -78, -9, 73, 74, -78, -78, - -78, -78, -78, 75, -78, -78, -78, -78, -78, -7, - -1, -78, -78, -78, -78, 84, -78, -78, -78, 63, - -78, -78, 32, 66, 82, -3, -78, -78, -78, -78, - -78, 46, -78, -78, -78, 23, -78, 70, 23, 72, - -78 + -1, 2, 7, 8, 15, 36, 65, 93, 112, 113, + 125, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 128, 94, 95 }; -/* YYPGOTO[NTERM-NUM]. */ -static const yytype_int8 yypgoto[] = + /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If + positive, shift that token. If negative, reduce the rule whose + number is the opposite. If YYTABLE_NINF, syntax error. */ +static const yytype_uint8 yytable[] = { - -78, -78, 97, 100, -78, -37, -78, -77, -78, -78, - -78, -5, 65, 13, -78, 76, 77, 62, 80, 83, - 34, 20, 26, 28, -14, -78, 18, 24 + 12, 68, 69, 41, 42, 43, 45, 34, 9, 10, + 53, 54, 104, 3, 5, 107, 101, 118, 35, 1, + 102, 4, 61, 11, 119, 120, 121, 122, 35, 97, + 46, 6, 55, 17, 123, 44, 18, 19, 56, 124, + 62, 63, 9, 10, 14, 51, 52, 86, 87, 88, + 9, 10, 48, 103, 129, 130, 115, 11, 135, 116, + 136, 47, 131, 57, 58, 11, 37, 49, 117, 50, + 137, 64, 38, 39, 138, 139, 40, 89, 90, 91, + 78, 79, 80, 81, 92, 59, 60, 66, 76, 77, + 67, 82, 83, 96, 98, 99, 100, 84, 85, 106, + 110, 111, 114, 126, 134, 127, 133, 141, 16, 143, + 13, 109, 71, 74, 72, 70, 105, 108, 0, 0, + 132, 0, 0, 0, 0, 0, 0, 0, 0, 73, + 0, 0, 75, 140, 0, 0, 142 }; -/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. 
If - positive, shift that token. If negative, reduce the rule which - number is the opposite. If YYTABLE_NINF, syntax error. */ -#define YYTABLE_NINF -1 -static const yytype_uint8 yytable[] = +static const yytype_int16 yycheck[] = { - 12, 66, 67, 40, 41, 42, 44, 34, 9, 10, - 52, 53, 115, 101, 5, 112, 104, 132, 113, 133, - 116, 117, 118, 119, 11, 1, 60, 114, 14, 134, - 120, 45, 6, 54, 17, 121, 3, 18, 19, 55, - 9, 10, 50, 51, 61, 62, 84, 85, 86, 9, - 10, 4, 100, 37, 126, 127, 11, 35, 87, 88, - 89, 38, 128, 46, 39, 11, 90, 98, 47, 35, - 43, 99, 76, 77, 78, 79, 56, 57, 58, 59, - 135, 136, 80, 81, 74, 75, 82, 83, 48, 63, - 49, 65, 94, 95, 96, 97, 124, 103, 107, 108, - 111, 123, 130, 131, 138, 16, 13, 140, 106, 71, - 69, 105, 0, 0, 102, 0, 0, 129, 0, 0, - 68, 0, 0, 70, 0, 0, 0, 0, 72, 0, - 137, 0, 73, 139 + 5, 39, 40, 17, 18, 19, 12, 12, 17, 18, + 7, 8, 92, 24, 4, 95, 24, 13, 26, 3, + 28, 0, 25, 32, 20, 21, 22, 23, 26, 67, + 36, 21, 29, 42, 30, 34, 45, 46, 35, 35, + 43, 44, 17, 18, 25, 9, 10, 61, 62, 63, + 17, 18, 38, 91, 21, 22, 21, 32, 19, 24, + 21, 11, 29, 5, 6, 32, 15, 39, 33, 40, + 31, 24, 21, 22, 33, 34, 25, 14, 15, 16, + 53, 54, 55, 56, 21, 41, 42, 22, 51, 52, + 22, 57, 58, 24, 37, 16, 16, 59, 60, 27, + 24, 24, 24, 17, 20, 32, 35, 33, 8, 34, + 6, 98, 46, 49, 47, 45, 92, 95, -1, -1, + 125, -1, -1, -1, -1, -1, -1, -1, -1, 48, + -1, -1, 50, 138, -1, -1, 141 }; -#define yypact_value_is_default(Yystate) \ - (!!((Yystate) == (-78))) - -#define yytable_value_is_error(Yytable_value) \ - YYID (0) + /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing + symbol of state STATE-NUM. */ +static const yytype_uint8 yystos[] = +{ + 0, 3, 48, 24, 0, 4, 21, 49, 50, 17, + 18, 32, 58, 50, 25, 51, 49, 42, 45, 46, + 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, + 68, 69, 70, 71, 58, 26, 52, 15, 21, 22, + 25, 71, 71, 71, 34, 12, 36, 11, 38, 39, + 40, 9, 10, 7, 8, 29, 35, 5, 6, 41, + 42, 25, 43, 44, 24, 53, 22, 22, 52, 52, + 62, 59, 63, 64, 65, 66, 67, 67, 68, 68, + 68, 68, 69, 69, 70, 70, 71, 71, 71, 14, + 15, 16, 21, 54, 73, 74, 24, 52, 37, 16, + 16, 24, 28, 52, 54, 74, 27, 54, 73, 60, + 24, 24, 55, 56, 24, 21, 24, 33, 13, 20, + 21, 22, 23, 30, 35, 57, 17, 32, 72, 21, + 22, 29, 58, 35, 20, 19, 21, 31, 33, 34, + 58, 33, 58, 34 +}; -static const yytype_int16 yycheck[] = + /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */ +static const yytype_uint8 yyr1[] = { - 5, 38, 39, 17, 18, 19, 12, 12, 17, 18, - 7, 8, 13, 90, 4, 22, 93, 20, 25, 22, - 21, 22, 23, 24, 33, 3, 26, 34, 26, 32, - 31, 37, 22, 30, 43, 36, 25, 46, 47, 36, - 17, 18, 9, 10, 44, 45, 60, 61, 62, 17, - 18, 0, 89, 15, 22, 23, 33, 27, 14, 15, - 16, 23, 30, 11, 26, 33, 22, 25, 39, 27, - 35, 29, 52, 53, 54, 55, 5, 6, 42, 43, - 34, 35, 56, 57, 50, 51, 58, 59, 40, 25, - 41, 23, 25, 38, 16, 16, 33, 28, 25, 25, - 25, 17, 36, 21, 34, 8, 6, 35, 95, 47, - 45, 93, -1, -1, 90, -1, -1, 122, -1, -1, - 44, -1, -1, 46, -1, -1, -1, -1, 48, -1, - 135, -1, 49, 138 + 0, 47, 48, 49, 49, 50, 50, 51, 51, 51, + 51, 51, 52, 53, 53, 54, 54, 54, 54, 55, + 55, 55, 55, 55, 55, 55, 56, 56, 56, 57, + 57, 57, 57, 57, 58, 58, 58, 59, 60, 60, + 61, 61, 62, 62, 63, 63, 64, 64, 65, 65, + 66, 66, 66, 67, 67, 67, 67, 67, 68, 68, + 68, 69, 69, 69, 70, 70, 70, 70, 71, 71, + 71, 71, 72, 72, 72, 73, 73, 73, 74, 74, + 74 }; -/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing - symbol of state STATE-NUM. */ -static const yytype_uint8 yystos[] = + /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. 
*/ +static const yytype_uint8 yyr2[] = { - 0, 3, 49, 25, 0, 4, 22, 50, 51, 17, - 18, 33, 59, 51, 26, 52, 50, 43, 46, 47, - 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, - 69, 70, 71, 72, 59, 27, 53, 15, 23, 26, - 72, 72, 72, 35, 12, 37, 11, 39, 40, 41, - 9, 10, 7, 8, 30, 36, 5, 6, 42, 43, - 26, 44, 45, 25, 54, 23, 53, 53, 63, 60, - 64, 65, 66, 67, 68, 68, 69, 69, 69, 69, - 70, 70, 71, 71, 72, 72, 72, 14, 15, 16, - 22, 55, 74, 75, 25, 38, 16, 16, 25, 29, - 53, 55, 75, 28, 55, 74, 61, 25, 25, 56, - 57, 25, 22, 25, 34, 13, 21, 22, 23, 24, - 31, 36, 58, 17, 33, 73, 22, 23, 30, 59, - 36, 21, 20, 22, 32, 34, 35, 59, 34, 59, - 35 + 0, 2, 4, 0, 2, 4, 2, 2, 3, 4, + 3, 4, 5, 0, 2, 4, 2, 3, 2, 2, + 3, 4, 2, 9, 5, 2, 0, 2, 2, 3, + 1, 2, 2, 2, 1, 1, 3, 1, 1, 5, + 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, + 1, 3, 3, 1, 3, 3, 3, 3, 3, 3, + 1, 3, 3, 1, 3, 3, 3, 1, 1, 2, + 2, 2, 0, 2, 2, 0, 2, 2, 2, 3, + 2 }; -#define yyerrok (yyerrstatus = 0) -#define yyclearin (yychar = YYEMPTY) -#define YYEMPTY (-2) -#define YYEOF 0 - -#define YYACCEPT goto yyacceptlab -#define YYABORT goto yyabortlab -#define YYERROR goto yyerrorlab - - -/* Like YYERROR except do call yyerror. This remains here temporarily - to ease the transition to the new meaning of YYERROR, for GCC. - Once GCC version 2 has supplanted version 1, this can go. However, - YYFAIL appears to be in use. Nevertheless, it is formally deprecated - in Bison 2.4.2's NEWS entry, where a plan to phase it out is - discussed. */ - -#define YYFAIL goto yyerrlab -#if defined YYFAIL - /* This is here to suppress warnings from the GCC cpp's - -Wunused-macros. Normally we don't worry about that warning, but - some users do, and we want to make it easy for users to remove - YYFAIL uses, which will produce warnings from Bison 2.5. */ -#endif + +#define yyerrok (yyerrstatus = 0) +#define yyclearin (yychar = YYEMPTY) +#define YYEMPTY (-2) +#define YYEOF 0 + +#define YYACCEPT goto yyacceptlab +#define YYABORT goto yyabortlab +#define YYERROR goto yyerrorlab + #define YYRECOVERING() (!!yyerrstatus) @@ -768,27 +722,41 @@ do \ else \ { \ yyerror (YY_("syntax error: cannot back up")); \ - YYERROR; \ - } \ -while (YYID (0)) + YYERROR; \ + } \ +while (0) /* Error token number */ -#define YYTERROR 1 -#define YYERRCODE 256 - - -/* This macro is provided for backward compatibility. */ -#ifndef YY_LOCATION_PRINT -# define YY_LOCATION_PRINT(File, Loc) ((void) 0) +#define YYTERROR 1 +#define YYERRCODE 256 + + +/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N]. + If N is 0, then set CURRENT to the empty location which ends + the previous symbol: RHS[0] (always defined). */ + +#ifndef YYLLOC_DEFAULT +# define YYLLOC_DEFAULT(Current, Rhs, N) \ + do \ + if (N) \ + { \ + (Current).first_line = YYRHSLOC (Rhs, 1).first_line; \ + (Current).first_column = YYRHSLOC (Rhs, 1).first_column; \ + (Current).last_line = YYRHSLOC (Rhs, N).last_line; \ + (Current).last_column = YYRHSLOC (Rhs, N).last_column; \ + } \ + else \ + { \ + (Current).first_line = (Current).last_line = \ + YYRHSLOC (Rhs, 0).last_line; \ + (Current).first_column = (Current).last_column = \ + YYRHSLOC (Rhs, 0).last_column; \ + } \ + while (0) #endif +#define YYRHSLOC(Rhs, K) ((Rhs)[K]) -/* YYLEX -- calling `yylex' with the right arguments. */ -#ifdef YYLEX_PARAM -# define YYLEX yylex (YYLEX_PARAM) -#else -# define YYLEX yylex () -#endif /* Enable debugging if requested. 
*/ #if YYDEBUG @@ -798,50 +766,84 @@ while (YYID (0)) # define YYFPRINTF fprintf # endif -# define YYDPRINTF(Args) \ -do { \ - if (yydebug) \ - YYFPRINTF Args; \ -} while (YYID (0)) +# define YYDPRINTF(Args) \ +do { \ + if (yydebug) \ + YYFPRINTF Args; \ +} while (0) -# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ -do { \ - if (yydebug) \ - { \ - YYFPRINTF (stderr, "%s ", Title); \ - yy_symbol_print (stderr, \ - Type, Value); \ - YYFPRINTF (stderr, "\n"); \ - } \ -} while (YYID (0)) +/* YY_LOCATION_PRINT -- Print the location on the stream. + This macro was not mandated originally: define only if we know + we won't break user code: when these are the locations we know. */ -/*--------------------------------. -| Print this symbol on YYOUTPUT. | -`--------------------------------*/ +#ifndef YY_LOCATION_PRINT +# if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL -/*ARGSUSED*/ -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) -static void -yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) -#else -static void -yy_symbol_value_print (yyoutput, yytype, yyvaluep) - FILE *yyoutput; - int yytype; - YYSTYPE const * const yyvaluep; +/* Print *YYLOCP on YYO. Private, do not rely on its existence. */ + +YY_ATTRIBUTE_UNUSED +static unsigned +yy_location_print_ (FILE *yyo, YYLTYPE const * const yylocp) +{ + unsigned res = 0; + int end_col = 0 != yylocp->last_column ? yylocp->last_column - 1 : 0; + if (0 <= yylocp->first_line) + { + res += YYFPRINTF (yyo, "%d", yylocp->first_line); + if (0 <= yylocp->first_column) + res += YYFPRINTF (yyo, ".%d", yylocp->first_column); + } + if (0 <= yylocp->last_line) + { + if (yylocp->first_line < yylocp->last_line) + { + res += YYFPRINTF (yyo, "-%d", yylocp->last_line); + if (0 <= end_col) + res += YYFPRINTF (yyo, ".%d", end_col); + } + else if (0 <= end_col && yylocp->first_column < end_col) + res += YYFPRINTF (yyo, "-%d", end_col); + } + return res; + } + +# define YY_LOCATION_PRINT(File, Loc) \ + yy_location_print_ (File, &(Loc)) + +# else +# define YY_LOCATION_PRINT(File, Loc) ((void) 0) +# endif #endif + + +# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \ +do { \ + if (yydebug) \ + { \ + YYFPRINTF (stderr, "%s ", Title); \ + yy_symbol_print (stderr, \ + Type, Value, Location); \ + YYFPRINTF (stderr, "\n"); \ + } \ +} while (0) + + +/*----------------------------------------. +| Print this symbol's value on YYOUTPUT. | +`----------------------------------------*/ + +static void +yy_symbol_value_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, YYLTYPE const * const yylocationp) { FILE *yyo = yyoutput; YYUSE (yyo); + YYUSE (yylocationp); if (!yyvaluep) return; # ifdef YYPRINT if (yytype < YYNTOKENS) YYPRINT (yyoutput, yytoknum[yytype], *yyvaluep); -# else - YYUSE (yyoutput); # endif YYUSE (yytype); } @@ -851,24 +853,15 @@ yy_symbol_value_print (yyoutput, yytype, yyvaluep) | Print this symbol on YYOUTPUT. 
| `--------------------------------*/ -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) -static void -yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep) -#else static void -yy_symbol_print (yyoutput, yytype, yyvaluep) - FILE *yyoutput; - int yytype; - YYSTYPE const * const yyvaluep; -#endif +yy_symbol_print (FILE *yyoutput, int yytype, YYSTYPE const * const yyvaluep, YYLTYPE const * const yylocationp) { - if (yytype < YYNTOKENS) - YYFPRINTF (yyoutput, "token %s (", yytname[yytype]); - else - YYFPRINTF (yyoutput, "nterm %s (", yytname[yytype]); + YYFPRINTF (yyoutput, "%s %s (", + yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]); - yy_symbol_value_print (yyoutput, yytype, yyvaluep); + YY_LOCATION_PRINT (yyoutput, *yylocationp); + YYFPRINTF (yyoutput, ": "); + yy_symbol_value_print (yyoutput, yytype, yyvaluep, yylocationp); YYFPRINTF (yyoutput, ")"); } @@ -877,16 +870,8 @@ yy_symbol_print (yyoutput, yytype, yyvaluep) | TOP (included). | `------------------------------------------------------------------*/ -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) static void yy_stack_print (yytype_int16 *yybottom, yytype_int16 *yytop) -#else -static void -yy_stack_print (yybottom, yytop) - yytype_int16 *yybottom; - yytype_int16 *yytop; -#endif { YYFPRINTF (stderr, "Stack now"); for (; yybottom <= yytop; yybottom++) @@ -897,49 +882,42 @@ yy_stack_print (yybottom, yytop) YYFPRINTF (stderr, "\n"); } -# define YY_STACK_PRINT(Bottom, Top) \ -do { \ - if (yydebug) \ - yy_stack_print ((Bottom), (Top)); \ -} while (YYID (0)) +# define YY_STACK_PRINT(Bottom, Top) \ +do { \ + if (yydebug) \ + yy_stack_print ((Bottom), (Top)); \ +} while (0) /*------------------------------------------------. | Report that the YYRULE is going to be reduced. | `------------------------------------------------*/ -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) -static void -yy_reduce_print (YYSTYPE *yyvsp, int yyrule) -#else static void -yy_reduce_print (yyvsp, yyrule) - YYSTYPE *yyvsp; - int yyrule; -#endif +yy_reduce_print (yytype_int16 *yyssp, YYSTYPE *yyvsp, YYLTYPE *yylsp, int yyrule) { + unsigned long int yylno = yyrline[yyrule]; int yynrhs = yyr2[yyrule]; int yyi; - unsigned long int yylno = yyrline[yyrule]; YYFPRINTF (stderr, "Reducing stack by rule %d (line %lu):\n", - yyrule - 1, yylno); + yyrule - 1, yylno); /* The symbols being reduced. */ for (yyi = 0; yyi < yynrhs; yyi++) { YYFPRINTF (stderr, " $%d = ", yyi + 1); - yy_symbol_print (stderr, yyrhs[yyprhs[yyrule] + yyi], - &(yyvsp[(yyi + 1) - (yynrhs)]) - ); + yy_symbol_print (stderr, + yystos[yyssp[yyi + 1 - yynrhs]], + &(yyvsp[(yyi + 1) - (yynrhs)]) + , &(yylsp[(yyi + 1) - (yynrhs)]) ); YYFPRINTF (stderr, "\n"); } } -# define YY_REDUCE_PRINT(Rule) \ -do { \ - if (yydebug) \ - yy_reduce_print (yyvsp, Rule); \ -} while (YYID (0)) +# define YY_REDUCE_PRINT(Rule) \ +do { \ + if (yydebug) \ + yy_reduce_print (yyssp, yyvsp, yylsp, Rule); \ +} while (0) /* Nonzero means print parse trace. It is left uninitialized so that multiple parsers can coexist. */ @@ -953,7 +931,7 @@ int yydebug; /* YYINITDEPTH -- initial size of the parser's stacks. */ -#ifndef YYINITDEPTH +#ifndef YYINITDEPTH # define YYINITDEPTH 200 #endif @@ -976,15 +954,8 @@ int yydebug; # define yystrlen strlen # else /* Return the length of YYSTR. 
*/ -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) static YYSIZE_T yystrlen (const char *yystr) -#else -static YYSIZE_T -yystrlen (yystr) - const char *yystr; -#endif { YYSIZE_T yylen; for (yylen = 0; yystr[yylen]; yylen++) @@ -1000,16 +971,8 @@ yystrlen (yystr) # else /* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in YYDEST. */ -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) static char * yystpcpy (char *yydest, const char *yysrc) -#else -static char * -yystpcpy (yydest, yysrc) - char *yydest; - const char *yysrc; -#endif { char *yyd = yydest; const char *yys = yysrc; @@ -1039,27 +1002,27 @@ yytnamerr (char *yyres, const char *yystr) char const *yyp = yystr; for (;;) - switch (*++yyp) - { - case '\'': - case ',': - goto do_not_strip_quotes; - - case '\\': - if (*++yyp != '\\') - goto do_not_strip_quotes; - /* Fall through. */ - default: - if (yyres) - yyres[yyn] = *yyp; - yyn++; - break; - - case '"': - if (yyres) - yyres[yyn] = '\0'; - return yyn; - } + switch (*++yyp) + { + case '\'': + case ',': + goto do_not_strip_quotes; + + case '\\': + if (*++yyp != '\\') + goto do_not_strip_quotes; + /* Fall through. */ + default: + if (yyres) + yyres[yyn] = *yyp; + yyn++; + break; + + case '"': + if (yyres) + yyres[yyn] = '\0'; + return yyn; + } do_not_strip_quotes: ; } @@ -1082,11 +1045,11 @@ static int yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, yytype_int16 *yyssp, int yytoken) { - YYSIZE_T yysize0 = yytnamerr (YY_NULL, yytname[yytoken]); + YYSIZE_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]); YYSIZE_T yysize = yysize0; enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; /* Internationalized format string. */ - const char *yyformat = YY_NULL; + const char *yyformat = YY_NULLPTR; /* Arguments of yyformat. */ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; /* Number of reported tokens (one for the "unexpected", one per @@ -1094,10 +1057,6 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, int yycount = 0; /* There are many possibilities here to consider: - - Assume YYFAIL is not used. It's too flawed to consider. See - <http://lists.gnu.org/archive/html/bison-patches/2009-12/msg00024.html> - for details. YYERROR is fine as it does not invoke this - function. - If this state is a consistent state with a default action, then the only way this function was invoked is if the default action is an error action. In that case, don't check for expected @@ -1147,7 +1106,7 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, } yyarg[yycount++] = yytname[yyx]; { - YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULL, yytname[yyx]); + YYSIZE_T yysize1 = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]); if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)) return 2; @@ -1214,26 +1173,18 @@ yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg, | Release the memory associated to this symbol. 
| `-----------------------------------------------*/ -/*ARGSUSED*/ -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) -static void -yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep) -#else static void -yydestruct (yymsg, yytype, yyvaluep) - const char *yymsg; - int yytype; - YYSTYPE *yyvaluep; -#endif +yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, YYLTYPE *yylocationp) { YYUSE (yyvaluep); - + YYUSE (yylocationp); if (!yymsg) yymsg = "Deleting"; YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp); + YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN YYUSE (yytype); + YY_IGNORE_MAYBE_UNINITIALIZED_END } @@ -1242,18 +1193,14 @@ yydestruct (yymsg, yytype, yyvaluep) /* The lookahead symbol. */ int yychar; - -#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN -# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN -# define YY_IGNORE_MAYBE_UNINITIALIZED_END -#endif -#ifndef YY_INITIAL_VALUE -# define YY_INITIAL_VALUE(Value) /* Nothing. */ -#endif - /* The semantic value of the lookahead symbol. */ -YYSTYPE yylval YY_INITIAL_VALUE(yyval_default); - +YYSTYPE yylval; +/* Location data for the lookahead symbol. */ +YYLTYPE yylloc +# if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL + = { 1, 1, 1, 1 } +# endif +; /* Number of syntax errors so far. */ int yynerrs; @@ -1262,35 +1209,17 @@ int yynerrs; | yyparse. | `----------*/ -#ifdef YYPARSE_PARAM -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) -int -yyparse (void *YYPARSE_PARAM) -#else -int -yyparse (YYPARSE_PARAM) - void *YYPARSE_PARAM; -#endif -#else /* ! YYPARSE_PARAM */ -#if (defined __STDC__ || defined __C99__FUNC__ \ - || defined __cplusplus || defined _MSC_VER) int yyparse (void) -#else -int -yyparse () - -#endif -#endif { int yystate; /* Number of tokens to shift before error messages enabled. */ int yyerrstatus; /* The stacks and their tools: - `yyss': related to states. - `yyvs': related to semantic values. + 'yyss': related to states. + 'yyvs': related to semantic values. + 'yyls': related to locations. Refer to the stacks through separate pointers, to allow yyoverflow to reallocate them elsewhere. */ @@ -1305,6 +1234,14 @@ yyparse () YYSTYPE *yyvs; YYSTYPE *yyvsp; + /* The location stack. */ + YYLTYPE yylsa[YYINITDEPTH]; + YYLTYPE *yyls; + YYLTYPE *yylsp; + + /* The locations where the error started and ended. */ + YYLTYPE yyerror_range[3]; + YYSIZE_T yystacksize; int yyn; @@ -1314,6 +1251,7 @@ yyparse () /* The variables used to return semantic value and location from the action routines. */ YYSTYPE yyval; + YYLTYPE yyloc; #if YYERROR_VERBOSE /* Buffer for error messages, and its allocated size. */ @@ -1322,7 +1260,7 @@ yyparse () YYSIZE_T yymsg_alloc = sizeof yymsgbuf; #endif -#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N)) +#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N), yylsp -= (N)) /* The number of symbols on the RHS of the reduced rule. Keep to zero when no symbol should be popped. */ @@ -1330,6 +1268,7 @@ yyparse () yyssp = yyss = yyssa; yyvsp = yyvs = yyvsa; + yylsp = yyls = yylsa; yystacksize = YYINITDEPTH; YYDPRINTF ((stderr, "Starting parse\n")); @@ -1338,6 +1277,7 @@ yyparse () yyerrstatus = 0; yynerrs = 0; yychar = YYEMPTY; /* Cause a token to be read. */ + yylsp[0] = yylloc; goto yysetstate; /*------------------------------------------------------------. @@ -1358,23 +1298,26 @@ yyparse () #ifdef yyoverflow { - /* Give user a chance to reallocate the stack. 
Use copies of - these so that the &'s don't force the real ones into - memory. */ - YYSTYPE *yyvs1 = yyvs; - yytype_int16 *yyss1 = yyss; - - /* Each stack pointer address is followed by the size of the - data in use in that stack, in bytes. This used to be a - conditional around just the two extra args, but that might - be undefined if yyoverflow is a macro. */ - yyoverflow (YY_("memory exhausted"), - &yyss1, yysize * sizeof (*yyssp), - &yyvs1, yysize * sizeof (*yyvsp), - &yystacksize); - - yyss = yyss1; - yyvs = yyvs1; + /* Give user a chance to reallocate the stack. Use copies of + these so that the &'s don't force the real ones into + memory. */ + YYSTYPE *yyvs1 = yyvs; + yytype_int16 *yyss1 = yyss; + YYLTYPE *yyls1 = yyls; + + /* Each stack pointer address is followed by the size of the + data in use in that stack, in bytes. This used to be a + conditional around just the two extra args, but that might + be undefined if yyoverflow is a macro. */ + yyoverflow (YY_("memory exhausted"), + &yyss1, yysize * sizeof (*yyssp), + &yyvs1, yysize * sizeof (*yyvsp), + &yyls1, yysize * sizeof (*yylsp), + &yystacksize); + + yyls = yyls1; + yyss = yyss1; + yyvs = yyvs1; } #else /* no yyoverflow */ # ifndef YYSTACK_RELOCATE @@ -1382,34 +1325,36 @@ yyparse () # else /* Extend the stack our own way. */ if (YYMAXDEPTH <= yystacksize) - goto yyexhaustedlab; + goto yyexhaustedlab; yystacksize *= 2; if (YYMAXDEPTH < yystacksize) - yystacksize = YYMAXDEPTH; + yystacksize = YYMAXDEPTH; { - yytype_int16 *yyss1 = yyss; - union yyalloc *yyptr = - (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); - if (! yyptr) - goto yyexhaustedlab; - YYSTACK_RELOCATE (yyss_alloc, yyss); - YYSTACK_RELOCATE (yyvs_alloc, yyvs); + yytype_int16 *yyss1 = yyss; + union yyalloc *yyptr = + (union yyalloc *) YYSTACK_ALLOC (YYSTACK_BYTES (yystacksize)); + if (! yyptr) + goto yyexhaustedlab; + YYSTACK_RELOCATE (yyss_alloc, yyss); + YYSTACK_RELOCATE (yyvs_alloc, yyvs); + YYSTACK_RELOCATE (yyls_alloc, yyls); # undef YYSTACK_RELOCATE - if (yyss1 != yyssa) - YYSTACK_FREE (yyss1); + if (yyss1 != yyssa) + YYSTACK_FREE (yyss1); } # endif #endif /* no yyoverflow */ yyssp = yyss + yysize - 1; yyvsp = yyvs + yysize - 1; + yylsp = yyls + yysize - 1; YYDPRINTF ((stderr, "Stack size increased to %lu\n", - (unsigned long int) yystacksize)); + (unsigned long int) yystacksize)); if (yyss + yystacksize - 1 <= yyssp) - YYABORT; + YYABORT; } YYDPRINTF ((stderr, "Entering state %d\n", yystate)); @@ -1438,7 +1383,7 @@ yybackup: if (yychar == YYEMPTY) { YYDPRINTF ((stderr, "Reading a token: ")); - yychar = YYLEX; + yychar = yylex (); } if (yychar <= YYEOF) @@ -1481,7 +1426,7 @@ yybackup: YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END - + *++yylsp = yylloc; goto yynewstate; @@ -1503,7 +1448,7 @@ yyreduce: yylen = yyr2[yyn]; /* If YYLEN is nonzero, implement the default value of the action: - `$$ = $1'. + '$$ = $1'. Otherwise, the following line sets YYVAL to garbage. This behavior is undocumented and Bison @@ -1512,287 +1457,303 @@ yyreduce: GCC warning that YYVAL may be used uninitialized. */ yyval = yyvsp[1-yylen]; - + /* Default location. 
*/ + YYLLOC_DEFAULT (yyloc, (yylsp - yylen), yylen); YY_REDUCE_PRINT (yyn); switch (yyn) { case 2: -/* Line 1787 of yacc.c */ -#line 110 "dtc-parser.y" +#line 105 "dtc-parser.y" /* yacc.c:1646 */ { - the_boot_info = build_boot_info((yyvsp[(3) - (4)].re), (yyvsp[(4) - (4)].node), - guess_boot_cpuid((yyvsp[(4) - (4)].node))); + the_boot_info = build_boot_info((yyvsp[-1].re), (yyvsp[0].node), + guess_boot_cpuid((yyvsp[0].node))); } +#line 1472 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 3: -/* Line 1787 of yacc.c */ -#line 118 "dtc-parser.y" +#line 113 "dtc-parser.y" /* yacc.c:1646 */ { (yyval.re) = NULL; } +#line 1480 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 4: -/* Line 1787 of yacc.c */ -#line 122 "dtc-parser.y" +#line 117 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.re) = chain_reserve_entry((yyvsp[(1) - (2)].re), (yyvsp[(2) - (2)].re)); + (yyval.re) = chain_reserve_entry((yyvsp[-1].re), (yyvsp[0].re)); } +#line 1488 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 5: -/* Line 1787 of yacc.c */ -#line 129 "dtc-parser.y" +#line 124 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.re) = build_reserve_entry((yyvsp[(2) - (4)].integer), (yyvsp[(3) - (4)].integer)); + (yyval.re) = build_reserve_entry((yyvsp[-2].integer), (yyvsp[-1].integer)); } +#line 1496 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 6: -/* Line 1787 of yacc.c */ -#line 133 "dtc-parser.y" +#line 128 "dtc-parser.y" /* yacc.c:1646 */ { - add_label(&(yyvsp[(2) - (2)].re)->labels, (yyvsp[(1) - (2)].labelref)); - (yyval.re) = (yyvsp[(2) - (2)].re); + add_label(&(yyvsp[0].re)->labels, (yyvsp[-1].labelref)); + (yyval.re) = (yyvsp[0].re); } +#line 1505 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 7: -/* Line 1787 of yacc.c */ -#line 141 "dtc-parser.y" +#line 136 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.node) = name_node((yyvsp[(2) - (2)].node), ""); + (yyval.node) = name_node((yyvsp[0].node), ""); } +#line 1513 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 8: -/* Line 1787 of yacc.c */ -#line 145 "dtc-parser.y" +#line 140 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.node) = merge_nodes((yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node)); + (yyval.node) = merge_nodes((yyvsp[-2].node), (yyvsp[0].node)); } +#line 1521 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 9: -/* Line 1787 of yacc.c */ -#line 149 "dtc-parser.y" +#line 145 "dtc-parser.y" /* yacc.c:1646 */ { - struct node *target = get_node_by_ref((yyvsp[(1) - (3)].node), (yyvsp[(2) - (3)].labelref)); + struct node *target = get_node_by_ref((yyvsp[-3].node), (yyvsp[-1].labelref)); + add_label(&target->labels, (yyvsp[-2].labelref)); if (target) - merge_nodes(target, (yyvsp[(3) - (3)].node)); + merge_nodes(target, (yyvsp[0].node)); else - print_error("label or path, '%s', not found", (yyvsp[(2) - (3)].labelref)); - (yyval.node) = (yyvsp[(1) - (3)].node); + ERROR(&(yylsp[-1]), "Label or path %s not found", (yyvsp[-1].labelref)); + (yyval.node) = (yyvsp[-3].node); } +#line 1536 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 10: -/* Line 1787 of yacc.c */ -#line 159 "dtc-parser.y" +#line 156 "dtc-parser.y" /* yacc.c:1646 */ { - struct node *target = get_node_by_ref((yyvsp[(1) - (4)].node), (yyvsp[(3) - (4)].labelref)); + struct node *target = get_node_by_ref((yyvsp[-2].node), (yyvsp[-1].labelref)); - if (!target) - print_error("label or path, '%s', not found", (yyvsp[(3) - (4)].labelref)); + if (target) + merge_nodes(target, (yyvsp[0].node)); else - delete_node(target); - - (yyval.node) = (yyvsp[(1) - (4)].node); + ERROR(&(yylsp[-1]), "Label or path %s not found", 
(yyvsp[-1].labelref)); + (yyval.node) = (yyvsp[-2].node); } +#line 1550 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 11: -/* Line 1787 of yacc.c */ -#line 173 "dtc-parser.y" +#line 166 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.node) = build_node((yyvsp[(2) - (5)].proplist), (yyvsp[(3) - (5)].nodelist)); + struct node *target = get_node_by_ref((yyvsp[-3].node), (yyvsp[-1].labelref)); + + if (target) + delete_node(target); + else + ERROR(&(yylsp[-1]), "Label or path %s not found", (yyvsp[-1].labelref)); + + + (yyval.node) = (yyvsp[-3].node); } +#line 1566 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 12: -/* Line 1787 of yacc.c */ -#line 180 "dtc-parser.y" +#line 181 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.proplist) = NULL; + (yyval.node) = build_node((yyvsp[-3].proplist), (yyvsp[-2].nodelist)); } +#line 1574 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 13: -/* Line 1787 of yacc.c */ -#line 184 "dtc-parser.y" +#line 188 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.proplist) = chain_property((yyvsp[(2) - (2)].prop), (yyvsp[(1) - (2)].proplist)); + (yyval.proplist) = NULL; } +#line 1582 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 14: -/* Line 1787 of yacc.c */ -#line 191 "dtc-parser.y" +#line 192 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.prop) = build_property((yyvsp[(1) - (4)].propnodename), (yyvsp[(3) - (4)].data)); + (yyval.proplist) = chain_property((yyvsp[0].prop), (yyvsp[-1].proplist)); } +#line 1590 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 15: -/* Line 1787 of yacc.c */ -#line 195 "dtc-parser.y" +#line 199 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.prop) = build_property((yyvsp[(1) - (2)].propnodename), empty_data); + (yyval.prop) = build_property((yyvsp[-3].propnodename), (yyvsp[-1].data)); } +#line 1598 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 16: -/* Line 1787 of yacc.c */ -#line 199 "dtc-parser.y" +#line 203 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.prop) = build_property_delete((yyvsp[(2) - (3)].propnodename)); + (yyval.prop) = build_property((yyvsp[-1].propnodename), empty_data); } +#line 1606 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 17: -/* Line 1787 of yacc.c */ -#line 203 "dtc-parser.y" +#line 207 "dtc-parser.y" /* yacc.c:1646 */ { - add_label(&(yyvsp[(2) - (2)].prop)->labels, (yyvsp[(1) - (2)].labelref)); - (yyval.prop) = (yyvsp[(2) - (2)].prop); + (yyval.prop) = build_property_delete((yyvsp[-1].propnodename)); } +#line 1614 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 18: -/* Line 1787 of yacc.c */ -#line 211 "dtc-parser.y" +#line 211 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.data) = data_merge((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].data)); + add_label(&(yyvsp[0].prop)->labels, (yyvsp[-1].labelref)); + (yyval.prop) = (yyvsp[0].prop); } +#line 1623 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 19: -/* Line 1787 of yacc.c */ -#line 215 "dtc-parser.y" +#line 219 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.data) = data_merge((yyvsp[(1) - (3)].data), (yyvsp[(2) - (3)].array).data); + (yyval.data) = data_merge((yyvsp[-1].data), (yyvsp[0].data)); } +#line 1631 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 20: -/* Line 1787 of yacc.c */ -#line 219 "dtc-parser.y" +#line 223 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.data) = data_merge((yyvsp[(1) - (4)].data), (yyvsp[(3) - (4)].data)); + (yyval.data) = data_merge((yyvsp[-2].data), (yyvsp[-1].array).data); } +#line 1639 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 21: -/* Line 1787 of yacc.c */ -#line 223 "dtc-parser.y" +#line 227 "dtc-parser.y" /* yacc.c:1646 */ { - 
(yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), REF_PATH, (yyvsp[(2) - (2)].labelref)); + (yyval.data) = data_merge((yyvsp[-3].data), (yyvsp[-1].data)); } +#line 1647 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 22: -/* Line 1787 of yacc.c */ -#line 227 "dtc-parser.y" +#line 231 "dtc-parser.y" /* yacc.c:1646 */ { - FILE *f = srcfile_relative_open((yyvsp[(4) - (9)].data).val, NULL); + (yyval.data) = data_add_marker((yyvsp[-1].data), REF_PATH, (yyvsp[0].labelref)); + } +#line 1655 "dtc-parser.tab.c" /* yacc.c:1646 */ + break; + + case 23: +#line 235 "dtc-parser.y" /* yacc.c:1646 */ + { + FILE *f = srcfile_relative_open((yyvsp[-5].data).val, NULL); struct data d; - if ((yyvsp[(6) - (9)].integer) != 0) - if (fseek(f, (yyvsp[(6) - (9)].integer), SEEK_SET) != 0) - print_error("Couldn't seek to offset %llu in \"%s\": %s", - (unsigned long long)(yyvsp[(6) - (9)].integer), - (yyvsp[(4) - (9)].data).val, - strerror(errno)); + if ((yyvsp[-3].integer) != 0) + if (fseek(f, (yyvsp[-3].integer), SEEK_SET) != 0) + die("Couldn't seek to offset %llu in \"%s\": %s", + (unsigned long long)(yyvsp[-3].integer), (yyvsp[-5].data).val, + strerror(errno)); - d = data_copy_file(f, (yyvsp[(8) - (9)].integer)); + d = data_copy_file(f, (yyvsp[-1].integer)); - (yyval.data) = data_merge((yyvsp[(1) - (9)].data), d); + (yyval.data) = data_merge((yyvsp[-8].data), d); fclose(f); } +#line 1675 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 23: -/* Line 1787 of yacc.c */ -#line 244 "dtc-parser.y" + case 24: +#line 251 "dtc-parser.y" /* yacc.c:1646 */ { - FILE *f = srcfile_relative_open((yyvsp[(4) - (5)].data).val, NULL); + FILE *f = srcfile_relative_open((yyvsp[-1].data).val, NULL); struct data d = empty_data; d = data_copy_file(f, -1); - (yyval.data) = data_merge((yyvsp[(1) - (5)].data), d); + (yyval.data) = data_merge((yyvsp[-4].data), d); fclose(f); } +#line 1689 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 24: -/* Line 1787 of yacc.c */ -#line 254 "dtc-parser.y" + case 25: +#line 261 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref)); + (yyval.data) = data_add_marker((yyvsp[-1].data), LABEL, (yyvsp[0].labelref)); } +#line 1697 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 25: -/* Line 1787 of yacc.c */ -#line 261 "dtc-parser.y" + case 26: +#line 268 "dtc-parser.y" /* yacc.c:1646 */ { (yyval.data) = empty_data; } +#line 1705 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 26: -/* Line 1787 of yacc.c */ -#line 265 "dtc-parser.y" + case 27: +#line 272 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.data) = (yyvsp[(1) - (2)].data); + (yyval.data) = (yyvsp[-1].data); } +#line 1713 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 27: -/* Line 1787 of yacc.c */ -#line 269 "dtc-parser.y" + case 28: +#line 276 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref)); + (yyval.data) = data_add_marker((yyvsp[-1].data), LABEL, (yyvsp[0].labelref)); } +#line 1721 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 28: -/* Line 1787 of yacc.c */ -#line 276 "dtc-parser.y" + case 29: +#line 283 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.array).data = empty_data; - (yyval.array).bits = eval_literal((yyvsp[(2) - (3)].literal), 0, 7); - - if (((yyval.array).bits != 8) && - ((yyval.array).bits != 16) && - ((yyval.array).bits != 32) && - ((yyval.array).bits != 64)) - { - print_error("Only 8, 16, 32 and 64-bit elements" - " are currently supported"); - 
(yyval.array).bits = 32; + unsigned long long bits; + + bits = (yyvsp[-1].integer); + + if ((bits != 8) && (bits != 16) && + (bits != 32) && (bits != 64)) { + ERROR(&(yylsp[-1]), "Array elements must be" + " 8, 16, 32 or 64-bits"); + bits = 32; } + + (yyval.array).data = empty_data; + (yyval.array).bits = bits; } +#line 1741 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 29: -/* Line 1787 of yacc.c */ -#line 291 "dtc-parser.y" + case 30: +#line 299 "dtc-parser.y" /* yacc.c:1646 */ { (yyval.array).data = empty_data; (yyval.array).bits = 32; } +#line 1750 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 30: -/* Line 1787 of yacc.c */ -#line 296 "dtc-parser.y" + case 31: +#line 304 "dtc-parser.y" /* yacc.c:1646 */ { - if ((yyvsp[(1) - (2)].array).bits < 64) { - uint64_t mask = (1ULL << (yyvsp[(1) - (2)].array).bits) - 1; + if ((yyvsp[-1].array).bits < 64) { + uint64_t mask = (1ULL << (yyvsp[-1].array).bits) - 1; /* * Bits above mask must either be all zero * (positive within range of mask) or all one @@ -1801,275 +1762,258 @@ yyreduce: * within the mask to one (i.e. | in the * mask), all bits are one. */ - if (((yyvsp[(2) - (2)].integer) > mask) && (((yyvsp[(2) - (2)].integer) | mask) != -1ULL)) - print_error( - "integer value out of range " - "%016lx (%d bits)", (yyvsp[(1) - (2)].array).bits); + if (((yyvsp[0].integer) > mask) && (((yyvsp[0].integer) | mask) != -1ULL)) + ERROR(&(yylsp[0]), "Value out of range for" + " %d-bit array element", (yyvsp[-1].array).bits); } - (yyval.array).data = data_append_integer((yyvsp[(1) - (2)].array).data, (yyvsp[(2) - (2)].integer), (yyvsp[(1) - (2)].array).bits); + (yyval.array).data = data_append_integer((yyvsp[-1].array).data, (yyvsp[0].integer), (yyvsp[-1].array).bits); } +#line 1773 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 31: -/* Line 1787 of yacc.c */ -#line 316 "dtc-parser.y" + case 32: +#line 323 "dtc-parser.y" /* yacc.c:1646 */ { - uint64_t val = ~0ULL >> (64 - (yyvsp[(1) - (2)].array).bits); + uint64_t val = ~0ULL >> (64 - (yyvsp[-1].array).bits); - if ((yyvsp[(1) - (2)].array).bits == 32) - (yyvsp[(1) - (2)].array).data = data_add_marker((yyvsp[(1) - (2)].array).data, + if ((yyvsp[-1].array).bits == 32) + (yyvsp[-1].array).data = data_add_marker((yyvsp[-1].array).data, REF_PHANDLE, - (yyvsp[(2) - (2)].labelref)); + (yyvsp[0].labelref)); else - print_error("References are only allowed in " + ERROR(&(yylsp[0]), "References are only allowed in " "arrays with 32-bit elements."); - (yyval.array).data = data_append_integer((yyvsp[(1) - (2)].array).data, val, (yyvsp[(1) - (2)].array).bits); - } - break; - - case 32: -/* Line 1787 of yacc.c */ -#line 330 "dtc-parser.y" - { - (yyval.array).data = data_add_marker((yyvsp[(1) - (2)].array).data, LABEL, (yyvsp[(2) - (2)].labelref)); + (yyval.array).data = data_append_integer((yyvsp[-1].array).data, val, (yyvsp[-1].array).bits); } +#line 1791 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 33: -/* Line 1787 of yacc.c */ -#line 337 "dtc-parser.y" - { - (yyval.integer) = eval_literal((yyvsp[(1) - (1)].literal), 0, 64); - } - break; - - case 34: -/* Line 1787 of yacc.c */ -#line 341 "dtc-parser.y" +#line 337 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.integer) = eval_char_literal((yyvsp[(1) - (1)].literal)); + (yyval.array).data = data_add_marker((yyvsp[-1].array).data, LABEL, (yyvsp[0].labelref)); } +#line 1799 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 35: -/* Line 1787 of yacc.c */ -#line 345 "dtc-parser.y" + case 36: +#line 346 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.integer) = 
(yyvsp[(2) - (3)].integer); + (yyval.integer) = (yyvsp[-1].integer); } +#line 1807 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 38: -/* Line 1787 of yacc.c */ -#line 356 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (5)].integer) ? (yyvsp[(3) - (5)].integer) : (yyvsp[(5) - (5)].integer); } - break; - - case 40: -/* Line 1787 of yacc.c */ -#line 361 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) || (yyvsp[(3) - (3)].integer); } + case 39: +#line 357 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-4].integer) ? (yyvsp[-2].integer) : (yyvsp[0].integer); } +#line 1813 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 42: -/* Line 1787 of yacc.c */ -#line 366 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) && (yyvsp[(3) - (3)].integer); } + case 41: +#line 362 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) || (yyvsp[0].integer); } +#line 1819 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 44: -/* Line 1787 of yacc.c */ -#line 371 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) | (yyvsp[(3) - (3)].integer); } + case 43: +#line 367 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) && (yyvsp[0].integer); } +#line 1825 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 46: -/* Line 1787 of yacc.c */ -#line 376 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) ^ (yyvsp[(3) - (3)].integer); } + case 45: +#line 372 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) | (yyvsp[0].integer); } +#line 1831 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 48: -/* Line 1787 of yacc.c */ -#line 381 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) & (yyvsp[(3) - (3)].integer); } + case 47: +#line 377 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) ^ (yyvsp[0].integer); } +#line 1837 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 50: -/* Line 1787 of yacc.c */ -#line 386 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) == (yyvsp[(3) - (3)].integer); } + case 49: +#line 382 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) & (yyvsp[0].integer); } +#line 1843 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 51: -/* Line 1787 of yacc.c */ -#line 387 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) != (yyvsp[(3) - (3)].integer); } +#line 387 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) == (yyvsp[0].integer); } +#line 1849 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 53: -/* Line 1787 of yacc.c */ -#line 392 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) < (yyvsp[(3) - (3)].integer); } + case 52: +#line 388 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) != (yyvsp[0].integer); } +#line 1855 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 54: -/* Line 1787 of yacc.c */ -#line 393 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) > (yyvsp[(3) - (3)].integer); } +#line 393 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) < (yyvsp[0].integer); } +#line 1861 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 55: -/* Line 1787 of yacc.c */ -#line 394 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) <= (yyvsp[(3) - (3)].integer); } +#line 394 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) > (yyvsp[0].integer); } +#line 1867 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 56: -/* Line 1787 of 
yacc.c */ -#line 395 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) >= (yyvsp[(3) - (3)].integer); } +#line 395 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) <= (yyvsp[0].integer); } +#line 1873 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 57: -/* Line 1787 of yacc.c */ -#line 399 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) << (yyvsp[(3) - (3)].integer); } +#line 396 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) >= (yyvsp[0].integer); } +#line 1879 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 58: -/* Line 1787 of yacc.c */ -#line 400 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) >> (yyvsp[(3) - (3)].integer); } +#line 400 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) << (yyvsp[0].integer); } +#line 1885 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 60: -/* Line 1787 of yacc.c */ -#line 405 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) + (yyvsp[(3) - (3)].integer); } + case 59: +#line 401 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) >> (yyvsp[0].integer); } +#line 1891 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 61: -/* Line 1787 of yacc.c */ -#line 406 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) - (yyvsp[(3) - (3)].integer); } +#line 406 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) + (yyvsp[0].integer); } +#line 1897 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 63: -/* Line 1787 of yacc.c */ -#line 411 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) * (yyvsp[(3) - (3)].integer); } + case 62: +#line 407 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) - (yyvsp[0].integer); } +#line 1903 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 64: -/* Line 1787 of yacc.c */ -#line 412 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) / (yyvsp[(3) - (3)].integer); } +#line 412 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) * (yyvsp[0].integer); } +#line 1909 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 65: -/* Line 1787 of yacc.c */ -#line 413 "dtc-parser.y" - { (yyval.integer) = (yyvsp[(1) - (3)].integer) % (yyvsp[(3) - (3)].integer); } +#line 413 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) / (yyvsp[0].integer); } +#line 1915 "dtc-parser.tab.c" /* yacc.c:1646 */ break; - case 68: -/* Line 1787 of yacc.c */ -#line 419 "dtc-parser.y" - { (yyval.integer) = -(yyvsp[(2) - (2)].integer); } + case 66: +#line 414 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = (yyvsp[-2].integer) % (yyvsp[0].integer); } +#line 1921 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 69: -/* Line 1787 of yacc.c */ -#line 420 "dtc-parser.y" - { (yyval.integer) = ~(yyvsp[(2) - (2)].integer); } +#line 420 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = -(yyvsp[0].integer); } +#line 1927 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 70: -/* Line 1787 of yacc.c */ -#line 421 "dtc-parser.y" - { (yyval.integer) = !(yyvsp[(2) - (2)].integer); } +#line 421 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = ~(yyvsp[0].integer); } +#line 1933 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 71: -/* Line 1787 of yacc.c */ -#line 426 "dtc-parser.y" - { - (yyval.data) = empty_data; - } +#line 422 "dtc-parser.y" /* yacc.c:1646 */ + { (yyval.integer) = !(yyvsp[0].integer); } +#line 1939 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 72: -/* Line 
1787 of yacc.c */ -#line 430 "dtc-parser.y" +#line 427 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.data) = data_append_byte((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].byte)); + (yyval.data) = empty_data; } +#line 1947 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 73: -/* Line 1787 of yacc.c */ -#line 434 "dtc-parser.y" +#line 431 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref)); + (yyval.data) = data_append_byte((yyvsp[-1].data), (yyvsp[0].byte)); } +#line 1955 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 74: -/* Line 1787 of yacc.c */ -#line 441 "dtc-parser.y" +#line 435 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.nodelist) = NULL; + (yyval.data) = data_add_marker((yyvsp[-1].data), LABEL, (yyvsp[0].labelref)); } +#line 1963 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 75: -/* Line 1787 of yacc.c */ -#line 445 "dtc-parser.y" +#line 442 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.nodelist) = chain_node((yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].nodelist)); + (yyval.nodelist) = NULL; } +#line 1971 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 76: -/* Line 1787 of yacc.c */ -#line 449 "dtc-parser.y" +#line 446 "dtc-parser.y" /* yacc.c:1646 */ { - print_error("syntax error: properties must precede subnodes"); - YYERROR; + (yyval.nodelist) = chain_node((yyvsp[-1].node), (yyvsp[0].nodelist)); } +#line 1979 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 77: -/* Line 1787 of yacc.c */ -#line 457 "dtc-parser.y" +#line 450 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.node) = name_node((yyvsp[(2) - (2)].node), (yyvsp[(1) - (2)].propnodename)); + ERROR(&(yylsp[0]), "Properties must precede subnodes"); + YYERROR; } +#line 1988 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 78: -/* Line 1787 of yacc.c */ -#line 461 "dtc-parser.y" +#line 458 "dtc-parser.y" /* yacc.c:1646 */ { - (yyval.node) = name_node(build_node_delete(), (yyvsp[(2) - (3)].propnodename)); + (yyval.node) = name_node((yyvsp[0].node), (yyvsp[-1].propnodename)); } +#line 1996 "dtc-parser.tab.c" /* yacc.c:1646 */ break; case 79: -/* Line 1787 of yacc.c */ -#line 465 "dtc-parser.y" +#line 462 "dtc-parser.y" /* yacc.c:1646 */ + { + (yyval.node) = name_node(build_node_delete(), (yyvsp[-1].propnodename)); + } +#line 2004 "dtc-parser.tab.c" /* yacc.c:1646 */ + break; + + case 80: +#line 466 "dtc-parser.y" /* yacc.c:1646 */ { - add_label(&(yyvsp[(2) - (2)].node)->labels, (yyvsp[(1) - (2)].labelref)); - (yyval.node) = (yyvsp[(2) - (2)].node); + add_label(&(yyvsp[0].node)->labels, (yyvsp[-1].labelref)); + (yyval.node) = (yyvsp[0].node); } +#line 2013 "dtc-parser.tab.c" /* yacc.c:1646 */ break; -/* Line 1787 of yacc.c */ -#line 2073 "dtc-parser.tab.c" +#line 2017 "dtc-parser.tab.c" /* yacc.c:1646 */ default: break; } /* User semantic actions sometimes alter yychar, and that requires @@ -2090,8 +2034,9 @@ yyreduce: YY_STACK_PRINT (yyss, yyssp); *++yyvsp = yyval; + *++yylsp = yyloc; - /* Now `shift' the result of the reduction. Determine what state + /* Now 'shift' the result of the reduction. Determine what state that goes to, based on the state we popped back to and the rule number reduced by. */ @@ -2106,9 +2051,9 @@ yyreduce: goto yynewstate; -/*------------------------------------. -| yyerrlab -- here on detecting error | -`------------------------------------*/ +/*--------------------------------------. +| yyerrlab -- here on detecting error. | +`--------------------------------------*/ yyerrlab: /* Make sure we have latest lookahead translation. 
See comments at user semantic actions for why this is necessary. */ @@ -2154,25 +2099,25 @@ yyerrlab: #endif } - + yyerror_range[1] = yylloc; if (yyerrstatus == 3) { /* If just tried and failed to reuse lookahead token after an - error, discard it. */ + error, discard it. */ if (yychar <= YYEOF) - { - /* Return failure if at end of input. */ - if (yychar == YYEOF) - YYABORT; - } + { + /* Return failure if at end of input. */ + if (yychar == YYEOF) + YYABORT; + } else - { - yydestruct ("Error: discarding", - yytoken, &yylval); - yychar = YYEMPTY; - } + { + yydestruct ("Error: discarding", + yytoken, &yylval, &yylloc); + yychar = YYEMPTY; + } } /* Else will try to reuse lookahead token after shifting the error @@ -2191,7 +2136,8 @@ yyerrorlab: if (/*CONSTCOND*/ 0) goto yyerrorlab; - /* Do not reclaim the symbols of the rule which action triggered + yyerror_range[1] = yylsp[1-yylen]; + /* Do not reclaim the symbols of the rule whose action triggered this YYERROR. */ YYPOPSTACK (yylen); yylen = 0; @@ -2204,29 +2150,29 @@ yyerrorlab: | yyerrlab1 -- common code for both syntax error and YYERROR. | `-------------------------------------------------------------*/ yyerrlab1: - yyerrstatus = 3; /* Each real token shifted decrements this. */ + yyerrstatus = 3; /* Each real token shifted decrements this. */ for (;;) { yyn = yypact[yystate]; if (!yypact_value_is_default (yyn)) - { - yyn += YYTERROR; - if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) - { - yyn = yytable[yyn]; - if (0 < yyn) - break; - } - } + { + yyn += YYTERROR; + if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR) + { + yyn = yytable[yyn]; + if (0 < yyn) + break; + } + } /* Pop the current state because it cannot handle the error token. */ if (yyssp == yyss) - YYABORT; - + YYABORT; + yyerror_range[1] = *yylsp; yydestruct ("Error: popping", - yystos[yystate], yyvsp); + yystos[yystate], yyvsp, yylsp); YYPOPSTACK (1); yystate = *yyssp; YY_STACK_PRINT (yyss, yyssp); @@ -2236,6 +2182,11 @@ yyerrlab1: *++yyvsp = yylval; YY_IGNORE_MAYBE_UNINITIALIZED_END + yyerror_range[2] = yylloc; + /* Using YYLLOC is tempting, but would change the location of + the lookahead. YYLOC is available though. */ + YYLLOC_DEFAULT (yyloc, yyerror_range, 2); + *++yylsp = yyloc; /* Shift the error token. */ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp); @@ -2275,16 +2226,16 @@ yyreturn: user semantic actions for why this is necessary. */ yytoken = YYTRANSLATE (yychar); yydestruct ("Cleanup: discarding lookahead", - yytoken, &yylval); + yytoken, &yylval, &yylloc); } - /* Do not reclaim the symbols of the rule which action triggered + /* Do not reclaim the symbols of the rule whose action triggered this YYABORT or YYACCEPT. */ YYPOPSTACK (yylen); YY_STACK_PRINT (yyss, yyssp); while (yyssp != yyss) { yydestruct ("Cleanup: popping", - yystos[*yyssp], yyvsp); + yystos[*yyssp], yyvsp, yylsp); YYPOPSTACK (1); } #ifndef yyoverflow @@ -2295,72 +2246,12 @@ yyreturn: if (yymsg != yymsgbuf) YYSTACK_FREE (yymsg); #endif - /* Make sure YYID is used. */ - return YYID (yyresult); + return yyresult; } +#line 472 "dtc-parser.y" /* yacc.c:1906 */ -/* Line 2050 of yacc.c */ -#line 471 "dtc-parser.y" - - -void print_error(char const *fmt, ...) 
-{ - va_list va; - - va_start(va, fmt); - srcpos_verror(&yylloc, fmt, va); - va_end(va); - - treesource_error = 1; -} - -void yyerror(char const *s) { - print_error("%s", s); -} - -static unsigned long long eval_literal(const char *s, int base, int bits) -{ - unsigned long long val; - char *e; - - errno = 0; - val = strtoull(s, &e, base); - if (*e) { - size_t uls = strspn(e, "UL"); - if (e[uls]) - print_error("bad characters in literal"); - } - if ((errno == ERANGE) - || ((bits < 64) && (val >= (1ULL << bits)))) - print_error("literal out of range"); - else if (errno != 0) - print_error("bad literal"); - return val; -} - -static unsigned char eval_char_literal(const char *s) +void yyerror(char const *s) { - int i = 1; - char c = s[0]; - - if (c == '\0') - { - print_error("empty character literal"); - return 0; - } - - /* - * If the first character in the character literal is a \ then process - * the remaining characters as an escape encoding. If the first - * character is neither an escape or a terminator it should be the only - * character in the literal and will be returned. - */ - if (c == '\\') - c = get_escape_char(s, &i); - - if (s[i] != '\0') - print_error("malformed character literal"); - - return c; + ERROR(&yylloc, "%s", s); } diff --git a/scripts/dtc/dtc-parser.tab.h_shipped b/scripts/dtc/dtc-parser.tab.h_shipped index b2e7a86cd85e..30867c688300 100644 --- a/scripts/dtc/dtc-parser.tab.h_shipped +++ b/scripts/dtc/dtc-parser.tab.h_shipped @@ -1,19 +1,19 @@ -/* A Bison parser, made by GNU Bison 2.7.12-4996. */ +/* A Bison parser, made by GNU Bison 3.0.2. */ /* Bison interface for Yacc-like parsers in C - - Copyright (C) 1984, 1989-1990, 2000-2013 Free Software Foundation, Inc. - + + Copyright (C) 1984, 1989-1990, 2000-2013 Free Software Foundation, Inc. + This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. - + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - + You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ @@ -26,13 +26,13 @@ special exception, which will cause the skeleton and the resulting Bison output files to be licensed under the GNU General Public License without this special exception. - + This special exception was added by the Free Software Foundation in version 2.2 of Bison. */ #ifndef YY_YY_DTC_PARSER_TAB_H_INCLUDED # define YY_YY_DTC_PARSER_TAB_H_INCLUDED -/* Enabling traces. */ +/* Debug traces. */ #ifndef YYDEBUG # define YYDEBUG 0 #endif @@ -40,48 +40,44 @@ extern int yydebug; #endif -/* Tokens. */ +/* Token type. */ #ifndef YYTOKENTYPE # define YYTOKENTYPE - /* Put the tokens into the symbol table, so that GDB and other debuggers - know about them. 
*/ - enum yytokentype { - DT_V1 = 258, - DT_MEMRESERVE = 259, - DT_LSHIFT = 260, - DT_RSHIFT = 261, - DT_LE = 262, - DT_GE = 263, - DT_EQ = 264, - DT_NE = 265, - DT_AND = 266, - DT_OR = 267, - DT_BITS = 268, - DT_DEL_PROP = 269, - DT_DEL_NODE = 270, - DT_PROPNODENAME = 271, - DT_LITERAL = 272, - DT_CHAR_LITERAL = 273, - DT_BASE = 274, - DT_BYTE = 275, - DT_STRING = 276, - DT_LABEL = 277, - DT_REF = 278, - DT_INCBIN = 279 - }; + enum yytokentype + { + DT_V1 = 258, + DT_MEMRESERVE = 259, + DT_LSHIFT = 260, + DT_RSHIFT = 261, + DT_LE = 262, + DT_GE = 263, + DT_EQ = 264, + DT_NE = 265, + DT_AND = 266, + DT_OR = 267, + DT_BITS = 268, + DT_DEL_PROP = 269, + DT_DEL_NODE = 270, + DT_PROPNODENAME = 271, + DT_LITERAL = 272, + DT_CHAR_LITERAL = 273, + DT_BYTE = 274, + DT_STRING = 275, + DT_LABEL = 276, + DT_REF = 277, + DT_INCBIN = 278 + }; #endif - +/* Value type. */ #if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED -typedef union YYSTYPE +typedef union YYSTYPE YYSTYPE; +union YYSTYPE { -/* Line 2053 of yacc.c */ -#line 40 "dtc-parser.y" +#line 38 "dtc-parser.y" /* yacc.c:1909 */ char *propnodename; - char *literal; char *labelref; - unsigned int cbase; uint8_t byte; struct data data; @@ -97,29 +93,29 @@ typedef union YYSTYPE struct reserve_info *re; uint64_t integer; - -/* Line 2053 of yacc.c */ -#line 103 "dtc-parser.tab.h" -} YYSTYPE; +#line 97 "dtc-parser.tab.h" /* yacc.c:1909 */ +}; # define YYSTYPE_IS_TRIVIAL 1 -# define yystype YYSTYPE /* obsolescent; will be withdrawn */ # define YYSTYPE_IS_DECLARED 1 #endif -extern YYSTYPE yylval; - -#ifdef YYPARSE_PARAM -#if defined __STDC__ || defined __cplusplus -int yyparse (void *YYPARSE_PARAM); -#else -int yyparse (); +/* Location type. */ +#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED +typedef struct YYLTYPE YYLTYPE; +struct YYLTYPE +{ + int first_line; + int first_column; + int last_line; + int last_column; +}; +# define YYLTYPE_IS_DECLARED 1 +# define YYLTYPE_IS_TRIVIAL 1 #endif -#else /* ! YYPARSE_PARAM */ -#if defined __STDC__ || defined __cplusplus + + +extern YYSTYPE yylval; +extern YYLTYPE yylloc; int yyparse (void); -#else -int yyparse (); -#endif -#endif /* ! YYPARSE_PARAM */ #endif /* !YY_YY_DTC_PARSER_TAB_H_INCLUDED */ diff --git a/scripts/dtc/dtc-parser.y b/scripts/dtc/dtc-parser.y index f412460f94d7..5a897e36562d 100644 --- a/scripts/dtc/dtc-parser.y +++ b/scripts/dtc/dtc-parser.y @@ -17,31 +17,27 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA */ - %{ #include <stdio.h> #include "dtc.h" #include "srcpos.h" -YYLTYPE yylloc; - extern int yylex(void); -extern void print_error(char const *fmt, ...); extern void yyerror(char const *s); +#define ERROR(loc, ...) 
\ + do { \ + srcpos_error((loc), "Error", __VA_ARGS__); \ + treesource_error = true; \ + } while (0) extern struct boot_info *the_boot_info; -extern int treesource_error; - -static unsigned long long eval_literal(const char *s, int base, int bits); -static unsigned char eval_char_literal(const char *s); +extern bool treesource_error; %} %union { char *propnodename; - char *literal; char *labelref; - unsigned int cbase; uint8_t byte; struct data data; @@ -65,9 +61,8 @@ static unsigned char eval_char_literal(const char *s); %token DT_DEL_PROP %token DT_DEL_NODE %token <propnodename> DT_PROPNODENAME -%token <literal> DT_LITERAL -%token <literal> DT_CHAR_LITERAL -%token <cbase> DT_BASE +%token <integer> DT_LITERAL +%token <integer> DT_CHAR_LITERAL %token <byte> DT_BYTE %token <data> DT_STRING %token <labelref> DT_LABEL @@ -145,6 +140,18 @@ devicetree: { $$ = merge_nodes($1, $3); } + + | devicetree DT_LABEL DT_REF nodedef + { + struct node *target = get_node_by_ref($1, $3); + + add_label(&target->labels, $2); + if (target) + merge_nodes(target, $4); + else + ERROR(&@3, "Label or path %s not found", $3); + $$ = $1; + } | devicetree DT_REF nodedef { struct node *target = get_node_by_ref($1, $2); @@ -152,17 +159,18 @@ devicetree: if (target) merge_nodes(target, $3); else - print_error("label or path, '%s', not found", $2); + ERROR(&@2, "Label or path %s not found", $2); $$ = $1; } | devicetree DT_DEL_NODE DT_REF ';' { struct node *target = get_node_by_ref($1, $3); - if (!target) - print_error("label or path, '%s', not found", $3); - else + if (target) delete_node(target); + else + ERROR(&@3, "Label or path %s not found", $3); + $$ = $1; } @@ -230,10 +238,9 @@ propdata: if ($6 != 0) if (fseek(f, $6, SEEK_SET) != 0) - print_error("Couldn't seek to offset %llu in \"%s\": %s", - (unsigned long long)$6, - $4.val, - strerror(errno)); + die("Couldn't seek to offset %llu in \"%s\": %s", + (unsigned long long)$6, $4.val, + strerror(errno)); d = data_copy_file(f, $8); @@ -274,18 +281,19 @@ propdataprefix: arrayprefix: DT_BITS DT_LITERAL '<' { - $$.data = empty_data; - $$.bits = eval_literal($2, 0, 7); - - if (($$.bits != 8) && - ($$.bits != 16) && - ($$.bits != 32) && - ($$.bits != 64)) - { - print_error("Only 8, 16, 32 and 64-bit elements" - " are currently supported"); - $$.bits = 32; + unsigned long long bits; + + bits = $2; + + if ((bits != 8) && (bits != 16) && + (bits != 32) && (bits != 64)) { + ERROR(&@2, "Array elements must be" + " 8, 16, 32 or 64-bits"); + bits = 32; } + + $$.data = empty_data; + $$.bits = bits; } | '<' { @@ -305,9 +313,8 @@ arrayprefix: * mask), all bits are one. */ if (($2 > mask) && (($2 | mask) != -1ULL)) - print_error( - "integer value out of range " - "%016lx (%d bits)", $1.bits); + ERROR(&@2, "Value out of range for" + " %d-bit array element", $1.bits); } $$.data = data_append_integer($1.data, $2, $1.bits); @@ -321,7 +328,7 @@ arrayprefix: REF_PHANDLE, $2); else - print_error("References are only allowed in " + ERROR(&@2, "References are only allowed in " "arrays with 32-bit elements."); $$.data = data_append_integer($1.data, val, $1.bits); @@ -334,13 +341,7 @@ arrayprefix: integer_prim: DT_LITERAL - { - $$ = eval_literal($1, 0, 64); - } | DT_CHAR_LITERAL - { - $$ = eval_char_literal($1); - } | '(' integer_expr ')' { $$ = $2; @@ -447,7 +448,7 @@ subnodes: } | subnode propdef { - print_error("syntax error: properties must precede subnodes"); + ERROR(&@2, "Properties must precede subnodes"); YYERROR; } ; @@ -470,63 +471,7 @@ subnode: %% -void print_error(char const *fmt, ...) 
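The grammar above drops the eval_literal()/eval_char_literal() actions because DT_LITERAL and DT_CHAR_LITERAL are now declared with the <integer> value type. The matching dtc-lexer.l hunk is not shown here; as a rough, hedged sketch, the numeric conversion the parser used to perform looks like the following and is assumed to move to the lexer, which then hands the parser a ready-made value in yylval.integer.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative sketch only, not taken from the patch: roughly the work the
 * removed eval_literal() did, mirrored here as a standalone helper. */
static uint64_t evaluate_dts_literal(const char *s, int *bad)
{
	unsigned long long val;
	char *end;

	errno = 0;
	val = strtoull(s, &end, 0);	/* base 0 accepts decimal, octal and 0x... */

	if (end == s || end[strspn(end, "UL")] != '\0')
		*bad = 1;		/* junk after any U/L suffixes */
	else if (errno == ERANGE)
		*bad = 1;		/* does not fit in 64 bits */
	else
		*bad = 0;

	return val;
}
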
-{ - va_list va; - - va_start(va, fmt); - srcpos_verror(&yylloc, fmt, va); - va_end(va); - - treesource_error = 1; -} - -void yyerror(char const *s) { - print_error("%s", s); -} - -static unsigned long long eval_literal(const char *s, int base, int bits) -{ - unsigned long long val; - char *e; - - errno = 0; - val = strtoull(s, &e, base); - if (*e) { - size_t uls = strspn(e, "UL"); - if (e[uls]) - print_error("bad characters in literal"); - } - if ((errno == ERANGE) - || ((bits < 64) && (val >= (1ULL << bits)))) - print_error("literal out of range"); - else if (errno != 0) - print_error("bad literal"); - return val; -} - -static unsigned char eval_char_literal(const char *s) +void yyerror(char const *s) { - int i = 1; - char c = s[0]; - - if (c == '\0') - { - print_error("empty character literal"); - return 0; - } - - /* - * If the first character in the character literal is a \ then process - * the remaining characters as an escape encoding. If the first - * character is neither an escape or a terminator it should be the only - * character in the literal and will be returned. - */ - if (c == '\\') - c = get_escape_char(s, &i); - - if (s[i] != '\0') - print_error("malformed character literal"); - - return c; + ERROR(&yylloc, "%s", s); } diff --git a/scripts/dtc/dtc.c b/scripts/dtc/dtc.c index e3c96536fd9d..8c4add69a765 100644 --- a/scripts/dtc/dtc.c +++ b/scripts/dtc/dtc.c @@ -48,6 +48,8 @@ static void fill_fullpaths(struct node *tree, const char *prefix) } /* Usage related data. */ +#define FDT_VERSION(version) _FDT_VERSION(version) +#define _FDT_VERSION(version) #version static const char usage_synopsis[] = "dtc [options] <input file>"; static const char usage_short_opts[] = "qI:O:o:V:d:R:S:p:fb:i:H:sW:E:hv"; static struct option const usage_long_opts[] = { @@ -82,9 +84,9 @@ static const char * const usage_opts_help[] = { "\t\tdts - device tree source text\n" "\t\tdtb - device tree blob\n" "\t\tasm - assembler source", - "\n\tBlob version to produce, defaults to %d (for dtb and asm output)", //, DEFAULT_FDT_VERSION); + "\n\tBlob version to produce, defaults to "FDT_VERSION(DEFAULT_FDT_VERSION)" (for dtb and asm output)", "\n\tOutput dependency file", - "\n\ttMake space for <number> reserve map entries (for dtb and asm output)", + "\n\tMake space for <number> reserve map entries (for dtb and asm output)", "\n\tMake the blob at least <bytes> long (extra space)", "\n\tAdd padding to the blob of <bytes> long (extra space)", "\n\tSet the physical boot cpu", @@ -109,7 +111,7 @@ int main(int argc, char *argv[]) const char *outform = "dts"; const char *outname = "-"; const char *depname = NULL; - int force = 0, sort = 0; + bool force = false, sort = false; const char *arg; int opt; FILE *outf = NULL; @@ -148,7 +150,7 @@ int main(int argc, char *argv[]) padsize = strtol(optarg, NULL, 0); break; case 'f': - force = 1; + force = true; break; case 'q': quiet++; @@ -174,7 +176,7 @@ int main(int argc, char *argv[]) break; case 's': - sort = 1; + sort = true; break; case 'W': @@ -237,7 +239,7 @@ int main(int argc, char *argv[]) if (streq(outname, "-")) { outf = stdout; } else { - outf = fopen(outname, "w"); + outf = fopen(outname, "wb"); if (! outf) die("Couldn't open output file %s: %s\n", outname, strerror(errno)); diff --git a/scripts/dtc/dtc.h b/scripts/dtc/dtc.h index 264a20cf66a8..56212c8df660 100644 --- a/scripts/dtc/dtc.h +++ b/scripts/dtc/dtc.h @@ -38,9 +38,9 @@ #include "util.h" #ifdef DEBUG -#define debug(fmt,args...) printf(fmt, ##args) +#define debug(...) 
printf(__VA_ARGS__) #else -#define debug(fmt,args...) +#define debug(...) #endif @@ -88,7 +88,7 @@ struct data { }; -#define empty_data ((struct data){ /* all .members = 0 or NULL */ }) +#define empty_data ((struct data){ 0 /* all .members = 0 or NULL */ }) #define for_each_marker(m) \ for (; (m); (m) = (m)->next) @@ -118,7 +118,7 @@ struct data data_append_align(struct data d, int align); struct data data_add_marker(struct data d, enum markertype type, char *ref); -int data_is_one_string(struct data d); +bool data_is_one_string(struct data d); /* DT constraints */ @@ -127,13 +127,13 @@ int data_is_one_string(struct data d); /* Live trees */ struct label { - int deleted; + bool deleted; char *label; struct label *next; }; struct property { - int deleted; + bool deleted; char *name; struct data val; @@ -143,7 +143,7 @@ struct property { }; struct node { - int deleted; + bool deleted; char *name; struct property *proplist; struct node *children; @@ -247,8 +247,8 @@ void sort_tree(struct boot_info *bi); /* Checks */ -void parse_checks_option(bool warn, bool error, const char *optarg); -void process_checks(int force, struct boot_info *bi); +void parse_checks_option(bool warn, bool error, const char *arg); +void process_checks(bool force, struct boot_info *bi); /* Flattened trees */ diff --git a/scripts/dtc/flattree.c b/scripts/dtc/flattree.c index 665dad7bb465..bd99fa2d33b8 100644 --- a/scripts/dtc/flattree.c +++ b/scripts/dtc/flattree.c @@ -261,7 +261,7 @@ static void flatten_tree(struct node *tree, struct emitter *emit, { struct property *prop; struct node *child; - int seen_name_prop = 0; + bool seen_name_prop = false; if (tree->deleted) return; @@ -279,7 +279,7 @@ static void flatten_tree(struct node *tree, struct emitter *emit, int nameoff; if (streq(prop->name, "name")) - seen_name_prop = 1; + seen_name_prop = true; nameoff = stringtable_insert(strbuf, prop->name); diff --git a/scripts/dtc/fstree.c b/scripts/dtc/fstree.c index e464727c8808..6d1beec9581d 100644 --- a/scripts/dtc/fstree.c +++ b/scripts/dtc/fstree.c @@ -37,26 +37,26 @@ static struct node *read_fstree(const char *dirname) tree = build_node(NULL, NULL); while ((de = readdir(d)) != NULL) { - char *tmpnam; + char *tmpname; if (streq(de->d_name, ".") || streq(de->d_name, "..")) continue; - tmpnam = join_path(dirname, de->d_name); + tmpname = join_path(dirname, de->d_name); - if (lstat(tmpnam, &st) < 0) - die("stat(%s): %s\n", tmpnam, strerror(errno)); + if (lstat(tmpname, &st) < 0) + die("stat(%s): %s\n", tmpname, strerror(errno)); if (S_ISREG(st.st_mode)) { struct property *prop; FILE *pfile; - pfile = fopen(tmpnam, "r"); + pfile = fopen(tmpname, "rb"); if (! 
pfile) { fprintf(stderr, "WARNING: Cannot open %s: %s\n", - tmpnam, strerror(errno)); + tmpname, strerror(errno)); } else { prop = build_property(xstrdup(de->d_name), data_copy_file(pfile, @@ -67,12 +67,12 @@ static struct node *read_fstree(const char *dirname) } else if (S_ISDIR(st.st_mode)) { struct node *newchild; - newchild = read_fstree(tmpnam); + newchild = read_fstree(tmpname); newchild = name_node(newchild, xstrdup(de->d_name)); add_child(tree, newchild); } - free(tmpnam); + free(tmpname); } closedir(d); @@ -88,3 +88,4 @@ struct boot_info *dt_from_fs(const char *dirname) return build_boot_info(NULL, tree, guess_boot_cpuid(tree)); } + diff --git a/scripts/dtc/libfdt/Makefile.libfdt b/scripts/dtc/libfdt/Makefile.libfdt index 91126c000a1e..09c322ed82ba 100644 --- a/scripts/dtc/libfdt/Makefile.libfdt +++ b/scripts/dtc/libfdt/Makefile.libfdt @@ -6,5 +6,6 @@ LIBFDT_soname = libfdt.$(SHAREDLIB_EXT).1 LIBFDT_INCLUDES = fdt.h libfdt.h libfdt_env.h LIBFDT_VERSION = version.lds -LIBFDT_SRCS = fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c fdt_empty_tree.c +LIBFDT_SRCS = fdt.c fdt_ro.c fdt_wip.c fdt_sw.c fdt_rw.c fdt_strerror.c fdt_empty_tree.c \ + fdt_addresses.c LIBFDT_OBJS = $(LIBFDT_SRCS:%.c=%.o) diff --git a/scripts/dtc/libfdt/fdt.c b/scripts/dtc/libfdt/fdt.c index e56833ae9b6f..2ce6a44179de 100644 --- a/scripts/dtc/libfdt/fdt.c +++ b/scripts/dtc/libfdt/fdt.c @@ -92,7 +92,7 @@ const void *fdt_offset_ptr(const void *fdt, int offset, unsigned int len) uint32_t fdt_next_tag(const void *fdt, int startoffset, int *nextoffset) { - const uint32_t *tagp, *lenp; + const fdt32_t *tagp, *lenp; uint32_t tag; int offset = startoffset; const char *p; @@ -198,6 +198,34 @@ int fdt_next_node(const void *fdt, int offset, int *depth) return offset; } +int fdt_first_subnode(const void *fdt, int offset) +{ + int depth = 0; + + offset = fdt_next_node(fdt, offset, &depth); + if (offset < 0 || depth != 1) + return -FDT_ERR_NOTFOUND; + + return offset; +} + +int fdt_next_subnode(const void *fdt, int offset) +{ + int depth = 1; + + /* + * With respect to the parent, the depth of the next subnode will be + * the same as the last. + */ + do { + offset = fdt_next_node(fdt, offset, &depth); + if (offset < 0 || depth < 1) + return -FDT_ERR_NOTFOUND; + } while (depth > 1); + + return offset; +} + const char *_fdt_find_string(const char *strtab, int tabsize, const char *s) { int len = strlen(s) + 1; diff --git a/scripts/dtc/libfdt/fdt.h b/scripts/dtc/libfdt/fdt.h index 48ccfd910000..526aedb51556 100644 --- a/scripts/dtc/libfdt/fdt.h +++ b/scripts/dtc/libfdt/fdt.h @@ -1,48 +1,99 @@ #ifndef _FDT_H #define _FDT_H +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. + * Copyright 2012 Kim Phillips, Freescale Semiconductor. + * + * libfdt is dual licensed: you can use it either under the terms of + * the GPL, or the BSD license, at your option. + * + * a) This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
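The new fdt_first_subnode()/fdt_next_subnode() helpers above give callers a plain iteration idiom over a node's direct children. A minimal usage sketch, not part of the patch, assuming fdt points at a valid blob and parent is an existing node offset:

#include <stdio.h>
#include "libfdt.h"

/* Illustrative only: walk the direct children of "parent" with the new
 * helpers.  The loop ends when either helper returns -FDT_ERR_NOTFOUND. */
static void list_children(const void *fdt, int parent)
{
	int node;

	for (node = fdt_first_subnode(fdt, parent);
	     node >= 0;
	     node = fdt_next_subnode(fdt, node)) {
		const char *name = fdt_get_name(fdt, node, NULL);

		printf("  %s\n", name ? name : "<unnamed>");
	}
}
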
+ * + * You should have received a copy of the GNU General Public + * License along with this library; if not, write to the Free + * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, + * MA 02110-1301 USA + * + * Alternatively, + * + * b) Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, + * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ #ifndef __ASSEMBLY__ struct fdt_header { - uint32_t magic; /* magic word FDT_MAGIC */ - uint32_t totalsize; /* total size of DT block */ - uint32_t off_dt_struct; /* offset to structure */ - uint32_t off_dt_strings; /* offset to strings */ - uint32_t off_mem_rsvmap; /* offset to memory reserve map */ - uint32_t version; /* format version */ - uint32_t last_comp_version; /* last compatible version */ + fdt32_t magic; /* magic word FDT_MAGIC */ + fdt32_t totalsize; /* total size of DT block */ + fdt32_t off_dt_struct; /* offset to structure */ + fdt32_t off_dt_strings; /* offset to strings */ + fdt32_t off_mem_rsvmap; /* offset to memory reserve map */ + fdt32_t version; /* format version */ + fdt32_t last_comp_version; /* last compatible version */ /* version 2 fields below */ - uint32_t boot_cpuid_phys; /* Which physical CPU id we're + fdt32_t boot_cpuid_phys; /* Which physical CPU id we're booting on */ /* version 3 fields below */ - uint32_t size_dt_strings; /* size of the strings block */ + fdt32_t size_dt_strings; /* size of the strings block */ /* version 17 fields below */ - uint32_t size_dt_struct; /* size of the structure block */ + fdt32_t size_dt_struct; /* size of the structure block */ }; struct fdt_reserve_entry { - uint64_t address; - uint64_t size; + fdt64_t address; + fdt64_t size; }; struct fdt_node_header { - uint32_t tag; + fdt32_t tag; char name[0]; }; struct fdt_property { - uint32_t tag; - uint32_t len; - uint32_t nameoff; + fdt32_t tag; + fdt32_t len; + fdt32_t nameoff; char data[0]; }; #endif /* !__ASSEMBLY */ #define FDT_MAGIC 0xd00dfeed /* 4: version, 4: total size */ -#define FDT_TAGSIZE sizeof(uint32_t) +#define FDT_TAGSIZE sizeof(fdt32_t) #define FDT_BEGIN_NODE 0x1 /* Start node: full name */ #define FDT_END_NODE 0x2 /* End node */ @@ -51,10 +102,10 @@ struct fdt_property { #define FDT_NOP 0x4 /* nop */ #define FDT_END 0x9 -#define FDT_V1_SIZE (7*sizeof(uint32_t)) -#define FDT_V2_SIZE (FDT_V1_SIZE + 
sizeof(uint32_t)) -#define FDT_V3_SIZE (FDT_V2_SIZE + sizeof(uint32_t)) +#define FDT_V1_SIZE (7*sizeof(fdt32_t)) +#define FDT_V2_SIZE (FDT_V1_SIZE + sizeof(fdt32_t)) +#define FDT_V3_SIZE (FDT_V2_SIZE + sizeof(fdt32_t)) #define FDT_V16_SIZE FDT_V3_SIZE -#define FDT_V17_SIZE (FDT_V16_SIZE + sizeof(uint32_t)) +#define FDT_V17_SIZE (FDT_V16_SIZE + sizeof(fdt32_t)) #endif /* _FDT_H */ diff --git a/scripts/dtc/libfdt/fdt_empty_tree.c b/scripts/dtc/libfdt/fdt_empty_tree.c index f2ae9b77c285..f72d13b1d19c 100644 --- a/scripts/dtc/libfdt/fdt_empty_tree.c +++ b/scripts/dtc/libfdt/fdt_empty_tree.c @@ -81,3 +81,4 @@ int fdt_create_empty_tree(void *buf, int bufsize) return fdt_open_into(buf, buf, bufsize); } + diff --git a/scripts/dtc/libfdt/fdt_ro.c b/scripts/dtc/libfdt/fdt_ro.c index 02b6d687537f..a65e4b5b72b6 100644 --- a/scripts/dtc/libfdt/fdt_ro.c +++ b/scripts/dtc/libfdt/fdt_ro.c @@ -154,9 +154,9 @@ int fdt_subnode_offset(const void *fdt, int parentoffset, return fdt_subnode_offset_namelen(fdt, parentoffset, name, strlen(name)); } -int fdt_path_offset(const void *fdt, const char *path) +int fdt_path_offset_namelen(const void *fdt, const char *path, int namelen) { - const char *end = path + strlen(path); + const char *end = path + namelen; const char *p = path; int offset = 0; @@ -164,7 +164,7 @@ int fdt_path_offset(const void *fdt, const char *path) /* see if we have an alias */ if (*path != '/') { - const char *q = strchr(path, '/'); + const char *q = memchr(path, '/', end - p); if (!q) q = end; @@ -177,14 +177,15 @@ int fdt_path_offset(const void *fdt, const char *path) p = q; } - while (*p) { + while (p < end) { const char *q; - while (*p == '/') + while (*p == '/') { p++; - if (! *p) - return offset; - q = strchr(p, '/'); + if (p == end) + return offset; + } + q = memchr(p, '/', end - p); if (! 
q) q = end; @@ -198,6 +199,11 @@ int fdt_path_offset(const void *fdt, const char *path) return offset; } +int fdt_path_offset(const void *fdt, const char *path) +{ + return fdt_path_offset_namelen(fdt, path, strlen(path)); +} + const char *fdt_get_name(const void *fdt, int nodeoffset, int *len) { const struct fdt_node_header *nh = _fdt_offset_ptr(fdt, nodeoffset); @@ -322,7 +328,7 @@ const void *fdt_getprop(const void *fdt, int nodeoffset, uint32_t fdt_get_phandle(const void *fdt, int nodeoffset) { - const uint32_t *php; + const fdt32_t *php; int len; /* FIXME: This is a bit sub-optimal, since we potentially scan @@ -515,8 +521,7 @@ int fdt_node_offset_by_phandle(const void *fdt, uint32_t phandle) return offset; /* error from fdt_next_node() */ } -static int _fdt_stringlist_contains(const char *strlist, int listlen, - const char *str) +int fdt_stringlist_contains(const char *strlist, int listlen, const char *str) { int len = strlen(str); const char *p; @@ -542,7 +547,7 @@ int fdt_node_check_compatible(const void *fdt, int nodeoffset, prop = fdt_getprop(fdt, nodeoffset, "compatible", &len); if (!prop) return len; - if (_fdt_stringlist_contains(prop, len, compatible)) + if (fdt_stringlist_contains(prop, len, compatible)) return 0; else return 1; diff --git a/scripts/dtc/libfdt/fdt_rw.c b/scripts/dtc/libfdt/fdt_rw.c index 24437dfc32b8..70adec6c371b 100644 --- a/scripts/dtc/libfdt/fdt_rw.c +++ b/scripts/dtc/libfdt/fdt_rw.c @@ -84,9 +84,9 @@ static int _fdt_rw_check_header(void *fdt) #define FDT_RW_CHECK_HEADER(fdt) \ { \ - int err; \ - if ((err = _fdt_rw_check_header(fdt)) != 0) \ - return err; \ + int __err; \ + if ((__err = _fdt_rw_check_header(fdt)) != 0) \ + return __err; \ } static inline int _fdt_data_size(void *fdt) @@ -339,7 +339,7 @@ int fdt_add_subnode_namelen(void *fdt, int parentoffset, int nodelen; int err; uint32_t tag; - uint32_t *endtag; + fdt32_t *endtag; FDT_RW_CHECK_HEADER(fdt); @@ -366,7 +366,7 @@ int fdt_add_subnode_namelen(void *fdt, int parentoffset, nh->tag = cpu_to_fdt32(FDT_BEGIN_NODE); memset(nh->name, 0, FDT_TAGALIGN(namelen+1)); memcpy(nh->name, name, namelen); - endtag = (uint32_t *)((char *)nh + nodelen - FDT_TAGSIZE); + endtag = (fdt32_t *)((char *)nh + nodelen - FDT_TAGSIZE); *endtag = cpu_to_fdt32(FDT_END_NODE); return offset; diff --git a/scripts/dtc/libfdt/fdt_sw.c b/scripts/dtc/libfdt/fdt_sw.c index 55ebebf1eb20..6a804859fd0c 100644 --- a/scripts/dtc/libfdt/fdt_sw.c +++ b/scripts/dtc/libfdt/fdt_sw.c @@ -107,6 +107,38 @@ int fdt_create(void *buf, int bufsize) return 0; } +int fdt_resize(void *fdt, void *buf, int bufsize) +{ + size_t headsize, tailsize; + char *oldtail, *newtail; + + FDT_SW_CHECK_HEADER(fdt); + + headsize = fdt_off_dt_struct(fdt); + tailsize = fdt_size_dt_strings(fdt); + + if ((headsize + tailsize) > bufsize) + return -FDT_ERR_NOSPACE; + + oldtail = (char *)fdt + fdt_totalsize(fdt) - tailsize; + newtail = (char *)buf + bufsize - tailsize; + + /* Two cases to avoid clobbering data if the old and new + * buffers partially overlap */ + if (buf <= fdt) { + memmove(buf, fdt, headsize); + memmove(newtail, oldtail, tailsize); + } else { + memmove(newtail, oldtail, tailsize); + memmove(buf, fdt, headsize); + } + + fdt_set_off_dt_strings(buf, bufsize); + fdt_set_totalsize(buf, bufsize); + + return 0; +} + int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size) { struct fdt_reserve_entry *re; @@ -153,7 +185,7 @@ int fdt_begin_node(void *fdt, const char *name) int fdt_end_node(void *fdt) { - uint32_t *en; + fdt32_t *en; 
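fdt_resize(), added above to the sequential-write code, lets a tree started with fdt_create() be moved into a larger buffer mid-build instead of failing with -FDT_ERR_NOSPACE. A hedged sketch of that flow follows; the buffer sizes and the property written are purely illustrative.

#include "libfdt.h"

/* Illustrative only: start building in a small scratch buffer, then move the
 * partially built blob into the real destination with fdt_resize() and keep
 * writing there. */
static int build_small_tree(void *final_buf, int final_size)
{
	char scratch[256];
	int err;

	err = fdt_create(scratch, sizeof(scratch));
	if (err)
		return err;
	err = fdt_finish_reservemap(scratch);
	if (err)
		return err;

	err = fdt_resize(scratch, final_buf, final_size);
	if (err)
		return err;

	err = fdt_begin_node(final_buf, "");		/* root node */
	if (err)
		return err;
	err = fdt_property_u32(final_buf, "#address-cells", 2);
	if (err)
		return err;
	err = fdt_end_node(final_buf);
	if (err)
		return err;

	return fdt_finish(final_buf);
}
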
FDT_SW_CHECK_HEADER(fdt); @@ -213,7 +245,7 @@ int fdt_property(void *fdt, const char *name, const void *val, int len) int fdt_finish(void *fdt) { char *p = (char *)fdt; - uint32_t *end; + fdt32_t *end; int oldstroffset, newstroffset; uint32_t tag; int offset, nextoffset; diff --git a/scripts/dtc/libfdt/fdt_wip.c b/scripts/dtc/libfdt/fdt_wip.c index 6025fa1fe8fe..c5bbb68d3273 100644 --- a/scripts/dtc/libfdt/fdt_wip.c +++ b/scripts/dtc/libfdt/fdt_wip.c @@ -74,7 +74,7 @@ int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name, static void _fdt_nop_region(void *start, int len) { - uint32_t *p; + fdt32_t *p; for (p = start; (char *)p < ((char *)start + len); p++) *p = cpu_to_fdt32(FDT_NOP); diff --git a/scripts/dtc/libfdt/libfdt.h b/scripts/dtc/libfdt/libfdt.h index 73f49759a5e7..ea35ac3c9be4 100644 --- a/scripts/dtc/libfdt/libfdt.h +++ b/scripts/dtc/libfdt/libfdt.h @@ -51,8 +51,8 @@ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#include <libfdt_env.h> -#include <fdt.h> +#include "libfdt_env.h" +#include "fdt.h" #define FDT_FIRST_SUPPORTED_VERSION 0x10 #define FDT_LAST_SUPPORTED_VERSION 0x11 @@ -116,7 +116,12 @@ * Should never be returned, if it is, it indicates a bug in * libfdt itself. */ -#define FDT_ERR_MAX 13 +/* Errors in device tree content */ +#define FDT_ERR_BADNCELLS 14 + /* FDT_ERR_BADNCELLS: Device tree has a #address-cells, #size-cells + * or similar property with a bad format or value */ + +#define FDT_ERR_MAX 14 /**********************************************************************/ /* Low-level functions (you probably don't need these) */ @@ -136,6 +141,28 @@ uint32_t fdt_next_tag(const void *fdt, int offset, int *nextoffset); int fdt_next_node(const void *fdt, int offset, int *depth); +/** + * fdt_first_subnode() - get offset of first direct subnode + * + * @fdt: FDT blob + * @offset: Offset of node to check + * @return offset of first subnode, or -FDT_ERR_NOTFOUND if there is none + */ +int fdt_first_subnode(const void *fdt, int offset); + +/** + * fdt_next_subnode() - get offset of next direct subnode + * + * After first calling fdt_first_subnode(), call this function repeatedly to + * get direct subnodes of a parent node. + * + * @fdt: FDT blob + * @offset: Offset of previous subnode + * @return offset of next subnode, or -FDT_ERR_NOTFOUND if there are no more + * subnodes + */ +int fdt_next_subnode(const void *fdt, int offset); + /**********************************************************************/ /* General functions */ /**********************************************************************/ @@ -296,6 +323,17 @@ int fdt_subnode_offset_namelen(const void *fdt, int parentoffset, int fdt_subnode_offset(const void *fdt, int parentoffset, const char *name); /** + * fdt_path_offset_namelen - find a tree node by its full path + * @fdt: pointer to the device tree blob + * @path: full path of the node to locate + * @namelen: number of characters of path to consider + * + * Identical to fdt_path_offset(), but only consider the first namelen + * characters of path as the path name. + */ +int fdt_path_offset_namelen(const void *fdt, const char *path, int namelen); + +/** * fdt_path_offset - find a tree node by its full path * @fdt: pointer to the device tree blob * @path: full path of the node to locate @@ -582,7 +620,7 @@ const char *fdt_get_alias_namelen(const void *fdt, * value of the property named 'name' in the node /aliases. 
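fdt_path_offset_namelen(), declared above, is mainly useful when the path of interest is a prefix of a longer string, so callers do not have to copy and NUL-terminate it first. A brief sketch under that assumption; the spec string format is purely illustrative.

#include <string.h>
#include "libfdt.h"

/* Illustrative only: resolve the node named by everything before the ':'
 * in a stdout-path style string such as "/soc/serial@f00:115200n8". */
static int node_from_path_spec(const void *fdt, const char *spec)
{
	const char *sep = strchr(spec, ':');
	int len = sep ? (int)(sep - spec) : (int)strlen(spec);

	return fdt_path_offset_namelen(fdt, spec, len);
}
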
* * returns: - * a pointer to the expansion of the alias named 'name', of it exists + * a pointer to the expansion of the alias named 'name', if it exists * NULL, if the given alias or the /aliases node does not exist */ const char *fdt_get_alias(const void *fdt, const char *name); @@ -816,6 +854,75 @@ int fdt_node_check_compatible(const void *fdt, int nodeoffset, int fdt_node_offset_by_compatible(const void *fdt, int startoffset, const char *compatible); +/** + * fdt_stringlist_contains - check a string list property for a string + * @strlist: Property containing a list of strings to check + * @listlen: Length of property + * @str: String to search for + * + * This is a utility function provided for convenience. The list contains + * one or more strings, each terminated by \0, as is found in a device tree + * "compatible" property. + * + * @return: 1 if the string is found in the list, 0 not found, or invalid list + */ +int fdt_stringlist_contains(const char *strlist, int listlen, const char *str); + +/**********************************************************************/ +/* Read-only functions (addressing related) */ +/**********************************************************************/ + +/** + * FDT_MAX_NCELLS - maximum value for #address-cells and #size-cells + * + * This is the maximum value for #address-cells, #size-cells and + * similar properties that will be processed by libfdt. IEE1275 + * requires that OF implementations handle values up to 4. + * Implementations may support larger values, but in practice higher + * values aren't used. + */ +#define FDT_MAX_NCELLS 4 + +/** + * fdt_address_cells - retrieve address size for a bus represented in the tree + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node to find the address size for + * + * When the node has a valid #address-cells property, returns its value. + * + * returns: + * 0 <= n < FDT_MAX_NCELLS, on success + * 2, if the node has no #address-cells property + * -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid #address-cells property + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_address_cells(const void *fdt, int nodeoffset); + +/** + * fdt_size_cells - retrieve address range size for a bus represented in the + * tree + * @fdt: pointer to the device tree blob + * @nodeoffset: offset of the node to find the address range size for + * + * When the node has a valid #size-cells property, returns its value. 
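fdt_address_cells() and fdt_size_cells(), documented here, are typically used together on a bus node to decode the "reg" property of its children. A hedged sketch that handles at most two cells per field; fdt, parent and child are assumed to be a valid blob and node offsets.

#include <stdint.h>
#include "libfdt.h"

/* Illustrative only: decode the first "reg" entry of "child" using the
 * #address-cells/#size-cells of its parent (bus) node. */
static int first_reg_entry(const void *fdt, int parent, int child,
			   uint64_t *addr, uint64_t *size)
{
	int ac = fdt_address_cells(fdt, parent);
	int sc = fdt_size_cells(fdt, parent);
	const fdt32_t *reg;
	int len, i;

	if (ac < 0 || ac > 2 || sc < 0 || sc > 2)
		return -1;

	reg = fdt_getprop(fdt, child, "reg", &len);
	if (!reg || len < (ac + sc) * (int)sizeof(fdt32_t))
		return -1;

	*addr = 0;
	*size = 0;
	for (i = 0; i < ac; i++)
		*addr = (*addr << 32) | fdt32_to_cpu(reg[i]);
	for (i = 0; i < sc; i++)
		*size = (*size << 32) | fdt32_to_cpu(reg[ac + i]);

	return 0;
}
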
+ * + * returns: + * 0 <= n < FDT_MAX_NCELLS, on success + * 2, if the node has no #address-cells property + * -FDT_ERR_BADNCELLS, if the node has a badly formatted or invalid #size-cells property + * -FDT_ERR_BADMAGIC, + * -FDT_ERR_BADVERSION, + * -FDT_ERR_BADSTATE, + * -FDT_ERR_BADSTRUCTURE, + * -FDT_ERR_TRUNCATED, standard meanings + */ +int fdt_size_cells(const void *fdt, int nodeoffset); + + /**********************************************************************/ /* Write-in-place functions */ /**********************************************************************/ @@ -882,8 +989,8 @@ int fdt_setprop_inplace(void *fdt, int nodeoffset, const char *name, static inline int fdt_setprop_inplace_u32(void *fdt, int nodeoffset, const char *name, uint32_t val) { - val = cpu_to_fdt32(val); - return fdt_setprop_inplace(fdt, nodeoffset, name, &val, sizeof(val)); + fdt32_t tmp = cpu_to_fdt32(val); + return fdt_setprop_inplace(fdt, nodeoffset, name, &tmp, sizeof(tmp)); } /** @@ -917,8 +1024,8 @@ static inline int fdt_setprop_inplace_u32(void *fdt, int nodeoffset, static inline int fdt_setprop_inplace_u64(void *fdt, int nodeoffset, const char *name, uint64_t val) { - val = cpu_to_fdt64(val); - return fdt_setprop_inplace(fdt, nodeoffset, name, &val, sizeof(val)); + fdt64_t tmp = cpu_to_fdt64(val); + return fdt_setprop_inplace(fdt, nodeoffset, name, &tmp, sizeof(tmp)); } /** @@ -987,19 +1094,20 @@ int fdt_nop_node(void *fdt, int nodeoffset); /**********************************************************************/ int fdt_create(void *buf, int bufsize); +int fdt_resize(void *fdt, void *buf, int bufsize); int fdt_add_reservemap_entry(void *fdt, uint64_t addr, uint64_t size); int fdt_finish_reservemap(void *fdt); int fdt_begin_node(void *fdt, const char *name); int fdt_property(void *fdt, const char *name, const void *val, int len); static inline int fdt_property_u32(void *fdt, const char *name, uint32_t val) { - val = cpu_to_fdt32(val); - return fdt_property(fdt, name, &val, sizeof(val)); + fdt32_t tmp = cpu_to_fdt32(val); + return fdt_property(fdt, name, &tmp, sizeof(tmp)); } static inline int fdt_property_u64(void *fdt, const char *name, uint64_t val) { - val = cpu_to_fdt64(val); - return fdt_property(fdt, name, &val, sizeof(val)); + fdt64_t tmp = cpu_to_fdt64(val); + return fdt_property(fdt, name, &tmp, sizeof(tmp)); } static inline int fdt_property_cell(void *fdt, const char *name, uint32_t val) { @@ -1154,8 +1262,8 @@ int fdt_setprop(void *fdt, int nodeoffset, const char *name, static inline int fdt_setprop_u32(void *fdt, int nodeoffset, const char *name, uint32_t val) { - val = cpu_to_fdt32(val); - return fdt_setprop(fdt, nodeoffset, name, &val, sizeof(val)); + fdt32_t tmp = cpu_to_fdt32(val); + return fdt_setprop(fdt, nodeoffset, name, &tmp, sizeof(tmp)); } /** @@ -1189,8 +1297,8 @@ static inline int fdt_setprop_u32(void *fdt, int nodeoffset, const char *name, static inline int fdt_setprop_u64(void *fdt, int nodeoffset, const char *name, uint64_t val) { - val = cpu_to_fdt64(val); - return fdt_setprop(fdt, nodeoffset, name, &val, sizeof(val)); + fdt64_t tmp = cpu_to_fdt64(val); + return fdt_setprop(fdt, nodeoffset, name, &tmp, sizeof(tmp)); } /** @@ -1296,8 +1404,8 @@ int fdt_appendprop(void *fdt, int nodeoffset, const char *name, static inline int fdt_appendprop_u32(void *fdt, int nodeoffset, const char *name, uint32_t val) { - val = cpu_to_fdt32(val); - return fdt_appendprop(fdt, nodeoffset, name, &val, sizeof(val)); + fdt32_t tmp = cpu_to_fdt32(val); + return fdt_appendprop(fdt, nodeoffset, 
name, &tmp, sizeof(tmp)); } /** @@ -1331,8 +1439,8 @@ static inline int fdt_appendprop_u32(void *fdt, int nodeoffset, static inline int fdt_appendprop_u64(void *fdt, int nodeoffset, const char *name, uint64_t val) { - val = cpu_to_fdt64(val); - return fdt_appendprop(fdt, nodeoffset, name, &val, sizeof(val)); + fdt64_t tmp = cpu_to_fdt64(val); + return fdt_appendprop(fdt, nodeoffset, name, &tmp, sizeof(tmp)); } /** diff --git a/scripts/dtc/libfdt/libfdt_env.h b/scripts/dtc/libfdt/libfdt_env.h index 213d7fb81c42..9dea97dfff81 100644 --- a/scripts/dtc/libfdt/libfdt_env.h +++ b/scripts/dtc/libfdt/libfdt_env.h @@ -1,29 +1,111 @@ #ifndef _LIBFDT_ENV_H #define _LIBFDT_ENV_H +/* + * libfdt - Flat Device Tree manipulation + * Copyright (C) 2006 David Gibson, IBM Corporation. + * Copyright 2012 Kim Phillips, Freescale Semiconductor. + * + * libfdt is dual licensed: you can use it either under the terms of + * the GPL, or the BSD license, at your option. + * + * a) This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public + * License along with this library; if not, write to the Free + * Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, + * MA 02110-1301 USA + * + * Alternatively, + * + * b) Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * 1. Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * 2. Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, + * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ #include <stddef.h> #include <stdint.h> #include <string.h> -#define EXTRACT_BYTE(n) ((unsigned long long)((uint8_t *)&x)[n]) -static inline uint16_t fdt16_to_cpu(uint16_t x) +#ifdef __CHECKER__ +#define __force __attribute__((force)) +#define __bitwise __attribute__((bitwise)) +#else +#define __force +#define __bitwise +#endif + +typedef uint16_t __bitwise fdt16_t; +typedef uint32_t __bitwise fdt32_t; +typedef uint64_t __bitwise fdt64_t; + +#define EXTRACT_BYTE(x, n) ((unsigned long long)((uint8_t *)&x)[n]) +#define CPU_TO_FDT16(x) ((EXTRACT_BYTE(x, 0) << 8) | EXTRACT_BYTE(x, 1)) +#define CPU_TO_FDT32(x) ((EXTRACT_BYTE(x, 0) << 24) | (EXTRACT_BYTE(x, 1) << 16) | \ + (EXTRACT_BYTE(x, 2) << 8) | EXTRACT_BYTE(x, 3)) +#define CPU_TO_FDT64(x) ((EXTRACT_BYTE(x, 0) << 56) | (EXTRACT_BYTE(x, 1) << 48) | \ + (EXTRACT_BYTE(x, 2) << 40) | (EXTRACT_BYTE(x, 3) << 32) | \ + (EXTRACT_BYTE(x, 4) << 24) | (EXTRACT_BYTE(x, 5) << 16) | \ + (EXTRACT_BYTE(x, 6) << 8) | EXTRACT_BYTE(x, 7)) + +static inline uint16_t fdt16_to_cpu(fdt16_t x) +{ + return (__force uint16_t)CPU_TO_FDT16(x); +} +static inline fdt16_t cpu_to_fdt16(uint16_t x) { - return (EXTRACT_BYTE(0) << 8) | EXTRACT_BYTE(1); + return (__force fdt16_t)CPU_TO_FDT16(x); } -#define cpu_to_fdt16(x) fdt16_to_cpu(x) -static inline uint32_t fdt32_to_cpu(uint32_t x) +static inline uint32_t fdt32_to_cpu(fdt32_t x) { - return (EXTRACT_BYTE(0) << 24) | (EXTRACT_BYTE(1) << 16) | (EXTRACT_BYTE(2) << 8) | EXTRACT_BYTE(3); + return (__force uint32_t)CPU_TO_FDT32(x); +} +static inline fdt32_t cpu_to_fdt32(uint32_t x) +{ + return (__force fdt32_t)CPU_TO_FDT32(x); } -#define cpu_to_fdt32(x) fdt32_to_cpu(x) -static inline uint64_t fdt64_to_cpu(uint64_t x) +static inline uint64_t fdt64_to_cpu(fdt64_t x) +{ + return (__force uint64_t)CPU_TO_FDT64(x); +} +static inline fdt64_t cpu_to_fdt64(uint64_t x) { - return (EXTRACT_BYTE(0) << 56) | (EXTRACT_BYTE(1) << 48) | (EXTRACT_BYTE(2) << 40) | (EXTRACT_BYTE(3) << 32) - | (EXTRACT_BYTE(4) << 24) | (EXTRACT_BYTE(5) << 16) | (EXTRACT_BYTE(6) << 8) | EXTRACT_BYTE(7); + return (__force fdt64_t)CPU_TO_FDT64(x); } -#define cpu_to_fdt64(x) fdt64_to_cpu(x) +#undef CPU_TO_FDT64 +#undef CPU_TO_FDT32 +#undef CPU_TO_FDT16 #undef EXTRACT_BYTE #endif /* _LIBFDT_ENV_H */ diff --git a/scripts/dtc/libfdt/libfdt_internal.h b/scripts/dtc/libfdt/libfdt_internal.h index 381133ba81df..02cfa6fb612d 100644 --- a/scripts/dtc/libfdt/libfdt_internal.h +++ b/scripts/dtc/libfdt/libfdt_internal.h @@ -57,9 +57,9 @@ #define FDT_CHECK_HEADER(fdt) \ { \ - int err; \ - if ((err = fdt_check_header(fdt)) != 0) \ - return err; \ + int __err; \ + if ((__err = fdt_check_header(fdt)) != 0) \ + return __err; \ } int _fdt_check_node_offset(const void *fdt, int offset); diff --git a/scripts/dtc/livetree.c b/scripts/dtc/livetree.c index b61465fb2f33..e229b84432f9 100644 --- a/scripts/dtc/livetree.c +++ b/scripts/dtc/livetree.c @@ -511,7 +511,9 @@ struct node *get_node_by_phandle(struct node *tree, cell_t phandle) struct node *get_node_by_ref(struct node *tree, const char *ref) { - if (ref[0] == '/') + if (streq(ref, "/")) + return tree; + else if (ref[0] == '/') return get_node_by_path(tree, ref); else return get_node_by_label(tree, ref); diff --git a/scripts/dtc/srcpos.c b/scripts/dtc/srcpos.c index c20bc5315bc1..f534c22a888d 100644 --- a/scripts/dtc/srcpos.c +++ b/scripts/dtc/srcpos.c @@ -34,7 +34,7 @@ struct search_path { static struct search_path *search_path_head, **search_path_tail; -static char *dirname(const char *path) +static char *get_dirname(const char 
*path) { const char *slash = strrchr(path, '/'); @@ -77,7 +77,7 @@ static char *try_open(const char *dirname, const char *fname, FILE **fp) else fullname = join_path(dirname, fname); - *fp = fopen(fullname, "r"); + *fp = fopen(fullname, "rb"); if (!*fp) { free(fullname); fullname = NULL; @@ -150,7 +150,7 @@ void srcfile_push(const char *fname) srcfile = xmalloc(sizeof(*srcfile)); srcfile->f = srcfile_relative_open(fname, &srcfile->name); - srcfile->dir = dirname(srcfile->name); + srcfile->dir = get_dirname(srcfile->name); srcfile->prev = current_srcfile; srcfile->lineno = 1; @@ -159,7 +159,7 @@ void srcfile_push(const char *fname) current_srcfile = srcfile; } -int srcfile_pop(void) +bool srcfile_pop(void) { struct srcfile_state *srcfile = current_srcfile; @@ -177,7 +177,7 @@ int srcfile_pop(void) * fix this we could either allocate all the files from a * table, or use a pool allocator. */ - return current_srcfile ? 1 : 0; + return current_srcfile ? true : false; } void srcfile_add_search_path(const char *dirname) @@ -290,42 +290,27 @@ srcpos_string(struct srcpos *pos) return pos_str; } -void -srcpos_verror(struct srcpos *pos, char const *fmt, va_list va) +void srcpos_verror(struct srcpos *pos, const char *prefix, + const char *fmt, va_list va) { - const char *srcstr; - - srcstr = srcpos_string(pos); + char *srcstr; - fprintf(stderr, "Error: %s ", srcstr); - vfprintf(stderr, fmt, va); - fprintf(stderr, "\n"); -} + srcstr = srcpos_string(pos); -void -srcpos_error(struct srcpos *pos, char const *fmt, ...) -{ - va_list va; + fprintf(stderr, "%s: %s ", prefix, srcstr); + vfprintf(stderr, fmt, va); + fprintf(stderr, "\n"); - va_start(va, fmt); - srcpos_verror(pos, fmt, va); - va_end(va); + free(srcstr); } - -void -srcpos_warn(struct srcpos *pos, char const *fmt, ...) +void srcpos_error(struct srcpos *pos, const char *prefix, + const char *fmt, ...) { - const char *srcstr; va_list va; - va_start(va, fmt); - - srcstr = srcpos_string(pos); - - fprintf(stderr, "Warning: %s ", srcstr); - vfprintf(stderr, fmt, va); - fprintf(stderr, "\n"); + va_start(va, fmt); + srcpos_verror(pos, prefix, fmt, va); va_end(va); } diff --git a/scripts/dtc/srcpos.h b/scripts/dtc/srcpos.h index 93a27123c2e9..f81827bd684a 100644 --- a/scripts/dtc/srcpos.h +++ b/scripts/dtc/srcpos.h @@ -21,6 +21,7 @@ #define _SRCPOS_H_ #include <stdio.h> +#include <stdbool.h> struct srcfile_state { FILE *f; @@ -55,7 +56,7 @@ extern struct srcfile_state *current_srcfile; /* = NULL */ FILE *srcfile_relative_open(const char *fname, char **fullnamep); void srcfile_push(const char *fname); -int srcfile_pop(void); +bool srcfile_pop(void); /** * Add a new directory to the search path for input files @@ -106,12 +107,12 @@ extern struct srcpos *srcpos_copy(struct srcpos *pos); extern char *srcpos_string(struct srcpos *pos); extern void srcpos_dump(struct srcpos *pos); -extern void srcpos_verror(struct srcpos *pos, char const *, va_list va) - __attribute__((format(printf, 2, 0))); -extern void srcpos_error(struct srcpos *pos, char const *, ...) - __attribute__((format(printf, 2, 3))); -extern void srcpos_warn(struct srcpos *pos, char const *, ...) - __attribute__((format(printf, 2, 3))); +extern void srcpos_verror(struct srcpos *pos, const char *prefix, + const char *fmt, va_list va) + __attribute__((format(printf, 3, 0))); +extern void srcpos_error(struct srcpos *pos, const char *prefix, + const char *fmt, ...) 
+ __attribute__((format(printf, 3, 4))); extern void srcpos_set_line(char *f, int l); diff --git a/scripts/dtc/treesource.c b/scripts/dtc/treesource.c index 5740e6992d37..a55d1d128cce 100644 --- a/scripts/dtc/treesource.c +++ b/scripts/dtc/treesource.c @@ -26,12 +26,12 @@ extern int yyparse(void); extern YYLTYPE yylloc; struct boot_info *the_boot_info; -int treesource_error; +bool treesource_error; struct boot_info *dt_from_source(const char *fname) { the_boot_info = NULL; - treesource_error = 0; + treesource_error = false; srcfile_push(fname); yyin = current_srcfile->f; @@ -54,9 +54,9 @@ static void write_prefix(FILE *f, int level) fputc('\t', f); } -static int isstring(char c) +static bool isstring(char c) { - return (isprint(c) + return (isprint((unsigned char)c) || (c == '\0') || strchr("\a\b\t\n\v\f\r", c)); } @@ -109,7 +109,7 @@ static void write_propval_string(FILE *f, struct data val) break; case '\0': fprintf(f, "\", "); - while (m && (m->offset < i)) { + while (m && (m->offset <= (i + 1))) { if (m->type == LABEL) { assert(m->offset == (i+1)); fprintf(f, "%s: ", m->ref); @@ -119,7 +119,7 @@ static void write_propval_string(FILE *f, struct data val) fprintf(f, "\""); break; default: - if (isprint(c)) + if (isprint((unsigned char)c)) fprintf(f, "%c", c); else fprintf(f, "\\x%02hhx", c); @@ -178,7 +178,7 @@ static void write_propval_bytes(FILE *f, struct data val) m = m->next; } - fprintf(f, "%02hhx", *bp++); + fprintf(f, "%02hhx", (unsigned char)(*bp++)); if ((const void *)bp >= propend) break; fprintf(f, " "); @@ -281,3 +281,4 @@ void dt_to_source(FILE *f, struct boot_info *bi) write_tree_source_node(f, bi->dt, 0); } + diff --git a/scripts/dtc/update-dtc-source.sh b/scripts/dtc/update-dtc-source.sh index feb01ef26be4..f5cde799db03 100755 --- a/scripts/dtc/update-dtc-source.sh +++ b/scripts/dtc/update-dtc-source.sh @@ -34,6 +34,7 @@ DTC_SOURCE="checks.c data.c dtc.c dtc.h flattree.c fstree.c livetree.c srcpos.c srcpos.h treesource.c util.c util.h version_gen.h Makefile.dtc \ dtc-lexer.l dtc-parser.y" DTC_GENERATED="dtc-lexer.lex.c dtc-parser.tab.c dtc-parser.tab.h" +LIBFDT_SOURCE="Makefile.libfdt fdt.c fdt.h fdt_empty_tree.c fdt_ro.c fdt_rw.c fdt_strerror.c fdt_sw.c fdt_wip.c libfdt.h libfdt_env.h libfdt_internal.h" # Build DTC cd $DTC_UPSTREAM_PATH @@ -50,5 +51,13 @@ for f in $DTC_GENERATED; do cp ${DTC_UPSTREAM_PATH}/$f ${f}_shipped git add ${f}_shipped done +for f in $LIBFDT_SOURCE; do + cp ${DTC_UPSTREAM_PATH}/libfdt/${f} libfdt/${f} + git add libfdt/${f} +done + +sed -i -- 's/#include <libfdt_env.h>/#include "libfdt_env.h"/g' ./libfdt/libfdt.h +sed -i -- 's/#include <fdt.h>/#include "fdt.h"/g' ./libfdt/libfdt.h +git add ./libfdt/libfdt.h git commit -e -v -m "scripts/dtc: Update to upstream version [CHANGEME]" diff --git a/scripts/dtc/util.c b/scripts/dtc/util.c index 3055c16e980d..9d65226df9e4 100644 --- a/scripts/dtc/util.c +++ b/scripts/dtc/util.c @@ -39,11 +39,11 @@ char *xstrdup(const char *s) { int len = strlen(s) + 1; - char *dup = xmalloc(len); + char *d = xmalloc(len); - memcpy(dup, s, len); + memcpy(d, s, len); - return dup; + return d; } char *join_path(const char *path, const char *name) @@ -70,7 +70,7 @@ char *join_path(const char *path, const char *name) return str; } -int util_is_printable_string(const void *data, int len) +bool util_is_printable_string(const void *data, int len) { const char *s = data; const char *ss, *se; @@ -87,7 +87,7 @@ int util_is_printable_string(const void *data, int len) while (s < se) { ss = s; - while (s < se && *s && isprint(*s)) + 
while (s < se && *s && isprint((unsigned char)*s)) s++; /* not zero, or not done yet */ @@ -219,10 +219,6 @@ int utilfdt_read_err_len(const char *filename, char **buffp, off_t *len) if (offset == bufsize) { bufsize *= 2; buf = xrealloc(buf, bufsize); - if (!buf) { - ret = ENOMEM; - break; - } } ret = read(fd, &buf[offset], bufsize - offset); @@ -375,9 +371,9 @@ void utilfdt_print_data(const char *data, int len) const uint32_t *cell = (const uint32_t *)data; printf(" = <"); - for (i = 0; i < len; i += 4) + for (i = 0, len /= 4; i < len; i++) printf("0x%08x%s", fdt32_to_cpu(cell[i]), - i < (len - 4) ? " " : ""); + i < (len - 1) ? " " : ""); printf(">"); } else { printf(" = ["); diff --git a/scripts/dtc/util.h b/scripts/dtc/util.h index 8f40b4499359..f800b6011fb1 100644 --- a/scripts/dtc/util.h +++ b/scripts/dtc/util.h @@ -2,6 +2,7 @@ #define _UTIL_H #include <stdarg.h> +#include <stdbool.h> #include <getopt.h> /* @@ -33,6 +34,7 @@ static inline void __attribute__((noreturn)) die(const char *str, ...) va_start(ap, str); fprintf(stderr, "FATAL ERROR: "); vfprintf(stderr, str, ap); + va_end(ap); exit(1); } @@ -68,7 +70,7 @@ extern char *join_path(const char *path, const char *name); * @param len The string length including terminator * @return 1 if a valid printable string, 0 if not */ -int util_is_printable_string(const void *data, int len); +bool util_is_printable_string(const void *data, int len); /* * Parse an escaped character starting at index i in string s. The resulting diff --git a/scripts/dtc/version_gen.h b/scripts/dtc/version_gen.h index 54d4e904433a..5b8c7d53d608 100644 --- a/scripts/dtc/version_gen.h +++ b/scripts/dtc/version_gen.h @@ -1 +1 @@ -#define DTC_VERSION "DTC 1.4.0-dirty" +#define DTC_VERSION "DTC 1.4.1-g9d3649bd" diff --git a/scripts/gdb/linux/dmesg.py b/scripts/gdb/linux/dmesg.py index 3c947f0c5dad..927d0d2a3145 100644 --- a/scripts/gdb/linux/dmesg.py +++ b/scripts/gdb/linux/dmesg.py @@ -12,7 +12,6 @@ # import gdb -import string from linux import utils diff --git a/scripts/gdb/linux/lists.py b/scripts/gdb/linux/lists.py new file mode 100644 index 000000000000..3a3775bc162b --- /dev/null +++ b/scripts/gdb/linux/lists.py @@ -0,0 +1,92 @@ +# +# gdb helper commands and functions for Linux kernel debugging +# +# list tools +# +# Copyright (c) Thiebaud Weksteen, 2015 +# +# Authors: +# Thiebaud Weksteen <[email protected]> +# +# This work is licensed under the terms of the GNU GPL version 2. 
+# + +import gdb + +from linux import utils + +list_head = utils.CachedType("struct list_head") + + +def list_check(head): + nb = 0 + if (head.type == list_head.get_type().pointer()): + head = head.dereference() + elif (head.type != list_head.get_type()): + raise gdb.GdbError('argument must be of type (struct list_head [*])') + c = head + try: + gdb.write("Starting with: {}\n".format(c)) + except gdb.MemoryError: + gdb.write('head is not accessible\n') + return + while True: + p = c['prev'].dereference() + n = c['next'].dereference() + try: + if p['next'] != c.address: + gdb.write('prev.next != current: ' + 'current@{current_addr}={current} ' + 'prev@{p_addr}={p}\n'.format( + current_addr=c.address, + current=c, + p_addr=p.address, + p=p, + )) + return + except gdb.MemoryError: + gdb.write('prev is not accessible: ' + 'current@{current_addr}={current}\n'.format( + current_addr=c.address, + current=c + )) + return + try: + if n['prev'] != c.address: + gdb.write('next.prev != current: ' + 'current@{current_addr}={current} ' + 'next@{n_addr}={n}\n'.format( + current_addr=c.address, + current=c, + n_addr=n.address, + n=n, + )) + return + except gdb.MemoryError: + gdb.write('next is not accessible: ' + 'current@{current_addr}={current}\n'.format( + current_addr=c.address, + current=c + )) + return + c = n + nb += 1 + if c == head: + gdb.write("list is consistent: {} node(s)\n".format(nb)) + return + + +class LxListChk(gdb.Command): + """Verify a list consistency""" + + def __init__(self): + super(LxListChk, self).__init__("lx-list-check", gdb.COMMAND_DATA, + gdb.COMPLETE_EXPRESSION) + + def invoke(self, arg, from_tty): + argv = gdb.string_to_argv(arg) + if len(argv) != 1: + raise gdb.GdbError("lx-list-check takes one argument") + list_check(gdb.parse_and_eval(argv[0])) + +LxListChk() diff --git a/scripts/gdb/linux/symbols.py b/scripts/gdb/linux/symbols.py index cd5bea965d4e..627750cb420d 100644 --- a/scripts/gdb/linux/symbols.py +++ b/scripts/gdb/linux/symbols.py @@ -14,9 +14,8 @@ import gdb import os import re -import string -from linux import modules, utils +from linux import modules if hasattr(gdb, 'Breakpoint'): @@ -97,7 +96,7 @@ lx-symbols command.""" return "" attrs = sect_attrs['attrs'] section_name_to_address = { - attrs[n]['name'].string() : attrs[n]['address'] + attrs[n]['name'].string(): attrs[n]['address'] for n in range(int(sect_attrs['nsections']))} args = [] for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]: @@ -124,7 +123,7 @@ lx-symbols command.""" addr=module_addr, sections=self._section_arguments(module)) gdb.execute(cmdline, to_string=True) - if not module_name in self.loaded_modules: + if module_name not in self.loaded_modules: self.loaded_modules.append(module_name) else: gdb.write("no module object found for '{0}'\n".format(module_name)) @@ -164,7 +163,7 @@ lx-symbols command.""" self.load_all_symbols() if hasattr(gdb, 'Breakpoint'): - if not self.breakpoint is None: + if self.breakpoint is not None: self.breakpoint.delete() self.breakpoint = None self.breakpoint = LoadModuleBreakpoint( diff --git a/scripts/gdb/linux/tasks.py b/scripts/gdb/linux/tasks.py index e2037d9bb7eb..862a4ae24d49 100644 --- a/scripts/gdb/linux/tasks.py +++ b/scripts/gdb/linux/tasks.py @@ -18,8 +18,8 @@ from linux import utils task_type = utils.CachedType("struct task_struct") + def task_lists(): - global task_type task_ptr_type = task_type.get_type().pointer() init_task = gdb.parse_and_eval("init_task").address t = g = init_task @@ -38,6 +38,7 @@ def task_lists(): if t == 
init_task: return + def get_task_by_pid(pid): for task in task_lists(): if int(task['pid']) == pid: @@ -65,13 +66,28 @@ return that task_struct variable which PID matches.""" LxTaskByPidFunc() +class LxPs(gdb.Command): + """Dump Linux tasks.""" + + def __init__(self): + super(LxPs, self).__init__("lx-ps", gdb.COMMAND_DATA) + + def invoke(self, arg, from_tty): + for task in task_lists(): + gdb.write("{address} {pid} {comm}\n".format( + address=task, + pid=task["pid"], + comm=task["comm"].string())) + +LxPs() + + thread_info_type = utils.CachedType("struct thread_info") ia64_task_size = None def get_thread_info(task): - global thread_info_type thread_info_ptr_type = thread_info_type.get_type().pointer() if utils.is_target_arch("ia64"): global ia64_task_size diff --git a/scripts/gdb/linux/utils.py b/scripts/gdb/linux/utils.py index 128c306db3ee..0893b326a28b 100644 --- a/scripts/gdb/linux/utils.py +++ b/scripts/gdb/linux/utils.py @@ -83,7 +83,7 @@ def get_target_endianness(): elif "big endian" in endian: target_endianness = BIG_ENDIAN else: - raise gdb.GdgError("unknown endianness '{0}'".format(str(endian))) + raise gdb.GdbError("unknown endianness '{0}'".format(str(endian))) return target_endianness @@ -151,6 +151,6 @@ def get_gdbserver_type(): gdbserver_type = GDBSERVER_QEMU elif probe_kgdb(): gdbserver_type = GDBSERVER_KGDB - if not gdbserver_type is None and hasattr(gdb, 'events'): + if gdbserver_type is not None and hasattr(gdb, 'events'): gdb.events.exited.connect(exit_handler) return gdbserver_type diff --git a/scripts/gdb/vmlinux-gdb.py b/scripts/gdb/vmlinux-gdb.py index 48489285f119..ce82bf5c3943 100644 --- a/scripts/gdb/vmlinux-gdb.py +++ b/scripts/gdb/vmlinux-gdb.py @@ -28,3 +28,4 @@ else: import linux.dmesg import linux.tasks import linux.cpus + import linux.lists diff --git a/scripts/tags.sh b/scripts/tags.sh index cdb491d84503..c0a932dff329 100755 --- a/scripts/tags.sh +++ b/scripts/tags.sh @@ -154,7 +154,7 @@ exuberant() { all_target_sources | xargs $1 -a \ -I __initdata,__exitdata,__initconst, \ - -I __cpuinitdata,__initdata_memblock \ + -I __initdata_memblock \ -I __refdata,__attribute,__maybe_unused,__always_unused \ -I __acquires,__releases,__deprecated \ -I __read_mostly,__aligned,____cacheline_aligned \ diff --git a/sound/core/ctljack.c b/sound/core/ctljack.c index 9149a4aefa95..84a3cd683068 100644 --- a/sound/core/ctljack.c +++ b/sound/core/ctljack.c @@ -41,8 +41,11 @@ static int get_available_index(struct snd_card *card, const char *name) sid.iface = SNDRV_CTL_ELEM_IFACE_CARD; strlcpy(sid.name, name, sizeof(sid.name)); - while (snd_ctl_find_id(card, &sid)) + while (snd_ctl_find_id(card, &sid)) { sid.index++; + /* reset numid; otherwise snd_ctl_find_id() hits this again */ + sid.numid = 0; + } return sid.index; } diff --git a/sound/core/init.c b/sound/core/init.c index 3e0cebacefe1..20f37fb3800e 100644 --- a/sound/core/init.c +++ b/sound/core/init.c @@ -109,13 +109,12 @@ static void snd_card_id_read(struct snd_info_entry *entry, static int init_info_for_card(struct snd_card *card) { - int err; struct snd_info_entry *entry; entry = snd_info_create_card_entry(card, "id", card->proc_root); if (!entry) { dev_dbg(card->dev, "unable to create card entry\n"); - return err; + return -ENOMEM; } entry->c.text.read = snd_card_id_read; card->proc_id = entry; diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c index 082509eb805d..f05cb6a8cbe0 100644 --- a/sound/core/memalloc.c +++ b/sound/core/memalloc.c @@ -124,7 +124,7 @@ static void snd_malloc_dev_iram(struct snd_dma_buffer 
*dmab, size_t size) dmab->addr = 0; if (dev->of_node) - pool = of_get_named_gen_pool(dev->of_node, "iram", 0); + pool = of_gen_pool_get(dev->of_node, "iram", 0); if (!pool) return; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 9913e24d6699..745535d1840a 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2180,6 +2180,8 @@ static const struct pci_device_id azx_ids[] = { { PCI_DEVICE(0x1022, 0x780d), .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, /* ATI HDMI */ + { PCI_DEVICE(0x1002, 0x1308), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, { PCI_DEVICE(0x1002, 0x793b), .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, { PCI_DEVICE(0x1002, 0x7919), @@ -2188,6 +2190,8 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, { PCI_DEVICE(0x1002, 0x970f), .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, + { PCI_DEVICE(0x1002, 0x9840), + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, { PCI_DEVICE(0x1002, 0xaa00), .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, { PCI_DEVICE(0x1002, 0xaa08), diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index f8527342a150..2f2433845d04 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -591,7 +591,7 @@ static int eld_proc_new(struct hdmi_spec_per_pin *per_pin, int index) static void eld_proc_free(struct hdmi_spec_per_pin *per_pin) { - if (!per_pin->codec->bus->shutdown && per_pin->proc_entry) { + if (!per_pin->codec->bus->shutdown) { snd_info_free_entry(per_pin->proc_entry); per_pin->proc_entry = NULL; } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 431a20b17df4..b3b44681d3cf 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4464,6 +4464,7 @@ enum { ALC269_FIXUP_LIFEBOOK, ALC269_FIXUP_LIFEBOOK_EXTMIC, ALC269_FIXUP_LIFEBOOK_HP_PIN, + ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT, ALC269_FIXUP_AMIC, ALC269_FIXUP_DMIC, ALC269VB_FIXUP_AMIC, @@ -4484,6 +4485,7 @@ enum { ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, ALC269_FIXUP_HEADSET_MODE, ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, + ALC269_FIXUP_ASPIRE_HEADSET_MIC, ALC269_FIXUP_ASUS_X101_FUNC, ALC269_FIXUP_ASUS_X101_VERB, ALC269_FIXUP_ASUS_X101, @@ -4511,6 +4513,7 @@ enum { ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC, ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, ALC292_FIXUP_TPT440_DOCK, + ALC292_FIXUP_TPT440_DOCK2, ALC283_FIXUP_BXBT2807_MIC, ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED, ALC282_FIXUP_ASPIRE_V5_PINS, @@ -4521,6 +4524,8 @@ enum { ALC288_FIXUP_DELL_HEADSET_MODE, ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, ALC288_FIXUP_DELL_XPS_13_GPIO6, + ALC288_FIXUP_DELL_XPS_13, + ALC288_FIXUP_DISABLE_AAMIX, ALC292_FIXUP_DELL_E7X, ALC292_FIXUP_DISABLE_AAMIX, ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, @@ -4630,6 +4635,10 @@ static const struct hda_fixup alc269_fixups[] = { { } }, }, + [ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc269_fixup_pincfg_no_hp_to_lineout, + }, [ALC269_FIXUP_AMIC] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -4758,6 +4767,15 @@ static const struct hda_fixup alc269_fixups[] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_headset_mode_no_hp_mic, }, + [ALC269_FIXUP_ASPIRE_HEADSET_MIC] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + { 0x19, 0x01a1913c }, /* headset mic w/o jack detect */ + { } + }, + .chained = true, + .chain_id = 
ALC269_FIXUP_HEADSET_MODE, + }, [ALC286_FIXUP_SONY_MIC_NO_PRESENCE] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { @@ -4960,6 +4978,12 @@ static const struct hda_fixup alc269_fixups[] = { .chain_id = ALC269_FIXUP_HEADSET_MODE }, [ALC292_FIXUP_TPT440_DOCK] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc269_fixup_pincfg_no_hp_to_lineout, + .chained = true, + .chain_id = ALC292_FIXUP_TPT440_DOCK2 + }, + [ALC292_FIXUP_TPT440_DOCK2] = { .type = HDA_FIXUP_PINS, .v.pins = (const struct hda_pintbl[]) { { 0x16, 0x21211010 }, /* dock headphone */ @@ -5046,9 +5070,23 @@ static const struct hda_fixup alc269_fixups[] = { .chained = true, .chain_id = ALC288_FIXUP_DELL1_MIC_NO_PRESENCE }, + [ALC288_FIXUP_DISABLE_AAMIX] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_disable_aamix, + .chained = true, + .chain_id = ALC288_FIXUP_DELL_XPS_13_GPIO6 + }, + [ALC288_FIXUP_DELL_XPS_13] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc_fixup_dell_xps13, + .chained = true, + .chain_id = ALC288_FIXUP_DISABLE_AAMIX + }, [ALC292_FIXUP_DISABLE_AAMIX] = { .type = HDA_FIXUP_FUNC, .v.func = alc_fixup_disable_aamix, + .chained = true, + .chain_id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE }, [ALC292_FIXUP_DELL_E7X] = { .type = HDA_FIXUP_FUNC, @@ -5073,6 +5111,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC), SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700), + SND_PCI_QUIRK(0x1025, 0x072d, "Acer Aspire V5-571G", ALC269_FIXUP_ASPIRE_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x080d, "Acer Aspire V5-122P", ALC269_FIXUP_ASPIRE_HEADSET_MIC), SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK), SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK), SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572), @@ -5086,10 +5126,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK), SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK), + SND_PCI_QUIRK(0x1028, 0x062e, "Dell Latitude E7450", ALC292_FIXUP_DELL_E7X), SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK), SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), - SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC292_FIXUP_DELL_E7X), + SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13), SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), @@ -5173,6 +5214,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ), SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX), SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK), + SND_PCI_QUIRK(0x10cf, 0x159f, "Lifebook E780", ALC269_FIXUP_LIFEBOOK_NO_HP_TO_LINEOUT), SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN), SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN), SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", 
ALC269_FIXUP_LIFEBOOK_EXTMIC), diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index 0521be8d46a8..da5366405eda 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c @@ -241,7 +241,9 @@ static int via_pin_power_ctl_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); - ucontrol->value.enumerated.item[0] = codec->power_save_node; + struct via_spec *spec = codec->spec; + + ucontrol->value.enumerated.item[0] = spec->gen.power_down_unused; return 0; } @@ -252,9 +254,9 @@ static int via_pin_power_ctl_put(struct snd_kcontrol *kcontrol, struct via_spec *spec = codec->spec; bool val = !!ucontrol->value.enumerated.item[0]; - if (val == codec->power_save_node) + if (val == spec->gen.power_down_unused) return 0; - codec->power_save_node = val; + /* codec->power_save_node = val; */ /* widget PM seems yet broken */ spec->gen.power_down_unused = val; analog_low_current_mode(codec); return 1;
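
Note on the libfdt_env.h rework earlier in this series: the old fdt32_to_cpu()/cpu_to_fdt32() pair were aliases of each other and took plain uint32_t, so byte-order mix-ups could not be caught. The new typed fdt16_t/fdt32_t/fdt64_t together with the EXTRACT_BYTE()-based CPU_TO_FDT*() macros build the on-wire (big-endian) representation by indexing the value's bytes in host memory, which is also why the setprop/appendprop helpers now store the result in a separate fdt32_t/fdt64_t temporary instead of writing back into the uint32_t/uint64_t parameter. A minimal standalone sketch of the same byte-extraction idea (the demo_* names are illustrative, not part of libfdt):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Like CPU_TO_FDT32(): pick up the bytes of 'x' as they sit in host memory
 * and pack them so that the result's in-memory layout is big-endian, i.e.
 * the byte order a flattened device tree uses, on any host. */
static uint32_t demo_cpu_to_fdt32(uint32_t x)
{
	const uint8_t *b = (const uint8_t *)&x;

	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

int main(void)
{
	uint32_t cell = demo_cpu_to_fdt32(0x12345678);
	uint8_t buf[4];

	/* This memcpy() is effectively what fdt_setprop_inplace_u32() does
	 * with its fdt32_t temporary. */
	memcpy(buf, &cell, sizeof(cell));
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	/* prints "12 34 56 78" on little- and big-endian hosts alike */
	return 0;
}

Because the conversion indexes memory rather than relying on the host's integer byte order, the same source builds and behaves identically on both endiannesses, which is the property the dtc/libfdt update depends on.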
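
For the new fdt_address_cells()/fdt_size_cells() helpers documented above, the typical use is to ask a bus node how many cells its children's reg entries occupy. A sketch under stated assumptions: it needs a libfdt recent enough to ship these helpers (dtc 1.4.1 or later), the node layout and buffer size are made up for illustration, and error checking is elided for brevity.

#include <stdio.h>
#include <libfdt.h>

int main(void)
{
	static char blob[1024];
	int node;

	/* Build a tiny tree with the sequential-write API:
	 *   / { soc { #address-cells = <2>; #size-cells = <1>; }; };
	 */
	fdt_create(blob, sizeof(blob));
	fdt_finish_reservemap(blob);
	fdt_begin_node(blob, "");
	fdt_begin_node(blob, "soc");
	fdt_property_u32(blob, "#address-cells", 2);
	fdt_property_u32(blob, "#size-cells", 1);
	fdt_end_node(blob);
	fdt_end_node(blob);
	fdt_finish(blob);

	node = fdt_path_offset(blob, "/soc");
	/* Children of /soc would then carry reg entries of 2 + 1 cells. */
	printf("#address-cells=%d #size-cells=%d\n",
	       fdt_address_cells(blob, node),
	       fdt_size_cells(blob, node));
	return 0;
}

Built against the dtc-provided library (e.g. cc demo.c -lfdt), this prints "#address-cells=2 #size-cells=1"; a node without the properties would fall back to the defaults described in the function comments above.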