420 files changed, 11526 insertions, 5897 deletions
diff --git a/Documentation/ABI/testing/dell-smbios-wmi b/Documentation/ABI/testing/dell-smbios-wmi new file mode 100644 index 000000000000..fc919ce16008 --- /dev/null +++ b/Documentation/ABI/testing/dell-smbios-wmi @@ -0,0 +1,41 @@ +What: /dev/wmi/dell-smbios +Date: November 2017 +KernelVersion: 4.15 +Contact: "Mario Limonciello" <[email protected]> +Description: + Perform SMBIOS calls on supported Dell machines. + through the Dell ACPI-WMI interface. + + IOCTL's and buffer formats are defined in: + <uapi/linux/wmi.h> + + 1) To perform an SMBIOS call from userspace, you'll need to + first determine the minimum size of the calling interface + buffer for your machine. + Platforms that contain larger buffers can return larger + objects from the system firmware. + Commonly this size is either 4k or 32k. + + To determine the size of the buffer read() a u64 dword from + the WMI character device /dev/wmi/dell-smbios. + + 2) After you've determined the minimum size of the calling + interface buffer, you can allocate a structure that represents + the structure documented above. + + 3) In the 'length' object store the size of the buffer you + determined above and allocated. + + 4) In this buffer object, prepare as necessary for the SMBIOS + call you're interested in. Typically SMBIOS buffers have + "class", "select", and "input" defined to values that coincide + with the data you are interested in. + Documenting class/select/input values is outside of the scope + of this documentation. Check with the libsmbios project for + further documentation on these values. + + 6) Run the call by using ioctl() as described in the header. + + 7) The output will be returned in the buffer object. + + 8) Be sure to free up your allocated object. diff --git a/Documentation/ABI/testing/sysfs-platform-dell-smbios b/Documentation/ABI/testing/sysfs-platform-dell-smbios new file mode 100644 index 000000000000..205d3b6361e0 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-platform-dell-smbios @@ -0,0 +1,21 @@ +What: /sys/devices/platform/<platform>/tokens/* +Date: November 2017 +KernelVersion: 4.15 +Contact: "Mario Limonciello" <[email protected]> +Description: + A read-only description of Dell platform tokens + available on the machine. + + Each token attribute is available as a pair of + sysfs attributes readable by a process with + CAP_SYS_ADMIN. + + For example the token ID "5" would be available + as the following attributes: + + 0005_location + 0005_value + + Tokens will vary from machine to machine, and + only tokens available on that machine will be + displayed. 
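To make the calling sequence documented above concrete, here is a minimal userspace sketch. The struct and ioctl names (struct dell_wmi_smbios_buffer, DELL_WMI_SMBIOS_CMD, the cmd_class/cmd_select/input/output members) are assumptions based on <uapi/linux/wmi.h> as added by this series; tools/wmi/dell-smbios-example.c, referenced in the MAINTAINERS hunk below, is the authoritative reference.

/*
 * Hedged sketch of the /dev/wmi/dell-smbios call sequence; names taken
 * from <uapi/linux/wmi.h> in this series are assumptions, verify against
 * tools/wmi/dell-smbios-example.c before relying on them.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/wmi.h>

int main(void)
{
	struct dell_wmi_smbios_buffer *buf;
	uint64_t buf_size;
	int fd, ret = 1;

	fd = open("/dev/wmi/dell-smbios", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* 1) Read the minimum calling-interface buffer size (a u64). */
	if (read(fd, &buf_size, sizeof(buf_size)) != sizeof(buf_size)) {
		perror("read buffer size");
		goto out_close;
	}

	/* 2) + 3) Allocate a buffer of that size and record it in 'length'. */
	buf = calloc(1, buf_size);
	if (!buf)
		goto out_close;
	buf->length = buf_size;

	/* 4) Fill in class/select/input for the call you want; zeroes are
	 *    placeholders only, real values come from the libsmbios docs. */
	buf->std.cmd_class = 0;
	buf->std.cmd_select = 0;

	/* 6) Run the call; 7) the result comes back in the same buffer. */
	if (ioctl(fd, DELL_WMI_SMBIOS_CMD, buf) == 0) {
		printf("output: %u %u %u %u\n",
		       buf->std.output[0], buf->std.output[1],
		       buf->std.output[2], buf->std.output[3]);
		ret = 0;
	} else {
		perror("ioctl");
	}

	free(buf);		/* 8) Free the allocated buffer. */
out_close:
	close(fd);
	return ret;
}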
diff --git a/Documentation/ABI/testing/sysfs-platform-intel-wmi-thunderbolt b/Documentation/ABI/testing/sysfs-platform-intel-wmi-thunderbolt new file mode 100644 index 000000000000..8af65059d519 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-platform-intel-wmi-thunderbolt @@ -0,0 +1,11 @@ +What: /sys/devices/platform/<platform>/force_power +Date: September 2017 +KernelVersion: 4.15 +Contact: "Mario Limonciello" <[email protected]> +Description: + Modify the platform force power state, influencing + Thunderbolt controllers to turn on or off when no + devices are connected (write-only) + There are two available states: + * 0 -> Force power disabled + * 1 -> Force power enabled diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 62436bd5f34a..b44217290e57 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3246,13 +3246,15 @@ instead using the legacy FADT method profile= [KNL] Enable kernel profiling via /proc/profile - Format: [schedule,]<number> + Format: [<profiletype>,]<number> + Param: <profiletype>: "schedule", "sleep", or "kvm" + [defaults to kernel profiling] Param: "schedule" - profile schedule points. - Param: <number> - step/bucket size as a power of 2 for - statistical time based profiling. Param: "sleep" - profile D-state sleeping (millisecs). Requires CONFIG_SCHEDSTATS Param: "kvm" - profile VM exits. + Param: <number> - step/bucket size as a power of 2 for + statistical time based profiling. prompt_ramdisk= [RAM] List of RAM disks to prompt for floppy disk before loading. diff --git a/Documentation/admin-guide/thunderbolt.rst b/Documentation/admin-guide/thunderbolt.rst index 5c62d11d77e8..de50a8561774 100644 --- a/Documentation/admin-guide/thunderbolt.rst +++ b/Documentation/admin-guide/thunderbolt.rst @@ -221,3 +221,18 @@ The driver will create one virtual ethernet interface per Thunderbolt port which are named like ``thunderbolt0`` and so on. From this point you can either use standard userspace tools like ``ifconfig`` to configure the interface or let your GUI to handle it automatically. + +Forcing power +------------- +Many OEMs include a method that can be used to force the power of a +thunderbolt controller to an "On" state even if nothing is connected. +If supported by your machine this will be exposed by the WMI bus with +a sysfs attribute called "force_power". + +For example the intel-wmi-thunderbolt driver exposes this attribute in: + /sys/devices/platform/PNP0C14:00/wmi_bus/wmi_bus-PNP0C14:00/86CCFD48-205E-4A77-9C48-2021CBEDE341/force_power + + To force the power to on, write 1 to this attribute file. + To disable force power, write 0 to this attribute file. + +Note: it's currently not possible to query the force power state of a platform. diff --git a/Documentation/devicetree/bindings/display/google,goldfish-fb.txt b/Documentation/devicetree/bindings/display/google,goldfish-fb.txt new file mode 100644 index 000000000000..751fa9f51e5d --- /dev/null +++ b/Documentation/devicetree/bindings/display/google,goldfish-fb.txt @@ -0,0 +1,17 @@ +Android Goldfish framebuffer + +Android Goldfish framebuffer device used by Android emulator. 
+ +Required properties: + +- compatible : should contain "google,goldfish-fb" +- reg : <registers mapping> +- interrupts : <interrupt mapping> + +Example: + + display-controller@1f008000 { + compatible = "google,goldfish-fb"; + interrupts = <0x10>; + reg = <0x1f008000 0x100>; + }; diff --git a/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt b/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt index f79854783c2c..5bf77f6dd19d 100644 --- a/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt +++ b/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt @@ -129,7 +129,7 @@ Optional properties: example: -display@di0 { +disp0 { compatible = "fsl,imx-parallel-display"; edid = [edid-data]; interface-pix-fmt = "rgb24"; diff --git a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt index f248056da24c..bb2075df9b38 100644 --- a/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt +++ b/Documentation/devicetree/bindings/mtd/cadence-quadspi.txt @@ -1,7 +1,9 @@ * Cadence Quad SPI controller Required properties: -- compatible : Should be "cdns,qspi-nor". +- compatible : should be one of the following: + Generic default - "cdns,qspi-nor". + For TI 66AK2G SoC - "ti,k2g-qspi", "cdns,qspi-nor". - reg : Contains two entries, each of which is a tuple consisting of a physical address and length. The first entry is the address and length of the controller register set. The second entry is the @@ -14,6 +16,9 @@ Required properties: Optional properties: - cdns,is-decoded-cs : Flag to indicate whether decoder is used or not. +- cdns,rclk-en : Flag to indicate that QSPI return clock is used to latch + the read data rather than the QSPI clock. Make sure that QSPI return + clock is populated on the board before using this property. Optional subnodes: Subnodes of the Cadence Quad SPI controller are spi slave nodes with additional diff --git a/Documentation/devicetree/bindings/mtd/denali-nand.txt b/Documentation/devicetree/bindings/mtd/denali-nand.txt index 504291d2e5c2..0ee8edb60efc 100644 --- a/Documentation/devicetree/bindings/mtd/denali-nand.txt +++ b/Documentation/devicetree/bindings/mtd/denali-nand.txt @@ -29,7 +29,7 @@ nand: nand@ff900000 { #address-cells = <1>; #size-cells = <1>; compatible = "altr,socfpga-denali-nand"; - reg = <0xff900000 0x100000>, <0xffb80000 0x10000>; + reg = <0xff900000 0x20>, <0xffb80000 0x1000>; reg-names = "nand_data", "denali_reg"; interrupts = <0 144 4>; }; diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt index 4cab5d85cf6f..376fa2f50e6b 100644 --- a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt +++ b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt @@ -14,6 +14,7 @@ Required properties: at25df641 at26df081a en25s64 + mr25h128 mr25h256 mr25h10 mr25h40 diff --git a/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt b/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt index 840f9405dcf0..56d3668e2c50 100644 --- a/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt +++ b/Documentation/devicetree/bindings/mtd/mtk-quadspi.txt @@ -1,13 +1,16 @@ * Serial NOR flash controller for MTK MT81xx (and similar) Required properties: -- compatible: The possible values are: - "mediatek,mt2701-nor" - "mediatek,mt7623-nor" +- compatible: For mt8173, compatible should be "mediatek,mt8173-nor", + and it's the fallback compatible for other Soc. 
+ For every other SoC, should contain both the SoC-specific compatible + string and "mediatek,mt8173-nor". + The possible values are: + "mediatek,mt2701-nor", "mediatek,mt8173-nor" + "mediatek,mt2712-nor", "mediatek,mt8173-nor" + "mediatek,mt7622-nor", "mediatek,mt8173-nor" + "mediatek,mt7623-nor", "mediatek,mt8173-nor" "mediatek,mt8173-nor" - For mt8173, compatible should be "mediatek,mt8173-nor". - For every other SoC, should contain both the SoC-specific compatible string - and "mediatek,mt8173-nor". - reg: physical base address and length of the controller's register - clocks: the phandle of the clocks needed by the nor controller - clock-names: the names of the clocks diff --git a/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt b/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt index d9b655f11048..d4ee4da58463 100644 --- a/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt +++ b/Documentation/devicetree/bindings/mtd/pxa3xx-nand.txt @@ -5,9 +5,13 @@ Required properties: - compatible: Should be set to one of the following: marvell,pxa3xx-nand marvell,armada370-nand + marvell,armada-8k-nand - reg: The register base for the controller - interrupts: The interrupt to map - #address-cells: Set to <1> if the node includes partitions + - marvell,system-controller: Set to retrieve the syscon node that handles + NAND controller related registers (only required + with marvell,armada-8k-nand compatible). Optional properties: diff --git a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt index 7e94b802395d..74c118015980 100644 --- a/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt +++ b/Documentation/devicetree/bindings/pwm/renesas,pwm-rcar.txt @@ -9,6 +9,7 @@ Required Properties: - "renesas,pwm-r8a7794": for R-Car E2 - "renesas,pwm-r8a7795": for R-Car H3 - "renesas,pwm-r8a7796": for R-Car M3-W + - "renesas,pwm-r8a77995": for R-Car D3 - reg: base address and length of the registers block for the PWM. - #pwm-cells: should be 2. See pwm.txt in this directory for a description of the cells format. diff --git a/Documentation/devicetree/bindings/rtc/imxdi-rtc.txt b/Documentation/devicetree/bindings/rtc/imxdi-rtc.txt index 323cf26374cb..c797bc9d77d2 100644 --- a/Documentation/devicetree/bindings/rtc/imxdi-rtc.txt +++ b/Documentation/devicetree/bindings/rtc/imxdi-rtc.txt @@ -1,20 +1,20 @@ * i.MX25 Real Time Clock controller -This binding supports the following chips: i.MX25, i.MX53 - Required properties: - compatible: should be: "fsl,imx25-rtc" - reg: physical base address of the controller and length of memory mapped region. +- clocks: should contain the phandle for the rtc clock - interrupts: rtc alarm interrupt Optional properties: -- interrupts: dryice security violation interrupt +- interrupts: dryice security violation interrupt (second entry) Example: -rtc@80056000 { - compatible = "fsl,imx53-rtc", "fsl,imx25-rtc"; - reg = <0x80056000 2000>; - interrupts = <29 56>; +rtc@53ffc000 { + compatible = "fsl,imx25-rtc"; + reg = <0x53ffc000 0x4000>; + clocks = <&clks 81>; + interrupts = <25 56>; }; diff --git a/Documentation/devicetree/bindings/rtc/pcf85363.txt b/Documentation/devicetree/bindings/rtc/pcf85363.txt new file mode 100644 index 000000000000..76fdabc59742 --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/pcf85363.txt @@ -0,0 +1,17 @@ +NXP PCF85363 Real Time Clock +============================ + +Required properties: +- compatible: Should contain "nxp,pcf85363". +- reg: I2C address for chip. 
+ +Optional properties: +- interrupts: IRQ line for the RTC (not implemented). + +Example: + +pcf85363: pcf85363@51 { + compatible = "nxp,pcf85363"; + reg = <0x51>; +}; + diff --git a/Documentation/devicetree/bindings/rtc/rtc-mt7622.txt b/Documentation/devicetree/bindings/rtc/rtc-mt7622.txt new file mode 100644 index 000000000000..09fe8f51476f --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/rtc-mt7622.txt @@ -0,0 +1,21 @@ +Device-Tree bindings for MediaTek SoC based RTC + +Required properties: +- compatible : Should be + "mediatek,mt7622-rtc", "mediatek,soc-rtc" : for MT7622 SoC +- reg : Specifies base physical address and size of the registers; +- interrupts : Should contain the interrupt for RTC alarm; +- clocks : Specifies list of clock specifiers, corresponding to + entries in clock-names property; +- clock-names : Should contain "rtc" entries + +Example: + +rtc: rtc@10212800 { + compatible = "mediatek,mt7622-rtc", + "mediatek,soc-rtc"; + reg = <0 0x10212800 0 0x200>; + interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_LOW>; + clocks = <&topckgen CLK_TOP_RTC>; + clock-names = "rtc"; +}; diff --git a/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt b/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt new file mode 100644 index 000000000000..7c170da0d4b7 --- /dev/null +++ b/Documentation/devicetree/bindings/rtc/sprd,sc27xx-rtc.txt @@ -0,0 +1,27 @@ +Spreadtrum SC27xx Real Time Clock + +Required properties: +- compatible: should be "sprd,sc2731-rtc". +- reg: address offset of rtc register. +- interrupt-parent: phandle for the interrupt controller. +- interrupts: rtc alarm interrupt. + +Example: + + sc2731_pmic: pmic@0 { + compatible = "sprd,sc2731"; + reg = <0>; + spi-max-frequency = <26000000>; + interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>; + interrupt-controller; + #interrupt-cells = <2>; + #address-cells = <1>; + #size-cells = <0>; + + rtc@280 { + compatible = "sprd,sc2731-rtc"; + reg = <0x280>; + interrupt-parent = <&sc2731_pmic>; + interrupts = <2 IRQ_TYPE_LEVEL_HIGH>; + }; + }; diff --git a/Documentation/devicetree/bindings/trivial-devices.txt b/Documentation/devicetree/bindings/trivial-devices.txt index 27dce08edd73..5f3143f97098 100644 --- a/Documentation/devicetree/bindings/trivial-devices.txt +++ b/Documentation/devicetree/bindings/trivial-devices.txt @@ -55,7 +55,6 @@ epson,rx8010 I2C-BUS INTERFACE REAL TIME CLOCK MODULE epson,rx8581 I2C-BUS INTERFACE REAL TIME CLOCK MODULE emmicro,em3027 EM Microelectronic EM3027 Real-time Clock fsl,mag3110 MAG3110: Xtrinsic High Accuracy, 3D Magnetometer -fsl,mc13892 MC13892: Power Management Integrated Circuit (PMIC) for i.MX35/51 fsl,mma7660 MMA7660FC: 3-Axis Orientation/Motion Detection Sensor fsl,mma8450 MMA8450Q: Xtrinsic Low-power, 3-axis Xtrinsic Accelerometer fsl,mpl3115 MPL3115: Absolute Digital Pressure Sensor @@ -73,7 +72,6 @@ maxim,ds1050 5 Bit Programmable, Pulse-Width Modulator maxim,max1237 Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs maxim,max6621 PECI-to-I2C translator for PECI-to-SMBus/I2C protocol conversion maxim,max6625 9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface -mc,rv3029c2 Real Time Clock Module with I2C-Bus mcube,mc3230 mCube 3-axis 8-bit digital accelerometer memsic,mxc6225 MEMSIC 2-axis 8-bit digital accelerometer microchip,mcp4531-502 Microchip 7-bit Single I2C Digital Potentiometer (5k) @@ -142,6 +140,7 @@ microchip,mcp4662-503 Microchip 8-bit Dual I2C Digital Potentiometer with NV Mem microchip,mcp4662-104 Microchip 8-bit Dual I2C Digital Potentiometer with NV 
Memory (100k) microchip,tc654 PWM Fan Speed Controller With Fan Fault Detection microchip,tc655 PWM Fan Speed Controller With Fan Fault Detection +microcrystal,rv3029 Real Time Clock Module with I2C-Bus miramems,da226 MiraMEMS DA226 2-axis 14-bit digital accelerometer miramems,da280 MiraMEMS DA280 3-axis 14-bit digital accelerometer miramems,da311 MiraMEMS DA311 3-axis 12-bit digital accelerometer diff --git a/Documentation/devicetree/bindings/usb/usb-device.txt b/Documentation/devicetree/bindings/usb/usb-device.txt index ce02cebac26a..1b27cebb47f4 100644 --- a/Documentation/devicetree/bindings/usb/usb-device.txt +++ b/Documentation/devicetree/bindings/usb/usb-device.txt @@ -4,24 +4,35 @@ Usually, we only use device tree for hard wired USB device. The reference binding doc is from: http://www.devicetree.org/open-firmware/bindings/usb/usb-1_0.ps + Required properties: -- compatible: usbVID,PID. The textual representation of VID, PID shall - be in lower case hexadecimal with leading zeroes suppressed. The - other compatible strings from the above standard binding could also - be used, but a device adhering to this binding may leave out all except - for usbVID,PID. -- reg: the port number which this device is connecting to, the range - is 1-31. +- compatible: "usbVID,PID", where VID is the vendor id and PID the product id. + The textual representation of VID and PID shall be in lower case hexadecimal + with leading zeroes suppressed. The other compatible strings from the above + standard binding could also be used, but a device adhering to this binding + may leave out all except for "usbVID,PID". +- reg: the number of the USB hub port or the USB host-controller port to which + this device is attached. The range is 1-255. + + +Required properties for hub nodes with device nodes: +- #address-cells: shall be 1 +- #size-cells: shall be 0 -Example: -&usb1 { +Required properties for host-controller nodes with device nodes: +- #address-cells: shall be 1 +- #size-cells: shall be 0 + + +Example: +&usb1 { /* host controller */ #address-cells = <1>; #size-cells = <0>; - hub: genesys@1 { + hub@1 { /* hub connected to port 1 */ compatible = "usb5e3,608"; reg = <1>; }; -} +}; diff --git a/Documentation/process/5.Posting.rst b/Documentation/process/5.Posting.rst index 1b7728b19ea7..645fa9c7388a 100644 --- a/Documentation/process/5.Posting.rst +++ b/Documentation/process/5.Posting.rst @@ -213,6 +213,11 @@ The tags in common use are: which can be found in Documentation/process/submitting-patches.rst. Code without a proper signoff cannot be merged into the mainline. + - Co-Developed-by: states that the patch was also created by another developer + along with the original author. This is useful at times when multiple + people work on a single patch. Note, this person also needs to have a + Signed-off-by: line in the patch as well. + - Acked-by: indicates an agreement by another developer (often a maintainer of the relevant code) that the patch is appropriate for inclusion into the kernel. diff --git a/Documentation/security/keys/core.rst b/Documentation/security/keys/core.rst index 1266eeae45f6..9ce7256c6edb 100644 --- a/Documentation/security/keys/core.rst +++ b/Documentation/security/keys/core.rst @@ -628,12 +628,12 @@ The keyctl syscall functions are: defined key type will return its data as is. If a key type does not implement this function, error EOPNOTSUPP will result. - As much of the data as can be fitted into the buffer will be copied to - userspace if the buffer pointer is not NULL. 
- - On a successful return, the function will always return the amount of data - available rather than the amount copied. + If the specified buffer is too small, then the size of the buffer required + will be returned. Note that in this case, the contents of the buffer may + have been overwritten in some undefined way. + Otherwise, on success, the function will return the amount of data copied + into the buffer. * Instantiate a partially constructed key:: diff --git a/Documentation/svga.txt b/Documentation/svga.txt index 119f1515b1ac..b6c2f9acca92 100644 --- a/Documentation/svga.txt +++ b/Documentation/svga.txt @@ -67,8 +67,7 @@ The menu looks like:: <name-of-detected-video-adapter> tells what video adapter did Linux detect -- it's either a generic adapter name (MDA, CGA, HGC, EGA, VGA, VESA VGA [a VGA with VESA-compliant BIOS]) or a chipset name (e.g., Trident). Direct detection -of chipsets is turned off by default (see CONFIG_VIDEO_SVGA in chapter 4 to see -how to enable it if you really want) as it's inherently unreliable due to +of chipsets is turned off by default as it's inherently unreliable due to absolutely insane PC design. "0 0F00 80x25" means that the first menu item (the menu items are numbered @@ -138,7 +137,7 @@ The ID numbers can be divided to those regions:: 0x0f05 VGA 80x30 (480 scans, 16-point font) 0x0f06 VGA 80x34 (480 scans, 14-point font) 0x0f07 VGA 80x60 (480 scans, 8-point font) - 0x0f08 Graphics hack (see the CONFIG_VIDEO_HACK paragraph below) + 0x0f08 Graphics hack (see the VIDEO_GFX_HACK paragraph below) 0x1000 to 0x7fff - modes specified by resolution. The code has a "0xRRCC" form where RR is a number of rows and CC is a number of columns. @@ -160,58 +159,22 @@ end of the display. Options ~~~~~~~ -Some options can be set in the source text (in arch/i386/boot/video.S). -All of them are simple #define's -- change them to #undef's when you want to -switch them off. Currently supported: - -CONFIG_VIDEO_SVGA - enables autodetection of SVGA cards. This is switched -off by default as it's a bit unreliable due to terribly bad PC design. If you -really want to have the adapter autodetected (maybe in case the ``scan`` feature -doesn't work on your machine), switch this on and don't cry if the results -are not completely sane. In case you really need this feature, please drop me -a mail as I think of removing it some day. - -CONFIG_VIDEO_VESA - enables autodetection of VESA modes. If it doesn't work -on your machine (or displays a "Error: Scanning of VESA modes failed" message), -you can switch it off and report as a bug. - -CONFIG_VIDEO_COMPACT - enables compacting of the video mode list. If there -are more modes with the same screen size, only the first one is kept (see above -for more info on mode ordering). However, in very strange cases it's possible -that the first "version" of the mode doesn't work although some of the others -do -- in this case turn this switch off to see the rest. - -CONFIG_VIDEO_RETAIN - enables retaining of screen contents when switching -video modes. Works only with some boot loaders which leave enough room for the -buffer. (If you have old LILO, you can adjust heap_end_ptr and loadflags -in setup.S, but it's better to upgrade the boot loader...) - -CONFIG_VIDEO_LOCAL - enables inclusion of "local modes" in the list. The -local modes are added automatically to the beginning of the list not depending -on hardware configuration. The local modes are listed in the source text after -the "local_mode_table:" line. 
The comment before this line describes the format -of the table (which also includes a video card name to be displayed on the -top of the menu). - -CONFIG_VIDEO_400_HACK - force setting of 400 scan lines for standard VGA -modes. This option is intended to be used on certain buggy BIOSes which draw -some useless logo using font download and then fail to reset the correct mode. -Don't use unless needed as it forces resetting the video card. - -CONFIG_VIDEO_GFX_HACK - includes special hack for setting of graphics modes -to be used later by special drivers (e.g., 800x600 on IBM ThinkPad -- see -ftp://ftp.phys.keio.ac.jp/pub/XFree86/800x600/XF86Configs/XF86Config.IBM_TP560). +Build options for arch/x86/boot/* are selected by the kernel kconfig +utility and the kernel .config file. + +VIDEO_GFX_HACK - includes special hack for setting of graphics modes +to be used later by special drivers. Allows to set _any_ BIOS mode including graphic ones and forcing specific text screen resolution instead of peeking it from BIOS variables. Don't use unless you think you know what you're doing. To activate this setup, use -mode number 0x0f08 (see section 3). +mode number 0x0f08 (see the Mode IDs section above). Still doesn't work? ~~~~~~~~~~~~~~~~~~~ When the mode detection doesn't work (e.g., the mode list is incorrect or the machine hangs instead of displaying the menu), try to switch off some of -the configuration options listed in section 4. If it fails, you can still use +the configuration options listed under "Options". If it fails, you can still use your kernel with the video mode set directly via the kernel parameter. In either case, please send me a bug report containing what _exactly_ @@ -228,10 +191,6 @@ contains the most common video BIOS bug called "incorrect vertical display end setting". Adding 0x8000 to the mode ID might fix the problem. Unfortunately, this must be done manually -- no autodetection mechanisms are available. -If you have a VGA card and your display still looks as on EGA, your BIOS -is probably broken and you need to set the CONFIG_VIDEO_400_HACK switch to -force setting of the correct mode. - History ~~~~~~~ diff --git a/Documentation/switchtec.txt b/Documentation/switchtec.txt index a0a9c7b3d4d5..f788264921ff 100644 --- a/Documentation/switchtec.txt +++ b/Documentation/switchtec.txt @@ -78,3 +78,15 @@ The following IOCTLs are also supported by the device: between PCI Function Framework number (used by the event system) and Switchtec Logic Port ID and Partition number (which is more user friendly). + + +Non-Transparent Bridge (NTB) Driver +=================================== + +An NTB driver is provided for the switchtec hardware in switchtec_ntb. +Currently, it only supports switches configured with exactly 2 +partitions. It also requires the following configuration settings: + +* Both partitions must be able to access each other's GAS spaces. + Thus, the bits in the GAS Access Vector under Management Settings + must be set to support this. diff --git a/Documentation/translations/ko_KR/memory-barriers.txt b/Documentation/translations/ko_KR/memory-barriers.txt index ec3b46e27b7a..0a0930ab4156 100644 --- a/Documentation/translations/ko_KR/memory-barriers.txt +++ b/Documentation/translations/ko_KR/memory-barriers.txt @@ -82,7 +82,7 @@ Documentation/memory-barriers.txt - SMP 배리어 짝맞추기. - 메모리 배리어 시퀀스의 예. - 읽기 메모리 배리어 vs 로드 예측. - - 이행성 + - Multicopy 원자성. (*) 명시적 커널 배리어. @@ -656,6 +656,11 @@ Documentation/RCU/rcu_dereference.txt 파일을 주의 깊게 읽어 주시기 � 해줍니다. 
+데이터 의존성에 의해 제공되는 이 순서규칙은 이를 포함하고 있는 CPU 에 +지역적임을 알아두시기 바랍니다. 더 많은 정보를 위해선 "Multicopy 원자성" +섹션을 참고하세요. + + 데이터 의존성 배리어는 매우 중요한데, 예를 들어 RCU 시스템에서 그렇습니다. include/linux/rcupdate.h 의 rcu_assign_pointer() 와 rcu_dereference() 를 참고하세요. 여기서 데이터 의존성 배리어는 RCU 로 관리되는 포인터의 타겟을 현재 @@ -864,38 +869,10 @@ CPU 는 b 로부터의 로드 오퍼레이션이 a 로부터의 로드 오퍼레 주어진 if 문의 then 절과 else 절에게만 (그리고 이 두 절 내에서 호출되는 함수들에게까지) 적용되지, 이 if 문을 뒤따르는 코드에는 적용되지 않습니다. -마지막으로, 컨트롤 의존성은 이행성 (transitivity) 을 제공하지 -않습니다-. 이건 -'x' 와 'y' 가 둘 다 0 이라는 초기값을 가졌다는 가정 하의 두개의 예제로 -보이겠습니다: - - CPU 0 CPU 1 - ======================= ======================= - r1 = READ_ONCE(x); r2 = READ_ONCE(y); - if (r1 > 0) if (r2 > 0) - WRITE_ONCE(y, 1); WRITE_ONCE(x, 1); - - assert(!(r1 == 1 && r2 == 1)); - -이 두 CPU 예제에서 assert() 의 조건은 항상 참일 것입니다. 그리고, 만약 컨트롤 -의존성이 이행성을 (실제로는 그러지 않지만) 보장한다면, 다음의 CPU 가 추가되어도 -아래의 assert() 조건은 참이 될것입니다: - CPU 2 - ===================== - WRITE_ONCE(x, 2); +컨트롤 의존성에 의해 제공되는 이 순서규칙은 이를 포함하고 있는 CPU 에 +지역적입니다. 더 많은 정보를 위해선 "Multicopy 원자성" 섹션을 참고하세요. - assert(!(r1 == 2 && r2 == 1 && x == 2)); /* FAILS!!! */ - -하지만 컨트롤 의존성은 이행성을 제공하지 -않기- 때문에, 세개의 CPU 예제가 실행 -완료된 후에 위의 assert() 의 조건은 거짓으로 평가될 수 있습니다. 세개의 CPU -예제가 순서를 지키길 원한다면, CPU 0 와 CPU 1 코드의 로드와 스토어 사이, "if" -문 바로 다음에 smp_mb()를 넣어야 합니다. 더 나아가서, 최초의 두 CPU 예제는 -매우 위험하므로 사용되지 않아야 합니다. - -이 두개의 예제는 다음 논문: -http://www.cl.cam.ac.uk/users/pes20/ppc-supplemental/test6.pdf 와 -이 사이트: https://www.cl.cam.ac.uk/~pes20/ppcmem/index.html 에 나온 LB 와 WWC -리트머스 테스트입니다. 요약하자면: @@ -930,8 +907,8 @@ http://www.cl.cam.ac.uk/users/pes20/ppc-supplemental/test6.pdf 와 (*) 컨트롤 의존성은 보통 다른 타입의 배리어들과 짝을 맞춰 사용됩니다. - (*) 컨트롤 의존성은 이행성을 제공하지 -않습니다-. 이행성이 필요하다면, - smp_mb() 를 사용하세요. + (*) 컨트롤 의존성은 multicopy 원자성을 제공하지 -않습니다-. 모든 CPU 들이 + 특정 스토어를 동시에 보길 원한다면, smp_mb() 를 사용하세요. (*) 컴파일러는 컨트롤 의존성을 이해하고 있지 않습니다. 따라서 컴파일러가 여러분의 코드를 망가뜨리지 않도록 하는건 여러분이 해야 하는 일입니다. @@ -943,13 +920,14 @@ SMP 배리어 짝맞추기 CPU 간 상호작용을 다룰 때에 일부 타입의 메모리 배리어는 항상 짝을 맞춰 사용되어야 합니다. 적절하게 짝을 맞추지 않은 코드는 사실상 에러에 가깝습니다. -범용 배리어들은 범용 배리어끼리도 짝을 맞추지만 이행성이 없는 대부분의 다른 -타입의 배리어들과도 짝을 맞춥니다. ACQUIRE 배리어는 RELEASE 배리어와 짝을 -맞춥니다만, 둘 다 범용 배리어를 포함해 다른 배리어들과도 짝을 맞출 수 있습니다. -쓰기 배리어는 데이터 의존성 배리어나 컨트롤 의존성, ACQUIRE 배리어, RELEASE -배리어, 읽기 배리어, 또는 범용 배리어와 짝을 맞춥니다. 비슷하게 읽기 배리어나 -컨트롤 의존성, 또는 데이터 의존성 배리어는 쓰기 배리어나 ACQUIRE 배리어, -RELEASE 배리어, 또는 범용 배리어와 짝을 맞추는데, 다음과 같습니다: +범용 배리어들은 범용 배리어끼리도 짝을 맞추지만 multicopy 원자성이 없는 +대부분의 다른 타입의 배리어들과도 짝을 맞춥니다. ACQUIRE 배리어는 RELEASE +배리어와 짝을 맞춥니다만, 둘 다 범용 배리어를 포함해 다른 배리어들과도 짝을 +맞출 수 있습니다. 쓰기 배리어는 데이터 의존성 배리어나 컨트롤 의존성, ACQUIRE +배리어, RELEASE 배리어, 읽기 배리어, 또는 범용 배리어와 짝을 맞춥니다. +비슷하게 읽기 배리어나 컨트롤 의존성, 또는 데이터 의존성 배리어는 쓰기 배리어나 +ACQUIRE 배리어, RELEASE 배리어, 또는 범용 배리어와 짝을 맞추는데, 다음과 +같습니다: CPU 1 CPU 2 =============== =============== @@ -975,7 +953,7 @@ RELEASE 배리어, 또는 범용 배리어와 짝을 맞추는데, 다음과 같 =============== =============================== r1 = READ_ONCE(y); <범용 배리어> - WRITE_ONCE(y, 1); if (r2 = READ_ONCE(x)) { + WRITE_ONCE(x, 1); if (r2 = READ_ONCE(x)) { <묵시적 컨트롤 의존성> WRITE_ONCE(y, 1); } @@ -1361,57 +1339,74 @@ A 의 로드 두개가 모두 B 의 로드 뒤에 있지만, 서로 다른 값� : : +-------+ -이행성 ------- +MULTICOPY 원자성 +---------------- -이행성(transitivity)은 실제의 컴퓨터 시스템에서 항상 제공되지는 않는, 순서 -맞추기에 대한 상당히 직관적인 개념입니다. 다음의 예가 이행성을 보여줍니다: +Multicopy 원자성은 실제의 컴퓨터 시스템에서 항상 제공되지는 않는, 순서 맞추기에 +대한 상당히 직관적인 개념으로, 특정 스토어가 모든 CPU 들에게 동시에 보여지게 +됨을, 달리 말하자면 모든 CPU 들이 모든 스토어들이 보여지는 순서를 동의하게 되는 +것입니다. 하지만, 완전한 multicopy 원자성의 사용은 가치있는 하드웨어 +최적화들을 무능하게 만들어버릴 수 있어서, 보다 완화된 형태의 ``다른 multicopy +원자성'' 라는 이름의, 특정 스토어가 모든 -다른- CPU 들에게는 동시에 보여지게 +하는 보장을 대신 제공합니다. 이 문서의 뒷부분들은 이 완화된 형태에 대해 논하게 +됩니다만, 단순히 ``multicopy 원자성'' 이라고 부르겠습니다. 
+ +다음의 예가 multicopy 원자성을 보입니다: CPU 1 CPU 2 CPU 3 ======================= ======================= ======================= { X = 0, Y = 0 } - STORE X=1 LOAD X STORE Y=1 - <범용 배리어> <범용 배리어> - LOAD Y LOAD X - -CPU 2 의 X 로드가 1을 리턴했고 Y 로드가 0을 리턴했다고 해봅시다. 이는 CPU 2 의 -X 로드가 CPU 1 의 X 스토어 뒤에 이루어졌고 CPU 2 의 Y 로드는 CPU 3 의 Y 스토어 -전에 이루어졌음을 의미합니다. 그럼 "CPU 3 의 X 로드는 0을 리턴할 수 있나요?" - -CPU 2 의 X 로드는 CPU 1 의 스토어 후에 이루어졌으니, CPU 3 의 X 로드는 1을 -리턴하는게 자연스럽습니다. 이런 생각이 이행성의 한 예입니다: CPU A 에서 실행된 -로드가 CPU B 에서의 같은 변수에 대한 로드를 뒤따른다면, CPU A 의 로드는 CPU B -의 로드가 내놓은 값과 같거나 그 후의 값을 내놓아야 합니다. - -리눅스 커널에서 범용 배리어의 사용은 이행성을 보장합니다. 따라서, 앞의 예에서 -CPU 2 의 X 로드가 1을, Y 로드는 0을 리턴했다면, CPU 3 의 X 로드는 반드시 1을 -리턴합니다. - -하지만, 읽기나 쓰기 배리어에 대해서는 이행성이 보장되지 -않습니다-. 예를 들어, -앞의 예에서 CPU 2 의 범용 배리어가 아래처럼 읽기 배리어로 바뀐 경우를 생각해 -봅시다: + STORE X=1 r1=LOAD X (reads 1) LOAD Y (reads 1) + <범용 배리어> <읽기 배리어> + STORE Y=r1 LOAD X + +CPU 2 의 Y 로의 스토어에 사용되는 X 로드의 결과가 1 이었고 CPU 3 의 Y 로드가 +1을 리턴했다고 해봅시다. 이는 CPU 1 의 X 로의 스토어가 CPU 2 의 X 로부터의 +로드를 앞서고 CPU 2 의 Y 로의 스토어가 CPU 3 의 Y 로부터의 로드를 앞섬을 +의미합니다. 또한, 여기서의 메모리 배리어들은 CPU 2 가 자신의 로드를 자신의 +스토어 전에 수행하고, CPU 3 가 Y 로부터의 로드를 X 로부터의 로드 전에 수행함을 +보장합니다. 그럼 "CPU 3 의 X 로부터의 로드는 0 을 리턴할 수 있을까요?" + +CPU 3 의 X 로드가 CPU 2 의 로드보다 뒤에 이루어졌으므로, CPU 3 의 X 로부터의 +로드는 1 을 리턴한다고 예상하는게 당연합니다. 이런 예상은 multicopy +원자성으로부터 나옵니다: CPU B 에서 수행된 로드가 CPU A 의 같은 변수로부터의 +로드를 뒤따른다면 (그리고 CPU A 가 자신이 읽은 값으로 먼저 해당 변수에 스토어 +하지 않았다면) multicopy 원자성을 제공하는 시스템에서는, CPU B 의 로드가 CPU A +의 로드와 같은 값 또는 그 나중 값을 리턴해야만 합니다. 하지만, 리눅스 커널은 +시스템들이 multicopy 원자성을 제공할 것을 요구하지 않습니다. + +앞의 범용 메모리 배리어의 사용은 모든 multicopy 원자성의 부족을 보상해줍니다. +앞의 예에서, CPU 2 의 X 로부터의 로드가 1 을 리턴했고 CPU 3 의 Y 로부터의 +로드가 1 을 리턴했다면, CPU 3 의 X 로부터의 로드는 1을 리턴해야만 합니다. + +하지만, 의존성, 읽기 배리어, 쓰기 배리어는 항상 non-multicopy 원자성을 보상해 +주지는 않습니다. 예를 들어, CPU 2 의 범용 배리어가 앞의 예에서 사라져서 +아래처럼 데이터 의존성만 남게 되었다고 해봅시다: CPU 1 CPU 2 CPU 3 ======================= ======================= ======================= { X = 0, Y = 0 } - STORE X=1 LOAD X STORE Y=1 - <읽기 배리어> <범용 배리어> - LOAD Y LOAD X - -이 코드는 이행성을 갖지 않습니다: 이 예에서는, CPU 2 의 X 로드가 1을 -리턴하고, Y 로드는 0을 리턴하지만 CPU 3 의 X 로드가 0을 리턴하는 것도 완전히 -합법적입니다. - -CPU 2 의 읽기 배리어가 자신의 읽기는 순서를 맞춰줘도, CPU 1 의 스토어와의 -순서를 맞춰준다고는 보장할 수 없다는게 핵심입니다. 따라서, CPU 1 과 CPU 2 가 -버퍼나 캐시를 공유하는 시스템에서 이 예제 코드가 실행된다면, CPU 2 는 CPU 1 이 -쓴 값에 좀 빨리 접근할 수 있을 것입니다. 따라서 CPU 1 과 CPU 2 의 접근으로 -조합된 순서를 모든 CPU 가 동의할 수 있도록 하기 위해 범용 배리어가 필요합니다. - -범용 배리어는 "글로벌 이행성"을 제공해서, 모든 CPU 들이 오퍼레이션들의 순서에 -동의하게 할 것입니다. 반면, release-acquire 조합은 "로컬 이행성" 만을 -제공해서, 해당 조합이 사용된 CPU 들만이 해당 액세스들의 조합된 순서에 동의함이 -보장됩니다. 예를 들어, 존경스런 Herman Hollerith 의 C 코드로 보면: + STORE X=1 r1=LOAD X (reads 1) LOAD Y (reads 1) + <데이터 의존성> <읽기 배리어> + STORE Y=r1 LOAD X (reads 0) + +이 변화는 non-multicopy 원자성이 만연하게 합니다: 이 예에서, CPU 2 의 X +로부터의 로드가 1을 리턴하고, CPU 3 의 Y 로부터의 로드가 1 을 리턴하는데, CPU 3 +의 X 로부터의 로드가 0 을 리턴하는게 완전히 합법적입니다. + +핵심은, CPU 2 의 데이터 의존성이 자신의 로드와 스토어를 순서짓지만, CPU 1 의 +스토어에 대한 순서는 보장하지 않는다는 것입니다. 따라서, 이 예제가 CPU 1 과 +CPU 2 가 스토어 버퍼나 한 수준의 캐시를 공유하는, multicopy 원자성을 제공하지 +않는 시스템에서 수행된다면 CPU 2 는 CPU 1 의 쓰기에 이른 접근을 할 수도 +있습니다. 따라서, 모든 CPU 들이 여러 접근들의 조합된 순서에 대해서 동의하게 +하기 위해서는 범용 배리어가 필요합니다. + +범용 배리어는 non-multicopy 원자성만 보상할 수 있는게 아니라, -모든- CPU 들이 +-모든- 오퍼레이션들의 순서를 동일하게 인식하게 하는 추가적인 순서 보장을 +만들어냅니다. 반대로, release-acquire 짝의 연결은 이런 추가적인 순서는 +제공하지 않는데, 해당 연결에 들어있는 CPU 들만이 메모리 접근의 조합된 순서에 +대해 동의할 것으로 보장됨을 의미합니다. 
예를 들어, 존경스런 Herman Hollerith +의 코드를 C 코드로 변환하면: int u, v, x, y, z; @@ -1444,8 +1439,7 @@ CPU 2 의 읽기 배리어가 자신의 읽기는 순서를 맞춰줘도, CPU 1 } cpu0(), cpu1(), 그리고 cpu2() 는 smp_store_release()/smp_load_acquire() 쌍의 -연결을 통한 로컬 이행성에 동참하고 있으므로, 다음과 같은 결과는 나오지 않을 -겁니다: +연결에 참여되어 있으므로, 다음과 같은 결과는 나오지 않을 겁니다: r0 == 1 && r1 == 1 && r2 == 1 @@ -1454,8 +1448,9 @@ cpu0() 의 쓰기를 봐야만 하므로, 다음과 같은 결과도 없을 겁� r1 == 1 && r5 == 0 -하지만, release-acquire 타동성은 동참한 CPU 들에만 적용되므로 cpu3() 에는 -적용되지 않습니다. 따라서, 다음과 같은 결과가 가능합니다: +하지만, release-acquire 에 의해 제공되는 순서는 해당 연결에 동참한 CPU 들에만 +적용되므로 cpu3() 에, 적어도 스토어들 외에는 적용되지 않습니다. 따라서, 다음과 +같은 결과가 가능합니다: r0 == 0 && r1 == 1 && r2 == 1 && r3 == 0 && r4 == 0 @@ -1482,8 +1477,8 @@ u 로의 스토어를 cpu1() 의 v 로부터의 로드 뒤에 일어난 것으� 이런 결과는 어떤 것도 재배치 되지 않는, 순차적 일관성을 가진 가상의 시스템에서도 일어날 수 있음을 기억해 두시기 바랍니다. -다시 말하지만, 당신의 코드가 글로벌 이행성을 필요로 한다면, 범용 배리어를 -사용하십시오. +다시 말하지만, 당신의 코드가 모든 오퍼레이션들의 완전한 순서를 필요로 한다면, +범용 배리어를 사용하십시오. ================== @@ -3046,6 +3041,9 @@ AMD64 Architecture Programmer's Manual Volume 2: System Programming Chapter 7.1: Memory-Access Ordering Chapter 7.4: Buffering and Combining Memory Writes +ARM Architecture Reference Manual (ARMv8, for ARMv8-A architecture profile) + Chapter B2: The AArch64 Application Level Memory Model + IA-32 Intel Architecture Software Developer's Manual, Volume 3: System Programming Guide Chapter 7.1: Locked Atomic Operations @@ -3057,6 +3055,8 @@ The SPARC Architecture Manual, Version 9 Appendix D: Formal Specification of the Memory Models Appendix J: Programming with the Memory Models +Storage in the PowerPC (Stone and Fitzgerald) + UltraSPARC Programmer Reference Manual Chapter 5: Memory Accesses and Cacheability Chapter 15: Sparc-V9 Memory Models diff --git a/MAINTAINERS b/MAINTAINERS index 16137acd7f2f..aa71ab52fd76 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -384,6 +384,7 @@ ACPI WMI DRIVER S: Orphan F: drivers/platform/x86/wmi.c +F: include/uapi/linux/wmi.h AD1889 ALSA SOUND DRIVER M: Thibaut Varene <[email protected]> @@ -1589,10 +1590,13 @@ F: drivers/rtc/rtc-armada38x.c ARM/Mediatek RTC DRIVER M: Eddie Huang <[email protected]> +M: Sean Wang <[email protected]> L: [email protected] (moderated for non-subscribers) L: [email protected] (moderated for non-subscribers) S: Maintained +F: Documentation/devicetree/bindings/rtc/rtc-mt7622.txt F: drivers/rtc/rtc-mt6397.c +F: drivers/rtc/rtc-mt7622.c ARM/Mediatek SoC support M: Matthias Brugger <[email protected]> @@ -4030,6 +4034,26 @@ M: "Maciej W. 
Rozycki" <[email protected]> S: Maintained F: drivers/net/fddi/defxx.* +DELL SMBIOS DRIVER +M: Pali Rohár <[email protected]> +M: Mario Limonciello <[email protected]> +S: Maintained +F: drivers/platform/x86/dell-smbios.* + +DELL SMBIOS SMM DRIVER +M: Mario Limonciello <[email protected]> +S: Maintained +F: drivers/platform/x86/dell-smbios-smm.c + +DELL SMBIOS WMI DRIVER +M: Mario Limonciello <[email protected]> +S: Maintained +F: drivers/platform/x86/dell-smbios-wmi.c +F: tools/wmi/dell-smbios-example.c + DELL LAPTOP DRIVER M: Matthew Garrett <[email protected]> M: Pali Rohár <[email protected]> @@ -4059,12 +4083,17 @@ S: Maintained F: Documentation/dcdbas.txt F: drivers/firmware/dcdbas.* -DELL WMI EXTRAS DRIVER +DELL WMI NOTIFICATIONS DRIVER M: Matthew Garrett <[email protected]> M: Pali Rohár <[email protected]> S: Maintained F: drivers/platform/x86/dell-wmi.c +DELL WMI DESCRIPTOR DRIVER +M: Mario Limonciello <[email protected]> +S: Maintained +F: drivers/platform/x86/dell-wmi-descriptor.c + DELTA ST MEDIA DRIVER M: Hugues Fruchet <[email protected]> @@ -7181,6 +7210,11 @@ F: Documentation/wimax/README.i2400m F: drivers/net/wimax/i2400m/ F: include/uapi/linux/wimax/i2400m.h +INTEL WMI THUNDERBOLT FORCE POWER DRIVER +M: Mario Limonciello <[email protected]> +S: Maintained +F: drivers/platform/x86/intel-wmi-thunderbolt.c + INTEL(R) TRACE HUB M: Alexander Shishkin <[email protected]> S: Supported @@ -7442,7 +7476,7 @@ JFS FILESYSTEM M: Dave Kleikamp <[email protected]> W: http://jfs.sourceforge.net/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git +T: git git://github.com/kleikamp/linux-shaggy.git S: Maintained F: Documentation/filesystems/jfs.txt F: fs/jfs/ @@ -9695,12 +9729,11 @@ S: Supported F: drivers/ntb/hw/idt/ NTB INTEL DRIVER -M: Jon Mason <[email protected]> M: Dave Jiang <[email protected]> S: Supported -W: https://github.com/jonmason/ntb/wiki -T: git git://github.com/jonmason/ntb.git +W: https://github.com/davejiang/linux/wiki +T: git https://github.com/davejiang/linux.git F: drivers/ntb/hw/intel/ NTFS FILESYSTEM @@ -10412,6 +10445,8 @@ F: Documentation/switchtec.txt F: Documentation/ABI/testing/sysfs-class-switchtec F: drivers/pci/switch/switchtec* F: include/uapi/linux/switchtec_ioctl.h +F: include/linux/switchtec.h +F: drivers/ntb/hw/mscc/ PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support) M: Thomas Petazzoni <[email protected]> @@ -10630,6 +10665,12 @@ S: Maintained F: crypto/pcrypt.c F: include/crypto/pcrypt.h +PEAQ WMI HOTKEYS DRIVER +M: Hans de Goede <[email protected]> +S: Maintained +F: drivers/platform/x86/peaq-wmi.c + PER-CPU MEMORY ALLOCATOR M: Tejun Heo <[email protected]> M: Christoph Lameter <[email protected]> diff --git a/arch/arm/mach-pxa/cm-x255.c b/arch/arm/mach-pxa/cm-x255.c index b592f79a1742..fa8e7dd4d898 100644 --- a/arch/arm/mach-pxa/cm-x255.c +++ b/arch/arm/mach-pxa/cm-x255.c @@ -14,7 +14,7 @@ #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/mtd/nand-gpio.h> - +#include <linux/gpio/machine.h> #include <linux/spi/spi.h> #include <linux/spi/pxa2xx_spi.h> @@ -176,6 +176,17 @@ static inline void cmx255_init_nor(void) {} #endif #if defined(CONFIG_MTD_NAND_GPIO) || defined(CONFIG_MTD_NAND_GPIO_MODULE) + +static struct gpiod_lookup_table cmx255_nand_gpiod_table = { + .dev_id = "gpio-nand", + .table = { + GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CS, "nce", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("gpio-pxa", GPIO_NAND_CLE, "cle", GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("gpio-pxa", GPIO_NAND_ALE, "ale", 
GPIO_ACTIVE_HIGH), + GPIO_LOOKUP("gpio-pxa", GPIO_NAND_RB, "rdy", GPIO_ACTIVE_HIGH), + }, +}; + static struct resource cmx255_nand_resource[] = { [0] = { .start = PXA_CS1_PHYS, @@ -198,11 +209,6 @@ static struct mtd_partition cmx255_nand_parts[] = { }; static struct gpio_nand_platdata cmx255_nand_platdata = { - .gpio_nce = GPIO_NAND_CS, - .gpio_cle = GPIO_NAND_CLE, - .gpio_ale = GPIO_NAND_ALE, - .gpio_rdy = GPIO_NAND_RB, - .gpio_nwp = -1, .parts = cmx255_nand_parts, .num_parts = ARRAY_SIZE(cmx255_nand_parts), .chip_delay = 25, @@ -220,6 +226,7 @@ static struct platform_device cmx255_nand = { static void __init cmx255_init_nand(void) { + gpiod_add_lookup_table(&cmx255_nand_gpiod_table); platform_device_register(&cmx255_nand); } #else diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common index d9280482a2f8..c68add8df3ae 100644 --- a/arch/um/Kconfig.common +++ b/arch/um/Kconfig.common @@ -10,7 +10,6 @@ config UML select HAVE_DEBUG_KMEMLEAK select GENERIC_IRQ_SHOW select GENERIC_CPU_DEVICES - select GENERIC_IO select GENERIC_CLOCKEVENTS select HAVE_GCC_PLUGINS select TTY # Needed for line.c diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c b/crypto/asymmetric_keys/pkcs7_key_type.c index 1063b644efcd..e284d9cb9237 100644 --- a/crypto/asymmetric_keys/pkcs7_key_type.c +++ b/crypto/asymmetric_keys/pkcs7_key_type.c @@ -19,6 +19,7 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("PKCS#7 testing key type"); +MODULE_AUTHOR("Red Hat, Inc."); static unsigned pkcs7_usage; module_param_named(usage, pkcs7_usage, uint, S_IWUSR | S_IRUGO); diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index d140d8bb2c96..c1ca1e86f5c4 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c @@ -11,6 +11,7 @@ #define pr_fmt(fmt) "PKCS7: "fmt #include <linux/kernel.h> +#include <linux/module.h> #include <linux/export.h> #include <linux/slab.h> #include <linux/err.h> @@ -19,6 +20,10 @@ #include "pkcs7_parser.h" #include "pkcs7-asn1.h" +MODULE_DESCRIPTION("PKCS#7 parser"); +MODULE_AUTHOR("Red Hat, Inc."); +MODULE_LICENSE("GPL"); + struct pkcs7_parse_context { struct pkcs7_message *msg; /* Message being constructed */ struct pkcs7_signed_info *sinfo; /* SignedInfo being constructed */ diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index d916235d6cf5..bc3035ef27a2 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c @@ -22,6 +22,8 @@ #include <crypto/public_key.h> #include <crypto/akcipher.h> +MODULE_DESCRIPTION("In-software asymmetric public-key subtype"); +MODULE_AUTHOR("Red Hat, Inc."); MODULE_LICENSE("GPL"); /* diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c index eea71dc9686c..c9013582c026 100644 --- a/crypto/asymmetric_keys/x509_public_key.c +++ b/crypto/asymmetric_keys/x509_public_key.c @@ -275,4 +275,5 @@ module_init(x509_key_init); module_exit(x509_key_exit); MODULE_DESCRIPTION("X.509 certificate parser"); +MODULE_AUTHOR("Red Hat, Inc."); MODULE_LICENSE("GPL"); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index adc877dfef5c..38fc5f397fde 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -348,7 +348,6 @@ struct rbd_client_id { struct rbd_mapping { u64 size; u64 features; - bool read_only; }; /* @@ -450,12 +449,11 @@ static DEFINE_IDA(rbd_dev_id_ida); static struct workqueue_struct *rbd_wq; /* - * Default to false for now, as single-major requires >= 0.75 version of - * userspace rbd 
utility. + * single-major requires >= 0.75 version of userspace rbd utility. */ -static bool single_major = false; +static bool single_major = true; module_param(single_major, bool, S_IRUGO); -MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)"); +MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)"); static int rbd_img_request_submit(struct rbd_img_request *img_request); @@ -608,9 +606,6 @@ static int rbd_open(struct block_device *bdev, fmode_t mode) struct rbd_device *rbd_dev = bdev->bd_disk->private_data; bool removing = false; - if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only) - return -EROFS; - spin_lock_irq(&rbd_dev->lock); if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) removing = true; @@ -640,46 +635,24 @@ static void rbd_release(struct gendisk *disk, fmode_t mode) static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg) { - int ret = 0; - int val; - bool ro; - bool ro_changed = false; + int ro; - /* get_user() may sleep, so call it before taking rbd_dev->lock */ - if (get_user(val, (int __user *)(arg))) + if (get_user(ro, (int __user *)arg)) return -EFAULT; - ro = val ? true : false; - /* Snapshot doesn't allow to write*/ + /* Snapshots can't be marked read-write */ if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro) return -EROFS; - spin_lock_irq(&rbd_dev->lock); - /* prevent others open this device */ - if (rbd_dev->open_count > 1) { - ret = -EBUSY; - goto out; - } - - if (rbd_dev->mapping.read_only != ro) { - rbd_dev->mapping.read_only = ro; - ro_changed = true; - } - -out: - spin_unlock_irq(&rbd_dev->lock); - /* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */ - if (ret == 0 && ro_changed) - set_disk_ro(rbd_dev->disk, ro ? 1 : 0); - - return ret; + /* Let blkdev_roset() handle it */ + return -ENOTTY; } static int rbd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct rbd_device *rbd_dev = bdev->bd_disk->private_data; - int ret = 0; + int ret; switch (cmd) { case BLKROSET: @@ -4050,15 +4023,8 @@ static void rbd_queue_workfn(struct work_struct *work) goto err_rq; } - /* Only reads are allowed to a read-only device */ - - if (op_type != OBJ_OP_READ) { - if (rbd_dev->mapping.read_only) { - result = -EROFS; - goto err_rq; - } - rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP); - } + rbd_assert(op_type == OBJ_OP_READ || + rbd_dev->spec->snap_id == CEPH_NOSNAP); /* * Quit early if the mapped snapshot no longer exists. 
It's @@ -4423,7 +4389,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) /* enable the discard support */ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); q->limits.discard_granularity = segment_size; - q->limits.discard_alignment = segment_size; blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE); @@ -5994,7 +5959,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) goto err_out_disk; set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); - set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only); + set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only); ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id); if (ret) @@ -6145,7 +6110,6 @@ static ssize_t do_rbd_add(struct bus_type *bus, struct rbd_options *rbd_opts = NULL; struct rbd_spec *spec = NULL; struct rbd_client *rbdc; - bool read_only; int rc; if (!try_module_get(THIS_MODULE)) @@ -6194,11 +6158,8 @@ static ssize_t do_rbd_add(struct bus_type *bus, } /* If we are mapping a snapshot it must be marked read-only */ - - read_only = rbd_dev->opts->read_only; if (rbd_dev->spec->snap_id != CEPH_NOSNAP) - read_only = true; - rbd_dev->mapping.read_only = read_only; + rbd_dev->opts->read_only = true; rc = rbd_dev_device_setup(rbd_dev); if (rc) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index c21adf60a7f2..057e1ecd83ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -59,12 +59,6 @@ static bool check_atom_bios(uint8_t *bios, size_t size) return false; } - tmp = bios[0x18] | (bios[0x19] << 8); - if (bios[tmp + 0x14] != 0x0) { - DRM_INFO("Not an x86 BIOS ROM\n"); - return false; - } - bios_header_start = bios[0x48] | (bios[0x49] << 8); if (!bios_header_start) { DRM_INFO("Can't locate bios header\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 6c78623e1386..a57cec737c18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1495,8 +1495,11 @@ out: memset(wait, 0, sizeof(*wait)); wait->out.status = (r > 0); wait->out.first_signaled = first; - /* set return value 0 to indicate success */ - r = array[first]->error; + + if (first < fence_count && array[first]) + r = array[first]->error; + else + r = 0; err_free_fence_array: for (i = 0; i < fence_count; i++) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 2d792cdc094c..2c574374d9b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1837,6 +1837,9 @@ static int amdgpu_fini(struct amdgpu_device *adev) adev->ip_blocks[i].status.hw = false; } + if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) + amdgpu_ucode_fini_bo(adev); + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.sw) continue; @@ -3261,9 +3264,9 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, pm_pg_lock = (*pos >> 23) & 1; if (*pos & (1ULL << 62)) { - se_bank = (*pos >> 24) & 0x3FF; - sh_bank = (*pos >> 34) & 0x3FF; - instance_bank = (*pos >> 44) & 0x3FF; + se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24; + sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34; + instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44; if (se_bank == 0x3FF) se_bank = 0xFFFFFFFF; @@ -3337,9 +3340,9 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, pm_pg_lock = 
(*pos >> 23) & 1; if (*pos & (1ULL << 62)) { - se_bank = (*pos >> 24) & 0x3FF; - sh_bank = (*pos >> 34) & 0x3FF; - instance_bank = (*pos >> 44) & 0x3FF; + se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24; + sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34; + instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44; if (se_bank == 0x3FF) se_bank = 0xFFFFFFFF; @@ -3687,12 +3690,12 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf, return -EINVAL; /* decode offset */ - offset = (*pos & 0x7F); - se = ((*pos >> 7) & 0xFF); - sh = ((*pos >> 15) & 0xFF); - cu = ((*pos >> 23) & 0xFF); - wave = ((*pos >> 31) & 0xFF); - simd = ((*pos >> 37) & 0xFF); + offset = (*pos & GENMASK_ULL(6, 0)); + se = (*pos & GENMASK_ULL(14, 7)) >> 7; + sh = (*pos & GENMASK_ULL(22, 15)) >> 15; + cu = (*pos & GENMASK_ULL(30, 23)) >> 23; + wave = (*pos & GENMASK_ULL(36, 31)) >> 31; + simd = (*pos & GENMASK_ULL(44, 37)) >> 37; /* switch to the specific se/sh/cu */ mutex_lock(&adev->grbm_idx_mutex); @@ -3737,14 +3740,14 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf, return -EINVAL; /* decode offset */ - offset = (*pos & 0xFFF); /* in dwords */ - se = ((*pos >> 12) & 0xFF); - sh = ((*pos >> 20) & 0xFF); - cu = ((*pos >> 28) & 0xFF); - wave = ((*pos >> 36) & 0xFF); - simd = ((*pos >> 44) & 0xFF); - thread = ((*pos >> 52) & 0xFF); - bank = ((*pos >> 60) & 1); + offset = *pos & GENMASK_ULL(11, 0); + se = (*pos & GENMASK_ULL(19, 12)) >> 12; + sh = (*pos & GENMASK_ULL(27, 20)) >> 20; + cu = (*pos & GENMASK_ULL(35, 28)) >> 28; + wave = (*pos & GENMASK_ULL(43, 36)) >> 36; + simd = (*pos & GENMASK_ULL(51, 44)) >> 44; + thread = (*pos & GENMASK_ULL(59, 52)) >> 52; + bank = (*pos & GENMASK_ULL(61, 60)) >> 60; data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL); if (!data) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index a418df1b9422..e87eedcc0da9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -63,6 +63,11 @@ retry: flags, NULL, resv, 0, &bo); if (r) { if (r != -ERESTARTSYS) { + if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) { + flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + goto retry; + } + if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) { initial_domain |= AMDGPU_GEM_DOMAIN_GTT; goto retry; @@ -323,7 +328,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, bo->tbo.ttm->pages); if (r) - goto unlock_mmap_sem; + goto release_object; r = amdgpu_bo_reserve(bo, true); if (r) @@ -348,9 +353,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, free_pages: release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages); -unlock_mmap_sem: - up_read(¤t->mm->mmap_sem); - release_object: drm_gem_object_put_unlocked(gobj); @@ -556,9 +558,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, if (args->va_address < AMDGPU_VA_RESERVED_SIZE) { dev_err(&dev->pdev->dev, - "va_address 0x%lX is in reserved area 0x%X\n", - (unsigned long)args->va_address, - AMDGPU_VA_RESERVED_SIZE); + "va_address 0x%LX is in reserved area 0x%LX\n", + args->va_address, AMDGPU_VA_RESERVED_SIZE); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 33535d347734..00e0ce10862f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -71,12 +71,6 @@ static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man) { 
struct amdgpu_gtt_mgr *mgr = man->priv; - spin_lock(&mgr->lock); - if (!drm_mm_clean(&mgr->mm)) { - spin_unlock(&mgr->lock); - return -EBUSY; - } - drm_mm_takedown(&mgr->mm); spin_unlock(&mgr->lock); kfree(mgr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index d6df5728df7f..6c570d4e4516 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -946,6 +946,10 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, struct amdgpu_device *adev = dev_get_drvdata(dev); umode_t effective_mode = attr->mode; + /* no skipping for powerplay */ + if (adev->powerplay.cgs_device) + return effective_mode; + /* Skip limit attributes if DPM is not enabled */ if (!adev->pm.dpm_enabled && (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 5f5aa5fddc16..033fba2def6f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -164,9 +164,6 @@ static int amdgpu_pp_hw_fini(void *handle) ret = adev->powerplay.ip_funcs->hw_fini( adev->powerplay.pp_handle); - if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) - amdgpu_ucode_fini_bo(adev); - return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 90af8e82b16a..ae9c106979d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c @@ -169,10 +169,14 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev, int flags) { struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); + struct dma_buf *buf; if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) || bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) return ERR_PTR(-EPERM); - return drm_gem_prime_export(dev, gobj, flags); + buf = drm_gem_prime_export(dev, gobj, flags); + if (!IS_ERR(buf)) + buf->file->f_mapping = dev->anon_inode->i_mapping; + return buf; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 447d446b5015..7714f4a6c8b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -442,8 +442,6 @@ static int psp_hw_fini(void *handle) if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) return 0; - amdgpu_ucode_fini_bo(adev); - psp_ring_destroy(psp, PSP_RING_TYPE__KM); amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index aa914256b4bc..bae77353447b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -94,7 +94,8 @@ struct amdgpu_bo_list_entry; #define AMDGPU_MMHUB 1 /* hardcode that limit for now */ -#define AMDGPU_VA_RESERVED_SIZE (8 << 20) +#define AMDGPU_VA_RESERVED_SIZE (8ULL << 20) + /* max vmids dedicated for process */ #define AMDGPU_VM_MAX_RESERVED_VMID 1 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 26e900627971..4acca92f6a52 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -68,11 +68,6 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man) struct amdgpu_vram_mgr *mgr = man->priv; spin_lock(&mgr->lock); - if (!drm_mm_clean(&mgr->mm)) { - spin_unlock(&mgr->lock); - return -EBUSY; - } - drm_mm_takedown(&mgr->mm); spin_unlock(&mgr->lock); kfree(mgr); 
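The amdgpu debugfs register/wave/GPR hunks above replace open-coded shift-and-mask pairs with GENMASK_ULL(), which builds a 64-bit mask covering bits l through h inclusive, so each field is masked to its intended width before being shifted down. A small self-contained sketch of the same decode pattern follows; the macro is redefined locally only so the snippet builds outside the kernel tree, where it normally comes from <linux/bitops.h>.

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's GENMASK_ULL(h, l): bits l..h set. */
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
	/* Pack se=3, sh=1, offset=0x40 the way the debugfs offset does. */
	uint64_t pos = (3ULL << 7) | (1ULL << 15) | 0x40;

	uint64_t offset = pos & GENMASK_ULL(6, 0);
	uint64_t se     = (pos & GENMASK_ULL(14, 7)) >> 7;
	uint64_t sh     = (pos & GENMASK_ULL(22, 15)) >> 15;

	printf("offset=0x%llx se=%llu sh=%llu\n",
	       (unsigned long long)offset,
	       (unsigned long long)se,
	       (unsigned long long)sh);
	return 0;
}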
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 00868764a0dd..5c8a7a48a4ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4670,6 +4670,14 @@ static int gfx_v7_0_sw_fini(void *handle) gfx_v7_0_cp_compute_fini(adev); gfx_v7_0_rlc_fini(adev); gfx_v7_0_mec_fini(adev); + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); + if (adev->gfx.rlc.cp_table_size) { + amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, + &adev->gfx.rlc.cp_table_gpu_addr, + (void **)&adev->gfx.rlc.cp_table_ptr); + } gfx_v7_0_free_microcode(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index b8002ac3e536..9ecdf621a74a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -2118,6 +2118,15 @@ static int gfx_v8_0_sw_fini(void *handle) gfx_v8_0_mec_fini(adev); gfx_v8_0_rlc_fini(adev); + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); + if ((adev->asic_type == CHIP_CARRIZO) || + (adev->asic_type == CHIP_STONEY)) { + amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, + &adev->gfx.rlc.cp_table_gpu_addr, + (void **)&adev->gfx.rlc.cp_table_ptr); + } gfx_v8_0_free_microcode(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 7f15bb2c5233..da43813d67a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -207,6 +207,12 @@ static const u32 golden_settings_gc_9_1_rv1[] = SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800 }; +static const u32 golden_settings_gc_9_x_common[] = +{ + SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_INDEX), 0xffffffff, 0x00000000, + SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_DATA), 0xffffffff, 0x2544c382 +}; + #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042 @@ -242,6 +248,9 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev) default: break; } + + amdgpu_program_register_sequence(adev, golden_settings_gc_9_x_common, + (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common)); } static void gfx_v9_0_scratch_init(struct amdgpu_device *adev) @@ -988,12 +997,22 @@ static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, start + SQIND_WAVE_SGPRS_OFFSET, size, dst); } +static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd, + uint32_t wave, uint32_t thread, + uint32_t start, uint32_t size, + uint32_t *dst) +{ + wave_read_regs( + adev, simd, wave, thread, + start + SQIND_WAVE_VGPRS_OFFSET, size, dst); +} static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter, .select_se_sh = &gfx_v9_0_select_se_sh, .read_wave_data = &gfx_v9_0_read_wave_data, .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs, + .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs, }; static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) @@ -1449,6 +1468,14 @@ static int gfx_v9_0_sw_fini(void *handle) gfx_v9_0_mec_fini(adev); gfx_v9_0_ngg_fini(adev); + amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, + &adev->gfx.rlc.clear_state_gpu_addr, + (void **)&adev->gfx.rlc.cs_ptr); + if (adev->asic_type == CHIP_RAVEN) { + amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, + &adev->gfx.rlc.cp_table_gpu_addr, + (void 
**)&adev->gfx.rlc.cp_table_ptr); + } gfx_v9_0_free_microcode(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 621699331e09..c8f1aebeac7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -392,7 +392,16 @@ static int gmc_v9_0_early_init(void *handle) static int gmc_v9_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 }; + /* + * The latest engine allocation on gfx9 is: + * Engine 0, 1: idle + * Engine 2, 3: firmware + * Engine 4~13: amdgpu ring, subject to change when ring number changes + * Engine 14~15: idle + * Engine 16: kfd tlb invalidation + * Engine 17: Gart flushes + */ + unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 }; unsigned i; for(i = 0; i < adev->num_rings; ++i) { @@ -405,9 +414,9 @@ static int gmc_v9_0_late_init(void *handle) ring->funcs->vmhub); } - /* Engine 17 is used for GART flushes */ + /* Engine 16 is used for KFD and 17 for GART flushes */ for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i) - BUG_ON(vm_inv_eng[i] > 17); + BUG_ON(vm_inv_eng[i] > 16); return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index a129bc5b1844..c6febbf0bf69 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -1486,7 +1486,7 @@ int atomctrl_get_leakage_vddc_base_on_leakage(struct pp_hwmgr *hwmgr, if (vddci_id_buf[i] == virtual_voltage_id) { for (j = 0; j < profile->ucLeakageBinNum; j++) { if (efuse_voltage_id <= leakage_bin[j]) { - *vddci = vddci_buf[j * profile->ucElbVDDC_Num + i]; + *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i]; break; } } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c index d1af1483c69b..a651ebcf44fd 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/process_pptables_v1_0.c @@ -830,9 +830,9 @@ static int init_over_drive_limits( const ATOM_Tonga_POWERPLAYTABLE *powerplay_table) { hwmgr->platform_descriptor.overdriveLimit.engineClock = - le16_to_cpu(powerplay_table->ulMaxODEngineClock); + le32_to_cpu(powerplay_table->ulMaxODEngineClock); hwmgr->platform_descriptor.overdriveLimit.memoryClock = - le16_to_cpu(powerplay_table->ulMaxODMemoryClock); + le32_to_cpu(powerplay_table->ulMaxODMemoryClock); hwmgr->platform_descriptor.minOverdriveVDDC = 0; hwmgr->platform_descriptor.maxOverdriveVDDC = 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 4466469cf8ab..e33ec7fc5d09 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -3778,7 +3778,7 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) "Trying to Unfreeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr, - PPSMC_MSG_SCLKDPM_UnfreezeLevel), + PPSMC_MSG_MCLKDPM_UnfreezeLevel), "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", return -EINVAL); } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 4f79c21f27ed..f8d838c2c8ee 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -753,6 +753,7 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) uint32_t config_telemetry = 0; struct pp_atomfwctrl_voltage_table vol_table; struct cgs_system_info sys_info = {0}; + uint32_t reg; data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL); if (data == NULL) @@ -859,6 +860,16 @@ static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) advanceFanControlParameters.usFanPWMMinLimit * hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100; + reg = soc15_get_register_offset(DF_HWID, 0, + mmDF_CS_AON0_DramBaseAddress0_BASE_IDX, + mmDF_CS_AON0_DramBaseAddress0); + data->mem_channels = (cgs_read_register(hwmgr->device, reg) & + DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >> + DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; + PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number), + "Mem Channel Index Exceeded maximum!", + return -EINVAL); + return result; } @@ -1777,7 +1788,7 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.mem_table); int result = 0; - uint32_t i, j, reg, mem_channels; + uint32_t i, j; for (i = 0; i < dpm_table->count; i++) { result = vega10_populate_single_memory_level(hwmgr, @@ -1801,20 +1812,10 @@ static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) i++; } - reg = soc15_get_register_offset(DF_HWID, 0, - mmDF_CS_AON0_DramBaseAddress0_BASE_IDX, - mmDF_CS_AON0_DramBaseAddress0); - mem_channels = (cgs_read_register(hwmgr->device, reg) & - DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >> - DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT; - PP_ASSERT_WITH_CODE(mem_channels < ARRAY_SIZE(channel_number), - "Mem Channel Index Exceeded maximum!", - return -1); - - pp_table->NumMemoryChannels = cpu_to_le16(mem_channels); + pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels); pp_table->MemoryChannelWidth = - cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH * - channel_number[mem_channels]); + (uint16_t)(HBM_MEMORY_CHANNEL_WIDTH * + channel_number[data->mem_channels]); pp_table->LowestUclkReservedForUlv = (uint8_t)(data->lowest_uclk_reserved_for_ulv); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h index b4b461c3b8ee..8f7358cc3327 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.h @@ -389,6 +389,7 @@ struct vega10_hwmgr { uint32_t config_telemetry; uint32_t smu_version; uint32_t acg_loop_state; + uint32_t mem_channels; }; #define VEGA10_DPM2_NEAR_TDP_DEC 10 diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 704fc8934616..25f4b2e9a44f 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -234,6 +234,10 @@ int drm_connector_init(struct drm_device *dev, config->link_status_property, 0); + drm_object_attach_property(&connector->base, + config->non_desktop_property, + 0); + if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { drm_object_attach_property(&connector->base, config->prop_crtc_id, 0); } @@ -763,6 +767,10 @@ DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, * value of link-status is "GOOD". If something fails during or after modeset, * the kernel driver may set this to "BAD" and issue a hotplug uevent. Drivers * should update this value using drm_mode_connector_set_link_status_property(). 
+ * non_desktop: + * Indicates the output should be ignored for purposes of displaying a + * standard desktop environment or console. This is most likely because + * the output device is not rectilinear. * * Connectors also have one standardized atomic property: * @@ -811,6 +819,11 @@ int drm_connector_create_standard_properties(struct drm_device *dev) return -ENOMEM; dev->mode_config.link_status_property = prop; + prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE, "non-desktop"); + if (!prop) + return -ENOMEM; + dev->mode_config.non_desktop_property = prop; + return 0; } @@ -1194,6 +1207,10 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, if (edid) size = EDID_LENGTH * (1 + edid->extensions); + drm_object_property_set_value(&connector->base, + dev->mode_config.non_desktop_property, + connector->display_info.non_desktop); + ret = drm_property_replace_global_blob(dev, &connector->edid_blob_ptr, size, diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 00ddabfbf980..2e8fb51282ef 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -82,6 +82,8 @@ #define EDID_QUIRK_FORCE_6BPC (1 << 10) /* Force 10bpc */ #define EDID_QUIRK_FORCE_10BPC (1 << 11) +/* Non desktop display (i.e. HMD) */ +#define EDID_QUIRK_NON_DESKTOP (1 << 12) struct detailed_mode_closure { struct drm_connector *connector; @@ -157,6 +159,9 @@ static const struct edid_quirk { /* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/ { "ETR", 13896, EDID_QUIRK_FORCE_8BPC }, + + /* HTC Vive VR Headset */ + { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP }, }; /* @@ -4393,7 +4398,7 @@ static void drm_parse_cea_ext(struct drm_connector *connector, } static void drm_add_display_info(struct drm_connector *connector, - struct edid *edid) + struct edid *edid, u32 quirks) { struct drm_display_info *info = &connector->display_info; @@ -4407,6 +4412,8 @@ static void drm_add_display_info(struct drm_connector *connector, info->max_tmds_clock = 0; info->dvi_dual = false; + info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP); + if (edid->revision < 3) return; @@ -4627,7 +4634,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) * To avoid multiple parsing of same block, lets parse that map * from sink info, before parsing CEA modes. */ - drm_add_display_info(connector, edid); + drm_add_display_info(connector, edid, quirks); /* * EDID spec says modes should be preferred in this order: diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 116d1f1337c7..07374008f146 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -2033,6 +2033,9 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict) { bool enable; + if (connector->display_info.non_desktop) + return false; + if (strict) enable = connector->status == connector_status_connected; else @@ -2052,7 +2055,8 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper, connector = fb_helper->connector_info[i]->connector; enabled[i] = drm_connector_enabled(connector, true); DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id, - enabled[i] ? "yes" : "no"); + connector->display_info.non_desktop ? "non desktop" : enabled[i] ? 
"yes" : "no"); + any_enabled |= enabled[i]; } diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 58e9e0601a61..faf17b83b910 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -210,7 +210,6 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev) return PTR_ERR(fsl_dev->state); } - clk_disable_unprepare(fsl_dev->pix_clk); clk_disable_unprepare(fsl_dev->clk); return 0; @@ -233,6 +232,7 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) if (fsl_dev->tcon) fsl_tcon_bypass_enable(fsl_dev->tcon); fsl_dcu_drm_init_planes(fsl_dev->drm); + enable_irq(fsl_dev->irq); drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state); console_lock(); @@ -240,7 +240,6 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) console_unlock(); drm_kms_helper_poll_enable(fsl_dev->drm); - enable_irq(fsl_dev->irq); return 0; } diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index edd7d8127d19..c54806d08dd7 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -102,7 +102,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev, { struct drm_encoder *encoder = &fsl_dev->encoder; struct drm_connector *connector = &fsl_dev->connector.base; - struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config; int ret; fsl_dev->connector.encoder = encoder; @@ -122,10 +121,6 @@ static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev, if (ret < 0) goto err_sysfs; - drm_object_property_set_value(&connector->base, - mode_config->dpms_property, - DRM_MODE_DPMS_OFF); - ret = drm_panel_attach(panel, connector); if (ret) { dev_err(fsl_dev->dev, "failed to attach panel\n"); diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 53e0b24beda6..9a9961802f5c 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -115,7 +115,7 @@ static void imx_drm_crtc_reset(struct drm_crtc *crtc) if (crtc->state) { if (crtc->state->mode_blob) - drm_property_unreference_blob(crtc->state->mode_blob); + drm_property_blob_put(crtc->state->mode_blob); state = to_imx_crtc_state(crtc->state); memset(state, 0, sizeof(*state)); diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 8def97d75030..aedecda9728a 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -183,7 +183,7 @@ static int imx_pd_register(struct drm_device *drm, &imx_pd_connector_helper_funcs); drm_connector_init(drm, &imxpd->connector, &imx_pd_connector_funcs, - DRM_MODE_CONNECTOR_VGA); + DRM_MODE_CONNECTOR_DPI); } if (imxpd->panel) diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 2fcf805d3a16..33b821d6d018 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -245,7 +245,6 @@ static int radeonfb_create(struct drm_fb_helper *helper, } info->par = rfbdev; - info->skip_vt_switch = true; ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); if (ret) { diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index 4bcacd3f4861..b0a1dedac802 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -174,9 +174,9 @@ struct tegra_sor { struct reset_control *rst; struct clk *clk_parent; - struct clk *clk_brick; struct clk *clk_safe; - struct clk *clk_src; + struct clk *clk_out; + 
struct clk *clk_pad; struct clk *clk_dp; struct clk *clk; @@ -255,7 +255,7 @@ static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent) clk_disable_unprepare(sor->clk); - err = clk_set_parent(sor->clk, parent); + err = clk_set_parent(sor->clk_out, parent); if (err < 0) return err; @@ -266,24 +266,24 @@ static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent) return 0; } -struct tegra_clk_sor_brick { +struct tegra_clk_sor_pad { struct clk_hw hw; struct tegra_sor *sor; }; -static inline struct tegra_clk_sor_brick *to_brick(struct clk_hw *hw) +static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw) { - return container_of(hw, struct tegra_clk_sor_brick, hw); + return container_of(hw, struct tegra_clk_sor_pad, hw); } -static const char * const tegra_clk_sor_brick_parents[] = { +static const char * const tegra_clk_sor_pad_parents[] = { "pll_d2_out0", "pll_dp" }; -static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index) +static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index) { - struct tegra_clk_sor_brick *brick = to_brick(hw); - struct tegra_sor *sor = brick->sor; + struct tegra_clk_sor_pad *pad = to_pad(hw); + struct tegra_sor *sor = pad->sor; u32 value; value = tegra_sor_readl(sor, SOR_CLK_CNTRL); @@ -304,10 +304,10 @@ static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index) return 0; } -static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw) +static u8 tegra_clk_sor_pad_get_parent(struct clk_hw *hw) { - struct tegra_clk_sor_brick *brick = to_brick(hw); - struct tegra_sor *sor = brick->sor; + struct tegra_clk_sor_pad *pad = to_pad(hw); + struct tegra_sor *sor = pad->sor; u8 parent = U8_MAX; u32 value; @@ -328,33 +328,33 @@ static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw) return parent; } -static const struct clk_ops tegra_clk_sor_brick_ops = { - .set_parent = tegra_clk_sor_brick_set_parent, - .get_parent = tegra_clk_sor_brick_get_parent, +static const struct clk_ops tegra_clk_sor_pad_ops = { + .set_parent = tegra_clk_sor_pad_set_parent, + .get_parent = tegra_clk_sor_pad_get_parent, }; -static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor, - const char *name) +static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor, + const char *name) { - struct tegra_clk_sor_brick *brick; + struct tegra_clk_sor_pad *pad; struct clk_init_data init; struct clk *clk; - brick = devm_kzalloc(sor->dev, sizeof(*brick), GFP_KERNEL); - if (!brick) + pad = devm_kzalloc(sor->dev, sizeof(*pad), GFP_KERNEL); + if (!pad) return ERR_PTR(-ENOMEM); - brick->sor = sor; + pad->sor = sor; init.name = name; init.flags = 0; - init.parent_names = tegra_clk_sor_brick_parents; - init.num_parents = ARRAY_SIZE(tegra_clk_sor_brick_parents); - init.ops = &tegra_clk_sor_brick_ops; + init.parent_names = tegra_clk_sor_pad_parents; + init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents); + init.ops = &tegra_clk_sor_pad_ops; - brick->hw.init = &init; + pad->hw.init = &init; - clk = devm_clk_register(sor->dev, &brick->hw); + clk = devm_clk_register(sor->dev, &pad->hw); return clk; } @@ -998,8 +998,10 @@ static int tegra_sor_power_down(struct tegra_sor *sor) /* switch to safe parent clock */ err = tegra_sor_set_parent_clock(sor, sor->clk_safe); - if (err < 0) + if (err < 0) { dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); + return err; + } value = tegra_sor_readl(sor, SOR_DP_PADCTL0); value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 | @@ -2007,8 +2009,10 @@ static void 
tegra_sor_hdmi_enable(struct drm_encoder *encoder) /* switch to safe parent clock */ err = tegra_sor_set_parent_clock(sor, sor->clk_safe); - if (err < 0) + if (err < 0) { dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); + return; + } div = clk_get_rate(sor->clk) / 1000000 * 4; @@ -2111,13 +2115,17 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) tegra_sor_writel(sor, value, SOR_XBAR_CTRL); /* switch to parent clock */ - err = clk_set_parent(sor->clk_src, sor->clk_parent); - if (err < 0) - dev_err(sor->dev, "failed to set source clock: %d\n", err); - - err = tegra_sor_set_parent_clock(sor, sor->clk_src); - if (err < 0) + err = clk_set_parent(sor->clk, sor->clk_parent); + if (err < 0) { dev_err(sor->dev, "failed to set parent clock: %d\n", err); + return; + } + + err = tegra_sor_set_parent_clock(sor, sor->clk_pad); + if (err < 0) { + dev_err(sor->dev, "failed to set pad clock: %d\n", err); + return; + } value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe); @@ -2628,11 +2636,24 @@ static int tegra_sor_probe(struct platform_device *pdev) } if (sor->soc->supports_hdmi || sor->soc->supports_dp) { - sor->clk_src = devm_clk_get(&pdev->dev, "source"); - if (IS_ERR(sor->clk_src)) { - err = PTR_ERR(sor->clk_src); - dev_err(sor->dev, "failed to get source clock: %d\n", - err); + struct device_node *np = pdev->dev.of_node; + const char *name; + + /* + * For backwards compatibility with Tegra210 device trees, + * fall back to the old clock name "source" if the new "out" + * clock is not available. + */ + if (of_property_match_string(np, "clock-names", "out") < 0) + name = "source"; + else + name = "out"; + + sor->clk_out = devm_clk_get(&pdev->dev, name); + if (IS_ERR(sor->clk_out)) { + err = PTR_ERR(sor->clk_out); + dev_err(sor->dev, "failed to get %s clock: %d\n", + name, err); goto remove; } } @@ -2658,16 +2679,60 @@ static int tegra_sor_probe(struct platform_device *pdev) goto remove; } + /* + * Starting with Tegra186, the BPMP provides an implementation for + * the pad output clock, so we have to look it up from device tree. + */ + sor->clk_pad = devm_clk_get(&pdev->dev, "pad"); + if (IS_ERR(sor->clk_pad)) { + if (sor->clk_pad != ERR_PTR(-ENOENT)) { + err = PTR_ERR(sor->clk_pad); + goto remove; + } + + /* + * If the pad output clock is not available, then we assume + * we're on Tegra210 or earlier and have to provide our own + * implementation. + */ + sor->clk_pad = NULL; + } + + /* + * The bootloader may have set up the SOR such that it's module clock + * is sourced by one of the display PLLs. However, that doesn't work + * without properly having set up other bits of the SOR. + */ + err = clk_set_parent(sor->clk_out, sor->clk_safe); + if (err < 0) { + dev_err(&pdev->dev, "failed to use safe clock: %d\n", err); + goto remove; + } + platform_set_drvdata(pdev, sor); pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); - sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick"); - pm_runtime_put(&pdev->dev); + /* + * On Tegra210 and earlier, provide our own implementation for the + * pad output clock. 
+ */ + if (!sor->clk_pad) { + err = pm_runtime_get_sync(&pdev->dev); + if (err < 0) { + dev_err(&pdev->dev, "failed to get runtime PM: %d\n", + err); + goto remove; + } + + sor->clk_pad = tegra_clk_sor_pad_register(sor, + "sor1_pad_clkout"); + pm_runtime_put(&pdev->dev); + } - if (IS_ERR(sor->clk_brick)) { - err = PTR_ERR(sor->clk_brick); - dev_err(&pdev->dev, "failed to register SOR clock: %d\n", err); + if (IS_ERR(sor->clk_pad)) { + err = PTR_ERR(sor->clk_pad); + dev_err(&pdev->dev, "failed to register SOR pad clock: %d\n", + err); goto remove; } diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig index 28fed7e206d0..81ac82455ce4 100644 --- a/drivers/gpu/drm/tilcdc/Kconfig +++ b/drivers/gpu/drm/tilcdc/Kconfig @@ -12,14 +12,3 @@ config DRM_TILCDC controller, for example AM33xx in beagle-bone, DA8xx, or OMAP-L1xx. This driver replaces the FB_DA8XX fbdev driver. -config DRM_TILCDC_SLAVE_COMPAT - bool "Support device tree blobs using TI LCDC Slave binding" - depends on DRM_TILCDC - default y - select OF_RESOLVE - select OF_OVERLAY - help - Choose this option if you need a kernel that is compatible - with device tree blobs using the obsolete "ti,tilcdc,slave" - binding. If you find "ti,tilcdc,slave"-string from your DTB, - you probably need this. Otherwise you do not. diff --git a/drivers/gpu/drm/tilcdc/Makefile b/drivers/gpu/drm/tilcdc/Makefile index b9e1108e5b4e..87f9480e43b0 100644 --- a/drivers/gpu/drm/tilcdc/Makefile +++ b/drivers/gpu/drm/tilcdc/Makefile @@ -3,9 +3,6 @@ ifeq (, $(findstring -W,$(EXTRA_CFLAGS))) ccflags-y += -Werror endif -obj-$(CONFIG_DRM_TILCDC_SLAVE_COMPAT) += tilcdc_slave_compat.o \ - tilcdc_slave_compat.dtb.o - tilcdc-y := \ tilcdc_plane.o \ tilcdc_crtc.o \ diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c deleted file mode 100644 index d2b9e5f04724..000000000000 --- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.c +++ /dev/null @@ -1,264 +0,0 @@ -/* - * Copyright (C) 2015 Texas Instruments - * Author: Jyri Sarha <[email protected]> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - */ - -/* - * To support the old "ti,tilcdc,slave" binding the binding has to be - * transformed to the new external encoder binding. 
- */ - -#include <linux/kernel.h> -#include <linux/of.h> -#include <linux/of_graph.h> -#include <linux/of_fdt.h> -#include <linux/slab.h> -#include <linux/list.h> - -#include "tilcdc_slave_compat.h" - -struct kfree_table { - int total; - int num; - void **table; -}; - -static int __init kfree_table_init(struct kfree_table *kft) -{ - kft->total = 32; - kft->num = 0; - kft->table = kmalloc(kft->total * sizeof(*kft->table), - GFP_KERNEL); - if (!kft->table) - return -ENOMEM; - - return 0; -} - -static int __init kfree_table_add(struct kfree_table *kft, void *p) -{ - if (kft->num == kft->total) { - void **old = kft->table; - - kft->total *= 2; - kft->table = krealloc(old, kft->total * sizeof(*kft->table), - GFP_KERNEL); - if (!kft->table) { - kft->table = old; - kfree(p); - return -ENOMEM; - } - } - kft->table[kft->num++] = p; - return 0; -} - -static void __init kfree_table_free(struct kfree_table *kft) -{ - int i; - - for (i = 0; i < kft->num; i++) - kfree(kft->table[i]); - - kfree(kft->table); -} - -static -struct property * __init tilcdc_prop_dup(const struct property *prop, - struct kfree_table *kft) -{ - struct property *nprop; - - nprop = kzalloc(sizeof(*nprop), GFP_KERNEL); - if (!nprop || kfree_table_add(kft, nprop)) - return NULL; - - nprop->name = kstrdup(prop->name, GFP_KERNEL); - if (!nprop->name || kfree_table_add(kft, nprop->name)) - return NULL; - - nprop->value = kmemdup(prop->value, prop->length, GFP_KERNEL); - if (!nprop->value || kfree_table_add(kft, nprop->value)) - return NULL; - - nprop->length = prop->length; - - return nprop; -} - -static void __init tilcdc_copy_props(struct device_node *from, - struct device_node *to, - const char * const props[], - struct kfree_table *kft) -{ - struct property *prop; - int i; - - for (i = 0; props[i]; i++) { - prop = of_find_property(from, props[i], NULL); - if (!prop) - continue; - - prop = tilcdc_prop_dup(prop, kft); - if (!prop) - continue; - - prop->next = to->properties; - to->properties = prop; - } -} - -static int __init tilcdc_prop_str_update(struct property *prop, - const char *str, - struct kfree_table *kft) -{ - prop->value = kstrdup(str, GFP_KERNEL); - if (kfree_table_add(kft, prop->value) || !prop->value) - return -ENOMEM; - prop->length = strlen(str)+1; - return 0; -} - -static void __init tilcdc_node_disable(struct device_node *node) -{ - struct property *prop; - - prop = kzalloc(sizeof(*prop), GFP_KERNEL); - if (!prop) - return; - - prop->name = "status"; - prop->value = "disabled"; - prop->length = strlen((char *)prop->value)+1; - - of_update_property(node, prop); -} - -static struct device_node * __init tilcdc_get_overlay(struct kfree_table *kft) -{ - const int size = __dtb_tilcdc_slave_compat_end - - __dtb_tilcdc_slave_compat_begin; - static void *overlay_data; - struct device_node *overlay; - - if (!size) { - pr_warn("%s: No overlay data\n", __func__); - return NULL; - } - - overlay_data = kmemdup(__dtb_tilcdc_slave_compat_begin, - size, GFP_KERNEL); - if (!overlay_data || kfree_table_add(kft, overlay_data)) - return NULL; - - of_fdt_unflatten_tree(overlay_data, NULL, &overlay); - if (!overlay) { - pr_warn("%s: Unfattening overlay tree failed\n", __func__); - return NULL; - } - - return overlay; -} - -static const struct of_device_id tilcdc_slave_of_match[] __initconst = { - { .compatible = "ti,tilcdc,slave", }, - {}, -}; - -static const struct of_device_id tilcdc_of_match[] __initconst = { - { .compatible = "ti,am33xx-tilcdc", }, - {}, -}; - -static const struct of_device_id tilcdc_tda998x_of_match[] __initconst 
= { - { .compatible = "nxp,tda998x", }, - {}, -}; - -static const char * const tilcdc_slave_props[] __initconst = { - "pinctrl-names", - "pinctrl-0", - "pinctrl-1", - NULL -}; - -static void __init tilcdc_convert_slave_node(void) -{ - struct device_node *slave = NULL, *lcdc = NULL; - struct device_node *i2c = NULL, *fragment = NULL; - struct device_node *overlay, *encoder; - struct property *prop; - /* For all memory needed for the overlay tree. This memory can - be freed after the overlay has been applied. */ - struct kfree_table kft; - int ovcs_id, ret; - - if (kfree_table_init(&kft)) - return; - - lcdc = of_find_matching_node(NULL, tilcdc_of_match); - slave = of_find_matching_node(NULL, tilcdc_slave_of_match); - - if (!slave || !of_device_is_available(lcdc)) - goto out; - - i2c = of_parse_phandle(slave, "i2c", 0); - if (!i2c) { - pr_err("%s: Can't find i2c node trough phandle\n", __func__); - goto out; - } - - overlay = tilcdc_get_overlay(&kft); - if (!overlay) - goto out; - - encoder = of_find_matching_node(overlay, tilcdc_tda998x_of_match); - if (!encoder) { - pr_err("%s: Failed to find tda998x node\n", __func__); - goto out; - } - - tilcdc_copy_props(slave, encoder, tilcdc_slave_props, &kft); - - for_each_child_of_node(overlay, fragment) { - prop = of_find_property(fragment, "target-path", NULL); - if (!prop) - continue; - if (!strncmp("i2c", (char *)prop->value, prop->length)) - if (tilcdc_prop_str_update(prop, i2c->full_name, &kft)) - goto out; - if (!strncmp("lcdc", (char *)prop->value, prop->length)) - if (tilcdc_prop_str_update(prop, lcdc->full_name, &kft)) - goto out; - } - - tilcdc_node_disable(slave); - - ovcs_id = 0; - ret = of_overlay_apply(overlay, &ovcs_id); - if (ret) - pr_err("%s: Applying overlay changeset failed: %d\n", - __func__, ret); - else - pr_info("%s: ti,tilcdc,slave node successfully converted\n", - __func__); -out: - kfree_table_free(&kft); - of_node_put(i2c); - of_node_put(slave); - of_node_put(lcdc); - of_node_put(fragment); -} - -static int __init tilcdc_slave_compat_init(void) -{ - tilcdc_convert_slave_node(); - return 0; -} - -subsys_initcall(tilcdc_slave_compat_init); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts deleted file mode 100644 index 693f8b0aea2d..000000000000 --- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.dts +++ /dev/null @@ -1,72 +0,0 @@ -/* - * DTS overlay for converting ti,tilcdc,slave binding to new binding. - * - * Copyright (C) 2015 Texas Instruments Inc. - * Author: Jyri Sarha <[email protected]> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - */ - -/* - * target-path property values are simple tags that are replaced with - * correct values in tildcdc_slave_compat.c. Some properties are also - * copied over from the ti,tilcdc,slave node. 
- */ - -/dts-v1/; -/ { - fragment@0 { - target-path = "i2c"; - __overlay__ { - #address-cells = <1>; - #size-cells = <0>; - tda19988 { - compatible = "nxp,tda998x"; - reg = <0x70>; - status = "okay"; - - port { - hdmi_0: endpoint@0 { - remote-endpoint = <&lcd_0>; - }; - }; - }; - }; - }; - - fragment@1 { - target-path = "lcdc"; - __overlay__ { - port { - lcd_0: endpoint@0 { - remote-endpoint = <&hdmi_0>; - }; - }; - }; - }; - - __local_fixups__ { - fragment@0 { - __overlay__ { - tda19988 { - port { - endpoint@0 { - remote-endpoint = <0>; - }; - }; - }; - }; - }; - fragment@1 { - __overlay__ { - port { - endpoint@0 { - remote-endpoint = <0>; - }; - }; - }; - }; - }; -}; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h b/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h deleted file mode 100644 index 403d35d87d0b..000000000000 --- a/drivers/gpu/drm/tilcdc/tilcdc_slave_compat.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (C) 2015 Texas Instruments - * Author: Jyri Sarha <[email protected]> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. - */ -/* This header declares the symbols defined in tilcdc_slave_compat.dts */ - -#ifndef __TILCDC_SLAVE_COMPAT_H__ -#define __TILCDC_SLAVE_COMPAT_H__ - -extern uint8_t __dtb_tilcdc_slave_compat_begin[]; -extern uint8_t __dtb_tilcdc_slave_compat_end[]; - -#endif /* __TILCDC_SLAVE_COMPAT_H__ */ diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c index 7a4b8362dda8..49bfe6e7d005 100644 --- a/drivers/gpu/ipu-v3/ipu-dc.c +++ b/drivers/gpu/ipu-v3/ipu-dc.c @@ -249,11 +249,8 @@ EXPORT_SYMBOL_GPL(ipu_dc_enable); void ipu_dc_enable_channel(struct ipu_dc *dc) { - int di; u32 reg; - di = dc->di; - reg = readl(dc->base + DC_WR_CH_CONF); reg |= DC_WR_CH_CONF_PROG_TYPE_NORMAL; writel(reg, dc->base + DC_WR_CH_CONF); diff --git a/drivers/ide/ide-pnp.c b/drivers/ide/ide-pnp.c index f5f2b62471da..859ddab9448f 100644 --- a/drivers/ide/ide-pnp.c +++ b/drivers/ide/ide-pnp.c @@ -22,7 +22,7 @@ #define DRV_NAME "ide-pnp" /* Add your devices here :)) */ -static struct pnp_device_id idepnp_devices[] = { +static const struct pnp_device_id idepnp_devices[] = { /* Generic ESDI/IDE/ATA compatible hard disk controller */ {.id = "PNP0600", .driver_data = 0}, {.id = ""} diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index 5a2d71729b9a..2a8ac6829d42 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig @@ -1,6 +1,5 @@ menuconfig MTD tristate "Memory Technology Device (MTD) support" - depends on GENERIC_IO help Memory Technology Devices are flash, RAM and similar chips, often used for solid state file systems on embedded devices. 
This option diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c index afb43d5e1782..1cd0fff0e940 100644 --- a/drivers/mtd/chips/map_ram.c +++ b/drivers/mtd/chips/map_ram.c @@ -20,8 +20,9 @@ static int mapram_write (struct mtd_info *, loff_t, size_t, size_t *, const u_ch static int mapram_erase (struct mtd_info *, struct erase_info *); static void mapram_nop (struct mtd_info *); static struct mtd_info *map_ram_probe(struct map_info *map); -static unsigned long mapram_unmapped_area(struct mtd_info *, unsigned long, - unsigned long, unsigned long); +static int mapram_point (struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, void **virt, resource_size_t *phys); +static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len); static struct mtd_chip_driver mapram_chipdrv = { @@ -65,11 +66,12 @@ static struct mtd_info *map_ram_probe(struct map_info *map) mtd->type = MTD_RAM; mtd->size = map->size; mtd->_erase = mapram_erase; - mtd->_get_unmapped_area = mapram_unmapped_area; mtd->_read = mapram_read; mtd->_write = mapram_write; mtd->_panic_write = mapram_write; + mtd->_point = mapram_point; mtd->_sync = mapram_nop; + mtd->_unpoint = mapram_unpoint; mtd->flags = MTD_CAP_RAM; mtd->writesize = 1; @@ -81,19 +83,23 @@ static struct mtd_info *map_ram_probe(struct map_info *map) return mtd; } - -/* - * Allow NOMMU mmap() to directly map the device (if not NULL) - * - return the address to which the offset maps - * - return -ENOSYS to indicate refusal to do the mapping - */ -static unsigned long mapram_unmapped_area(struct mtd_info *mtd, - unsigned long len, - unsigned long offset, - unsigned long flags) +static int mapram_point(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, void **virt, resource_size_t *phys) { struct map_info *map = mtd->priv; - return (unsigned long) map->virt + offset; + + if (!map->virt) + return -EINVAL; + *virt = map->virt + from; + if (phys) + *phys = map->phys + from; + *retlen = len; + return 0; +} + +static int mapram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) +{ + return 0; } static int mapram_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c index e67f73ab44c9..20e3604b4d71 100644 --- a/drivers/mtd/chips/map_rom.c +++ b/drivers/mtd/chips/map_rom.c @@ -20,8 +20,10 @@ static int maprom_write (struct mtd_info *, loff_t, size_t, size_t *, const u_ch static void maprom_nop (struct mtd_info *); static struct mtd_info *map_rom_probe(struct map_info *map); static int maprom_erase (struct mtd_info *mtd, struct erase_info *info); -static unsigned long maprom_unmapped_area(struct mtd_info *, unsigned long, - unsigned long, unsigned long); +static int maprom_point (struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, void **virt, resource_size_t *phys); +static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len); + static struct mtd_chip_driver maprom_chipdrv = { .probe = map_rom_probe, @@ -51,7 +53,8 @@ static struct mtd_info *map_rom_probe(struct map_info *map) mtd->name = map->name; mtd->type = MTD_ROM; mtd->size = map->size; - mtd->_get_unmapped_area = maprom_unmapped_area; + mtd->_point = maprom_point; + mtd->_unpoint = maprom_unpoint; mtd->_read = maprom_read; mtd->_write = maprom_write; mtd->_sync = maprom_nop; @@ -66,18 +69,23 @@ static struct mtd_info *map_rom_probe(struct map_info *map) } -/* - * Allow NOMMU mmap() to directly map the device (if not NULL) - * - return the address 
to which the offset maps - * - return -ENOSYS to indicate refusal to do the mapping - */ -static unsigned long maprom_unmapped_area(struct mtd_info *mtd, - unsigned long len, - unsigned long offset, - unsigned long flags) +static int maprom_point(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, void **virt, resource_size_t *phys) { struct map_info *map = mtd->priv; - return (unsigned long) map->virt + offset; + + if (!map->virt) + return -EINVAL; + *virt = map->virt + from; + if (phys) + *phys = map->phys + from; + *retlen = len; + return 0; +} + +static int maprom_unpoint(struct mtd_info *mtd, loff_t from, size_t len) +{ + return 0; } static int maprom_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c index 84b16133554b..0806f72102c0 100644 --- a/drivers/mtd/devices/docg3.c +++ b/drivers/mtd/devices/docg3.c @@ -1814,8 +1814,13 @@ static void __init doc_dbg_register(struct mtd_info *floor) struct dentry *root = floor->dbg.dfs_dir; struct docg3 *docg3 = floor->priv; - if (IS_ERR_OR_NULL(root)) + if (IS_ERR_OR_NULL(root)) { + if (IS_ENABLED(CONFIG_DEBUG_FS) && + !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) + dev_warn(floor->dev.parent, + "CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n"); return; + } debugfs_create_file("docg3_flashcontrol", S_IRUSR, root, docg3, &flashcontrol_fops); diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c index 268aae45b514..555b94406e0b 100644 --- a/drivers/mtd/devices/lart.c +++ b/drivers/mtd/devices/lart.c @@ -583,7 +583,7 @@ static struct mtd_erase_region_info erase_regions[] = { } }; -static struct mtd_partition lart_partitions[] = { +static const struct mtd_partition lart_partitions[] = { /* blob */ { .name = "blob", diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 00eea6fd379c..dbe6a1de2bb8 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -359,6 +359,7 @@ static const struct spi_device_id m25p_ids[] = { {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"}, /* Everspin MRAMs (non-JEDEC) */ + { "mr25h128" }, /* 128 Kib, 40 MHz */ { "mr25h256" }, /* 256 Kib, 40 MHz */ { "mr25h10" }, /* 1 Mib, 40 MHz */ { "mr25h40" }, /* 4 Mib, 40 MHz */ diff --git a/drivers/mtd/devices/mtdram.c b/drivers/mtd/devices/mtdram.c index cbd8547d7aad..0bf4aeaf0cb8 100644 --- a/drivers/mtd/devices/mtdram.c +++ b/drivers/mtd/devices/mtdram.c @@ -13,6 +13,7 @@ #include <linux/slab.h> #include <linux/ioport.h> #include <linux/vmalloc.h> +#include <linux/mm.h> #include <linux/init.h> #include <linux/mtd/mtd.h> #include <linux/mtd/mtdram.h> @@ -69,6 +70,27 @@ static int ram_point(struct mtd_info *mtd, loff_t from, size_t len, { *virt = mtd->priv + from; *retlen = len; + + if (phys) { + /* limit retlen to the number of contiguous physical pages */ + unsigned long page_ofs = offset_in_page(*virt); + void *addr = *virt - page_ofs; + unsigned long pfn1, pfn0 = vmalloc_to_pfn(addr); + + *phys = __pfn_to_phys(pfn0) + page_ofs; + len += page_ofs; + while (len > PAGE_SIZE) { + len -= PAGE_SIZE; + addr += PAGE_SIZE; + pfn0++; + pfn1 = vmalloc_to_pfn(addr); + if (pfn1 != pfn0) { + *retlen = addr - *virt; + break; + } + } + } + return 0; } @@ -77,19 +99,6 @@ static int ram_unpoint(struct mtd_info *mtd, loff_t from, size_t len) return 0; } -/* - * Allow NOMMU mmap() to directly map the device (if not NULL) - * - return the address to which the offset maps - * - return 
-ENOSYS to indicate refusal to do the mapping - */ -static unsigned long ram_get_unmapped_area(struct mtd_info *mtd, - unsigned long len, - unsigned long offset, - unsigned long flags) -{ - return (unsigned long) mtd->priv + offset; -} - static int ram_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { @@ -134,7 +143,6 @@ int mtdram_init_device(struct mtd_info *mtd, void *mapped_address, mtd->_erase = ram_erase; mtd->_point = ram_point; mtd->_unpoint = ram_unpoint; - mtd->_get_unmapped_area = ram_get_unmapped_area; mtd->_read = ram_read; mtd->_write = ram_write; diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c index 8087c36dc693..0ec85f316d24 100644 --- a/drivers/mtd/devices/slram.c +++ b/drivers/mtd/devices/slram.c @@ -163,8 +163,9 @@ static int register_device(char *name, unsigned long start, unsigned long length } if (!(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start = - ioremap(start, length))) { - E("slram: ioremap failed\n"); + memremap(start, length, + MEMREMAP_WB | MEMREMAP_WT | MEMREMAP_WC))) { + E("slram: memremap failed\n"); return -EIO; } ((slram_priv_t *)(*curmtd)->mtdinfo->priv)->end = @@ -186,7 +187,7 @@ static int register_device(char *name, unsigned long start, unsigned long length if (mtd_device_register((*curmtd)->mtdinfo, NULL, 0)) { E("slram: Failed to register new device\n"); - iounmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start); + memunmap(((slram_priv_t *)(*curmtd)->mtdinfo->priv)->start); kfree((*curmtd)->mtdinfo->priv); kfree((*curmtd)->mtdinfo); return(-EAGAIN); @@ -206,7 +207,7 @@ static void unregister_devices(void) while (slram_mtdlist) { nextitem = slram_mtdlist->next; mtd_device_unregister(slram_mtdlist->mtdinfo); - iounmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start); + memunmap(((slram_priv_t *)slram_mtdlist->mtdinfo->priv)->start); kfree(slram_mtdlist->mtdinfo->priv); kfree(slram_mtdlist->mtdinfo); kfree(slram_mtdlist); diff --git a/drivers/mtd/maps/cfi_flagadm.c b/drivers/mtd/maps/cfi_flagadm.c index d504b3d1791d..70f488628464 100644 --- a/drivers/mtd/maps/cfi_flagadm.c +++ b/drivers/mtd/maps/cfi_flagadm.c @@ -61,7 +61,7 @@ static struct map_info flagadm_map = { .bankwidth = 2, }; -static struct mtd_partition flagadm_parts[] = { +static const struct mtd_partition flagadm_parts[] = { { .name = "Bootloader", .offset = FLASH_PARTITION0_ADDR, diff --git a/drivers/mtd/maps/impa7.c b/drivers/mtd/maps/impa7.c index 15bbda03be65..a0b8fa7849a9 100644 --- a/drivers/mtd/maps/impa7.c +++ b/drivers/mtd/maps/impa7.c @@ -47,7 +47,7 @@ static struct map_info impa7_map[NUM_FLASHBANKS] = { /* * MTD partitioning stuff */ -static struct mtd_partition partitions[] = +static const struct mtd_partition partitions[] = { { .name = "FileSystem", diff --git a/drivers/mtd/maps/netsc520.c b/drivers/mtd/maps/netsc520.c index 81dc2598bc0a..3528497f96c7 100644 --- a/drivers/mtd/maps/netsc520.c +++ b/drivers/mtd/maps/netsc520.c @@ -52,7 +52,7 @@ /* partition_info gives details on the logical partitions that the split the * single flash device into. If the size if zero we use up to the end of the * device. 
*/ -static struct mtd_partition partition_info[]={ +static const struct mtd_partition partition_info[] = { { .name = "NetSc520 boot kernel", .offset = 0, diff --git a/drivers/mtd/maps/nettel.c b/drivers/mtd/maps/nettel.c index a577ef8553d0..729579fb654f 100644 --- a/drivers/mtd/maps/nettel.c +++ b/drivers/mtd/maps/nettel.c @@ -107,7 +107,7 @@ static struct map_info nettel_amd_map = { .bankwidth = AMD_BUSWIDTH, }; -static struct mtd_partition nettel_amd_partitions[] = { +static const struct mtd_partition nettel_amd_partitions[] = { { .name = "SnapGear BIOS config", .offset = 0x000e0000, diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c index 51572895c02c..6d9a4d6f9839 100644 --- a/drivers/mtd/maps/plat-ram.c +++ b/drivers/mtd/maps/plat-ram.c @@ -43,7 +43,6 @@ struct platram_info { struct device *dev; struct mtd_info *mtd; struct map_info map; - struct resource *area; struct platdata_mtd_ram *pdata; }; @@ -97,16 +96,6 @@ static int platram_remove(struct platform_device *pdev) platram_setrw(info, PLATRAM_RO); - /* release resources */ - - if (info->area) { - release_resource(info->area); - kfree(info->area); - } - - if (info->map.virt != NULL) - iounmap(info->map.virt); - kfree(info); return 0; @@ -147,12 +136,11 @@ static int platram_probe(struct platform_device *pdev) info->pdata = pdata; /* get the resource for the memory mapping */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - if (res == NULL) { - dev_err(&pdev->dev, "no memory resource specified\n"); - err = -ENOENT; + info->map.virt = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(info->map.virt)) { + err = PTR_ERR(info->map.virt); + dev_err(&pdev->dev, "failed to ioremap() region\n"); goto exit_free; } @@ -167,26 +155,8 @@ static int platram_probe(struct platform_device *pdev) (char *)pdata->mapname : (char *)pdev->name; info->map.bankwidth = pdata->bankwidth; - /* register our usage of the memory area */ - - info->area = request_mem_region(res->start, info->map.size, pdev->name); - if (info->area == NULL) { - dev_err(&pdev->dev, "failed to request memory region\n"); - err = -EIO; - goto exit_free; - } - - /* remap the memory area */ - - info->map.virt = ioremap(res->start, info->map.size); dev_dbg(&pdev->dev, "virt %p, %lu bytes\n", info->map.virt, info->map.size); - if (info->map.virt == NULL) { - dev_err(&pdev->dev, "failed to ioremap() region\n"); - err = -EIO; - goto exit_free; - } - simple_map_init(&info->map); dev_dbg(&pdev->dev, "initialised map, probing for mtd\n"); diff --git a/drivers/mtd/maps/sbc_gxx.c b/drivers/mtd/maps/sbc_gxx.c index 556a2dfe94c5..4337d279ad83 100644 --- a/drivers/mtd/maps/sbc_gxx.c +++ b/drivers/mtd/maps/sbc_gxx.c @@ -87,7 +87,7 @@ static DEFINE_SPINLOCK(sbc_gxx_spin); /* partition_info gives details on the logical partitions that the split the * single flash device into. If the size if zero we use up to the end of the * device. 
*/ -static struct mtd_partition partition_info[]={ +static const struct mtd_partition partition_info[] = { { .name = "SBC-GXx flash boot partition", .offset = 0, .size = BOOT_PARTITION_SIZE_KiB*1024 }, diff --git a/drivers/mtd/maps/ts5500_flash.c b/drivers/mtd/maps/ts5500_flash.c index 9969fedb1f13..8f177e0acb8c 100644 --- a/drivers/mtd/maps/ts5500_flash.c +++ b/drivers/mtd/maps/ts5500_flash.c @@ -43,7 +43,7 @@ static struct map_info ts5500_map = { .phys = WINDOW_ADDR }; -static struct mtd_partition ts5500_partitions[] = { +static const struct mtd_partition ts5500_partitions[] = { { .name = "Drive A", .offset = 0, diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c index 00a8190797ec..aef030ca8601 100644 --- a/drivers/mtd/maps/uclinux.c +++ b/drivers/mtd/maps/uclinux.c @@ -49,7 +49,7 @@ static struct mtd_info *uclinux_ram_mtdinfo; /****************************************************************************/ -static struct mtd_partition uclinux_romfs[] = { +static const struct mtd_partition uclinux_romfs[] = { { .name = "ROMfs" } }; diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index d573606b91c2..60bf53df5454 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c @@ -644,32 +644,6 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs) } /* - * try to support NOMMU mmaps on concatenated devices - * - we don't support subdev spanning as we can't guarantee it'll work - */ -static unsigned long concat_get_unmapped_area(struct mtd_info *mtd, - unsigned long len, - unsigned long offset, - unsigned long flags) -{ - struct mtd_concat *concat = CONCAT(mtd); - int i; - - for (i = 0; i < concat->num_subdev; i++) { - struct mtd_info *subdev = concat->subdev[i]; - - if (offset >= subdev->size) { - offset -= subdev->size; - continue; - } - - return mtd_get_unmapped_area(subdev, len, offset, flags); - } - - return (unsigned long) -ENOSYS; -} - -/* * This function constructs a virtual MTD device by concatenating * num_devs MTD devices. A pointer to the new device object is * stored to *new_dev upon success. 
This function does _not_ @@ -790,7 +764,6 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c concat->mtd._unlock = concat_unlock; concat->mtd._suspend = concat_suspend; concat->mtd._resume = concat_resume; - concat->mtd._get_unmapped_area = concat_get_unmapped_area; /* * Combine the erase block size info of the subdevices: diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index e7ea842ba3db..f80e911b8843 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -1022,11 +1022,18 @@ EXPORT_SYMBOL_GPL(mtd_unpoint); unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len, unsigned long offset, unsigned long flags) { - if (!mtd->_get_unmapped_area) - return -EOPNOTSUPP; - if (offset >= mtd->size || len > mtd->size - offset) - return -EINVAL; - return mtd->_get_unmapped_area(mtd, len, offset, flags); + size_t retlen; + void *virt; + int ret; + + ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL); + if (ret) + return ret; + if (retlen != len) { + mtd_unpoint(mtd, offset, retlen); + return -ENOSYS; + } + return (unsigned long)virt; } EXPORT_SYMBOL_GPL(mtd_get_unmapped_area); @@ -1093,6 +1100,39 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, } EXPORT_SYMBOL_GPL(mtd_panic_write); +static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs, + struct mtd_oob_ops *ops) +{ + /* + * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving + * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in + * this case. + */ + if (!ops->datbuf) + ops->len = 0; + + if (!ops->oobbuf) + ops->ooblen = 0; + + if (offs < 0 || offs + ops->len >= mtd->size) + return -EINVAL; + + if (ops->ooblen) { + u64 maxooblen; + + if (ops->ooboffs >= mtd_oobavail(mtd, ops)) + return -EINVAL; + + maxooblen = ((mtd_div_by_ws(mtd->size, mtd) - + mtd_div_by_ws(offs, mtd)) * + mtd_oobavail(mtd, ops)) - ops->ooboffs; + if (ops->ooblen > maxooblen) + return -EINVAL; + } + + return 0; +} + int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) { int ret_code; @@ -1100,6 +1140,10 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) if (!mtd->_read_oob) return -EOPNOTSUPP; + ret_code = mtd_check_oob_ops(mtd, from, ops); + if (ret_code) + return ret_code; + ledtrig_mtd_activity(); /* * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics @@ -1119,11 +1163,18 @@ EXPORT_SYMBOL_GPL(mtd_read_oob); int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) { + int ret; + ops->retlen = ops->oobretlen = 0; if (!mtd->_write_oob) return -EOPNOTSUPP; if (!(mtd->flags & MTD_WRITEABLE)) return -EROFS; + + ret = mtd_check_oob_ops(mtd, to, ops); + if (ret) + return ret; + ledtrig_mtd_activity(); return mtd->_write_oob(mtd, to, ops); } diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index a308e707392d..be088bccd593 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -101,18 +101,6 @@ static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len) return part->parent->_unpoint(part->parent, from + part->offset, len); } -static unsigned long part_get_unmapped_area(struct mtd_info *mtd, - unsigned long len, - unsigned long offset, - unsigned long flags) -{ - struct mtd_part *part = mtd_to_part(mtd); - - offset += part->offset; - return part->parent->_get_unmapped_area(part->parent, len, offset, - flags); -} - static int part_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops) { @@ -458,8 +446,6 @@ 
static struct mtd_part *allocate_partition(struct mtd_info *parent, slave->mtd._unpoint = part_unpoint; } - if (parent->_get_unmapped_area) - slave->mtd._get_unmapped_area = part_get_unmapped_area; if (parent->_read_oob) slave->mtd._read_oob = part_read_oob; if (parent->_write_oob) diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c index 7d9080e33865..f07492c6f4b2 100644 --- a/drivers/mtd/mtdswap.c +++ b/drivers/mtd/mtdswap.c @@ -50,7 +50,7 @@ * Number of free eraseblocks below which GC can also collect low frag * blocks. */ -#define LOW_FRAG_GC_TRESHOLD 5 +#define LOW_FRAG_GC_THRESHOLD 5 /* * Wear level cost amortization. We want to do wear leveling on the background @@ -805,7 +805,7 @@ static int __mtdswap_choose_gc_tree(struct mtdswap_dev *d) { int idx, stopat; - if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_TRESHOLD) + if (TREE_COUNT(d, CLEAN) < LOW_FRAG_GC_THRESHOLD) stopat = MTDSWAP_LOWFRAG; else stopat = MTDSWAP_HIFRAG; diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 3f2036f31da4..bb48aafed9a2 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -317,8 +317,11 @@ config MTD_NAND_PXA3xx tristate "NAND support on PXA3xx and Armada 370/XP" depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU help + This enables the driver for the NAND flash device found on - PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2). + PXA3xx processors (NFCv1) and also on 32-bit Armada + platforms (XP, 370, 375, 38x, 39x) and 64-bit Armada + platforms (7K, 8K) (NFCv2). config MTD_NAND_SLC_LPC32XX tristate "NXP LPC32xx SLC Controller" diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index 6e2db700d923..118a1349aad3 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -59,7 +59,7 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o -obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o mtk_ecc.o +obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o nand-objs := nand_base.o nand_bbt.o nand_timings.o nand_ids.o nand-objs += nand_amd.o diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c index dcec9cf4983f..d60ada45c549 100644 --- a/drivers/mtd/nand/ams-delta.c +++ b/drivers/mtd/nand/ams-delta.c @@ -41,7 +41,7 @@ static struct mtd_info *ams_delta_mtd = NULL; * Define partitions for flash devices */ -static struct mtd_partition partition_info[] = { +static const struct mtd_partition partition_info[] = { { .name = "Kernel", .offset = 0, .size = 3 * SZ_1M + SZ_512K }, diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c index f25eca79f4e5..90a71a56bc23 100644 --- a/drivers/mtd/nand/atmel/nand-controller.c +++ b/drivers/mtd/nand/atmel/nand-controller.c @@ -718,8 +718,7 @@ static void atmel_nfc_set_op_addr(struct nand_chip *chip, int page, int column) nc->op.addrs[nc->op.naddrs++] = page; nc->op.addrs[nc->op.naddrs++] = page >> 8; - if ((mtd->writesize > 512 && chip->chipsize > SZ_128M) || - (mtd->writesize <= 512 && chip->chipsize > SZ_32M)) + if (chip->options & NAND_ROW_ADDR_3) nc->op.addrs[nc->op.naddrs++] = page >> 16; } } @@ -2530,6 +2529,9 @@ static __maybe_unused int atmel_nand_controller_resume(struct device *dev) struct atmel_nand_controller *nc = dev_get_drvdata(dev); struct atmel_nand *nand; + if (nc->pmecc) + atmel_pmecc_reset(nc->pmecc); + list_for_each_entry(nand, &nc->chips, node) { int i; @@ -2547,6 +2549,7 @@ static struct 
platform_driver atmel_nand_controller_driver = { .driver = { .name = "atmel-nand-controller", .of_match_table = of_match_ptr(atmel_nand_controller_of_ids), + .pm = &atmel_nand_controller_pm_ops, }, .probe = atmel_nand_controller_probe, .remove = atmel_nand_controller_remove, diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c index 8268636675ef..fcbe4fd6e684 100644 --- a/drivers/mtd/nand/atmel/pmecc.c +++ b/drivers/mtd/nand/atmel/pmecc.c @@ -765,6 +765,13 @@ void atmel_pmecc_get_generated_eccbytes(struct atmel_pmecc_user *user, } EXPORT_SYMBOL_GPL(atmel_pmecc_get_generated_eccbytes); +void atmel_pmecc_reset(struct atmel_pmecc *pmecc) +{ + writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL); + writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL); +} +EXPORT_SYMBOL_GPL(atmel_pmecc_reset); + int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op) { struct atmel_pmecc *pmecc = user->pmecc; @@ -797,10 +804,7 @@ EXPORT_SYMBOL_GPL(atmel_pmecc_enable); void atmel_pmecc_disable(struct atmel_pmecc_user *user) { - struct atmel_pmecc *pmecc = user->pmecc; - - writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL); - writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL); + atmel_pmecc_reset(user->pmecc); mutex_unlock(&user->pmecc->lock); } EXPORT_SYMBOL_GPL(atmel_pmecc_disable); @@ -855,10 +859,7 @@ static struct atmel_pmecc *atmel_pmecc_create(struct platform_device *pdev, /* Disable all interrupts before registering the PMECC handler. */ writel(0xffffffff, pmecc->regs.base + ATMEL_PMECC_IDR); - - /* Reset the ECC engine */ - writel(PMECC_CTRL_RST, pmecc->regs.base + ATMEL_PMECC_CTRL); - writel(PMECC_CTRL_DISABLE, pmecc->regs.base + ATMEL_PMECC_CTRL); + atmel_pmecc_reset(pmecc); return pmecc; } diff --git a/drivers/mtd/nand/atmel/pmecc.h b/drivers/mtd/nand/atmel/pmecc.h index a8ddbfca2ea5..817e0dd9fd15 100644 --- a/drivers/mtd/nand/atmel/pmecc.h +++ b/drivers/mtd/nand/atmel/pmecc.h @@ -61,6 +61,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc, struct atmel_pmecc_user_req *req); void atmel_pmecc_destroy_user(struct atmel_pmecc_user *user); +void atmel_pmecc_reset(struct atmel_pmecc *pmecc); int atmel_pmecc_enable(struct atmel_pmecc_user *user, int op); void atmel_pmecc_disable(struct atmel_pmecc_user *user); int atmel_pmecc_wait_rdy(struct atmel_pmecc_user *user); diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c index 9d4a28fa6b73..8ab827edf94e 100644 --- a/drivers/mtd/nand/au1550nd.c +++ b/drivers/mtd/nand/au1550nd.c @@ -331,8 +331,7 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i ctx->write_byte(mtd, (u8)(page_addr >> 8)); - /* One more address cycle for devices > 32MiB */ - if (this->chipsize > (32 << 20)) + if (this->options & NAND_ROW_ADDR_3) ctx->write_byte(mtd, ((page_addr >> 16) & 0x0f)); } diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c index 1fc435f994e1..b01c9804590e 100644 --- a/drivers/mtd/nand/cmx270_nand.c +++ b/drivers/mtd/nand/cmx270_nand.c @@ -42,7 +42,7 @@ static void __iomem *cmx270_nand_io; /* * Define static partitions for flash device */ -static struct mtd_partition partition_info[] = { +static const struct mtd_partition partition_info[] = { [0] = { .name = "cmx270-0", .offset = 0, diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index 3087b0ba7b7f..5124f8ae8c04 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -10,20 +10,18 @@ * ANY WARRANTY; without even the implied warranty of 
MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * */ -#include <linux/interrupt.h> -#include <linux/delay.h> + +#include <linux/bitfield.h> +#include <linux/completion.h> #include <linux/dma-mapping.h> -#include <linux/wait.h> -#include <linux/mutex.h> -#include <linux/mtd/mtd.h> +#include <linux/interrupt.h> +#include <linux/io.h> #include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/rawnand.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include "denali.h" @@ -31,9 +29,9 @@ MODULE_LICENSE("GPL"); #define DENALI_NAND_NAME "denali-nand" -/* Host Data/Command Interface */ -#define DENALI_HOST_ADDR 0x00 -#define DENALI_HOST_DATA 0x10 +/* for Indexed Addressing */ +#define DENALI_INDEXED_CTRL 0x00 +#define DENALI_INDEXED_DATA 0x10 #define DENALI_MAP00 (0 << 26) /* direct access to buffer */ #define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ @@ -61,31 +59,55 @@ MODULE_LICENSE("GPL"); */ #define DENALI_CLK_X_MULT 6 -/* - * this macro allows us to convert from an MTD structure to our own - * device context (denali) structure. - */ static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd) { return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand); } -static void denali_host_write(struct denali_nand_info *denali, - uint32_t addr, uint32_t data) +/* + * Direct Addressing - the slave address forms the control information (command + * type, bank, block, and page address). The slave data is the actual data to + * be transferred. This mode requires 28 bits of address region allocated. + */ +static u32 denali_direct_read(struct denali_nand_info *denali, u32 addr) +{ + return ioread32(denali->host + addr); +} + +static void denali_direct_write(struct denali_nand_info *denali, u32 addr, + u32 data) { - iowrite32(addr, denali->host + DENALI_HOST_ADDR); - iowrite32(data, denali->host + DENALI_HOST_DATA); + iowrite32(data, denali->host + addr); +} + +/* + * Indexed Addressing - address translation module intervenes in passing the + * control information. This mode reduces the required address range. The + * control information and transferred data are latched by the registers in + * the translation module. + */ +static u32 denali_indexed_read(struct denali_nand_info *denali, u32 addr) +{ + iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); + return ioread32(denali->host + DENALI_INDEXED_DATA); +} + +static void denali_indexed_write(struct denali_nand_info *denali, u32 addr, + u32 data) +{ + iowrite32(addr, denali->host + DENALI_INDEXED_CTRL); + iowrite32(data, denali->host + DENALI_INDEXED_DATA); } /* * Use the configuration feature register to determine the maximum number of * banks that the hardware supports. 
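The two access methods described above are chosen once at init time from the FEATURES register. A condensed sketch of that selection follows; the wrapper function name is invented here, but the assignments mirror the denali_init() hunk later in this patch:

static void denali_pick_host_accessors(struct denali_nand_info *denali)
{
	u32 features = ioread32(denali->reg + FEATURES);

	if (features & FEATURES__INDEX_ADDR) {
		/* small MMIO window: latch the address, then move the data */
		denali->host_read = denali_indexed_read;
		denali->host_write = denali_indexed_write;
	} else {
		/* 28-bit window: the address itself carries the control info */
		denali->host_read = denali_direct_read;
		denali->host_write = denali_direct_write;
	}
}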
*/ -static void detect_max_banks(struct denali_nand_info *denali) +static void denali_detect_max_banks(struct denali_nand_info *denali) { uint32_t features = ioread32(denali->reg + FEATURES); - denali->max_banks = 1 << (features & FEATURES__N_BANKS); + denali->max_banks = 1 << FIELD_GET(FEATURES__N_BANKS, features); /* the encoding changed from rev 5.0 to 5.1 */ if (denali->revision < 0x0501) @@ -189,7 +211,7 @@ static uint32_t denali_wait_for_irq(struct denali_nand_info *denali, msecs_to_jiffies(1000)); if (!time_left) { dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", - denali->irq_mask); + irq_mask); return 0; } @@ -208,73 +230,47 @@ static uint32_t denali_check_irq(struct denali_nand_info *denali) return irq_status; } -/* - * This helper function setups the registers for ECC and whether or not - * the spare area will be transferred. - */ -static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, - bool transfer_spare) -{ - int ecc_en_flag, transfer_spare_flag; - - /* set ECC, transfer spare bits if needed */ - ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0; - transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0; - - /* Enable spare area/ECC per user's request. */ - iowrite32(ecc_en_flag, denali->reg + ECC_ENABLE); - iowrite32(transfer_spare_flag, denali->reg + TRANSFER_SPARE_REG); -} - static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { struct denali_nand_info *denali = mtd_to_denali(mtd); + u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); int i; - iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), - denali->host + DENALI_HOST_ADDR); - for (i = 0; i < len; i++) - buf[i] = ioread32(denali->host + DENALI_HOST_DATA); + buf[i] = denali->host_read(denali, addr); } static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { struct denali_nand_info *denali = mtd_to_denali(mtd); + u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); int i; - iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), - denali->host + DENALI_HOST_ADDR); - for (i = 0; i < len; i++) - iowrite32(buf[i], denali->host + DENALI_HOST_DATA); + denali->host_write(denali, addr, buf[i]); } static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) { struct denali_nand_info *denali = mtd_to_denali(mtd); + u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); uint16_t *buf16 = (uint16_t *)buf; int i; - iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), - denali->host + DENALI_HOST_ADDR); - for (i = 0; i < len / 2; i++) - buf16[i] = ioread32(denali->host + DENALI_HOST_DATA); + buf16[i] = denali->host_read(denali, addr); } static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len) { struct denali_nand_info *denali = mtd_to_denali(mtd); + u32 addr = DENALI_MAP11_DATA | DENALI_BANK(denali); const uint16_t *buf16 = (const uint16_t *)buf; int i; - iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), - denali->host + DENALI_HOST_ADDR); - for (i = 0; i < len / 2; i++) - iowrite32(buf16[i], denali->host + DENALI_HOST_DATA); + denali->host_write(denali, addr, buf16[i]); } static uint8_t denali_read_byte(struct mtd_info *mtd) @@ -319,7 +315,7 @@ static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) if (ctrl & NAND_CTRL_CHANGE) denali_reset_irq(denali); - denali_host_write(denali, DENALI_BANK(denali) | type, dat); + denali->host_write(denali, DENALI_BANK(denali) | type, dat); } static int denali_dev_ready(struct mtd_info *mtd) @@ -389,7 +385,7 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd, 
return 0; } - max_bitflips = ecc_cor & ECC_COR_INFO__MAX_ERRORS; + max_bitflips = FIELD_GET(ECC_COR_INFO__MAX_ERRORS, ecc_cor); /* * The register holds the maximum of per-sector corrected bitflips. @@ -402,13 +398,6 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd, return max_bitflips; } -#define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12) -#define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET)) -#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK) -#define ECC_ERROR_UNCORRECTABLE(x) ((x) & ERR_CORRECTION_INFO__ERROR_TYPE) -#define ECC_ERR_DEVICE(x) (((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8) -#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO) - static int denali_sw_ecc_fixup(struct mtd_info *mtd, struct denali_nand_info *denali, unsigned long *uncor_ecc_flags, uint8_t *buf) @@ -426,18 +415,20 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd, do { err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS); - err_sector = ECC_SECTOR(err_addr); - err_byte = ECC_BYTE(err_addr); + err_sector = FIELD_GET(ECC_ERROR_ADDRESS__SECTOR, err_addr); + err_byte = FIELD_GET(ECC_ERROR_ADDRESS__OFFSET, err_addr); err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO); - err_cor_value = ECC_CORRECTION_VALUE(err_cor_info); - err_device = ECC_ERR_DEVICE(err_cor_info); + err_cor_value = FIELD_GET(ERR_CORRECTION_INFO__BYTE, + err_cor_info); + err_device = FIELD_GET(ERR_CORRECTION_INFO__DEVICE, + err_cor_info); /* reset the bitflip counter when crossing ECC sector */ if (err_sector != prev_sector) bitflips = 0; - if (ECC_ERROR_UNCORRECTABLE(err_cor_info)) { + if (err_cor_info & ERR_CORRECTION_INFO__UNCOR) { /* * Check later if this is a real ECC error, or * an erased sector. @@ -467,12 +458,11 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd, } prev_sector = err_sector; - } while (!ECC_LAST_ERR(err_cor_info)); + } while (!(err_cor_info & ERR_CORRECTION_INFO__LAST_ERR)); /* - * Once handle all ecc errors, controller will trigger a - * ECC_TRANSACTION_DONE interrupt, so here just wait for - * a while for this interrupt + * Once handle all ECC errors, controller will trigger an + * ECC_TRANSACTION_DONE interrupt. */ irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE); if (!(irq_status & INTR__ECC_TRANSACTION_DONE)) @@ -481,13 +471,6 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd, return max_bitflips; } -/* programs the controller to either enable/disable DMA transfers */ -static void denali_enable_dma(struct denali_nand_info *denali, bool en) -{ - iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->reg + DMA_ENABLE); - ioread32(denali->reg + DMA_ENABLE); -} - static void denali_setup_dma64(struct denali_nand_info *denali, dma_addr_t dma_addr, int page, int write) { @@ -502,14 +485,14 @@ static void denali_setup_dma64(struct denali_nand_info *denali, * 1. setup transfer type, interrupt when complete, * burst len = 64 bytes, the number of pages */ - denali_host_write(denali, mode, - 0x01002000 | (64 << 16) | (write << 8) | page_count); + denali->host_write(denali, mode, + 0x01002000 | (64 << 16) | (write << 8) | page_count); /* 2. set memory low address */ - denali_host_write(denali, mode, dma_addr); + denali->host_write(denali, mode, lower_32_bits(dma_addr)); /* 3. 
set memory high address */ - denali_host_write(denali, mode, (uint64_t)dma_addr >> 32); + denali->host_write(denali, mode, upper_32_bits(dma_addr)); } static void denali_setup_dma32(struct denali_nand_info *denali, @@ -523,32 +506,23 @@ static void denali_setup_dma32(struct denali_nand_info *denali, /* DMA is a four step process */ /* 1. setup transfer type and # of pages */ - denali_host_write(denali, mode | page, - 0x2000 | (write << 8) | page_count); + denali->host_write(denali, mode | page, + 0x2000 | (write << 8) | page_count); /* 2. set memory high address bits 23:8 */ - denali_host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200); + denali->host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200); /* 3. set memory low address bits 23:8 */ - denali_host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300); + denali->host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300); /* 4. interrupt when complete, burst len = 64 bytes */ - denali_host_write(denali, mode | 0x14000, 0x2400); -} - -static void denali_setup_dma(struct denali_nand_info *denali, - dma_addr_t dma_addr, int page, int write) -{ - if (denali->caps & DENALI_CAP_DMA_64BIT) - denali_setup_dma64(denali, dma_addr, page, write); - else - denali_setup_dma32(denali, dma_addr, page, write); + denali->host_write(denali, mode | 0x14000, 0x2400); } static int denali_pio_read(struct denali_nand_info *denali, void *buf, size_t size, int page, int raw) { - uint32_t addr = DENALI_BANK(denali) | page; + u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; uint32_t *buf32 = (uint32_t *)buf; uint32_t irq_status, ecc_err_mask; int i; @@ -560,9 +534,8 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf, denali_reset_irq(denali); - iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR); for (i = 0; i < size / 4; i++) - *buf32++ = ioread32(denali->host + DENALI_HOST_DATA); + *buf32++ = denali->host_read(denali, addr); irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC); if (!(irq_status & INTR__PAGE_XFER_INC)) @@ -577,16 +550,15 @@ static int denali_pio_read(struct denali_nand_info *denali, void *buf, static int denali_pio_write(struct denali_nand_info *denali, const void *buf, size_t size, int page, int raw) { - uint32_t addr = DENALI_BANK(denali) | page; + u32 addr = DENALI_MAP01 | DENALI_BANK(denali) | page; const uint32_t *buf32 = (uint32_t *)buf; uint32_t irq_status; int i; denali_reset_irq(denali); - iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR); for (i = 0; i < size / 4; i++) - iowrite32(*buf32++, denali->host + DENALI_HOST_DATA); + denali->host_write(denali, addr, *buf32++); irq_status = denali_wait_for_irq(denali, INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL); @@ -635,19 +607,19 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, ecc_err_mask = INTR__ECC_ERR; } - denali_enable_dma(denali, true); + iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); denali_reset_irq(denali); - denali_setup_dma(denali, dma_addr, page, write); + denali->setup_dma(denali, dma_addr, page, write); - /* wait for operation to complete */ irq_status = denali_wait_for_irq(denali, irq_mask); if (!(irq_status & INTR__DMA_CMD_COMP)) ret = -EIO; else if (irq_status & ecc_err_mask) ret = -EBADMSG; - denali_enable_dma(denali, false); + iowrite32(0, denali->reg + DMA_ENABLE); + dma_unmap_single(denali->dev, dma_addr, size, dir); if (irq_status & INTR__ERASED_PAGE) @@ -659,7 +631,9 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, static int 
denali_data_xfer(struct denali_nand_info *denali, void *buf, size_t size, int page, int raw, int write) { - setup_ecc_for_xfer(denali, !raw, raw); + iowrite32(raw ? 0 : ECC_ENABLE__FLAG, denali->reg + ECC_ENABLE); + iowrite32(raw ? TRANSFER_SPARE_REG__FLAG : 0, + denali->reg + TRANSFER_SPARE_REG); if (denali->dma_avail) return denali_dma_xfer(denali, buf, size, page, raw, write); @@ -970,8 +944,8 @@ static int denali_erase(struct mtd_info *mtd, int page) denali_reset_irq(denali); - denali_host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, - DENALI_ERASE); + denali->host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, + DENALI_ERASE); /* wait for erase to complete or failure to occur */ irq_status = denali_wait_for_irq(denali, @@ -1009,7 +983,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, tmp = ioread32(denali->reg + ACC_CLKS); tmp &= ~ACC_CLKS__VALUE; - tmp |= acc_clks; + tmp |= FIELD_PREP(ACC_CLKS__VALUE, acc_clks); iowrite32(tmp, denali->reg + ACC_CLKS); /* tRWH -> RE_2_WE */ @@ -1018,7 +992,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, tmp = ioread32(denali->reg + RE_2_WE); tmp &= ~RE_2_WE__VALUE; - tmp |= re_2_we; + tmp |= FIELD_PREP(RE_2_WE__VALUE, re_2_we); iowrite32(tmp, denali->reg + RE_2_WE); /* tRHZ -> RE_2_RE */ @@ -1027,16 +1001,22 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, tmp = ioread32(denali->reg + RE_2_RE); tmp &= ~RE_2_RE__VALUE; - tmp |= re_2_re; + tmp |= FIELD_PREP(RE_2_RE__VALUE, re_2_re); iowrite32(tmp, denali->reg + RE_2_RE); - /* tWHR -> WE_2_RE */ - we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk); + /* + * tCCS, tWHR -> WE_2_RE + * + * With WE_2_RE properly set, the Denali controller automatically takes + * care of the delay; the driver need not set NAND_WAIT_TCCS. 
+ */ + we_2_re = DIV_ROUND_UP(max(timings->tCCS_min, timings->tWHR_min), + t_clk); we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE); tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE); tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE; - tmp |= we_2_re; + tmp |= FIELD_PREP(TWHR2_AND_WE_2_RE__WE_2_RE, we_2_re); iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE); /* tADL -> ADDR_2_DATA */ @@ -1050,8 +1030,8 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, addr_2_data = min_t(int, addr_2_data, addr_2_data_mask); tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA); - tmp &= ~addr_2_data_mask; - tmp |= addr_2_data; + tmp &= ~TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; + tmp |= FIELD_PREP(TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA, addr_2_data); iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA); /* tREH, tWH -> RDWR_EN_HI_CNT */ @@ -1061,7 +1041,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, tmp = ioread32(denali->reg + RDWR_EN_HI_CNT); tmp &= ~RDWR_EN_HI_CNT__VALUE; - tmp |= rdwr_en_hi; + tmp |= FIELD_PREP(RDWR_EN_HI_CNT__VALUE, rdwr_en_hi); iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT); /* tRP, tWP -> RDWR_EN_LO_CNT */ @@ -1075,7 +1055,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); tmp &= ~RDWR_EN_LO_CNT__VALUE; - tmp |= rdwr_en_lo; + tmp |= FIELD_PREP(RDWR_EN_LO_CNT__VALUE, rdwr_en_lo); iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT); /* tCS, tCEA -> CS_SETUP_CNT */ @@ -1086,7 +1066,7 @@ static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, tmp = ioread32(denali->reg + CS_SETUP_CNT); tmp &= ~CS_SETUP_CNT__VALUE; - tmp |= cs_setup; + tmp |= FIELD_PREP(CS_SETUP_CNT__VALUE, cs_setup); iowrite32(tmp, denali->reg + CS_SETUP_CNT); return 0; @@ -1131,15 +1111,11 @@ static void denali_hw_init(struct denali_nand_info *denali) * if this value is 0, just let it be. 
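The timing hunks above replace open-coded shift/mask arithmetic with the <linux/bitfield.h> helpers. Below is a minimal, standalone illustration of what FIELD_PREP()/FIELD_GET() do; the helpers are re-implemented locally for the demo and the field width is assumed for the example, not taken from denali.h:

#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)		(((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))
#define FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))

#define RE_2_WE__VALUE		GENMASK(5, 0)	/* width assumed for the demo */

int main(void)
{
	uint32_t reg = 0xffffffc0;	/* pretend the other bits are in use */

	reg &= ~RE_2_WE__VALUE;			/* clear the field */
	reg |= FIELD_PREP(RE_2_WE__VALUE, 9);	/* place 9 at the field's offset */

	/* prints reg=0xffffffc9 field=9 */
	printf("reg=0x%08x field=%u\n", (unsigned int)reg,
	       (unsigned int)FIELD_GET(RE_2_WE__VALUE, reg));
	return 0;
}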
*/ denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES); - detect_max_banks(denali); + denali_detect_max_banks(denali); iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER); - - /* Should set value for these registers when init */ - iowrite32(0, denali->reg + TWO_ROW_ADDR_CYCLES); - iowrite32(1, denali->reg + ECC_ENABLE); } int denali_calc_ecc_bytes(int step_size, int strength) @@ -1211,22 +1187,6 @@ static const struct mtd_ooblayout_ops denali_ooblayout_ops = { .free = denali_ooblayout_free, }; -/* initialize driver data structures */ -static void denali_drv_init(struct denali_nand_info *denali) -{ - /* - * the completion object will be used to notify - * the callee that the interrupt is done - */ - init_completion(&denali->complete); - - /* - * the spinlock will be used to synchronize the ISR with any - * element that might be access shared data (interrupt status) - */ - spin_lock_init(&denali->irq_lock); -} - static int denali_multidev_fixup(struct denali_nand_info *denali) { struct nand_chip *chip = &denali->nand; @@ -1282,15 +1242,17 @@ int denali_init(struct denali_nand_info *denali) { struct nand_chip *chip = &denali->nand; struct mtd_info *mtd = nand_to_mtd(chip); + u32 features = ioread32(denali->reg + FEATURES); int ret; mtd->dev.parent = denali->dev; denali_hw_init(denali); - denali_drv_init(denali); + + init_completion(&denali->complete); + spin_lock_init(&denali->irq_lock); denali_clear_irq_all(denali); - /* Request IRQ after all the hardware initialization is finished */ ret = devm_request_irq(denali->dev, denali->irq, denali_isr, IRQF_SHARED, DENALI_NAND_NAME, denali); if (ret) { @@ -1308,7 +1270,6 @@ int denali_init(struct denali_nand_info *denali) if (!mtd->name) mtd->name = "denali-nand"; - /* register the driver with the NAND core subsystem */ chip->select_chip = denali_select_chip; chip->read_byte = denali_read_byte; chip->write_byte = denali_write_byte; @@ -1317,15 +1278,18 @@ int denali_init(struct denali_nand_info *denali) chip->dev_ready = denali_dev_ready; chip->waitfunc = denali_waitfunc; + if (features & FEATURES__INDEX_ADDR) { + denali->host_read = denali_indexed_read; + denali->host_write = denali_indexed_write; + } else { + denali->host_read = denali_direct_read; + denali->host_write = denali_direct_write; + } + /* clk rate info is needed for setup_data_interface */ if (denali->clk_x_rate) chip->setup_data_interface = denali_setup_data_interface; - /* - * scan for NAND devices attached to the controller - * this is the first stage in a two step process to register - * with the nand subsystem - */ ret = nand_scan_ident(mtd, denali->max_banks, NULL); if (ret) goto disable_irq; @@ -1347,20 +1311,15 @@ int denali_init(struct denali_nand_info *denali) if (denali->dma_avail) { chip->options |= NAND_USE_BOUNCE_BUFFER; chip->buf_align = 16; + if (denali->caps & DENALI_CAP_DMA_64BIT) + denali->setup_dma = denali_setup_dma64; + else + denali->setup_dma = denali_setup_dma32; } - /* - * second stage of the NAND scan - * this stage requires information regarding ECC and - * bad block management. 
- */ - chip->bbt_options |= NAND_BBT_USE_FLASH; chip->bbt_options |= NAND_BBT_NO_OOB; - chip->ecc.mode = NAND_ECC_HW_SYNDROME; - - /* no subpage writes on denali */ chip->options |= NAND_NO_SUBPAGE_WRITE; ret = denali_ecc_setup(mtd, chip, denali); @@ -1373,12 +1332,15 @@ int denali_init(struct denali_nand_info *denali) "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); - iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1), + iowrite32(FIELD_PREP(ECC_CORRECTION__ERASE_THRESHOLD, 1) | + FIELD_PREP(ECC_CORRECTION__VALUE, chip->ecc.strength), denali->reg + ECC_CORRECTION); iowrite32(mtd->erasesize / mtd->writesize, denali->reg + PAGES_PER_BLOCK); iowrite32(chip->options & NAND_BUSWIDTH_16 ? 1 : 0, denali->reg + DEVICE_WIDTH); + iowrite32(chip->options & NAND_ROW_ADDR_3 ? 0 : TWO_ROW_ADDR_CYCLES__FLAG, + denali->reg + TWO_ROW_ADDR_CYCLES); iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE); iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE); @@ -1441,7 +1403,6 @@ disable_irq: } EXPORT_SYMBOL(denali_init); -/* driver exit point */ void denali_remove(struct denali_nand_info *denali) { struct mtd_info *mtd = nand_to_mtd(&denali->nand); diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h index 9239e6793e6e..2911066dacac 100644 --- a/drivers/mtd/nand/denali.h +++ b/drivers/mtd/nand/denali.h @@ -10,18 +10,16 @@ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
- * */ #ifndef __DENALI_H__ #define __DENALI_H__ #include <linux/bitops.h> +#include <linux/completion.h> #include <linux/mtd/rawnand.h> +#include <linux/spinlock_types.h> +#include <linux/types.h> #define DEVICE_RESET 0x0 #define DEVICE_RESET__BANK(bank) BIT(bank) @@ -111,9 +109,6 @@ #define ECC_CORRECTION 0x1b0 #define ECC_CORRECTION__VALUE GENMASK(4, 0) #define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16) -#define MAKE_ECC_CORRECTION(val, thresh) \ - (((val) & (ECC_CORRECTION__VALUE)) | \ - (((thresh) << 16) & (ECC_CORRECTION__ERASE_THRESHOLD))) #define READ_MODE 0x1c0 #define READ_MODE__VALUE GENMASK(3, 0) @@ -255,13 +250,13 @@ #define ECC_ERROR_ADDRESS 0x630 #define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0) -#define ECC_ERROR_ADDRESS__SECTOR_NR GENMASK(15, 12) +#define ECC_ERROR_ADDRESS__SECTOR GENMASK(15, 12) #define ERR_CORRECTION_INFO 0x640 -#define ERR_CORRECTION_INFO__BYTEMASK GENMASK(7, 0) -#define ERR_CORRECTION_INFO__DEVICE_NR GENMASK(11, 8) -#define ERR_CORRECTION_INFO__ERROR_TYPE BIT(14) -#define ERR_CORRECTION_INFO__LAST_ERR_INFO BIT(15) +#define ERR_CORRECTION_INFO__BYTE GENMASK(7, 0) +#define ERR_CORRECTION_INFO__DEVICE GENMASK(11, 8) +#define ERR_CORRECTION_INFO__UNCOR BIT(14) +#define ERR_CORRECTION_INFO__LAST_ERR BIT(15) #define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10) #define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8) @@ -310,23 +305,24 @@ struct denali_nand_info { struct device *dev; void __iomem *reg; /* Register Interface */ void __iomem *host; /* Host Data/Command Interface */ - - /* elements used by ISR */ struct completion complete; - spinlock_t irq_lock; - uint32_t irq_mask; - uint32_t irq_status; + spinlock_t irq_lock; /* protect irq_mask and irq_status */ + u32 irq_mask; /* interrupts we are waiting for */ + u32 irq_status; /* interrupts that have happened */ int irq; - - void *buf; + void *buf; /* for syndrome layout conversion */ dma_addr_t dma_addr; - int dma_avail; + int dma_avail; /* can support DMA? */ int devs_per_cs; /* devices connected in parallel */ - int oob_skip_bytes; + int oob_skip_bytes; /* number of bytes reserved for BBM */ int max_banks; - unsigned int revision; - unsigned int caps; + unsigned int revision; /* IP revision */ + unsigned int caps; /* IP capability (or quirk) */ const struct nand_ecc_caps *ecc_caps; + u32 (*host_read)(struct denali_nand_info *denali, u32 addr); + void (*host_write)(struct denali_nand_info *denali, u32 addr, u32 data); + void (*setup_dma)(struct denali_nand_info *denali, dma_addr_t dma_addr, + int page, int write); }; #define DENALI_CAP_HW_ECC_FIXUP BIT(0) diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c index 56e2e177644d..cfd33e6ca77f 100644 --- a/drivers/mtd/nand/denali_dt.c +++ b/drivers/mtd/nand/denali_dt.c @@ -12,15 +12,16 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. 
*/ + #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/platform_device.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/platform_device.h> #include "denali.h" @@ -155,7 +156,6 @@ static struct platform_driver denali_dt_driver = { .of_match_table = denali_nand_dt_ids, }, }; - module_platform_driver(denali_dt_driver); MODULE_LICENSE("GPL"); diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c index 81370c79aa48..57fb7ae31412 100644 --- a/drivers/mtd/nand/denali_pci.c +++ b/drivers/mtd/nand/denali_pci.c @@ -11,6 +11,9 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. */ + +#include <linux/errno.h> +#include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> @@ -106,7 +109,6 @@ failed_remap_reg: return ret; } -/* driver exit point */ static void denali_pci_remove(struct pci_dev *dev) { struct denali_nand_info *denali = pci_get_drvdata(dev); @@ -122,5 +124,4 @@ static struct pci_driver denali_pci_driver = { .probe = denali_pci_probe, .remove = denali_pci_remove, }; - module_pci_driver(denali_pci_driver); diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c index c3aa53caab5c..72671dc52e2e 100644 --- a/drivers/mtd/nand/diskonchip.c +++ b/drivers/mtd/nand/diskonchip.c @@ -705,8 +705,7 @@ static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int colu if (page_addr != -1) { WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress); WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress); - /* One more address cycle for higher density devices */ - if (this->chipsize & 0x0c000000) { + if (this->options & NAND_ROW_ADDR_3) { WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress); printk("high density\n"); } diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c index fd3648952b5a..484f7fbc3f7d 100644 --- a/drivers/mtd/nand/gpio.c +++ b/drivers/mtd/nand/gpio.c @@ -23,7 +23,7 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/platform_device.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/io.h> #include <linux/mtd/mtd.h> #include <linux/mtd/rawnand.h> @@ -31,12 +31,16 @@ #include <linux/mtd/nand-gpio.h> #include <linux/of.h> #include <linux/of_address.h> -#include <linux/of_gpio.h> struct gpiomtd { void __iomem *io_sync; struct nand_chip nand_chip; struct gpio_nand_platdata plat; + struct gpio_desc *nce; /* Optional chip enable */ + struct gpio_desc *cle; + struct gpio_desc *ale; + struct gpio_desc *rdy; + struct gpio_desc *nwp; /* Optional write protection */ }; static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd) @@ -78,11 +82,10 @@ static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) gpio_nand_dosync(gpiomtd); if (ctrl & NAND_CTRL_CHANGE) { - if (gpio_is_valid(gpiomtd->plat.gpio_nce)) - gpio_set_value(gpiomtd->plat.gpio_nce, - !(ctrl & NAND_NCE)); - gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE)); - gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE)); + if (gpiomtd->nce) + gpiod_set_value(gpiomtd->nce, !(ctrl & NAND_NCE)); + gpiod_set_value(gpiomtd->cle, !!(ctrl & NAND_CLE)); + gpiod_set_value(gpiomtd->ale, !!(ctrl & NAND_ALE)); gpio_nand_dosync(gpiomtd); } if (cmd == NAND_CMD_NONE) @@ -96,7 +99,7 @@ static int gpio_nand_devready(struct mtd_info 
*mtd) { struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd); - return gpio_get_value(gpiomtd->plat.gpio_rdy); + return gpiod_get_value(gpiomtd->rdy); } #ifdef CONFIG_OF @@ -123,12 +126,6 @@ static int gpio_nand_get_config_of(const struct device *dev, } } - plat->gpio_rdy = of_get_gpio(dev->of_node, 0); - plat->gpio_nce = of_get_gpio(dev->of_node, 1); - plat->gpio_ale = of_get_gpio(dev->of_node, 2); - plat->gpio_cle = of_get_gpio(dev->of_node, 3); - plat->gpio_nwp = of_get_gpio(dev->of_node, 4); - if (!of_property_read_u32(dev->of_node, "chip-delay", &val)) plat->chip_delay = val; @@ -201,10 +198,11 @@ static int gpio_nand_remove(struct platform_device *pdev) nand_release(nand_to_mtd(&gpiomtd->nand_chip)); - if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) - gpio_set_value(gpiomtd->plat.gpio_nwp, 0); - if (gpio_is_valid(gpiomtd->plat.gpio_nce)) - gpio_set_value(gpiomtd->plat.gpio_nce, 1); + /* Enable write protection and disable the chip */ + if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp)) + gpiod_set_value(gpiomtd->nwp, 0); + if (gpiomtd->nce && !IS_ERR(gpiomtd->nce)) + gpiod_set_value(gpiomtd->nce, 0); return 0; } @@ -215,66 +213,66 @@ static int gpio_nand_probe(struct platform_device *pdev) struct nand_chip *chip; struct mtd_info *mtd; struct resource *res; + struct device *dev = &pdev->dev; int ret = 0; - if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev)) + if (!dev->of_node && !dev_get_platdata(dev)) return -EINVAL; - gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL); + gpiomtd = devm_kzalloc(dev, sizeof(*gpiomtd), GFP_KERNEL); if (!gpiomtd) return -ENOMEM; chip = &gpiomtd->nand_chip; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res); + chip->IO_ADDR_R = devm_ioremap_resource(dev, res); if (IS_ERR(chip->IO_ADDR_R)) return PTR_ERR(chip->IO_ADDR_R); res = gpio_nand_get_io_sync(pdev); if (res) { - gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res); + gpiomtd->io_sync = devm_ioremap_resource(dev, res); if (IS_ERR(gpiomtd->io_sync)) return PTR_ERR(gpiomtd->io_sync); } - ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat); + ret = gpio_nand_get_config(dev, &gpiomtd->plat); if (ret) return ret; - if (gpio_is_valid(gpiomtd->plat.gpio_nce)) { - ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce, - "NAND NCE"); - if (ret) - return ret; - gpio_direction_output(gpiomtd->plat.gpio_nce, 1); + /* Just enable the chip */ + gpiomtd->nce = devm_gpiod_get_optional(dev, "nce", GPIOD_OUT_HIGH); + if (IS_ERR(gpiomtd->nce)) + return PTR_ERR(gpiomtd->nce); + + /* We disable write protection once we know probe() will succeed */ + gpiomtd->nwp = devm_gpiod_get_optional(dev, "nwp", GPIOD_OUT_LOW); + if (IS_ERR(gpiomtd->nwp)) { + ret = PTR_ERR(gpiomtd->nwp); + goto out_ce; } - if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) { - ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp, - "NAND NWP"); - if (ret) - return ret; + gpiomtd->nwp = devm_gpiod_get(dev, "ale", GPIOD_OUT_LOW); + if (IS_ERR(gpiomtd->nwp)) { + ret = PTR_ERR(gpiomtd->nwp); + goto out_ce; } - ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE"); - if (ret) - return ret; - gpio_direction_output(gpiomtd->plat.gpio_ale, 0); + gpiomtd->cle = devm_gpiod_get(dev, "cle", GPIOD_OUT_LOW); + if (IS_ERR(gpiomtd->cle)) { + ret = PTR_ERR(gpiomtd->cle); + goto out_ce; + } - ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE"); - if (ret) - return ret; - gpio_direction_output(gpiomtd->plat.gpio_cle, 0); - - if 
(gpio_is_valid(gpiomtd->plat.gpio_rdy)) { - ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy, - "NAND RDY"); - if (ret) - return ret; - gpio_direction_input(gpiomtd->plat.gpio_rdy); - chip->dev_ready = gpio_nand_devready; + gpiomtd->rdy = devm_gpiod_get_optional(dev, "rdy", GPIOD_IN); + if (IS_ERR(gpiomtd->rdy)) { + ret = PTR_ERR(gpiomtd->rdy); + goto out_ce; } + /* Using RDY pin */ + if (gpiomtd->rdy) + chip->dev_ready = gpio_nand_devready; nand_set_flash_node(chip, pdev->dev.of_node); chip->IO_ADDR_W = chip->IO_ADDR_R; @@ -285,12 +283,13 @@ static int gpio_nand_probe(struct platform_device *pdev) chip->cmd_ctrl = gpio_nand_cmd_ctrl; mtd = nand_to_mtd(chip); - mtd->dev.parent = &pdev->dev; + mtd->dev.parent = dev; platform_set_drvdata(pdev, gpiomtd); - if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) - gpio_direction_output(gpiomtd->plat.gpio_nwp, 1); + /* Disable write protection, if wired up */ + if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp)) + gpiod_direction_output(gpiomtd->nwp, 1); ret = nand_scan(mtd, 1); if (ret) @@ -305,8 +304,11 @@ static int gpio_nand_probe(struct platform_device *pdev) return 0; err_wp: - if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) - gpio_set_value(gpiomtd->plat.gpio_nwp, 0); + if (gpiomtd->nwp && !IS_ERR(gpiomtd->nwp)) + gpiod_set_value(gpiomtd->nwp, 0); +out_ce: + if (gpiomtd->nce && !IS_ERR(gpiomtd->nce)) + gpiod_set_value(gpiomtd->nce, 0); return ret; } diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c index d9ee1a7e6956..0897261c3e17 100644 --- a/drivers/mtd/nand/hisi504_nand.c +++ b/drivers/mtd/nand/hisi504_nand.c @@ -432,8 +432,7 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr) host->addr_value[0] |= (page_addr & 0xffff) << (host->addr_cycle * 8); host->addr_cycle += 2; - /* One more address cycle for devices > 128MiB */ - if (chip->chipsize > (128 << 20)) { + if (chip->options & NAND_ROW_ADDR_3) { host->addr_cycle += 1; if (host->command == NAND_CMD_ERASE1) host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16; diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c index 7f3b065b6b8f..c51d214d169e 100644 --- a/drivers/mtd/nand/mtk_ecc.c +++ b/drivers/mtd/nand/mtk_ecc.c @@ -115,6 +115,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id) op = ECC_DECODE; dec = readw(ecc->regs + ECC_DECDONE); if (dec & ecc->sectors) { + /* + * Clear decode IRQ status once again to ensure that + * there will be no extra IRQ. + */ + readw(ecc->regs + ECC_DECIRQ_STA); ecc->sectors = 0; complete(&ecc->done); } else { @@ -130,8 +135,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id) } } - writel(0, ecc->regs + ECC_IRQ_REG(op)); - return IRQ_HANDLED; } @@ -307,6 +310,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc) /* disable it */ mtk_ecc_wait_idle(ecc, op); + if (op == ECC_DECODE) + /* + * Clear decode IRQ status in case there is a timeout to wait + * decode IRQ. + */ + readw(ecc->regs + ECC_DECIRQ_STA); writew(0, ecc->regs + ECC_IRQ_REG(op)); writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 53e5e0337c3e..f3be0b2a8869 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -415,7 +415,7 @@ static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq) * waits for completion. 
*/ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) { - pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq); + dev_dbg(host->dev, "send_cmd(host, 0x%x, %d)\n", cmd, useirq); writew(cmd, NFC_V1_V2_FLASH_CMD); writew(NFC_CMD, NFC_V1_V2_CONFIG2); @@ -431,7 +431,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq) udelay(1); } if (max_retries < 0) - pr_debug("%s: RESET failed\n", __func__); + dev_dbg(host->dev, "%s: RESET failed\n", __func__); } else { /* Wait for operation to complete */ wait_op_done(host, useirq); @@ -454,7 +454,7 @@ static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast) * a NAND command. */ static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast) { - pr_debug("send_addr(host, 0x%x %d)\n", addr, islast); + dev_dbg(host->dev, "send_addr(host, 0x%x %d)\n", addr, islast); writew(addr, NFC_V1_V2_FLASH_ADDR); writew(NFC_ADDR, NFC_V1_V2_CONFIG2); @@ -607,7 +607,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat, uint16_t ecc_status = get_ecc_status_v1(host); if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { - pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n"); + dev_dbg(host->dev, "HWECC uncorrectable 2-bit ECC error\n"); return -EBADMSG; } @@ -634,7 +634,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat, do { err = ecc_stat & ecc_bit_mask; if (err > err_limit) { - printk(KERN_WARNING "UnCorrectable RS-ECC Error\n"); + dev_dbg(host->dev, "UnCorrectable RS-ECC Error\n"); return -EBADMSG; } else { ret += err; @@ -642,7 +642,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat, ecc_stat >>= 4; } while (--no_subpages); - pr_debug("%d Symbol Correctable RS-ECC Error\n", ret); + dev_dbg(host->dev, "%d Symbol Correctable RS-ECC Error\n", ret); return ret; } @@ -673,7 +673,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd) host->buf_start++; } - pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start); + dev_dbg(host->dev, "%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start); return ret; } @@ -859,8 +859,7 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr) host->devtype_data->send_addr(host, (page_addr >> 8) & 0xff, true); } else { - /* One more address cycle for higher density devices */ - if (mtd->size >= 0x4000000) { + if (nand_chip->options & NAND_ROW_ADDR_3) { /* paddr_8 - paddr_15 */ host->devtype_data->send_addr(host, (page_addr >> 8) & 0xff, @@ -1212,7 +1211,7 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command, struct nand_chip *nand_chip = mtd_to_nand(mtd); struct mxc_nand_host *host = nand_get_controller_data(nand_chip); - pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", + dev_dbg(host->dev, "mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n", command, column, page_addr); /* Reset command state information */ diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 12edaae17d81..6135d007a068 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -115,7 +115,7 @@ static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section, struct nand_chip *chip = mtd_to_nand(mtd); struct nand_ecc_ctrl *ecc = &chip->ecc; - if (section) + if (section || !ecc->total) return -ERANGE; oobregion->length = ecc->total; @@ -727,8 +727,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, chip->cmd_ctrl(mtd, 
page_addr, ctrl); ctrl &= ~NAND_CTRL_CHANGE; chip->cmd_ctrl(mtd, page_addr >> 8, ctrl); - /* One more address cycle for devices > 32MiB */ - if (chip->chipsize > (32 << 20)) + if (chip->options & NAND_ROW_ADDR_3) chip->cmd_ctrl(mtd, page_addr >> 16, ctrl); } chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE); @@ -854,8 +853,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, chip->cmd_ctrl(mtd, page_addr, ctrl); chip->cmd_ctrl(mtd, page_addr >> 8, NAND_NCE | NAND_ALE); - /* One more address cycle for devices > 128MiB */ - if (chip->chipsize > (128 << 20)) + if (chip->options & NAND_ROW_ADDR_3) chip->cmd_ctrl(mtd, page_addr >> 16, NAND_NCE | NAND_ALE); } @@ -1246,6 +1244,7 @@ int nand_reset(struct nand_chip *chip, int chipnr) return 0; } +EXPORT_SYMBOL_GPL(nand_reset); /** * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data @@ -2799,15 +2798,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const uint8_t *buf) { struct nand_chip *chip = mtd_to_nand(mtd); + int chipnr = (int)(to >> chip->chip_shift); struct mtd_oob_ops ops; int ret; - /* Wait for the device to get ready */ - panic_nand_wait(mtd, chip, 400); - /* Grab the device */ panic_nand_get_device(chip, mtd, FL_WRITING); + chip->select_chip(mtd, chipnr); + + /* Wait for the device to get ready */ + panic_nand_wait(mtd, chip, 400); + memset(&ops, 0, sizeof(ops)); ops.len = len; ops.datbuf = (uint8_t *)buf; @@ -3999,6 +4001,9 @@ ident_done: chip->chip_shift += 32 - 1; } + if (chip->chip_shift - chip->page_shift > 16) + chip->options |= NAND_ROW_ADDR_3; + chip->badblockbits = 8; chip->erase = single_erase; @@ -4700,6 +4705,19 @@ int nand_scan_tail(struct mtd_info *mtd) mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops); break; default: + /* + * Expose the whole OOB area to users if ECC_NONE + * is passed. We could do that for all kind of + * ->oobsize, but we must keep the old large/small + * page with ECC layout when ->oobsize <= 128 for + * compatibility reasons. + */ + if (ecc->mode == NAND_ECC_NONE) { + mtd_set_ooblayout(mtd, + &nand_ooblayout_lp_ops); + break; + } + WARN(1, "No oob scheme defined for oobsize %d\n", mtd->oobsize); ret = -EINVAL; diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 246b4393118e..44322a363ba5 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -520,11 +520,16 @@ static int nandsim_debugfs_create(struct nandsim *dev) struct dentry *root = nsmtd->dbg.dfs_dir; struct dentry *dent; - if (!IS_ENABLED(CONFIG_DEBUG_FS)) + /* + * Just skip debugfs initialization when the debugfs directory is + * missing. 
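The NAND_ROW_ADDR_3 flag set in the nand_base.c hunk above replaces the per-driver chipsize comparisons: a third row-address byte is needed once the chip holds more than 2^16 pages, i.e. when chip_shift - page_shift > 16. A small standalone check with example geometries (the sizes are illustrative, not taken from any ID table):

#include <stdio.h>

static int needs_three_row_cycles(unsigned int chip_shift,
				  unsigned int page_shift)
{
	/* same rule as the nand_base.c hunk: more than 65536 pages */
	return (chip_shift - page_shift) > 16;
}

int main(void)
{
	/* 128MiB chip (2^27), 2KiB pages (2^11): 65536 pages -> 2 cycles */
	printf("128MiB/2KiB: %d\n", needs_three_row_cycles(27, 11));
	/* 256MiB chip (2^28), 2KiB pages (2^11): 131072 pages -> 3 cycles */
	printf("256MiB/2KiB: %d\n", needs_three_row_cycles(28, 11));
	return 0;
}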
+ */ + if (IS_ERR_OR_NULL(root)) { + if (IS_ENABLED(CONFIG_DEBUG_FS) && + !IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) + NS_WARN("CONFIG_MTD_PARTITIONED_MASTER must be enabled to expose debugfs stuff\n"); return 0; - - if (IS_ERR_OR_NULL(root)) - return -1; + } dent = debugfs_create_file("nandsim_wear_report", S_IRUSR, root, dev, &dfs_fops); diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c index 7bb4d2ea9342..af5b32c9a791 100644 --- a/drivers/mtd/nand/nuc900_nand.c +++ b/drivers/mtd/nand/nuc900_nand.c @@ -154,7 +154,7 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command, if (page_addr != -1) { write_addr_reg(nand, page_addr); - if (chip->chipsize > (128 << 20)) { + if (chip->options & NAND_ROW_ADDR_3) { write_addr_reg(nand, page_addr >> 8); write_addr_reg(nand, page_addr >> 16 | ENDADDR); } else { diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 54540c8fa1a2..dad438c4906a 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2, 0x97, 0x79, 0xe5, 0x24, 0xb5}; /** - * omap_calculate_ecc_bch - Generate bytes of ECC bytes + * _omap_calculate_ecc_bch - Generate ECC bytes for one sector * @mtd: MTD device structure * @dat: The pointer to data on which ecc is computed * @ecc_code: The ecc_code buffer + * @i: The sector number (for a multi sector page) * - * Support calculating of BCH4/8 ecc vectors for the page + * Support calculating of BCH4/8/16 ECC vectors for one sector + * within a page. Sector number is in @i. */ -static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd, - const u_char *dat, u_char *ecc_calc) +static int _omap_calculate_ecc_bch(struct mtd_info *mtd, + const u_char *dat, u_char *ecc_calc, int i) { struct omap_nand_info *info = mtd_to_omap(mtd); int eccbytes = info->nand.ecc.bytes; struct gpmc_nand_regs *gpmc_regs = &info->reg; u8 *ecc_code; - unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4; + unsigned long bch_val1, bch_val2, bch_val3, bch_val4; u32 val; - int i, j; + int j; + + ecc_code = ecc_calc; + switch (info->ecc_opt) { + case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: + case OMAP_ECC_BCH8_CODE_HW: + bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); + bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); + bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]); + bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]); + *ecc_code++ = (bch_val4 & 0xFF); + *ecc_code++ = ((bch_val3 >> 24) & 0xFF); + *ecc_code++ = ((bch_val3 >> 16) & 0xFF); + *ecc_code++ = ((bch_val3 >> 8) & 0xFF); + *ecc_code++ = (bch_val3 & 0xFF); + *ecc_code++ = ((bch_val2 >> 24) & 0xFF); + *ecc_code++ = ((bch_val2 >> 16) & 0xFF); + *ecc_code++ = ((bch_val2 >> 8) & 0xFF); + *ecc_code++ = (bch_val2 & 0xFF); + *ecc_code++ = ((bch_val1 >> 24) & 0xFF); + *ecc_code++ = ((bch_val1 >> 16) & 0xFF); + *ecc_code++ = ((bch_val1 >> 8) & 0xFF); + *ecc_code++ = (bch_val1 & 0xFF); + break; + case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: + case OMAP_ECC_BCH4_CODE_HW: + bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); + bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); + *ecc_code++ = ((bch_val2 >> 12) & 0xFF); + *ecc_code++ = ((bch_val2 >> 4) & 0xFF); + *ecc_code++ = ((bch_val2 & 0xF) << 4) | + ((bch_val1 >> 28) & 0xF); + *ecc_code++ = ((bch_val1 >> 20) & 0xFF); + *ecc_code++ = ((bch_val1 >> 12) & 0xFF); + *ecc_code++ = ((bch_val1 >> 4) & 0xFF); + *ecc_code++ = ((bch_val1 & 0xF) << 4); + break; + case OMAP_ECC_BCH16_CODE_HW: + 
val = readl(gpmc_regs->gpmc_bch_result6[i]); + ecc_code[0] = ((val >> 8) & 0xFF); + ecc_code[1] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result5[i]); + ecc_code[2] = ((val >> 24) & 0xFF); + ecc_code[3] = ((val >> 16) & 0xFF); + ecc_code[4] = ((val >> 8) & 0xFF); + ecc_code[5] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result4[i]); + ecc_code[6] = ((val >> 24) & 0xFF); + ecc_code[7] = ((val >> 16) & 0xFF); + ecc_code[8] = ((val >> 8) & 0xFF); + ecc_code[9] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result3[i]); + ecc_code[10] = ((val >> 24) & 0xFF); + ecc_code[11] = ((val >> 16) & 0xFF); + ecc_code[12] = ((val >> 8) & 0xFF); + ecc_code[13] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result2[i]); + ecc_code[14] = ((val >> 24) & 0xFF); + ecc_code[15] = ((val >> 16) & 0xFF); + ecc_code[16] = ((val >> 8) & 0xFF); + ecc_code[17] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result1[i]); + ecc_code[18] = ((val >> 24) & 0xFF); + ecc_code[19] = ((val >> 16) & 0xFF); + ecc_code[20] = ((val >> 8) & 0xFF); + ecc_code[21] = ((val >> 0) & 0xFF); + val = readl(gpmc_regs->gpmc_bch_result0[i]); + ecc_code[22] = ((val >> 24) & 0xFF); + ecc_code[23] = ((val >> 16) & 0xFF); + ecc_code[24] = ((val >> 8) & 0xFF); + ecc_code[25] = ((val >> 0) & 0xFF); + break; + default: + return -EINVAL; + } + + /* ECC scheme specific syndrome customizations */ + switch (info->ecc_opt) { + case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: + /* Add constant polynomial to remainder, so that + * ECC of blank pages results in 0x0 on reading back + */ + for (j = 0; j < eccbytes; j++) + ecc_calc[j] ^= bch4_polynomial[j]; + break; + case OMAP_ECC_BCH4_CODE_HW: + /* Set 8th ECC byte as 0x0 for ROM compatibility */ + ecc_calc[eccbytes - 1] = 0x0; + break; + case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: + /* Add constant polynomial to remainder, so that + * ECC of blank pages results in 0x0 on reading back + */ + for (j = 0; j < eccbytes; j++) + ecc_calc[j] ^= bch8_polynomial[j]; + break; + case OMAP_ECC_BCH8_CODE_HW: + /* Set 14th ECC byte as 0x0 for ROM compatibility */ + ecc_calc[eccbytes - 1] = 0x0; + break; + case OMAP_ECC_BCH16_CODE_HW: + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction + * @mtd: MTD device structure + * @dat: The pointer to data on which ecc is computed + * @ecc_code: The ecc_code buffer + * + * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used + * when SW based correction is required as ECC is required for one sector + * at a time. + */ +static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd, + const u_char *dat, u_char *ecc_calc) +{ + return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0); +} + +/** + * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors + * @mtd: MTD device structure + * @dat: The pointer to data on which ecc is computed + * @ecc_code: The ecc_code buffer + * + * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go. 
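omap_calculate_ecc_bch_multi() below derives the sector count from the GPMC ECC configuration register and advances the ECC buffer by eccbytes per sector. A quick worked example of that arithmetic; the register value is made up, and the 14-byte figure is the BCH8 case (13 bytes plus the ROM-compatibility zero byte mentioned above):

#include <stdio.h>

int main(void)
{
	unsigned int gpmc_ecc_config = 0x3 << 4;	/* field [6:4] = 3 */
	unsigned int nsectors = ((gpmc_ecc_config >> 4) & 0x7) + 1;	/* 4 */
	unsigned int eccbytes = 14;	/* BCH8: 13 + ROM-compat zero byte */

	/* prints: 4 sectors -> 56 ECC bytes per page */
	printf("%u sectors -> %u ECC bytes per page\n",
	       nsectors, nsectors * eccbytes);
	return 0;
}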
+ */ +static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd, + const u_char *dat, u_char *ecc_calc) +{ + struct omap_nand_info *info = mtd_to_omap(mtd); + int eccbytes = info->nand.ecc.bytes; + unsigned long nsectors; + int i, ret; nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1; for (i = 0; i < nsectors; i++) { - ecc_code = ecc_calc; - switch (info->ecc_opt) { - case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: - case OMAP_ECC_BCH8_CODE_HW: - bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); - bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); - bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]); - bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]); - *ecc_code++ = (bch_val4 & 0xFF); - *ecc_code++ = ((bch_val3 >> 24) & 0xFF); - *ecc_code++ = ((bch_val3 >> 16) & 0xFF); - *ecc_code++ = ((bch_val3 >> 8) & 0xFF); - *ecc_code++ = (bch_val3 & 0xFF); - *ecc_code++ = ((bch_val2 >> 24) & 0xFF); - *ecc_code++ = ((bch_val2 >> 16) & 0xFF); - *ecc_code++ = ((bch_val2 >> 8) & 0xFF); - *ecc_code++ = (bch_val2 & 0xFF); - *ecc_code++ = ((bch_val1 >> 24) & 0xFF); - *ecc_code++ = ((bch_val1 >> 16) & 0xFF); - *ecc_code++ = ((bch_val1 >> 8) & 0xFF); - *ecc_code++ = (bch_val1 & 0xFF); - break; - case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: - case OMAP_ECC_BCH4_CODE_HW: - bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]); - bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]); - *ecc_code++ = ((bch_val2 >> 12) & 0xFF); - *ecc_code++ = ((bch_val2 >> 4) & 0xFF); - *ecc_code++ = ((bch_val2 & 0xF) << 4) | - ((bch_val1 >> 28) & 0xF); - *ecc_code++ = ((bch_val1 >> 20) & 0xFF); - *ecc_code++ = ((bch_val1 >> 12) & 0xFF); - *ecc_code++ = ((bch_val1 >> 4) & 0xFF); - *ecc_code++ = ((bch_val1 & 0xF) << 4); - break; - case OMAP_ECC_BCH16_CODE_HW: - val = readl(gpmc_regs->gpmc_bch_result6[i]); - ecc_code[0] = ((val >> 8) & 0xFF); - ecc_code[1] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result5[i]); - ecc_code[2] = ((val >> 24) & 0xFF); - ecc_code[3] = ((val >> 16) & 0xFF); - ecc_code[4] = ((val >> 8) & 0xFF); - ecc_code[5] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result4[i]); - ecc_code[6] = ((val >> 24) & 0xFF); - ecc_code[7] = ((val >> 16) & 0xFF); - ecc_code[8] = ((val >> 8) & 0xFF); - ecc_code[9] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result3[i]); - ecc_code[10] = ((val >> 24) & 0xFF); - ecc_code[11] = ((val >> 16) & 0xFF); - ecc_code[12] = ((val >> 8) & 0xFF); - ecc_code[13] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result2[i]); - ecc_code[14] = ((val >> 24) & 0xFF); - ecc_code[15] = ((val >> 16) & 0xFF); - ecc_code[16] = ((val >> 8) & 0xFF); - ecc_code[17] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result1[i]); - ecc_code[18] = ((val >> 24) & 0xFF); - ecc_code[19] = ((val >> 16) & 0xFF); - ecc_code[20] = ((val >> 8) & 0xFF); - ecc_code[21] = ((val >> 0) & 0xFF); - val = readl(gpmc_regs->gpmc_bch_result0[i]); - ecc_code[22] = ((val >> 24) & 0xFF); - ecc_code[23] = ((val >> 16) & 0xFF); - ecc_code[24] = ((val >> 8) & 0xFF); - ecc_code[25] = ((val >> 0) & 0xFF); - break; - default: - return -EINVAL; - } - - /* ECC scheme specific syndrome customizations */ - switch (info->ecc_opt) { - case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: - /* Add constant polynomial to remainder, so that - * ECC of blank pages results in 0x0 on reading back */ - for (j = 0; j < eccbytes; j++) - ecc_calc[j] ^= bch4_polynomial[j]; - break; - case OMAP_ECC_BCH4_CODE_HW: - /* Set 8th ECC byte as 0x0 for ROM compatibility */ - ecc_calc[eccbytes - 1] = 0x0; - break; - case 
OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: - /* Add constant polynomial to remainder, so that - * ECC of blank pages results in 0x0 on reading back */ - for (j = 0; j < eccbytes; j++) - ecc_calc[j] ^= bch8_polynomial[j]; - break; - case OMAP_ECC_BCH8_CODE_HW: - /* Set 14th ECC byte as 0x0 for ROM compatibility */ - ecc_calc[eccbytes - 1] = 0x0; - break; - case OMAP_ECC_BCH16_CODE_HW: - break; - default: - return -EINVAL; - } + ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i); + if (ret) + return ret; - ecc_calc += eccbytes; + ecc_calc += eccbytes; } return 0; @@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, chip->write_buf(mtd, buf, mtd->writesize); /* Update ecc vector from GPMC result registers */ - chip->ecc.calculate(mtd, buf, &ecc_calc[0]); + omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]); ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, chip->ecc.total); @@ -1509,6 +1552,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip, } /** + * omap_write_subpage_bch - BCH hardware ECC based subpage write + * @mtd: mtd info structure + * @chip: nand chip info structure + * @offset: column address of subpage within the page + * @data_len: data length + * @buf: data buffer + * @oob_required: must write chip->oob_poi to OOB + * @page: page number to write + * + * OMAP optimized subpage write method. + */ +static int omap_write_subpage_bch(struct mtd_info *mtd, + struct nand_chip *chip, u32 offset, + u32 data_len, const u8 *buf, + int oob_required, int page) +{ + u8 *ecc_calc = chip->buffers->ecccalc; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + int ecc_steps = chip->ecc.steps; + u32 start_step = offset / ecc_size; + u32 end_step = (offset + data_len - 1) / ecc_size; + int step, ret = 0; + + /* + * Write entire page at one go as it would be optimal + * as ECC is calculated by hardware. + * ECC is calculated for all subpages but we choose + * only what we want. 
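A worked example of the start_step/end_step bounds computed above, with page and subpage geometry chosen purely for illustration: a 2048-byte page split into four 512-byte ECC steps, writing 512 bytes at column 1024, touches only step 2, so every other step gets 0xff-padded ECC in the masking loop that follows.

#include <stdio.h>

int main(void)
{
	unsigned int ecc_size = 512, ecc_steps = 4;	/* 2048-byte page */
	unsigned int offset = 1024, data_len = 512;	/* subpage being written */
	unsigned int start_step = offset / ecc_size;			/* 2 */
	unsigned int end_step = (offset + data_len - 1) / ecc_size;	/* 2 */
	unsigned int step;

	for (step = 0; step < ecc_steps; step++)
		printf("step %u: %s\n", step,
		       (step < start_step || step > end_step) ?
		       "0xff padding" : "real ECC");
	return 0;
}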
+ */ + + /* Enable GPMC ECC engine */ + chip->ecc.hwctl(mtd, NAND_ECC_WRITE); + + /* Write data */ + chip->write_buf(mtd, buf, mtd->writesize); + + for (step = 0; step < ecc_steps; step++) { + /* mask ECC of un-touched subpages by padding 0xFF */ + if (step < start_step || step > end_step) + memset(ecc_calc, 0xff, ecc_bytes); + else + ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step); + + if (ret) + return ret; + + buf += ecc_size; + ecc_calc += ecc_bytes; + } + + /* copy calculated ECC for whole page to chip->buffer->oob */ + /* this include masked-value(0xFF) for unwritten subpages */ + ecc_calc = chip->buffers->ecccalc; + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0, + chip->ecc.total); + if (ret) + return ret; + + /* write OOB buffer to NAND device */ + chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); + + return 0; +} + +/** * omap_read_page_bch - BCH ecc based page read function for entire page * @mtd: mtd info structure * @chip: nand chip info structure @@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip, chip->ecc.total); /* Calculate ecc bytes */ - chip->ecc.calculate(mtd, buf, ecc_calc); + omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc); ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0, chip->ecc.total); @@ -1588,8 +1697,7 @@ static bool is_elm_present(struct omap_nand_info *info, return true; } -static bool omap2_nand_ecc_check(struct omap_nand_info *info, - struct omap_nand_platform_data *pdata) +static bool omap2_nand_ecc_check(struct omap_nand_info *info) { bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm; @@ -1804,7 +1912,6 @@ static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = { static int omap_nand_probe(struct platform_device *pdev) { struct omap_nand_info *info; - struct omap_nand_platform_data *pdata = NULL; struct mtd_info *mtd; struct nand_chip *nand_chip; int err; @@ -1821,29 +1928,10 @@ static int omap_nand_probe(struct platform_device *pdev) info->pdev = pdev; - if (dev->of_node) { - if (omap_get_dt_info(dev, info)) - return -EINVAL; - } else { - pdata = dev_get_platdata(&pdev->dev); - if (!pdata) { - dev_err(&pdev->dev, "platform data missing\n"); - return -EINVAL; - } - - info->gpmc_cs = pdata->cs; - info->reg = pdata->reg; - info->ecc_opt = pdata->ecc_opt; - if (pdata->dev_ready) - dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n"); - - info->xfer_type = pdata->xfer_type; - info->devsize = pdata->devsize; - info->elm_of_node = pdata->elm_of_node; - info->flash_bbt = pdata->flash_bbt; - } + err = omap_get_dt_info(dev, info); + if (err) + return err; - platform_set_drvdata(pdev, info); info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs); if (!info->ops) { dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n"); @@ -2002,7 +2090,7 @@ static int omap_nand_probe(struct platform_device *pdev) goto return_error; } - if (!omap2_nand_ecc_check(info, pdata)) { + if (!omap2_nand_ecc_check(info)) { err = -EINVAL; goto return_error; } @@ -2044,7 +2132,7 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 4; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = nand_bch_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; + nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw; mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); /* Reserve one byte for the OMAP marker */ oobbytes_per_step = nand_chip->ecc.bytes + 1; @@ -2066,9 +2154,9 @@ static int omap_nand_probe(struct platform_device *pdev) 
nand_chip->ecc.strength = 4; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = omap_elm_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.write_page = omap_write_page_bch; + nand_chip->ecc.write_subpage = omap_write_subpage_bch; mtd_set_ooblayout(mtd, &omap_ooblayout_ops); oobbytes_per_step = nand_chip->ecc.bytes; @@ -2087,7 +2175,7 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 8; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = nand_bch_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; + nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw; mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops); /* Reserve one byte for the OMAP marker */ oobbytes_per_step = nand_chip->ecc.bytes + 1; @@ -2109,9 +2197,9 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 8; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = omap_elm_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.write_page = omap_write_page_bch; + nand_chip->ecc.write_subpage = omap_write_subpage_bch; mtd_set_ooblayout(mtd, &omap_ooblayout_ops); oobbytes_per_step = nand_chip->ecc.bytes; @@ -2131,9 +2219,9 @@ static int omap_nand_probe(struct platform_device *pdev) nand_chip->ecc.strength = 16; nand_chip->ecc.hwctl = omap_enable_hwecc_bch; nand_chip->ecc.correct = omap_elm_correct_data; - nand_chip->ecc.calculate = omap_calculate_ecc_bch; nand_chip->ecc.read_page = omap_read_page_bch; nand_chip->ecc.write_page = omap_write_page_bch; + nand_chip->ecc.write_subpage = omap_write_subpage_bch; mtd_set_ooblayout(mtd, &omap_ooblayout_ops); oobbytes_per_step = nand_chip->ecc.bytes; @@ -2167,10 +2255,9 @@ scan_tail: if (err) goto return_error; - if (dev->of_node) - mtd_device_register(mtd, NULL, 0); - else - mtd_device_register(mtd, pdata->parts, pdata->nr_parts); + err = mtd_device_register(mtd, NULL, 0); + if (err) + goto return_error; platform_set_drvdata(pdev, mtd); diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 85cff68643e0..90b9a9ccbe60 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -30,6 +30,8 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_data/mtd-nand-pxa3xx.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> #define CHIP_DELAY_TIMEOUT msecs_to_jiffies(200) #define NAND_STOP_DELAY msecs_to_jiffies(40) @@ -45,6 +47,10 @@ */ #define INIT_BUFFER_SIZE 2048 +/* System control register and bit to enable NAND on some SoCs */ +#define GENCONF_SOC_DEVICE_MUX 0x208 +#define GENCONF_SOC_DEVICE_MUX_NFC_EN BIT(0) + /* registers and bit definitions */ #define NDCR (0x00) /* Control register */ #define NDTR0CS0 (0x04) /* Timing Parameter 0 for CS0 */ @@ -174,6 +180,7 @@ enum { enum pxa3xx_nand_variant { PXA3XX_NAND_VARIANT_PXA, PXA3XX_NAND_VARIANT_ARMADA370, + PXA3XX_NAND_VARIANT_ARMADA_8K, }; struct pxa3xx_nand_host { @@ -425,6 +432,10 @@ static const struct of_device_id pxa3xx_nand_dt_ids[] = { .compatible = "marvell,armada370-nand", .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370, }, + { + .compatible = "marvell,armada-8k-nand", + .data = (void *)PXA3XX_NAND_VARIANT_ARMADA_8K, + }, {} }; MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids); @@ -825,7 +836,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid) info->retcode = ERR_UNCORERR; if 
(status & NDSR_CORERR) { info->retcode = ERR_CORERR; - if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 && + if ((info->variant == PXA3XX_NAND_VARIANT_ARMADA370 || + info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) && info->ecc_bch) info->ecc_err_cnt = NDSR_ERR_CNT(status); else @@ -888,7 +900,8 @@ static irqreturn_t pxa3xx_nand_irq(int irq, void *devid) nand_writel(info, NDCB0, info->ndcb2); /* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */ - if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) + if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 || + info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) nand_writel(info, NDCB0, info->ndcb3); } @@ -1671,7 +1684,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) chip->options |= NAND_BUSWIDTH_16; /* Device detection must be done with ECC disabled */ - if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) + if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 || + info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) nand_writel(info, NDECCCTRL, 0x0); if (pdata->flash_bbt) @@ -1709,7 +1723,8 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) * (aka splitted) command handling, */ if (mtd->writesize > PAGE_CHUNK_SIZE) { - if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) { + if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 || + info->variant == PXA3XX_NAND_VARIANT_ARMADA_8K) { chip->cmdfunc = nand_cmdfunc_extended; } else { dev_err(&info->pdev->dev, @@ -1928,6 +1943,24 @@ static int pxa3xx_nand_probe_dt(struct platform_device *pdev) if (!of_id) return 0; + /* + * Some SoCs like A7k/A8k need to enable manually the NAND + * controller to avoid being bootloader dependent. This is done + * through the use of a single bit in the System Functions registers. + */ + if (pxa3xx_nand_get_variant(pdev) == PXA3XX_NAND_VARIANT_ARMADA_8K) { + struct regmap *sysctrl_base = syscon_regmap_lookup_by_phandle( + pdev->dev.of_node, "marvell,system-controller"); + u32 reg; + + if (IS_ERR(sysctrl_base)) + return PTR_ERR(sysctrl_base); + + regmap_read(sysctrl_base, GENCONF_SOC_DEVICE_MUX, ®); + reg |= GENCONF_SOC_DEVICE_MUX_NFC_EN; + regmap_write(sysctrl_base, GENCONF_SOC_DEVICE_MUX, reg); + } + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return -ENOMEM; diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c index 3baddfc997d1..2656c1ac5646 100644 --- a/drivers/mtd/nand/qcom_nandc.c +++ b/drivers/mtd/nand/qcom_nandc.c @@ -22,6 +22,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/delay.h> +#include <linux/dma/qcom_bam_dma.h> /* NANDc reg offsets */ #define NAND_FLASH_CMD 0x00 @@ -199,6 +200,15 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \ */ #define dev_cmd_reg_addr(nandc, reg) ((nandc)->props->dev_cmd_reg_start + (reg)) +/* Returns the NAND register physical address */ +#define nandc_reg_phys(chip, offset) ((chip)->base_phys + (offset)) + +/* Returns the dma address for reg read buffer */ +#define reg_buf_dma_addr(chip, vaddr) \ + ((chip)->reg_read_dma + \ + ((uint8_t *)(vaddr) - (uint8_t *)(chip)->reg_read_buf)) + +#define QPIC_PER_CW_CMD_ELEMENTS 32 #define QPIC_PER_CW_CMD_SGL 32 #define QPIC_PER_CW_DATA_SGL 8 @@ -221,8 +231,13 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \ /* * This data type corresponds to the BAM transaction which will be used for all * NAND transfers. 
+ * @bam_ce - the array of BAM command elements * @cmd_sgl - sgl for NAND BAM command pipe * @data_sgl - sgl for NAND BAM consumer/producer pipe + * @bam_ce_pos - the index in bam_ce which is available for next sgl + * @bam_ce_start - the index in bam_ce which marks the start position ce + * for current sgl. It will be used for size calculation + * for current sgl * @cmd_sgl_pos - current index in command sgl. * @cmd_sgl_start - start index in command sgl. * @tx_sgl_pos - current index in data sgl for tx. @@ -231,8 +246,11 @@ nandc_set_reg(nandc, NAND_READ_LOCATION_##reg, \ * @rx_sgl_start - start index in data sgl for rx. */ struct bam_transaction { + struct bam_cmd_element *bam_ce; struct scatterlist *cmd_sgl; struct scatterlist *data_sgl; + u32 bam_ce_pos; + u32 bam_ce_start; u32 cmd_sgl_pos; u32 cmd_sgl_start; u32 tx_sgl_pos; @@ -307,7 +325,8 @@ struct nandc_regs { * controller * @dev: parent device * @base: MMIO base - * @base_dma: physical base address of controller registers + * @base_phys: physical base address of controller registers + * @base_dma: dma base address of controller registers * @core_clk: controller clock * @aon_clk: another controller clock * @@ -340,6 +359,7 @@ struct qcom_nand_controller { struct device *dev; void __iomem *base; + phys_addr_t base_phys; dma_addr_t base_dma; struct clk *core_clk; @@ -462,7 +482,8 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc) bam_txn_size = sizeof(*bam_txn) + num_cw * - ((sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) + + ((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) + + (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) + (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL)); bam_txn_buf = devm_kzalloc(nandc->dev, bam_txn_size, GFP_KERNEL); @@ -472,6 +493,10 @@ alloc_bam_transaction(struct qcom_nand_controller *nandc) bam_txn = bam_txn_buf; bam_txn_buf += sizeof(*bam_txn); + bam_txn->bam_ce = bam_txn_buf; + bam_txn_buf += + sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS * num_cw; + bam_txn->cmd_sgl = bam_txn_buf; bam_txn_buf += sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL * num_cw; @@ -489,6 +514,8 @@ static void clear_bam_transaction(struct qcom_nand_controller *nandc) if (!nandc->props->is_bam) return; + bam_txn->bam_ce_pos = 0; + bam_txn->bam_ce_start = 0; bam_txn->cmd_sgl_pos = 0; bam_txn->cmd_sgl_start = 0; bam_txn->tx_sgl_pos = 0; @@ -734,6 +761,66 @@ static int prepare_bam_async_desc(struct qcom_nand_controller *nandc, } /* + * Prepares the command descriptor for BAM DMA which will be used for NAND + * register reads and writes. The command descriptor requires the command + * to be formed in command element type so this function uses the command + * element from bam transaction ce array and fills the same with required + * data. A single SGL can contain multiple command elements so + * NAND_BAM_NEXT_SGL will be used for starting the separate SGL + * after the current command element. 
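To make the grouping described above concrete, here is a minimal userspace sketch of the bookkeeping: command elements accumulate in one flat array, and when the caller asks for a new SGL (the NAND_BAM_NEXT_SGL case) the span between the recorded start and the current position is handed to a single scatterlist entry. The element layout and register offsets below are stand-ins, not the real BAM definitions.

#include <stdio.h>

/* stand-in for a BAM command element; the real layout lives in the DMA driver */
struct ce {
	unsigned int addr, cmd, data, mask;
};

#define MAX_CE	32

static struct ce ce_buf[MAX_CE];
static unsigned int ce_pos, ce_start;

/* queue one register access as a command element */
static void queue_reg(unsigned int reg_addr)
{
	ce_buf[ce_pos].addr = reg_addr;
	ce_pos++;
}

/* close the current SGL entry, as NAND_BAM_NEXT_SGL does */
static void next_sgl(void)
{
	unsigned int bytes = (ce_pos - ce_start) * sizeof(struct ce);

	printf("sgl entry: %u command elements, %u bytes\n",
	       ce_pos - ce_start, bytes);
	ce_start = ce_pos;	/* the next SGL starts after what was just consumed */
}

int main(void)
{
	queue_reg(0x20);	/* hypothetical register offsets */
	queue_reg(0x24);
	queue_reg(0x28);
	next_sgl();		/* one SGL entry covering three elements */
	queue_reg(0x2c);
	next_sgl();		/* a second entry with a single element */
	return 0;
}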
+ */ +static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read, + int reg_off, const void *vaddr, + int size, unsigned int flags) +{ + int bam_ce_size; + int i, ret; + struct bam_cmd_element *bam_ce_buffer; + struct bam_transaction *bam_txn = nandc->bam_txn; + + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos]; + + /* fill the command desc */ + for (i = 0; i < size; i++) { + if (read) + bam_prep_ce(&bam_ce_buffer[i], + nandc_reg_phys(nandc, reg_off + 4 * i), + BAM_READ_COMMAND, + reg_buf_dma_addr(nandc, + (__le32 *)vaddr + i)); + else + bam_prep_ce_le32(&bam_ce_buffer[i], + nandc_reg_phys(nandc, reg_off + 4 * i), + BAM_WRITE_COMMAND, + *((__le32 *)vaddr + i)); + } + + bam_txn->bam_ce_pos += size; + + /* use the separate sgl after this command */ + if (flags & NAND_BAM_NEXT_SGL) { + bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start]; + bam_ce_size = (bam_txn->bam_ce_pos - + bam_txn->bam_ce_start) * + sizeof(struct bam_cmd_element); + sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos], + bam_ce_buffer, bam_ce_size); + bam_txn->cmd_sgl_pos++; + bam_txn->bam_ce_start = bam_txn->bam_ce_pos; + + if (flags & NAND_BAM_NWD) { + ret = prepare_bam_async_desc(nandc, nandc->cmd_chan, + DMA_PREP_FENCE | + DMA_PREP_CMD); + if (ret) + return ret; + } + } + + return 0; +} + +/* * Prepares the data descriptor for BAM DMA which will be used for NAND * data reads and writes. */ @@ -851,19 +938,22 @@ static int read_reg_dma(struct qcom_nand_controller *nandc, int first, { bool flow_control = false; void *vaddr; - int size; - if (first == NAND_READ_ID || first == NAND_FLASH_STATUS) - flow_control = true; + vaddr = nandc->reg_read_buf + nandc->reg_read_pos; + nandc->reg_read_pos += num_regs; if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1) first = dev_cmd_reg_addr(nandc, first); - size = num_regs * sizeof(u32); - vaddr = nandc->reg_read_buf + nandc->reg_read_pos; - nandc->reg_read_pos += num_regs; + if (nandc->props->is_bam) + return prep_bam_dma_desc_cmd(nandc, true, first, vaddr, + num_regs, flags); + + if (first == NAND_READ_ID || first == NAND_FLASH_STATUS) + flow_control = true; - return prep_adm_dma_desc(nandc, true, first, vaddr, size, flow_control); + return prep_adm_dma_desc(nandc, true, first, vaddr, + num_regs * sizeof(u32), flow_control); } /* @@ -880,13 +970,9 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first, bool flow_control = false; struct nandc_regs *regs = nandc->regs; void *vaddr; - int size; vaddr = offset_to_nandc_reg(regs, first); - if (first == NAND_FLASH_CMD) - flow_control = true; - if (first == NAND_ERASED_CW_DETECT_CFG) { if (flags & NAND_ERASED_CW_SET) vaddr = ®s->erased_cw_detect_cfg_set; @@ -903,10 +989,15 @@ static int write_reg_dma(struct qcom_nand_controller *nandc, int first, if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD) first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD); - size = num_regs * sizeof(u32); + if (nandc->props->is_bam) + return prep_bam_dma_desc_cmd(nandc, false, first, vaddr, + num_regs, flags); + + if (first == NAND_FLASH_CMD) + flow_control = true; - return prep_adm_dma_desc(nandc, false, first, vaddr, size, - flow_control); + return prep_adm_dma_desc(nandc, false, first, vaddr, + num_regs * sizeof(u32), flow_control); } /* @@ -1170,7 +1261,8 @@ static int submit_descs(struct qcom_nand_controller *nandc) } if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) { - r = prepare_bam_async_desc(nandc, nandc->cmd_chan, 0); + r = prepare_bam_async_desc(nandc, nandc->cmd_chan, + 
DMA_PREP_CMD); if (r) return r; } @@ -2705,6 +2797,7 @@ static int qcom_nandc_probe(struct platform_device *pdev) if (IS_ERR(nandc->base)) return PTR_ERR(nandc->base); + nandc->base_phys = res->start; nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start); nandc->core_clk = devm_clk_get(dev, "core"); diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c index e7f3c98487e6..3c5008a4f5f3 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c @@ -1094,14 +1094,11 @@ MODULE_DEVICE_TABLE(of, of_flctl_match); static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev) { - const struct of_device_id *match; - struct flctl_soc_config *config; + const struct flctl_soc_config *config; struct sh_flctl_platform_data *pdata; - match = of_match_device(of_flctl_match, dev); - if (match) - config = (struct flctl_soc_config *)match->data; - else { + config = of_device_get_match_data(dev); + if (!config) { dev_err(dev, "%s: no OF configuration attached\n", __func__); return NULL; } diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig index d206b3c533bc..ee5ab994132f 100644 --- a/drivers/mtd/parsers/Kconfig +++ b/drivers/mtd/parsers/Kconfig @@ -6,3 +6,11 @@ config MTD_PARSER_TRX may contain up to 3/4 partitions (depending on the version). This driver will parse TRX header and report at least two partitions: kernel and rootfs. + +config MTD_SHARPSL_PARTS + tristate "Sharp SL Series NAND flash partition parser" + depends on MTD_NAND_SHARPSL || MTD_NAND_TMIO || COMPILE_TEST + help + This provides the read-only FTL logic necessary to read the partition + table from the NAND flash of Sharp SL Series (Zaurus) and the MTD + partition parser using this code. diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile index 4d9024e0be3b..5b1bcc3d90d9 100644 --- a/drivers/mtd/parsers/Makefile +++ b/drivers/mtd/parsers/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o +obj-$(CONFIG_MTD_SHARPSL_PARTS) += sharpslpart.o diff --git a/drivers/mtd/parsers/sharpslpart.c b/drivers/mtd/parsers/sharpslpart.c new file mode 100644 index 000000000000..5fe0079ea5ed --- /dev/null +++ b/drivers/mtd/parsers/sharpslpart.c @@ -0,0 +1,398 @@ +/* + * sharpslpart.c - MTD partition parser for NAND flash using the SHARP FTL + * for logical addressing, as used on the PXA models of the SHARP SL Series. + * + * Copyright (C) 2017 Andrea Adami <[email protected]> + * + * Based on SHARP GPL 2.4 sources: + * http://support.ezaurus.com/developer/source/source_dl.asp + * drivers/mtd/nand/sharp_sl_logical.c + * linux/include/asm-arm/sharp_nand_logical.h + * + * Copyright (C) 2002 SHARP + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
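Before the includes, a short illustration of what the "logical addressing" mentioned in the header means in practice: the parser first builds a logical-to-physical block table from the OOB data, then converts a logical offset into a physical block plus an offset inside that block before reading. The table contents and sizes below are invented; only the arithmetic mirrors the parser, which additionally refuses reads that cross an erase-block boundary.

#include <stdio.h>
#include <stdint.h>

#define ERASESIZE	0x4000u		/* hypothetical 16 KiB erase block */

/* toy log2phy table: logical block i lives in physical block log2phy[i] */
static const unsigned int log2phy[] = { 4, 7, 2, 9 };

int main(void)
{
	uint32_t from = 0x6000;				/* logical address */
	unsigned int log_num = from / ERASESIZE;	/* logical block number */
	uint32_t block_ofs = from % ERASESIZE;		/* offset inside the block */
	uint32_t phys = log2phy[log_num] * ERASESIZE + block_ofs;

	printf("logical 0x%x -> physical block %u, offset 0x%x (0x%x)\n",
	       (unsigned int)from, log2phy[log_num],
	       (unsigned int)block_ofs, (unsigned int)phys);
	return 0;
}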
+ * + */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/sizes.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> + +/* oob structure */ +#define NAND_NOOB_LOGADDR_00 8 +#define NAND_NOOB_LOGADDR_01 9 +#define NAND_NOOB_LOGADDR_10 10 +#define NAND_NOOB_LOGADDR_11 11 +#define NAND_NOOB_LOGADDR_20 12 +#define NAND_NOOB_LOGADDR_21 13 + +#define BLOCK_IS_RESERVED 0xffff +#define BLOCK_UNMASK_COMPLEMENT 1 + +/* factory defaults */ +#define SHARPSL_NAND_PARTS 3 +#define SHARPSL_FTL_PART_SIZE (7 * SZ_1M) +#define SHARPSL_PARTINFO1_LADDR 0x00060000 +#define SHARPSL_PARTINFO2_LADDR 0x00064000 + +#define BOOT_MAGIC 0x424f4f54 +#define FSRO_MAGIC 0x4653524f +#define FSRW_MAGIC 0x46535257 + +/** + * struct sharpsl_ftl - Sharp FTL Logical Table + * @logmax: number of logical blocks + * @log2phy: the logical-to-physical table + * + * Structure containing the logical-to-physical translation table + * used by the SHARP SL FTL. + */ +struct sharpsl_ftl { + unsigned int logmax; + unsigned int *log2phy; +}; + +/* verify that the OOB bytes 8 to 15 are free and available for the FTL */ +static int sharpsl_nand_check_ooblayout(struct mtd_info *mtd) +{ + u8 freebytes = 0; + int section = 0; + + while (true) { + struct mtd_oob_region oobfree = { }; + int ret, i; + + ret = mtd_ooblayout_free(mtd, section++, &oobfree); + if (ret) + break; + + if (!oobfree.length || oobfree.offset > 15 || + (oobfree.offset + oobfree.length) < 8) + continue; + + i = oobfree.offset >= 8 ? oobfree.offset : 8; + for (; i < oobfree.offset + oobfree.length && i < 16; i++) + freebytes |= BIT(i - 8); + + if (freebytes == 0xff) + return 0; + } + + return -ENOTSUPP; +} + +static int sharpsl_nand_read_oob(struct mtd_info *mtd, loff_t offs, u8 *buf) +{ + struct mtd_oob_ops ops = { }; + int ret; + + ops.mode = MTD_OPS_PLACE_OOB; + ops.ooblen = mtd->oobsize; + ops.oobbuf = buf; + + ret = mtd_read_oob(mtd, offs, &ops); + if (ret != 0 || mtd->oobsize != ops.oobretlen) + return -1; + + return 0; +} + +/* + * The logical block number assigned to a physical block is stored in the OOB + * of the first page, in 3 16-bit copies with the following layout: + * + * 01234567 89abcdef + * -------- -------- + * ECC BB xyxyxy + * + * When reading we check that the first two copies agree. + * In case of error, matching is tried using the following pairs. + * Reserved values 0xffff mean the block is kept for wear leveling. 
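Reading the table above in code form: two of the three 16-bit copies must agree, the agreed value must have even parity, 0xffff marks a block kept as a wear-leveling spare, and the logical number itself sits in bits 10..1. A standalone decoder over OOB bytes 8..13 follows; the sample OOB contents are invented.

#include <stdio.h>
#include <stdint.h>

static int decode_logical(const uint8_t *oob)
{
	uint16_t us;

	if (oob[8] == oob[10] && oob[9] == oob[11])
		us = oob[8] | oob[9] << 8;
	else if (oob[10] == oob[12] && oob[11] == oob[13])
		us = oob[10] | oob[11] << 8;
	else if (oob[12] == oob[8] && oob[13] == oob[9])
		us = oob[12] | oob[13] << 8;
	else
		return -1;			/* no two copies agree */

	if (__builtin_popcount(us) & 1)
		return -1;			/* parity check failed */

	if (us == 0xffff)
		return -2;			/* reserved / wear-leveling spare */

	return (us >> 1) & 0x3ff;		/* 10-bit logical block number */
}

int main(void)
{
	uint8_t oob[16] = { 0 };

	/* invented example: all three copies encode logical block 3 */
	oob[8] = oob[10] = oob[12] = 0x06;
	oob[9] = oob[11] = oob[13] = 0x00;

	printf("logical block = %d\n", decode_logical(oob));
	return 0;
}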
+ * + * 01234567 89abcdef + * -------- -------- + * ECC BB xyxy oob[8]==oob[10] && oob[9]==oob[11] -> byte0=8 byte1=9 + * ECC BB xyxy oob[10]==oob[12] && oob[11]==oob[13] -> byte0=10 byte1=11 + * ECC BB xy xy oob[12]==oob[8] && oob[13]==oob[9] -> byte0=12 byte1=13 + */ +static int sharpsl_nand_get_logical_num(u8 *oob) +{ + u16 us; + int good0, good1; + + if (oob[NAND_NOOB_LOGADDR_00] == oob[NAND_NOOB_LOGADDR_10] && + oob[NAND_NOOB_LOGADDR_01] == oob[NAND_NOOB_LOGADDR_11]) { + good0 = NAND_NOOB_LOGADDR_00; + good1 = NAND_NOOB_LOGADDR_01; + } else if (oob[NAND_NOOB_LOGADDR_10] == oob[NAND_NOOB_LOGADDR_20] && + oob[NAND_NOOB_LOGADDR_11] == oob[NAND_NOOB_LOGADDR_21]) { + good0 = NAND_NOOB_LOGADDR_10; + good1 = NAND_NOOB_LOGADDR_11; + } else if (oob[NAND_NOOB_LOGADDR_20] == oob[NAND_NOOB_LOGADDR_00] && + oob[NAND_NOOB_LOGADDR_21] == oob[NAND_NOOB_LOGADDR_01]) { + good0 = NAND_NOOB_LOGADDR_20; + good1 = NAND_NOOB_LOGADDR_21; + } else { + return -EINVAL; + } + + us = oob[good0] | oob[good1] << 8; + + /* parity check */ + if (hweight16(us) & BLOCK_UNMASK_COMPLEMENT) + return -EINVAL; + + /* reserved */ + if (us == BLOCK_IS_RESERVED) + return BLOCK_IS_RESERVED; + + return (us >> 1) & GENMASK(9, 0); +} + +static int sharpsl_nand_init_ftl(struct mtd_info *mtd, struct sharpsl_ftl *ftl) +{ + unsigned int block_num, log_num, phymax; + loff_t block_adr; + u8 *oob; + int i, ret; + + oob = kzalloc(mtd->oobsize, GFP_KERNEL); + if (!oob) + return -ENOMEM; + + phymax = mtd_div_by_eb(SHARPSL_FTL_PART_SIZE, mtd); + + /* FTL reserves 5% of the blocks + 1 spare */ + ftl->logmax = ((phymax * 95) / 100) - 1; + + ftl->log2phy = kmalloc_array(ftl->logmax, sizeof(*ftl->log2phy), + GFP_KERNEL); + if (!ftl->log2phy) { + ret = -ENOMEM; + goto exit; + } + + /* initialize ftl->log2phy */ + for (i = 0; i < ftl->logmax; i++) + ftl->log2phy[i] = UINT_MAX; + + /* create physical-logical table */ + for (block_num = 0; block_num < phymax; block_num++) { + block_adr = block_num * mtd->erasesize; + + if (mtd_block_isbad(mtd, block_adr)) + continue; + + if (sharpsl_nand_read_oob(mtd, block_adr, oob)) + continue; + + /* get logical block */ + log_num = sharpsl_nand_get_logical_num(oob); + + /* cut-off errors and skip the out-of-range values */ + if (log_num > 0 && log_num < ftl->logmax) { + if (ftl->log2phy[log_num] == UINT_MAX) + ftl->log2phy[log_num] = block_num; + } + } + + pr_info("Sharp SL FTL: %d blocks used (%d logical, %d reserved)\n", + phymax, ftl->logmax, phymax - ftl->logmax); + + ret = 0; +exit: + kfree(oob); + return ret; +} + +void sharpsl_nand_cleanup_ftl(struct sharpsl_ftl *ftl) +{ + kfree(ftl->log2phy); +} + +static int sharpsl_nand_read_laddr(struct mtd_info *mtd, + loff_t from, + size_t len, + void *buf, + struct sharpsl_ftl *ftl) +{ + unsigned int log_num, final_log_num; + unsigned int block_num; + loff_t block_adr; + loff_t block_ofs; + size_t retlen; + int err; + + log_num = mtd_div_by_eb((u32)from, mtd); + final_log_num = mtd_div_by_eb(((u32)from + len - 1), mtd); + + if (len <= 0 || log_num >= ftl->logmax || final_log_num > log_num) + return -EINVAL; + + block_num = ftl->log2phy[log_num]; + block_adr = block_num * mtd->erasesize; + block_ofs = mtd_mod_by_eb((u32)from, mtd); + + err = mtd_read(mtd, block_adr + block_ofs, len, &retlen, buf); + /* Ignore corrected ECC errors */ + if (mtd_is_bitflip(err)) + err = 0; + + if (!err && retlen != len) + err = -EIO; + + if (err) + pr_err("sharpslpart: error, read failed at %#llx\n", + block_adr + block_ofs); + + return err; +} + +/* + * MTD Partition Parser + * + * 
Sample values read from SL-C860 + * + * # cat /proc/mtd + * dev: size erasesize name + * mtd0: 006d0000 00020000 "Filesystem" + * mtd1: 00700000 00004000 "smf" + * mtd2: 03500000 00004000 "root" + * mtd3: 04400000 00004000 "home" + * + * PARTITIONINFO1 + * 0x00060000: 00 00 00 00 00 00 70 00 42 4f 4f 54 00 00 00 00 ......p.BOOT.... + * 0x00060010: 00 00 70 00 00 00 c0 03 46 53 52 4f 00 00 00 00 ..p.....FSRO.... + * 0x00060020: 00 00 c0 03 00 00 00 04 46 53 52 57 00 00 00 00 ........FSRW.... + */ +struct sharpsl_nand_partinfo { + __le32 start; + __le32 end; + __be32 magic; + u32 reserved; +}; + +static int sharpsl_nand_read_partinfo(struct mtd_info *master, + loff_t from, + size_t len, + struct sharpsl_nand_partinfo *buf, + struct sharpsl_ftl *ftl) +{ + int ret; + + ret = sharpsl_nand_read_laddr(master, from, len, buf, ftl); + if (ret) + return ret; + + /* check for magics */ + if (be32_to_cpu(buf[0].magic) != BOOT_MAGIC || + be32_to_cpu(buf[1].magic) != FSRO_MAGIC || + be32_to_cpu(buf[2].magic) != FSRW_MAGIC) { + pr_err("sharpslpart: magic values mismatch\n"); + return -EINVAL; + } + + /* fixup for hardcoded value 64 MiB (for older models) */ + buf[2].end = cpu_to_le32(master->size); + + /* extra sanity check */ + if (le32_to_cpu(buf[0].end) <= le32_to_cpu(buf[0].start) || + le32_to_cpu(buf[1].start) < le32_to_cpu(buf[0].end) || + le32_to_cpu(buf[1].end) <= le32_to_cpu(buf[1].start) || + le32_to_cpu(buf[2].start) < le32_to_cpu(buf[1].end) || + le32_to_cpu(buf[2].end) <= le32_to_cpu(buf[2].start)) { + pr_err("sharpslpart: partition sizes mismatch\n"); + return -EINVAL; + } + + return 0; +} + +static int sharpsl_parse_mtd_partitions(struct mtd_info *master, + const struct mtd_partition **pparts, + struct mtd_part_parser_data *data) +{ + struct sharpsl_ftl ftl; + struct sharpsl_nand_partinfo buf[SHARPSL_NAND_PARTS]; + struct mtd_partition *sharpsl_nand_parts; + int err; + + /* check that OOB bytes 8 to 15 used by the FTL are actually free */ + err = sharpsl_nand_check_ooblayout(master); + if (err) + return err; + + /* init logical mgmt (FTL) */ + err = sharpsl_nand_init_ftl(master, &ftl); + if (err) + return err; + + /* read and validate first partition table */ + pr_info("sharpslpart: try reading first partition table\n"); + err = sharpsl_nand_read_partinfo(master, + SHARPSL_PARTINFO1_LADDR, + sizeof(buf), buf, &ftl); + if (err) { + /* fallback: read second partition table */ + pr_warn("sharpslpart: first partition table is invalid, retry using the second\n"); + err = sharpsl_nand_read_partinfo(master, + SHARPSL_PARTINFO2_LADDR, + sizeof(buf), buf, &ftl); + } + + /* cleanup logical mgmt (FTL) */ + sharpsl_nand_cleanup_ftl(&ftl); + + if (err) { + pr_err("sharpslpart: both partition tables are invalid\n"); + return err; + } + + sharpsl_nand_parts = kzalloc(sizeof(*sharpsl_nand_parts) * + SHARPSL_NAND_PARTS, GFP_KERNEL); + if (!sharpsl_nand_parts) + return -ENOMEM; + + /* original names */ + sharpsl_nand_parts[0].name = "smf"; + sharpsl_nand_parts[0].offset = le32_to_cpu(buf[0].start); + sharpsl_nand_parts[0].size = le32_to_cpu(buf[0].end) - + le32_to_cpu(buf[0].start); + + sharpsl_nand_parts[1].name = "root"; + sharpsl_nand_parts[1].offset = le32_to_cpu(buf[1].start); + sharpsl_nand_parts[1].size = le32_to_cpu(buf[1].end) - + le32_to_cpu(buf[1].start); + + sharpsl_nand_parts[2].name = "home"; + sharpsl_nand_parts[2].offset = le32_to_cpu(buf[2].start); + sharpsl_nand_parts[2].size = le32_to_cpu(buf[2].end) - + le32_to_cpu(buf[2].start); + + *pparts = sharpsl_nand_parts; + return 
SHARPSL_NAND_PARTS; +} + +static struct mtd_part_parser sharpsl_mtd_parser = { + .parse_fn = sharpsl_parse_mtd_partitions, + .name = "sharpslpart", +}; +module_mtd_part_parser(sharpsl_mtd_parser); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Andrea Adami <[email protected]>"); +MODULE_DESCRIPTION("MTD partitioning for NAND flash on Sharp SL Series"); diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig index 69c638dd0484..89da88e59121 100644 --- a/drivers/mtd/spi-nor/Kconfig +++ b/drivers/mtd/spi-nor/Kconfig @@ -50,7 +50,7 @@ config SPI_ATMEL_QUADSPI config SPI_CADENCE_QUADSPI tristate "Cadence Quad SPI controller" - depends on OF && (ARM || COMPILE_TEST) + depends on OF && (ARM || ARM64 || COMPILE_TEST) help Enable support for the Cadence Quad SPI Flash controller. @@ -90,7 +90,7 @@ config SPI_INTEL_SPI tristate config SPI_INTEL_SPI_PCI - tristate "Intel PCH/PCU SPI flash PCI driver" if EXPERT + tristate "Intel PCH/PCU SPI flash PCI driver" depends on X86 && PCI select SPI_INTEL_SPI help @@ -106,7 +106,7 @@ config SPI_INTEL_SPI_PCI will be called intel-spi-pci. config SPI_INTEL_SPI_PLATFORM - tristate "Intel PCH/PCU SPI flash platform driver" if EXPERT + tristate "Intel PCH/PCU SPI flash platform driver" depends on X86 select SPI_INTEL_SPI help diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index 53c7d8e0327a..75a2bc447a99 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -31,6 +31,7 @@ #include <linux/of_device.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/sched.h> #include <linux/spi/spi.h> #include <linux/timer.h> @@ -38,6 +39,9 @@ #define CQSPI_NAME "cadence-qspi" #define CQSPI_MAX_CHIPSELECT 16 +/* Quirks */ +#define CQSPI_NEEDS_WR_DELAY BIT(0) + struct cqspi_st; struct cqspi_flash_pdata { @@ -75,7 +79,9 @@ struct cqspi_st { bool is_decoded_cs; u32 fifo_depth; u32 fifo_width; + bool rclk_en; u32 trigger_address; + u32 wr_delay; struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT]; }; @@ -608,6 +614,15 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, reinit_completion(&cqspi->transfer_complete); writel(CQSPI_REG_INDIRECTWR_START_MASK, reg_base + CQSPI_REG_INDIRECTWR); + /* + * As per 66AK2G02 TRM SPRUHY8F section 11.15.5.3 Indirect Access + * Controller programming sequence, couple of cycles of + * QSPI_REF_CLK delay is required for the above bit to + * be internally synchronized by the QSPI module. Provide 5 + * cycles of delay. + */ + if (cqspi->wr_delay) + ndelay(cqspi->wr_delay); while (remaining > 0) { write_bytes = remaining > page_size ? 
page_size : remaining; @@ -775,7 +790,7 @@ static void cqspi_config_baudrate_div(struct cqspi_st *cqspi) } static void cqspi_readdata_capture(struct cqspi_st *cqspi, - const unsigned int bypass, + const bool bypass, const unsigned int delay) { void __iomem *reg_base = cqspi->iobase; @@ -839,7 +854,8 @@ static void cqspi_configure(struct spi_nor *nor) cqspi->sclk = sclk; cqspi_config_baudrate_div(cqspi); cqspi_delay(nor); - cqspi_readdata_capture(cqspi, 1, f_pdata->read_delay); + cqspi_readdata_capture(cqspi, !cqspi->rclk_en, + f_pdata->read_delay); } if (switch_cs || switch_ck) @@ -1036,6 +1052,8 @@ static int cqspi_of_get_pdata(struct platform_device *pdev) return -ENXIO; } + cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en"); + return 0; } @@ -1156,6 +1174,7 @@ static int cqspi_probe(struct platform_device *pdev) struct cqspi_st *cqspi; struct resource *res; struct resource *res_ahb; + unsigned long data; int ret; int irq; @@ -1206,13 +1225,24 @@ static int cqspi_probe(struct platform_device *pdev) return -ENXIO; } + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + return ret; + } + ret = clk_prepare_enable(cqspi->clk); if (ret) { dev_err(dev, "Cannot enable QSPI clock.\n"); - return ret; + goto probe_clk_failed; } cqspi->master_ref_clk_hz = clk_get_rate(cqspi->clk); + data = (unsigned long)of_device_get_match_data(dev); + if (data & CQSPI_NEEDS_WR_DELAY) + cqspi->wr_delay = 5 * DIV_ROUND_UP(NSEC_PER_SEC, + cqspi->master_ref_clk_hz); ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0, pdev->name, cqspi); @@ -1233,10 +1263,13 @@ static int cqspi_probe(struct platform_device *pdev) } return ret; -probe_irq_failed: - cqspi_controller_enable(cqspi, 0); probe_setup_failed: + cqspi_controller_enable(cqspi, 0); +probe_irq_failed: clk_disable_unprepare(cqspi->clk); +probe_clk_failed: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); return ret; } @@ -1253,6 +1286,9 @@ static int cqspi_remove(struct platform_device *pdev) clk_disable_unprepare(cqspi->clk); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + return 0; } @@ -1284,7 +1320,14 @@ static const struct dev_pm_ops cqspi__dev_pm_ops = { #endif static const struct of_device_id cqspi_dt_ids[] = { - {.compatible = "cdns,qspi-nor",}, + { + .compatible = "cdns,qspi-nor", + .data = (void *)0, + }, + { + .compatible = "ti,k2g-qspi", + .data = (void *)CQSPI_NEEDS_WR_DELAY, + }, { /* end of table */ } }; diff --git a/drivers/mtd/spi-nor/intel-spi-pci.c b/drivers/mtd/spi-nor/intel-spi-pci.c index e82652335ede..c0976f2e3dd1 100644 --- a/drivers/mtd/spi-nor/intel-spi-pci.c +++ b/drivers/mtd/spi-nor/intel-spi-pci.c @@ -63,7 +63,10 @@ static void intel_spi_pci_remove(struct pci_dev *pdev) } static const struct pci_device_id intel_spi_pci_ids[] = { + { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0xa1a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0xa224), (unsigned long)&bxt_info }, { }, }; MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids); diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c index 8a596bfeddff..ef034d898a23 100644 --- a/drivers/mtd/spi-nor/intel-spi.c +++ b/drivers/mtd/spi-nor/intel-spi.c @@ -67,8 +67,6 @@ #define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT) #define PR_RPE BIT(15) #define PR_BASE_MASK 0x3fff -/* Last PR is GPR0 */ -#define PR_NUM (5 + 1) /* Offsets are from @ispi->sregs */ #define SSFSTS_CTL 0x00 @@ -90,20 +88,35 
@@ #define OPMENU0 0x08 #define OPMENU1 0x0c +#define OPTYPE_READ_NO_ADDR 0 +#define OPTYPE_WRITE_NO_ADDR 1 +#define OPTYPE_READ_WITH_ADDR 2 +#define OPTYPE_WRITE_WITH_ADDR 3 + /* CPU specifics */ #define BYT_PR 0x74 #define BYT_SSFSTS_CTL 0x90 #define BYT_BCR 0xfc #define BYT_BCR_WPD BIT(0) #define BYT_FREG_NUM 5 +#define BYT_PR_NUM 5 #define LPT_PR 0x74 #define LPT_SSFSTS_CTL 0x90 #define LPT_FREG_NUM 5 +#define LPT_PR_NUM 5 #define BXT_PR 0x84 #define BXT_SSFSTS_CTL 0xa0 #define BXT_FREG_NUM 12 +#define BXT_PR_NUM 6 + +#define LVSCC 0xc4 +#define UVSCC 0xc8 +#define ERASE_OPCODE_SHIFT 8 +#define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) +#define ERASE_64K_OPCODE_SHIFT 16 +#define ERASE_64K_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) #define INTEL_SPI_TIMEOUT 5000 /* ms */ #define INTEL_SPI_FIFO_SZ 64 @@ -117,8 +130,11 @@ * @pregs: Start of protection registers * @sregs: Start of software sequencer registers * @nregions: Maximum number of regions + * @pr_num: Maximum number of protected range registers * @writeable: Is the chip writeable - * @swseq: Use SW sequencer in register reads/writes + * @locked: Is SPI setting locked + * @swseq_reg: Use SW sequencer in register reads/writes + * @swseq_erase: Use SW sequencer in erase operation * @erase_64k: 64k erase supported * @opcodes: Opcodes which are supported. This are programmed by BIOS * before it locks down the controller. @@ -132,8 +148,11 @@ struct intel_spi { void __iomem *pregs; void __iomem *sregs; size_t nregions; + size_t pr_num; bool writeable; - bool swseq; + bool locked; + bool swseq_reg; + bool swseq_erase; bool erase_64k; u8 opcodes[8]; u8 preopcodes[2]; @@ -167,7 +186,7 @@ static void intel_spi_dump_regs(struct intel_spi *ispi) for (i = 0; i < ispi->nregions; i++) dev_dbg(ispi->dev, "FREG(%d)=0x%08x\n", i, readl(ispi->base + FREG(i))); - for (i = 0; i < PR_NUM; i++) + for (i = 0; i < ispi->pr_num; i++) dev_dbg(ispi->dev, "PR(%d)=0x%08x\n", i, readl(ispi->pregs + PR(i))); @@ -181,8 +200,11 @@ static void intel_spi_dump_regs(struct intel_spi *ispi) if (ispi->info->type == INTEL_SPI_BYT) dev_dbg(ispi->dev, "BCR=0x%08x\n", readl(ispi->base + BYT_BCR)); + dev_dbg(ispi->dev, "LVSCC=0x%08x\n", readl(ispi->base + LVSCC)); + dev_dbg(ispi->dev, "UVSCC=0x%08x\n", readl(ispi->base + UVSCC)); + dev_dbg(ispi->dev, "Protected regions:\n"); - for (i = 0; i < PR_NUM; i++) { + for (i = 0; i < ispi->pr_num; i++) { u32 base, limit; value = readl(ispi->pregs + PR(i)); @@ -214,7 +236,9 @@ static void intel_spi_dump_regs(struct intel_spi *ispi) } dev_dbg(ispi->dev, "Using %cW sequencer for register access\n", - ispi->swseq ? 'S' : 'H'); + ispi->swseq_reg ? 'S' : 'H'); + dev_dbg(ispi->dev, "Using %cW sequencer for erase operation\n", + ispi->swseq_erase ? 
'S' : 'H'); } /* Reads max INTEL_SPI_FIFO_SZ bytes from the device fifo */ @@ -278,7 +302,7 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi) static int intel_spi_init(struct intel_spi *ispi) { - u32 opmenu0, opmenu1, val; + u32 opmenu0, opmenu1, lvscc, uvscc, val; int i; switch (ispi->info->type) { @@ -286,6 +310,8 @@ static int intel_spi_init(struct intel_spi *ispi) ispi->sregs = ispi->base + BYT_SSFSTS_CTL; ispi->pregs = ispi->base + BYT_PR; ispi->nregions = BYT_FREG_NUM; + ispi->pr_num = BYT_PR_NUM; + ispi->swseq_reg = true; if (writeable) { /* Disable write protection */ @@ -305,12 +331,15 @@ static int intel_spi_init(struct intel_spi *ispi) ispi->sregs = ispi->base + LPT_SSFSTS_CTL; ispi->pregs = ispi->base + LPT_PR; ispi->nregions = LPT_FREG_NUM; + ispi->pr_num = LPT_PR_NUM; + ispi->swseq_reg = true; break; case INTEL_SPI_BXT: ispi->sregs = ispi->base + BXT_SSFSTS_CTL; ispi->pregs = ispi->base + BXT_PR; ispi->nregions = BXT_FREG_NUM; + ispi->pr_num = BXT_PR_NUM; ispi->erase_64k = true; break; @@ -318,42 +347,64 @@ static int intel_spi_init(struct intel_spi *ispi) return -EINVAL; } - /* Disable #SMI generation */ + /* Disable #SMI generation from HW sequencer */ val = readl(ispi->base + HSFSTS_CTL); val &= ~HSFSTS_CTL_FSMIE; writel(val, ispi->base + HSFSTS_CTL); /* - * BIOS programs allowed opcodes and then locks down the register. - * So read back what opcodes it decided to support. That's the set - * we are going to support as well. + * Determine whether erase operation should use HW or SW sequencer. + * + * The HW sequencer has a predefined list of opcodes, with only the + * erase opcode being programmable in LVSCC and UVSCC registers. + * If these registers don't contain a valid erase opcode, erase + * cannot be done using HW sequencer. */ - opmenu0 = readl(ispi->sregs + OPMENU0); - opmenu1 = readl(ispi->sregs + OPMENU1); + lvscc = readl(ispi->base + LVSCC); + uvscc = readl(ispi->base + UVSCC); + if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK)) + ispi->swseq_erase = true; + /* SPI controller on Intel BXT supports 64K erase opcode */ + if (ispi->info->type == INTEL_SPI_BXT && !ispi->swseq_erase) + if (!(lvscc & ERASE_64K_OPCODE_MASK) || + !(uvscc & ERASE_64K_OPCODE_MASK)) + ispi->erase_64k = false; /* * Some controllers can only do basic operations using hardware * sequencer. All other operations are supposed to be carried out - * using software sequencer. If we find that BIOS has programmed - * opcodes for the software sequencer we use that over the hardware - * sequencer. + * using software sequencer. */ - if (opmenu0 && opmenu1) { - for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) { - ispi->opcodes[i] = opmenu0 >> i * 8; - ispi->opcodes[i + 4] = opmenu1 >> i * 8; - } - - val = readl(ispi->sregs + PREOP_OPTYPE); - ispi->preopcodes[0] = val; - ispi->preopcodes[1] = val >> 8; - + if (ispi->swseq_reg) { /* Disable #SMI generation from SW sequencer */ val = readl(ispi->sregs + SSFSTS_CTL); val &= ~SSFSTS_CTL_FSMIE; writel(val, ispi->sregs + SSFSTS_CTL); + } + + /* Check controller's lock status */ + val = readl(ispi->base + HSFSTS_CTL); + ispi->locked = !!(val & HSFSTS_CTL_FLOCKDN); + + if (ispi->locked) { + /* + * BIOS programs allowed opcodes and then locks down the + * register. So read back what opcodes it decided to support. + * That's the set we are going to support as well. 
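The decision logic introduced above can be condensed to a few lines: erase falls back to the software sequencer when LVSCC or UVSCC carries no valid erase opcode, 64k erase is only kept when both registers advertise it, and the opcode table is read back from OPMENU0/1 only when the controller is locked down. A standalone sketch follows; the register values are invented, the FLOCKDN bit position is an assumption for illustration, and the 64k mask uses the 16-bit shift that the ERASE_64K_OPCODE_SHIFT define suggests.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define ERASE_OPCODE_MASK	(0xff << 8)
#define ERASE_64K_OPCODE_MASK	(0xff << 16)
#define HSFSTS_CTL_FLOCKDN	(1u << 15)	/* assumed bit position */

int main(void)
{
	/* invented register snapshots */
	uint32_t lvscc = 0x00d82000, uvscc = 0x00002000, hsfsts_ctl = 0;
	bool swseq_erase = false, erase_64k = true, locked;

	if (!(lvscc & ERASE_OPCODE_MASK) || !(uvscc & ERASE_OPCODE_MASK))
		swseq_erase = true;		/* HW sequencer has no erase opcode */

	if (!swseq_erase &&
	    (!(lvscc & ERASE_64K_OPCODE_MASK) || !(uvscc & ERASE_64K_OPCODE_MASK)))
		erase_64k = false;		/* no 64k erase opcode programmed */

	locked = hsfsts_ctl & HSFSTS_CTL_FLOCKDN;

	printf("swseq_erase=%d erase_64k=%d locked=%d\n",
	       swseq_erase, erase_64k, locked);
	printf("opcodes are %s\n",
	       locked ? "read back from OPMENU0/1" : "programmed on demand");
	return 0;
}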
+ */ + opmenu0 = readl(ispi->sregs + OPMENU0); + opmenu1 = readl(ispi->sregs + OPMENU1); - ispi->swseq = true; + if (opmenu0 && opmenu1) { + for (i = 0; i < ARRAY_SIZE(ispi->opcodes) / 2; i++) { + ispi->opcodes[i] = opmenu0 >> i * 8; + ispi->opcodes[i + 4] = opmenu1 >> i * 8; + } + + val = readl(ispi->sregs + PREOP_OPTYPE); + ispi->preopcodes[0] = val; + ispi->preopcodes[1] = val >> 8; + } } intel_spi_dump_regs(ispi); @@ -361,18 +412,28 @@ static int intel_spi_init(struct intel_spi *ispi) return 0; } -static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode) +static int intel_spi_opcode_index(struct intel_spi *ispi, u8 opcode, int optype) { int i; + int preop; - for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) - if (ispi->opcodes[i] == opcode) - return i; - return -EINVAL; + if (ispi->locked) { + for (i = 0; i < ARRAY_SIZE(ispi->opcodes); i++) + if (ispi->opcodes[i] == opcode) + return i; + + return -EINVAL; + } + + /* The lock is off, so just use index 0 */ + writel(opcode, ispi->sregs + OPMENU0); + preop = readw(ispi->sregs + PREOP_OPTYPE); + writel(optype << 16 | preop, ispi->sregs + PREOP_OPTYPE); + + return 0; } -static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf, - int len) +static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, int len) { u32 val, status; int ret; @@ -394,6 +455,9 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf, return -EINVAL; } + if (len > INTEL_SPI_FIFO_SZ) + return -EINVAL; + val |= (len - 1) << HSFSTS_CTL_FDBC_SHIFT; val |= HSFSTS_CTL_FCERR | HSFSTS_CTL_FDONE; val |= HSFSTS_CTL_FGO; @@ -412,27 +476,39 @@ static int intel_spi_hw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf, return 0; } -static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, u8 *buf, - int len) +static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len, + int optype) { - u32 val, status; + u32 val = 0, status; + u16 preop; int ret; - ret = intel_spi_opcode_index(ispi, opcode); + ret = intel_spi_opcode_index(ispi, opcode, optype); if (ret < 0) return ret; - val = (len << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; + if (len > INTEL_SPI_FIFO_SZ) + return -EINVAL; + + /* Only mark 'Data Cycle' bit when there is data to be transferred */ + if (len > 0) + val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS; val |= ret << SSFSTS_CTL_COP_SHIFT; val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE; val |= SSFSTS_CTL_SCGO; + preop = readw(ispi->sregs + PREOP_OPTYPE); + if (preop) { + val |= SSFSTS_CTL_ACS; + if (preop >> 8) + val |= SSFSTS_CTL_SPOP; + } writel(val, ispi->sregs + SSFSTS_CTL); ret = intel_spi_wait_sw_busy(ispi); if (ret) return ret; - status = readl(ispi->base + SSFSTS_CTL); + status = readl(ispi->sregs + SSFSTS_CTL); if (status & SSFSTS_CTL_FCERR) return -EIO; else if (status & SSFSTS_CTL_AEL) @@ -449,10 +525,11 @@ static int intel_spi_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) /* Address of the first chip */ writel(0, ispi->base + FADDR); - if (ispi->swseq) - ret = intel_spi_sw_cycle(ispi, opcode, buf, len); + if (ispi->swseq_reg) + ret = intel_spi_sw_cycle(ispi, opcode, len, + OPTYPE_READ_NO_ADDR); else - ret = intel_spi_hw_cycle(ispi, opcode, buf, len); + ret = intel_spi_hw_cycle(ispi, opcode, len); if (ret) return ret; @@ -467,10 +544,15 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) /* * This is handled with atomic operation and preop code in Intel - * controller so skip it here now. + * controller so skip it here now. 
If the controller is not locked, + * program the opcode to the PREOP register for later use. */ - if (opcode == SPINOR_OP_WREN) + if (opcode == SPINOR_OP_WREN) { + if (!ispi->locked) + writel(opcode, ispi->sregs + PREOP_OPTYPE); + return 0; + } writel(0, ispi->base + FADDR); @@ -479,9 +561,10 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) if (ret) return ret; - if (ispi->swseq) - return intel_spi_sw_cycle(ispi, opcode, buf, len); - return intel_spi_hw_cycle(ispi, opcode, buf, len); + if (ispi->swseq_reg) + return intel_spi_sw_cycle(ispi, opcode, len, + OPTYPE_WRITE_NO_ADDR); + return intel_spi_hw_cycle(ispi, opcode, len); } static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len, @@ -561,12 +644,6 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len, val |= (block_size - 1) << HSFSTS_CTL_FDBC_SHIFT; val |= HSFSTS_CTL_FCYCLE_WRITE; - /* Write enable */ - if (ispi->preopcodes[1] == SPINOR_OP_WREN) - val |= SSFSTS_CTL_SPOP; - val |= SSFSTS_CTL_ACS; - writel(val, ispi->base + HSFSTS_CTL); - ret = intel_spi_write_block(ispi, write_buf, block_size); if (ret) { dev_err(ispi->dev, "failed to write block\n"); @@ -574,8 +651,8 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len, } /* Start the write now */ - val = readl(ispi->base + HSFSTS_CTL); - writel(val | HSFSTS_CTL_FGO, ispi->base + HSFSTS_CTL); + val |= HSFSTS_CTL_FGO; + writel(val, ispi->base + HSFSTS_CTL); ret = intel_spi_wait_hw_busy(ispi); if (ret) { @@ -620,6 +697,22 @@ static int intel_spi_erase(struct spi_nor *nor, loff_t offs) erase_size = SZ_4K; } + if (ispi->swseq_erase) { + while (len > 0) { + writel(offs, ispi->base + FADDR); + + ret = intel_spi_sw_cycle(ispi, nor->erase_opcode, + 0, OPTYPE_WRITE_WITH_ADDR); + if (ret) + return ret; + + offs += erase_size; + len -= erase_size; + } + + return 0; + } + while (len > 0) { writel(offs, ispi->base + FADDR); @@ -652,7 +745,7 @@ static bool intel_spi_is_protected(const struct intel_spi *ispi, { int i; - for (i = 0; i < PR_NUM; i++) { + for (i = 0; i < ispi->pr_num; i++) { u32 pr_base, pr_limit, pr_value; pr_value = readl(ispi->pregs + PR(i)); diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c index c258c7adf1c5..abe455ccd68b 100644 --- a/drivers/mtd/spi-nor/mtk-quadspi.c +++ b/drivers/mtd/spi-nor/mtk-quadspi.c @@ -404,6 +404,29 @@ static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, return ret; } +static void mt8173_nor_disable_clk(struct mt8173_nor *mt8173_nor) +{ + clk_disable_unprepare(mt8173_nor->spi_clk); + clk_disable_unprepare(mt8173_nor->nor_clk); +} + +static int mt8173_nor_enable_clk(struct mt8173_nor *mt8173_nor) +{ + int ret; + + ret = clk_prepare_enable(mt8173_nor->spi_clk); + if (ret) + return ret; + + ret = clk_prepare_enable(mt8173_nor->nor_clk); + if (ret) { + clk_disable_unprepare(mt8173_nor->spi_clk); + return ret; + } + + return 0; +} + static int mtk_nor_init(struct mt8173_nor *mt8173_nor, struct device_node *flash_node) { @@ -468,15 +491,11 @@ static int mtk_nor_drv_probe(struct platform_device *pdev) return PTR_ERR(mt8173_nor->nor_clk); mt8173_nor->dev = &pdev->dev; - ret = clk_prepare_enable(mt8173_nor->spi_clk); + + ret = mt8173_nor_enable_clk(mt8173_nor); if (ret) return ret; - ret = clk_prepare_enable(mt8173_nor->nor_clk); - if (ret) { - clk_disable_unprepare(mt8173_nor->spi_clk); - return ret; - } /* only support one attached flash */ flash_np = of_get_next_available_child(pdev->dev.of_node, NULL); if 
(!flash_np) { @@ -487,10 +506,9 @@ static int mtk_nor_drv_probe(struct platform_device *pdev) ret = mtk_nor_init(mt8173_nor, flash_np); nor_free: - if (ret) { - clk_disable_unprepare(mt8173_nor->spi_clk); - clk_disable_unprepare(mt8173_nor->nor_clk); - } + if (ret) + mt8173_nor_disable_clk(mt8173_nor); + return ret; } @@ -498,11 +516,38 @@ static int mtk_nor_drv_remove(struct platform_device *pdev) { struct mt8173_nor *mt8173_nor = platform_get_drvdata(pdev); - clk_disable_unprepare(mt8173_nor->spi_clk); - clk_disable_unprepare(mt8173_nor->nor_clk); + mt8173_nor_disable_clk(mt8173_nor); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int mtk_nor_suspend(struct device *dev) +{ + struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev); + + mt8173_nor_disable_clk(mt8173_nor); + return 0; } +static int mtk_nor_resume(struct device *dev) +{ + struct mt8173_nor *mt8173_nor = dev_get_drvdata(dev); + + return mt8173_nor_enable_clk(mt8173_nor); +} + +static const struct dev_pm_ops mtk_nor_dev_pm_ops = { + .suspend = mtk_nor_suspend, + .resume = mtk_nor_resume, +}; + +#define MTK_NOR_DEV_PM_OPS (&mtk_nor_dev_pm_ops) +#else +#define MTK_NOR_DEV_PM_OPS NULL +#endif + static const struct of_device_id mtk_nor_of_ids[] = { { .compatible = "mediatek,mt8173-nor"}, { /* sentinel */ } @@ -514,6 +559,7 @@ static struct platform_driver mtk_nor_driver = { .remove = mtk_nor_drv_remove, .driver = { .name = "mtk-nor", + .pm = MTK_NOR_DEV_PM_OPS, .of_match_table = mtk_nor_of_ids, }, }; diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 19c000722cbc..bc266f70a15b 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -89,6 +89,8 @@ struct flash_info { #define NO_CHIP_ERASE BIT(12) /* Chip does not support chip erase */ #define SPI_NOR_SKIP_SFDP BIT(13) /* Skip parsing of SFDP tables */ #define USE_CLSR BIT(14) /* use CLSR command */ + + int (*quad_enable)(struct spi_nor *nor); }; #define JEDEC_MFR(info) ((info)->id[0]) @@ -870,6 +872,8 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) return ret; } +static int macronix_quad_enable(struct spi_nor *nor); + /* Used when the "_ext_id" is two bytes at most */ #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ .id = { \ @@ -964,6 +968,7 @@ static const struct flash_info spi_nor_ids[] = { { "f25l64qa", INFO(0x8c4117, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_HAS_LOCK) }, /* Everspin */ + { "mr25h128", CAT25_INFO( 16 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, { "mr25h40", CAT25_INFO(512 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, @@ -983,6 +988,11 @@ static const struct flash_info spi_nor_ids[] = { SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, { + "gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, + { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) @@ -997,6 +1007,12 @@ static const struct flash_info spi_nor_ids[] = { SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { + "gd25q256", INFO(0xc84019, 0, 64 * 1024, 512, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_4B_OPCODES | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + .quad_enable = macronix_quad_enable, + }, /* Intel/Numonyx -- 
xxxs33b */ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, @@ -1024,7 +1040,7 @@ static const struct flash_info spi_nor_ids[] = { { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) }, { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, - { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) }, @@ -1137,6 +1153,11 @@ static const struct flash_info spi_nor_ids[] = { { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) }, { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) }, { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, + { + "w25q16dw", INFO(0xef6015, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | + SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) + }, { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, { "w25q20cl", INFO(0xef4012, 0, 64 * 1024, 4, SECT_4K) }, { "w25q20bw", INFO(0xef5012, 0, 64 * 1024, 4, SECT_4K) }, @@ -2288,8 +2309,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor, /* Check the SFDP header version. */ if (le32_to_cpu(header.signature) != SFDP_SIGNATURE || - header.major != SFDP_JESD216_MAJOR || - header.minor < SFDP_JESD216_MINOR) + header.major != SFDP_JESD216_MAJOR) return -EINVAL; /* @@ -2427,6 +2447,15 @@ static int spi_nor_init_params(struct spi_nor *nor, params->quad_enable = spansion_quad_enable; break; } + + /* + * Some manufacturer like GigaDevice may use different + * bit to set QE on different memories, so the MFR can't + * indicate the quad_enable method for this case, we need + * set it in flash info list. + */ + if (info->quad_enable) + params->quad_enable = info->quad_enable; } /* Override the parameters with data read from SFDP tables. */ @@ -2630,17 +2659,60 @@ static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info, /* Enable Quad I/O if needed. 
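The per-flash hook added above is plain function-pointer precedence: the manufacturer ID selects a default Quad Enable routine, and a non-NULL quad_enable in the flash_info entry overrides it. A stripped-down sketch follows; the stand-in routine names and structures are invented, not the driver's.

#include <stdio.h>

struct nor_params {
	int (*quad_enable)(void);
};

struct flash_entry {
	const char *name;
	unsigned char mfr;
	int (*quad_enable)(void);	/* optional per-flash override */
};

static int spansion_style_qe(void) { puts("set QE via SR2 read-modify-write"); return 0; }
static int macronix_style_qe(void) { puts("set QE bit in the status register"); return 0; }

static void pick_quad_enable(struct nor_params *p, const struct flash_entry *info)
{
	/* default chosen from the manufacturer ID ... */
	p->quad_enable = (info->mfr == 0xc2) ? macronix_style_qe : spansion_style_qe;

	/* ... but a per-flash entry in the info table takes precedence */
	if (info->quad_enable)
		p->quad_enable = info->quad_enable;
}

int main(void)
{
	/* e.g. a GigaDevice part (MFR 0xc8) that needs the Macronix-style QE bit */
	struct flash_entry gd = { "gd25q256-like", 0xc8, macronix_style_qe };
	struct nor_params params;

	pick_quad_enable(&params, &gd);
	return params.quad_enable();
}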
*/ enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 || spi_nor_get_protocol_width(nor->write_proto) == 4); - if (enable_quad_io && params->quad_enable) { - err = params->quad_enable(nor); + if (enable_quad_io && params->quad_enable) + nor->quad_enable = params->quad_enable; + else + nor->quad_enable = NULL; + + return 0; +} + +static int spi_nor_init(struct spi_nor *nor) +{ + int err; + + /* + * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up + * with the software protection bits set + */ + if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL || + JEDEC_MFR(nor->info) == SNOR_MFR_INTEL || + JEDEC_MFR(nor->info) == SNOR_MFR_SST || + nor->info->flags & SPI_NOR_HAS_LOCK) { + write_enable(nor); + write_sr(nor, 0); + spi_nor_wait_till_ready(nor); + } + + if (nor->quad_enable) { + err = nor->quad_enable(nor); if (err) { dev_err(nor->dev, "quad mode not supported\n"); return err; } } + if ((nor->addr_width == 4) && + (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) && + !(nor->info->flags & SPI_NOR_4B_OPCODES)) + set_4byte(nor, nor->info, 1); + return 0; } +/* mtd resume handler */ +static void spi_nor_resume(struct mtd_info *mtd) +{ + struct spi_nor *nor = mtd_to_spi_nor(mtd); + struct device *dev = nor->dev; + int ret; + + /* re-initialize the nor chip */ + ret = spi_nor_init(nor); + if (ret) + dev_err(dev, "resume() failed\n"); +} + int spi_nor_scan(struct spi_nor *nor, const char *name, const struct spi_nor_hwcaps *hwcaps) { @@ -2708,20 +2780,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, if (ret) return ret; - /* - * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up - * with the software protection bits set - */ - - if (JEDEC_MFR(info) == SNOR_MFR_ATMEL || - JEDEC_MFR(info) == SNOR_MFR_INTEL || - JEDEC_MFR(info) == SNOR_MFR_SST || - info->flags & SPI_NOR_HAS_LOCK) { - write_enable(nor); - write_sr(nor, 0); - spi_nor_wait_till_ready(nor); - } - if (!mtd->name) mtd->name = dev_name(dev); mtd->priv = nor; @@ -2731,6 +2789,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, mtd->size = params.size; mtd->_erase = spi_nor_erase; mtd->_read = spi_nor_read; + mtd->_resume = spi_nor_resume; /* NOR protection support for STmicro/Micron chips and similar */ if (JEDEC_MFR(info) == SNOR_MFR_MICRON || @@ -2804,8 +2863,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, if (JEDEC_MFR(info) == SNOR_MFR_SPANSION || info->flags & SPI_NOR_4B_OPCODES) spi_nor_set_4byte_opcodes(nor, info); - else - set_4byte(nor, info, 1); } else { nor->addr_width = 3; } @@ -2822,6 +2879,12 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, return ret; } + /* Send all the required SPI flash commands to initialize device */ + nor->info = info; + ret = spi_nor_init(nor); + if (ret) + return ret; + dev_info(dev, "%s (%lld Kbytes)\n", info->name, (long long)mtd->size >> 10); diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c index 86c0931543c5..b3c7f6addba7 100644 --- a/drivers/mtd/spi-nor/stm32-quadspi.c +++ b/drivers/mtd/spi-nor/stm32-quadspi.c @@ -1,9 +1,22 @@ /* - * stm32_quadspi.c + * Driver for stm32 quadspi controller * - * Copyright (C) 2017, Ludovic Barre + * Copyright (C) 2017, STMicroelectronics - All Rights Reserved + * Author(s): Ludovic Barre author <[email protected]>. * - * License terms: GNU General Public License (GPL), version 2 + * License terms: GPL V2.0. 
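Stepping back to the spi-nor changes just above: the point of collecting the volatile setup (clearing the status-register protection bits, enabling Quad I/O, entering 4-byte addressing) into one init routine is that the new mtd resume hook can simply replay it after a power cycle. A rough sketch of that shape, with placeholder steps instead of real SPI transactions.

#include <stdio.h>

/* placeholder steps standing in for the real SPI commands */
static int clear_protection(void)	{ puts("write SR = 0");		return 0; }
static int enable_quad(void)		{ puts("set Quad Enable bit");	return 0; }
static int enter_4byte_mode(void)	{ puts("enter 4-byte mode");	return 0; }

/* everything the chip forgets across a power cycle lives in one place */
static int flash_init(void)
{
	int ret;

	ret = clear_protection();
	if (ret)
		return ret;
	ret = enable_quad();
	if (ret)
		return ret;
	return enter_4byte_mode();
}

static void flash_resume(void)
{
	/* resume just replays the init sequence */
	if (flash_init())
		puts("resume() failed");
}

int main(void)
{
	flash_init();		/* once at scan/probe time */
	flash_resume();		/* and again from the mtd resume hook */
	return 0;
}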
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License along with + * This program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/clk.h> #include <linux/errno.h> @@ -113,6 +126,7 @@ #define STM32_MAX_MMAP_SZ SZ_256M #define STM32_MAX_NORCHIP 2 +#define STM32_QSPI_FIFO_SZ 32 #define STM32_QSPI_FIFO_TIMEOUT_US 30000 #define STM32_QSPI_BUSY_TIMEOUT_US 100000 @@ -124,6 +138,7 @@ struct stm32_qspi_flash { u32 presc; u32 read_mode; bool registered; + u32 prefetch_limit; }; struct stm32_qspi { @@ -240,12 +255,12 @@ static int stm32_qspi_tx_poll(struct stm32_qspi *qspi, STM32_QSPI_FIFO_TIMEOUT_US); if (ret) { dev_err(qspi->dev, "fifo timeout (stat:%#x)\n", sr); - break; + return ret; } tx_fifo(buf++, qspi->io_base + QUADSPI_DR); } - return ret; + return 0; } static int stm32_qspi_tx_mm(struct stm32_qspi *qspi, @@ -272,6 +287,7 @@ static int stm32_qspi_send(struct stm32_qspi_flash *flash, { struct stm32_qspi *qspi = flash->qspi; u32 ccr, dcr, cr; + u32 last_byte; int err; err = stm32_qspi_wait_nobusy(qspi); @@ -314,6 +330,10 @@ static int stm32_qspi_send(struct stm32_qspi_flash *flash, if (err) goto abort; writel_relaxed(FCR_CTCF, qspi->io_base + QUADSPI_FCR); + } else { + last_byte = cmd->addr + cmd->len; + if (last_byte > flash->prefetch_limit) + goto abort; } return err; @@ -322,7 +342,9 @@ abort: cr = readl_relaxed(qspi->io_base + QUADSPI_CR) | CR_ABORT; writel_relaxed(cr, qspi->io_base + QUADSPI_CR); - dev_err(qspi->dev, "%s abort err:%d\n", __func__, err); + if (err) + dev_err(qspi->dev, "%s abort err:%d\n", __func__, err); + return err; } @@ -550,6 +572,7 @@ static int stm32_qspi_flash_setup(struct stm32_qspi *qspi, } flash->fsize = FSIZE_VAL(mtd->size); + flash->prefetch_limit = mtd->size - STM32_QSPI_FIFO_SZ; flash->read_mode = CCR_FMODE_MM; if (mtd->size > qspi->mm_size) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 93faa1fed6f2..ea01f24f15e7 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -95,7 +95,7 @@ static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable) reg = reg_readl(priv, REG_SPHY_CNTRL); if (enable) { reg |= PHY_RESET; - reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS); + reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS); reg_writel(priv, reg, REG_SPHY_CNTRL); udelay(21); reg = reg_readl(priv, REG_SPHY_CNTRL); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index d4496e9afcdf..8b2c31e2a2b0 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c @@ -1355,7 +1355,6 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, /* Offload checksum calculation to HW */ if (skb->ip_summed == CHECKSUM_PARTIAL) { - hdr->csum_l3 = 1; /* Enable IP csum calculation */ hdr->l3_offset = skb_network_offset(skb); hdr->l4_offset = skb_transport_offset(skb); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 
dbd69310f263..538b42d5c187 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1231,7 +1231,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE)) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 5829715fa342..e019baa905c5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -90,7 +90,6 @@ #define I40E_AQ_LEN 256 #define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */ #define I40E_MAX_USER_PRIORITY 8 -#define I40E_MAX_QUEUES_PER_CH 64 #define I40E_DEFAULT_TRAFFIC_CLASS BIT(0) #define I40E_DEFAULT_MSG_ENABLE 4 #define I40E_QUEUE_WAIT_RETRY_LIMIT 10 diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 9dcb2a961197..9af74253c3f7 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -613,6 +613,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; } + /* Newer versions of firmware require lock when reading the NVM */ + if (hw->aq.api_maj_ver > 1 || + (hw->aq.api_maj_ver == 1 && + hw->aq.api_min_ver >= 5)) + hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; + /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */ if (hw->aq.api_maj_ver > 1 || (hw->aq.api_maj_ver == 1 && diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 0203665cb53c..095965f268bd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -948,7 +948,8 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw) hw->pf_id = (u8)(func_rid & 0x7); if (hw->mac.type == I40E_MAC_X722) - hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE; + hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE | + I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK; status = i40e_init_nvm(hw); return status; @@ -1268,6 +1269,7 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) * we don't need to do the PF Reset */ if (!cnt) { + u32 reg2 = 0; if (hw->revision_id == 0) cnt = I40E_PF_RESET_WAIT_COUNT_A0; else @@ -1279,6 +1281,12 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) reg = rd32(hw, I40E_PFGEN_CTRL); if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK)) break; + reg2 = rd32(hw, I40E_GLGEN_RSTAT); + if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) { + hw_dbg(hw, "Core reset upcoming. Skipping PF reset request.\n"); + hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg2); + return I40E_ERR_NOT_READY; + } usleep_range(1000, 2000); } if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 4a964d6e4a9e..4c08cc86463e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2167,6 +2167,73 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, } /** + * i40e_set_promiscuous - set promiscuous mode + * @pf: board private structure + * @promisc: promisc on or off + * + * There are different ways of setting promiscuous mode on a PF depending on + * what state/environment we're in. This identifies and sets it appropriately. + * Returns 0 on success. 
+ **/ +static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc) +{ + struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_hw *hw = &pf->hw; + i40e_status aq_ret; + + if (vsi->type == I40E_VSI_MAIN && + pf->lan_veb != I40E_NO_VEB && + !(pf->flags & I40E_FLAG_MFP_ENABLED)) { + /* set defport ON for Main VSI instead of true promisc + * this way we will get all unicast/multicast and VLAN + * promisc behavior but will not get VF or VMDq traffic + * replicated on the Main VSI. + */ + if (promisc) + aq_ret = i40e_aq_set_default_vsi(hw, + vsi->seid, + NULL); + else + aq_ret = i40e_aq_clear_default_vsi(hw, + vsi->seid, + NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "Set default VSI failed, err %s, aq_err %s\n", + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + } + } else { + aq_ret = i40e_aq_set_vsi_unicast_promiscuous( + hw, + vsi->seid, + promisc, NULL, + true); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "set unicast promisc failed, err %s, aq_err %s\n", + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + } + aq_ret = i40e_aq_set_vsi_multicast_promiscuous( + hw, + vsi->seid, + promisc, NULL); + if (aq_ret) { + dev_info(&pf->pdev->dev, + "set multicast promisc failed, err %s, aq_err %s\n", + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); + } + } + + if (!aq_ret) + pf->cur_promisc = promisc; + + return aq_ret; +} + +/** * i40e_sync_vsi_filters - Update the VSI filter list to the HW * @vsi: ptr to the VSI * @@ -2467,81 +2534,16 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) || test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state)); - if ((vsi->type == I40E_VSI_MAIN) && - (pf->lan_veb != I40E_NO_VEB) && - !(pf->flags & I40E_FLAG_MFP_ENABLED)) { - /* set defport ON for Main VSI instead of true promisc - * this way we will get all unicast/multicast and VLAN - * promisc behavior but will not get VF or VMDq traffic - * replicated on the Main VSI. 
- */ - if (pf->cur_promisc != cur_promisc) { - pf->cur_promisc = cur_promisc; - if (cur_promisc) - aq_ret = - i40e_aq_set_default_vsi(hw, - vsi->seid, - NULL); - else - aq_ret = - i40e_aq_clear_default_vsi(hw, - vsi->seid, - NULL); - if (aq_ret) { - retval = i40e_aq_rc_to_posix(aq_ret, - hw->aq.asq_last_status); - dev_info(&pf->pdev->dev, - "Set default VSI failed on %s, err %s, aq_err %s\n", - vsi_name, - i40e_stat_str(hw, aq_ret), - i40e_aq_str(hw, - hw->aq.asq_last_status)); - } - } - } else { - aq_ret = i40e_aq_set_vsi_unicast_promiscuous( - hw, - vsi->seid, - cur_promisc, NULL, - true); - if (aq_ret) { - retval = - i40e_aq_rc_to_posix(aq_ret, - hw->aq.asq_last_status); - dev_info(&pf->pdev->dev, - "set unicast promisc failed on %s, err %s, aq_err %s\n", - vsi_name, - i40e_stat_str(hw, aq_ret), - i40e_aq_str(hw, - hw->aq.asq_last_status)); - } - aq_ret = i40e_aq_set_vsi_multicast_promiscuous( - hw, - vsi->seid, - cur_promisc, NULL); - if (aq_ret) { - retval = - i40e_aq_rc_to_posix(aq_ret, - hw->aq.asq_last_status); - dev_info(&pf->pdev->dev, - "set multicast promisc failed on %s, err %s, aq_err %s\n", - vsi_name, - i40e_stat_str(hw, aq_ret), - i40e_aq_str(hw, - hw->aq.asq_last_status)); - } - } - aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw, - vsi->seid, - cur_promisc, NULL); + aq_ret = i40e_set_promiscuous(pf, cur_promisc); if (aq_ret) { retval = i40e_aq_rc_to_posix(aq_ret, - pf->hw.aq.asq_last_status); + hw->aq.asq_last_status); dev_info(&pf->pdev->dev, - "set brdcast promisc failed, err %s, aq_err %s\n", - i40e_stat_str(hw, aq_ret), - i40e_aq_str(hw, - hw->aq.asq_last_status)); + "Setting promiscuous %s failed on %s, err %s aq_err %s\n", + cur_promisc ? "on" : "off", + vsi_name, + i40e_stat_str(hw, aq_ret), + i40e_aq_str(hw, hw->aq.asq_last_status)); } } out: @@ -3964,7 +3966,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if the descriptor isn't done, no work yet to do */ if (!(eop_desc->cmd_type_offset_bsz & @@ -5629,14 +5631,6 @@ static int i40e_validate_num_queues(struct i40e_pf *pf, int num_queues, return -EINVAL; *reconfig_rss = false; - - if (num_queues > I40E_MAX_QUEUES_PER_CH) { - dev_err(&pf->pdev->dev, - "Failed to create VMDq VSI. User requested num_queues (%d) > I40E_MAX_QUEUES_PER_VSI (%u)\n", - num_queues, I40E_MAX_QUEUES_PER_CH); - return -EINVAL; - } - if (vsi->current_rss_size) { if (num_queues > vsi->current_rss_size) { dev_dbg(&pf->pdev->dev, @@ -9429,6 +9423,15 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) if (!lock_acquired) rtnl_unlock(); + /* Restore promiscuous settings */ + ret = i40e_set_promiscuous(pf, pf->cur_promisc); + if (ret) + dev_warn(&pf->pdev->dev, + "Failed to restore promiscuous setting: %s, err %s aq_err %s\n", + pf->cur_promisc ? 
"on" : "off", + i40e_stat_str(&pf->hw, ret), + i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status)); + i40e_reset_all_vfs(pf, true); /* tell the firmware that we're starting */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 0ccab0a5d717..7689c2ee0d46 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -328,15 +328,17 @@ static i40e_status __i40e_read_nvm_word(struct i40e_hw *hw, i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, u16 *data) { - i40e_status ret_code; + i40e_status ret_code = 0; - ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (ret_code) return ret_code; ret_code = __i40e_read_nvm_word(hw, offset, data); - i40e_release_nvm(hw); + if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK) + i40e_release_nvm(hw); return ret_code; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index d6d352a6e6ea..4566d66ffc7c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -759,7 +759,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* we have caught up to head, no work left to do */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 00d4833e9925..0e8568719b4e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -629,6 +629,7 @@ struct i40e_hw { #define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0) #define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) #define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) +#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3) u64 flags; /* Used in set switch config AQ command */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index f8a794b72462..a3dc9b932946 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2218,18 +2218,19 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) struct i40e_mac_filter *f; f = i40e_find_mac(vsi, al->list[i].addr); - if (!f) + if (!f) { f = i40e_add_mac_filter(vsi, al->list[i].addr); - if (!f) { - dev_err(&pf->pdev->dev, - "Unable to add MAC filter %pM for VF %d\n", - al->list[i].addr, vf->vf_id); - ret = I40E_ERR_PARAM; - spin_unlock_bh(&vsi->mac_filter_hash_lock); - goto error_param; - } else { - vf->num_mac++; + if (!f) { + dev_err(&pf->pdev->dev, + "Unable to add MAC filter %pM for VF %d\n", + al->list[i].addr, vf->vf_id); + ret = I40E_ERR_PARAM; + spin_unlock_bh(&vsi->mac_filter_hash_lock); + goto error_param; + } else { + vf->num_mac++; + } } } spin_unlock_bh(&vsi->mac_filter_hash_lock); diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index fe817e2b6fef..50864f99446d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -179,7 +179,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); /* 
if the descriptor isn't done, no work yet to do */ diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_client.c b/drivers/net/ethernet/intel/i40evf/i40evf_client.c index d8131139565e..da60ce12b33d 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_client.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_client.c @@ -26,6 +26,26 @@ static struct i40e_ops i40evf_lan_ops = { }; /** + * i40evf_client_get_params - retrieve relevant client parameters + * @vsi: VSI with parameters + * @params: client param struct + **/ +static +void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params) +{ + int i; + + memset(params, 0, sizeof(struct i40e_params)); + params->mtu = vsi->netdev->mtu; + params->link_up = vsi->back->link_up; + + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { + params->qos.prio_qos[i].tc = 0; + params->qos.prio_qos[i].qs_handle = vsi->qs_handle; + } +} + +/** * i40evf_notify_client_message - call the client message receive callback * @vsi: the VSI associated with this client * @msg: message buffer @@ -66,10 +86,6 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi) return; cinst = vsi->back->cinst; - memset(¶ms, 0, sizeof(params)); - params.mtu = vsi->netdev->mtu; - params.link_up = vsi->back->link_up; - params.qos.prio_qos[0].qs_handle = vsi->qs_handle; if (!cinst || !cinst->client || !cinst->client->ops || !cinst->client->ops->l2_param_change) { @@ -77,6 +93,8 @@ void i40evf_notify_client_l2_params(struct i40e_vsi *vsi) "Cannot locate client instance l2_param_change function\n"); return; } + i40evf_client_get_params(vsi, ¶ms); + cinst->lan_info.params = params; cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client, ¶ms); } @@ -166,9 +184,9 @@ static struct i40e_client_instance * i40evf_client_add_instance(struct i40evf_adapter *adapter) { struct i40e_client_instance *cinst = NULL; - struct netdev_hw_addr *mac = NULL; struct i40e_vsi *vsi = &adapter->vsi; - int i; + struct netdev_hw_addr *mac = NULL; + struct i40e_params params; if (!vf_registered_client) goto out; @@ -192,18 +210,14 @@ i40evf_client_add_instance(struct i40evf_adapter *adapter) cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR; cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR; cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD; + i40evf_client_get_params(vsi, ¶ms); + cinst->lan_info.params = params; set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state); cinst->lan_info.msix_count = adapter->num_iwarp_msix; cinst->lan_info.msix_entries = &adapter->msix_entries[adapter->iwarp_base_vector]; - for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) { - cinst->lan_info.params.qos.prio_qos[i].tc = 0; - cinst->lan_info.params.qos.prio_qos[i].qs_handle = - vsi->qs_handle; - } - mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list, struct netdev_hw_addr, list); if (mac) diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c index ca2ebdbd24d7..7b2a4eba92e2 100644 --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c @@ -2110,6 +2110,11 @@ static void i40evf_client_task(struct work_struct *work) adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED; goto out; } + if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { + i40evf_notify_client_l2_params(&adapter->vsi); + adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS; + goto out; + } if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) { i40evf_notify_client_close(&adapter->vsi, false); 
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE; @@ -2118,11 +2123,6 @@ static void i40evf_client_task(struct work_struct *work) if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) { i40evf_notify_client_open(&adapter->vsi); adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN; - goto out; - } - if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) { - i40evf_notify_client_l2_params(&adapter->vsi); - adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS; } out: clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index e94d3c256667..c208753ff5b7 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -7317,7 +7317,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 713e8df23744..4214c1519a87 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -810,7 +810,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring) break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD))) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ca06c3cc2ca8..62a18914f00f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1192,7 +1192,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index feed11bc9ddf..1f4a69134ade 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -326,7 +326,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector, break; /* prevent any other reads prior to eop_desc */ - read_barrier_depends(); + smp_rmb(); /* if DD is not set pending work has not been completed */ if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index b6cee71f49d3..bc879aeb62d4 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -214,8 +214,14 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, { int err; - if (prog && !prog->aux->offload) - return -EINVAL; + if (prog) { + struct bpf_dev_offload *offload = prog->aux->offload; + + if (!offload) + return -EINVAL; + if (offload->netdev != nn->dp.netdev) + return -EINVAL; + } if (prog && old_prog) { u8 cap; diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index ac8439ceea10..481876b5424c 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ 
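Editor's aside on the read_barrier_depends() to smp_rmb() conversions in fm10k, i40e, i40evf, igb, igbvf, ixgbe and ixgbevf above: they all touch the same Tx-clean idiom. A simplified skeleton with placeholder field and flag names; the point is that smp_rmb() orders every later load in the loop after the next_to_watch load, not only loads that are data-dependent on the eop_desc pointer, which is all the weaker dependency barrier guaranteed:

        /* generic Tx-clean skeleton, names are placeholders */
        eop_desc = tx_buf->next_to_watch;
        if (!eop_desc)
                break;                  /* nothing queued for this buffer */

        smp_rmb();                      /* order all later loads, not just
                                         * those that depend on eop_desc   */

        if (!(eop_desc->status & cpu_to_le32(TX_DESC_DONE)))
                break;                  /* hardware has not finished yet */

        /* ...unmap buffers, free the skb, advance next_to_clean... */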
b/drivers/net/ethernet/nvidia/forcedeth.c @@ -1986,9 +1986,9 @@ static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb) tx_skb->dma_len, DMA_TO_DEVICE); else - pci_unmap_page(np->pci_dev, tx_skb->dma, + dma_unmap_page(&np->pci_dev->dev, tx_skb->dma, tx_skb->dma_len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); tx_skb->dma = 0; } } diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 2cb3622c4acc..fc0d5fa65ad4 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -2030,21 +2030,6 @@ out: return ret; } -static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) -{ - struct rtl8169_private *tp = netdev_priv(dev); - int ret; - - del_timer_sync(&tp->timer); - - rtl_lock_work(tp); - ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd), - cmd->duplex, cmd->advertising); - rtl_unlock_work(tp); - - return ret; -} - static netdev_features_t rtl8169_fix_features(struct net_device *dev, netdev_features_t features) { @@ -2171,6 +2156,27 @@ static int rtl8169_get_link_ksettings(struct net_device *dev, return rc; } +static int rtl8169_set_link_ksettings(struct net_device *dev, + const struct ethtool_link_ksettings *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int rc; + u32 advertising; + + if (!ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising)) + return -EINVAL; + + del_timer_sync(&tp->timer); + + rtl_lock_work(tp); + rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed, + cmd->base.duplex, advertising); + rtl_unlock_work(tp); + + return rc; +} + static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { @@ -2591,7 +2597,6 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_link = ethtool_op_get_link, .get_coalesce = rtl_get_coalesce, .set_coalesce = rtl_set_coalesce, - .set_settings = rtl8169_set_settings, .get_msglevel = rtl8169_get_msglevel, .set_msglevel = rtl8169_set_msglevel, .get_regs = rtl8169_get_regs, @@ -2603,6 +2608,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, .nway_reset = rtl8169_nway_reset, .get_link_ksettings = rtl8169_get_link_ksettings, + .set_link_ksettings = rtl8169_set_link_ksettings, }; static void rtl8169_get_mac_version(struct rtl8169_private *tp, diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 4e16d839c311..b718a02a6bb6 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -1337,21 +1337,33 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], } if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) { +#if IS_ENABLED(CONFIG_IPV6) if (changelink) { attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX; goto change_notsup; } if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX])) info->key.tun_flags &= ~TUNNEL_CSUM; +#else + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX], + "IPv6 support not enabled in the kernel"); + return -EPFNOSUPPORT; +#endif } if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) { +#if IS_ENABLED(CONFIG_IPV6) if (changelink) { attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX; goto change_notsup; } if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX])) *use_udp6_rx_checksums = false; +#else + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX], + "IPv6 support not enabled in the kernel"); + return -EPFNOSUPPORT; +#endif } return 0; @@ -1527,11 +1539,13 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) goto nla_put_failure; 
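Editor's aside on the forcedeth hunk above: it is a mechanical move from the legacy PCI DMA wrappers to the generic DMA API with no change in behaviour. The correspondence, as a sketch (pdev stands for any struct pci_dev *):

        /* before: legacy wrapper from pci-dma-compat.h */
        pci_unmap_page(pdev, addr, len, PCI_DMA_TODEVICE);

        /* after: generic DMA API, same effect */
        dma_unmap_page(&pdev->dev, addr, len, DMA_TO_DEVICE);

        /*
         * Direction constants map one to one:
         *   PCI_DMA_TODEVICE      -> DMA_TO_DEVICE
         *   PCI_DMA_FROMDEVICE    -> DMA_FROM_DEVICE
         *   PCI_DMA_BIDIRECTIONAL -> DMA_BIDIRECTIONAL
         *   PCI_DMA_NONE          -> DMA_NONE
         */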
if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA)) - goto nla_put_failure; + goto nla_put_failure; +#if IS_ENABLED(CONFIG_IPV6) if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX, !geneve->use_udp6_rx_checksums)) goto nla_put_failure; +#endif return 0; diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index f2a7e929316e..11c1e7950fe5 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -116,7 +116,7 @@ bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6) return false; } -static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type) +static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type) { void *lyr3h = NULL; @@ -124,7 +124,7 @@ static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type) case htons(ETH_P_ARP): { struct arphdr *arph; - if (unlikely(!pskb_may_pull(skb, sizeof(*arph)))) + if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev)))) return NULL; arph = arp_hdr(skb); @@ -165,8 +165,26 @@ static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type) /* Only Neighbour Solicitation pkts need different treatment */ if (ipv6_addr_any(&ip6h->saddr) && ip6h->nexthdr == NEXTHDR_ICMP) { + struct icmp6hdr *icmph; + + if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)))) + return NULL; + + ip6h = ipv6_hdr(skb); + icmph = (struct icmp6hdr *)(ip6h + 1); + + if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) { + /* Need to access the ipv6 address in body */ + if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph) + + sizeof(struct in6_addr)))) + return NULL; + + ip6h = ipv6_hdr(skb); + icmph = (struct icmp6hdr *)(ip6h + 1); + } + *type = IPVL_ICMPV6; - lyr3h = ip6h + 1; + lyr3h = icmph; } break; } @@ -510,7 +528,7 @@ static int ipvlan_xmit_mode_l3(struct sk_buff *skb, struct net_device *dev) struct ipvl_addr *addr; int addr_type; - lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); + lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type); if (!lyr3h) goto out; @@ -539,7 +557,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) if (!ipvlan_is_vepa(ipvlan->port) && ether_addr_equal(eth->h_dest, eth->h_source)) { - lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); + lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type); if (lyr3h) { addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true); if (addr) { @@ -606,7 +624,7 @@ static bool ipvlan_external_frame(struct sk_buff *skb, struct ipvl_port *port) int addr_type; if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) { - lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); + lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type); if (!lyr3h) return true; @@ -627,7 +645,7 @@ static rx_handler_result_t ipvlan_handle_mode_l3(struct sk_buff **pskb, struct sk_buff *skb = *pskb; rx_handler_result_t ret = RX_HANDLER_PASS; - lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); + lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type); if (!lyr3h) goto out; @@ -666,7 +684,7 @@ static rx_handler_result_t ipvlan_handle_mode_l2(struct sk_buff **pskb, } else { struct ipvl_addr *addr; - lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); + lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type); if (!lyr3h) return ret; @@ -717,7 +735,7 @@ static struct ipvl_addr *ipvlan_skb_to_addr(struct sk_buff *skb, if (!port || port->mode != IPVLAN_MODE_L3S) goto out; - lyr3h = ipvlan_get_L3_hdr(skb, &addr_type); + lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type); if (!lyr3h) goto out; diff --git a/drivers/net/phy/cortina.c 
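Editor's aside on the ipvlan hunks above: two details are easy to miss. Pulling arp_hdr_len(port->dev) instead of sizeof(*arph) makes the sender/target addresses in the ARP payload safe to read, and every successful pskb_may_pull() may relocate the packet data, which is why the IPv6 and ICMPv6 header pointers are re-derived after each pull. A worked size for the ARP case, assuming an Ethernet lower device:

/*
 * arp_hdr_len(dev) = sizeof(struct arphdr) + 2 * (dev->addr_len + sizeof(u32))
 *                  = 8 + 2 * (6 + 4) = 28 bytes for Ethernet,
 *
 * versus sizeof(struct arphdr) = 8 bytes, which covers only the fixed
 * header and none of the addresses the address-lookup code goes on to read.
 */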
b/drivers/net/phy/cortina.c index 72f4228a63bb..9442db221834 100644 --- a/drivers/net/phy/cortina.c +++ b/drivers/net/phy/cortina.c @@ -116,3 +116,7 @@ static struct mdio_device_id __maybe_unused cortina_tbl[] = { }; MODULE_DEVICE_TABLE(mdio, cortina_tbl); + +MODULE_DESCRIPTION("Cortina EDC CDR 10G Ethernet PHY driver"); +MODULE_AUTHOR("NXP"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/tap.c b/drivers/net/tap.c index b13890953ebb..e9489b88407c 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1077,7 +1077,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd, case TUNSETOFFLOAD: /* let the user check for future flags */ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | - TUN_F_TSO_ECN)) + TUN_F_TSO_ECN | TUN_F_UFO)) return -EINVAL; rtnl_lock(); diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index 228d4aa6d9ae..ca5e375de27c 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -335,7 +335,7 @@ static void tbnet_free_buffers(struct tbnet_ring *ring) if (ring->ring->is_tx) { dir = DMA_TO_DEVICE; order = 0; - size = tbnet_frame_size(tf); + size = TBNET_FRAME_SIZE; } else { dir = DMA_FROM_DEVICE; order = TBNET_RX_PAGE_ORDER; @@ -512,6 +512,7 @@ err_free: static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net) { struct tbnet_ring *ring = &net->tx_ring; + struct device *dma_dev = tb_ring_dma_device(ring->ring); struct tbnet_frame *tf; unsigned int index; @@ -522,7 +523,9 @@ static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net) tf = &ring->frames[index]; tf->frame.size = 0; - tf->frame.buffer_phy = 0; + + dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy, + tbnet_frame_size(tf), DMA_TO_DEVICE); return tf; } @@ -531,13 +534,8 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame, bool canceled) { struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame); - struct device *dma_dev = tb_ring_dma_device(ring); struct tbnet *net = netdev_priv(tf->dev); - dma_unmap_page(dma_dev, tf->frame.buffer_phy, tbnet_frame_size(tf), - DMA_TO_DEVICE); - tf->frame.buffer_phy = 0; - /* Return buffer to the ring */ net->tx_ring.prod++; @@ -548,10 +546,12 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame, static int tbnet_alloc_tx_buffers(struct tbnet *net) { struct tbnet_ring *ring = &net->tx_ring; + struct device *dma_dev = tb_ring_dma_device(ring->ring); unsigned int i; for (i = 0; i < TBNET_RING_SIZE; i++) { struct tbnet_frame *tf = &ring->frames[i]; + dma_addr_t dma_addr; tf->page = alloc_page(GFP_KERNEL); if (!tf->page) { @@ -559,7 +559,17 @@ static int tbnet_alloc_tx_buffers(struct tbnet *net) return -ENOMEM; } + dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, dma_addr)) { + __free_page(tf->page); + tf->page = NULL; + tbnet_free_buffers(ring); + return -ENOMEM; + } + tf->dev = net->dev; + tf->frame.buffer_phy = dma_addr; tf->frame.callback = tbnet_tx_callback; tf->frame.sof = TBIP_PDF_FRAME_START; tf->frame.eof = TBIP_PDF_FRAME_END; @@ -881,19 +891,6 @@ static int tbnet_stop(struct net_device *dev) return 0; } -static bool tbnet_xmit_map(struct device *dma_dev, struct tbnet_frame *tf) -{ - dma_addr_t dma_addr; - - dma_addr = dma_map_page(dma_dev, tf->page, 0, tbnet_frame_size(tf), - DMA_TO_DEVICE); - if (dma_mapping_error(dma_dev, dma_addr)) - return false; - - tf->frame.buffer_phy = dma_addr; - return true; -} - static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb, struct 
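Editor's aside on the thunderbolt-net hunks above: they stop mapping and unmapping every Tx page per frame. Each page is DMA-mapped once when the Tx ring is allocated, and the hot path only transfers ownership with dma_sync calls. A generic sketch of that lifecycle; buffer and size names are placeholders, not the driver's:

        /* setup: map once, keep the handle for the buffer's lifetime */
        buf->dma = dma_map_page(dev, buf->page, 0, BUF_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, buf->dma))
                goto err_free_page;

        /* per packet: give the buffer back to the CPU before writing it */
        dma_sync_single_for_cpu(dev, buf->dma, BUF_SIZE, DMA_TO_DEVICE);
        /* ...build the frame in page_address(buf->page)... */

        /* hand it to the hardware */
        dma_sync_single_for_device(dev, buf->dma, BUF_SIZE, DMA_TO_DEVICE);

        /* teardown only */
        dma_unmap_page(dev, buf->dma, BUF_SIZE, DMA_TO_DEVICE);
        __free_page(buf->page);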
tbnet_frame **frames, u32 frame_count) { @@ -908,13 +905,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb, if (skb->ip_summed != CHECKSUM_PARTIAL) { /* No need to calculate checksum so we just update the - * total frame count and map the frames for DMA. + * total frame count and sync the frames for DMA. */ for (i = 0; i < frame_count; i++) { hdr = page_address(frames[i]->page); hdr->frame_count = cpu_to_le32(frame_count); - if (!tbnet_xmit_map(dma_dev, frames[i])) - goto err_unmap; + dma_sync_single_for_device(dma_dev, + frames[i]->frame.buffer_phy, + tbnet_frame_size(frames[i]), DMA_TO_DEVICE); } return true; @@ -983,21 +981,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb, *tucso = csum_fold(wsum); /* Checksum is finally calculated and we don't touch the memory - * anymore, so DMA map the frames now. + * anymore, so DMA sync the frames now. */ for (i = 0; i < frame_count; i++) { - if (!tbnet_xmit_map(dma_dev, frames[i])) - goto err_unmap; + dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy, + tbnet_frame_size(frames[i]), DMA_TO_DEVICE); } return true; - -err_unmap: - while (i--) - dma_unmap_page(dma_dev, frames[i]->frame.buffer_phy, - tbnet_frame_size(frames[i]), DMA_TO_DEVICE); - - return false; } static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num, diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 5a2ea78a008f..6a7bde9bc4b2 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -2370,6 +2370,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg) features |= NETIF_F_TSO6; arg &= ~(TUN_F_TSO4|TUN_F_TSO6); } + + arg &= ~TUN_F_UFO; } /* This gives the user a way to test for new features in future by diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index e31438541ee1..7d295ee71534 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -566,18 +566,16 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar, #define MICHAEL_MIC_LEN 8 -static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar, - enum htt_rx_mpdu_encrypt_type type) +static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar, + enum htt_rx_mpdu_encrypt_type type) { switch (type) { case HTT_RX_MPDU_ENCRYPT_NONE: - return 0; case HTT_RX_MPDU_ENCRYPT_WEP40: case HTT_RX_MPDU_ENCRYPT_WEP104: - return IEEE80211_WEP_ICV_LEN; case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: - return IEEE80211_TKIP_ICV_LEN; + return 0; case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: return IEEE80211_CCMP_MIC_LEN; case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: @@ -594,6 +592,31 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar, return 0; } +static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar, + enum htt_rx_mpdu_encrypt_type type) +{ + switch (type) { + case HTT_RX_MPDU_ENCRYPT_NONE: + case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2: + case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2: + case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2: + case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2: + return 0; + case HTT_RX_MPDU_ENCRYPT_WEP40: + case HTT_RX_MPDU_ENCRYPT_WEP104: + return IEEE80211_WEP_ICV_LEN; + case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC: + case HTT_RX_MPDU_ENCRYPT_TKIP_WPA: + return IEEE80211_TKIP_ICV_LEN; + case HTT_RX_MPDU_ENCRYPT_WEP128: + case HTT_RX_MPDU_ENCRYPT_WAPI: + break; + } + + ath10k_warn(ar, "unsupported encryption type %d\n", type); + return 0; +} + struct amsdu_subframe_hdr { u8 dst[ETH_ALEN]; u8 src[ETH_ALEN]; @@ 
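Editor's aside on the tap.c and tun.c hunks above: they re-accept TUN_F_UFO in TUNSETOFFLOAD for compatibility with older userspace; tun simply masks the bit off since UFO itself is gone. A minimal userspace sketch of the call that would otherwise fail with EINVAL (the interface name is an assumption):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/if_tun.h>

static int open_tap_with_legacy_offloads(void)
{
        struct ifreq ifr;
        int fd = open("/dev/net/tun", O_RDWR);

        if (fd < 0)
                return -1;

        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
        strncpy(ifr.ifr_name, "tap0", IFNAMSIZ - 1);    /* assumed name */
        if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
                close(fd);
                return -1;
        }

        /* TUN_F_UFO is accepted again and quietly ignored by the kernel */
        if (ioctl(fd, TUNSETOFFLOAD,
                  TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_UFO) < 0) {
                close(fd);
                return -1;
        }

        return fd;
}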
-1063,25 +1086,27 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, /* Tail */ if (status->flag & RX_FLAG_IV_STRIPPED) { skb_trim(msdu, msdu->len - - ath10k_htt_rx_crypto_tail_len(ar, enctype)); + ath10k_htt_rx_crypto_mic_len(ar, enctype)); + + skb_trim(msdu, msdu->len - + ath10k_htt_rx_crypto_icv_len(ar, enctype)); } else { /* MIC */ - if ((status->flag & RX_FLAG_MIC_STRIPPED) && - enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) - skb_trim(msdu, msdu->len - 8); + if (status->flag & RX_FLAG_MIC_STRIPPED) + skb_trim(msdu, msdu->len - + ath10k_htt_rx_crypto_mic_len(ar, enctype)); /* ICV */ - if (status->flag & RX_FLAG_ICV_STRIPPED && - enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) + if (status->flag & RX_FLAG_ICV_STRIPPED) skb_trim(msdu, msdu->len - - ath10k_htt_rx_crypto_tail_len(ar, enctype)); + ath10k_htt_rx_crypto_icv_len(ar, enctype)); } /* MMIC */ if ((status->flag & RX_FLAG_MMIC_STRIPPED) && !ieee80211_has_morefrags(hdr->frame_control) && enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA) - skb_trim(msdu, msdu->len - 8); + skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN); /* Head */ if (status->flag & RX_FLAG_IV_STRIPPED) { diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index 71812a2dd513..f7d228b5ba93 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -1233,7 +1233,7 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn, } /* External RF module */ - iris_node = of_find_node_by_name(mmio_node, "iris"); + iris_node = of_get_child_by_name(mmio_node, "iris"); if (iris_node) { if (of_device_is_compatible(iris_node, "qcom,wcn3620")) wcn->rf_id = RF_IRIS_WCN3620; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index af7c4f36b66f..e7e75b458005 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -72,18 +72,21 @@ #define IWL9000_SMEM_OFFSET 0x400000 #define IWL9000_SMEM_LEN 0x68000 -#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-" +#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-" +#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-" #define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-" #define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-" #define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-" -#define IWL9000_MODULE_FIRMWARE(api) \ - IWL9000_FW_PRE "-" __stringify(api) ".ucode" +#define IWL9000A_MODULE_FIRMWARE(api) \ + IWL9000A_FW_PRE __stringify(api) ".ucode" +#define IWL9000B_MODULE_FIRMWARE(api) \ + IWL9000B_FW_PRE __stringify(api) ".ucode" #define IWL9000RFB_MODULE_FIRMWARE(api) \ - IWL9000RFB_FW_PRE "-" __stringify(api) ".ucode" + IWL9000RFB_FW_PRE __stringify(api) ".ucode" #define IWL9260A_MODULE_FIRMWARE(api) \ - IWL9260A_FW_PRE "-" __stringify(api) ".ucode" + IWL9260A_FW_PRE __stringify(api) ".ucode" #define IWL9260B_MODULE_FIRMWARE(api) \ - IWL9260B_FW_PRE "-" __stringify(api) ".ucode" + IWL9260B_FW_PRE __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_9000 10 @@ -194,7 +197,48 @@ const struct iwl_cfg iwl9460_2ac_cfg = { .nvm_ver = IWL9000_NVM_VERSION, .nvm_calib_ver = IWL9000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, +}; + +const struct iwl_cfg iwl9460_2ac_cfg_soc = { + .name = "Intel(R) Dual Band Wireless AC 9460", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = 
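Editor's aside on the wcn36xx one-liner above: of_find_node_by_name() searches the whole device tree starting after the node it is given (and drops a reference on that node), while of_get_child_by_name() matches only direct children of mmio_node and returns the child with a reference held. A sketch of the lookup including the reference drop the caller then owes; the of_node_put() belongs to the sketch, not to this hunk:

        struct device_node *iris;

        iris = of_get_child_by_name(mmio_node, "iris"); /* direct child only */
        if (iris) {
                if (of_device_is_compatible(iris, "qcom,wcn3620"))
                        wcn->rf_id = RF_IRIS_WCN3620;
                of_node_put(iris);      /* balance the reference taken above */
        }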
IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, + .soc_latency = 5000, +}; + +const struct iwl_cfg iwl9461_2ac_cfg_soc = { + .name = "Intel(R) Dual Band Wireless AC 9461", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, +}; + +const struct iwl_cfg iwl9462_2ac_cfg_soc = { + .name = "Intel(R) Dual Band Wireless AC 9462", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, }; const struct iwl_cfg iwl9560_2ac_cfg = { @@ -206,10 +250,23 @@ const struct iwl_cfg iwl9560_2ac_cfg = { .nvm_ver = IWL9000_NVM_VERSION, .nvm_calib_ver = IWL9000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, - .integrated = true, }; -MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); +const struct iwl_cfg iwl9560_2ac_cfg_soc = { + .name = "Intel(R) Dual Band Wireless AC 9560", + .fw_name_pre = IWL9000A_FW_PRE, + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, + IWL_DEVICE_9000, + .ht_params = &iwl9000_ht_params, + .nvm_ver = IWL9000_NVM_VERSION, + .nvm_calib_ver = IWL9000_TX_POWER_VERSION, + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .integrated = true, + .soc_latency = 5000, +}; +MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c index ea8206515171..705f83b02e13 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c @@ -80,15 +80,15 @@ #define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" #define IWL_A000_HR_MODULE_FIRMWARE(api) \ - IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_HR_FW_PRE __stringify(api) ".ucode" #define IWL_A000_JF_MODULE_FIRMWARE(api) \ - IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_JF_FW_PRE __stringify(api) ".ucode" #define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \ - IWL_A000_HR_F0_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode" #define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \ - IWL_A000_JF_B0_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode" #define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \ - IWL_A000_HR_A0_FW_PRE "-" __stringify(api) ".ucode" + IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_A000 10 diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 5a40092febfb..3bfc657f6b42 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ 
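Editor's aside on the *_MODULE_FIRMWARE edits in the 9000 and a000 config files above: they drop a literal "-" that the firmware-name prefixes already end with. A worked expansion, using API number 34 purely as an example:

/*
 * IWL9000A_FW_PRE is "iwlwifi-9000-pu-a0-jf-a0-", so
 *
 *   IWL9000A_MODULE_FIRMWARE(34)
 *     -> IWL9000A_FW_PRE __stringify(34) ".ucode"
 *     -> "iwlwifi-9000-pu-a0-jf-a0-" "34" ".ucode"
 *     -> "iwlwifi-9000-pu-a0-jf-a0-34.ucode"
 *
 * The old form concatenated an extra "-" and produced a double-dash
 * name ("...-jf-a0--34.ucode") that does not match the released files.
 */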
b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -531,6 +531,8 @@ struct iwl_scan_config_v1 { } __packed; /* SCAN_CONFIG_DB_CMD_API_S */ #define SCAN_TWO_LMACS 2 +#define SCAN_LB_LMAC_IDX 0 +#define SCAN_HB_LMAC_IDX 1 struct iwl_scan_config { __le32 flags; @@ -578,6 +580,7 @@ enum iwl_umac_scan_general_flags { IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9), IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10), IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11), + IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13), }; /** @@ -631,12 +634,17 @@ struct iwl_scan_req_umac_tail { * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority * @general_flags: &enum iwl_umac_scan_general_flags - * @reserved2: for future use and alignment * @scan_start_mac_id: report the scan start TSF time according to this mac TSF * @extended_dwell: dwell time for channels 1, 6 and 11 * @active_dwell: dwell time for active scan * @passive_dwell: dwell time for passive scan * @fragmented_dwell: dwell time for fragmented passive scan + * @adwell_default_n_aps: for adaptive dwell the default number of APs + * per channel + * @adwell_default_n_aps_social: for adaptive dwell the default + * number of APs per social (1,6,11) channel + * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added + * to total scan time * @max_out_time: max out of serving channel time, per LMAC - for CDB there * are 2 LMACs * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs @@ -644,6 +652,8 @@ struct iwl_scan_req_umac_tail { * @channel_flags: &enum iwl_scan_channel_flags * @n_channels: num of channels in scan request * @reserved: for future use and alignment + * @reserved2: for future use and alignment + * @reserved3: for future use and alignment * @data: &struct iwl_scan_channel_cfg_umac and * &struct iwl_scan_req_umac_tail */ @@ -651,41 +661,64 @@ struct iwl_scan_req_umac { __le32 flags; __le32 uid; __le32 ooc_priority; - /* SCAN_GENERAL_PARAMS_API_S_VER_4 */ __le16 general_flags; - u8 reserved2; + u8 reserved; u8 scan_start_mac_id; - u8 extended_dwell; - u8 active_dwell; - u8 passive_dwell; - u8 fragmented_dwell; union { struct { + u8 extended_dwell; + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; __le32 max_out_time; __le32 suspend_time; __le32 scan_priority; - /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ u8 channel_flags; u8 n_channels; - __le16 reserved; + __le16 reserved2; u8 data[]; } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */ struct { + u8 extended_dwell; + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; __le32 max_out_time[SCAN_TWO_LMACS]; __le32 suspend_time[SCAN_TWO_LMACS]; __le32 scan_priority; - /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */ + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ u8 channel_flags; u8 n_channels; - __le16 reserved; + __le16 reserved2; u8 data[]; } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */ + struct { + u8 active_dwell; + u8 passive_dwell; + u8 fragmented_dwell; + u8 adwell_default_n_aps; + u8 adwell_default_n_aps_social; + u8 reserved3; + __le16 adwell_max_budget; + __le32 max_out_time[SCAN_TWO_LMACS]; + __le32 suspend_time[SCAN_TWO_LMACS]; + __le32 scan_priority; + /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */ + u8 channel_flags; + u8 n_channels; + __le16 reserved2; + u8 data[]; + } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */ }; } __packed; -#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac) +#define IWL_SCAN_REQ_UMAC_SIZE_V7 sizeof(struct iwl_scan_req_umac) +#define 
IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \ + 2 * sizeof(u8) - sizeof(__le16)) #define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \ - 2 * sizeof(__le32)) + 2 * sizeof(__le32) - 2 * sizeof(u8) - \ + sizeof(__le16)) /** * struct iwl_umac_scan_abort diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 740d97093d1c..37a5c5b4eda6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -264,6 +264,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30, IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31, /* API Set 1 */ + IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32, IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34, IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35, IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index d1263a554420..e21e46cf6f9a 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -366,6 +366,7 @@ struct iwl_cfg { u32 dccm2_len; u32 smem_offset; u32 smem_len; + u32 soc_latency; u16 nvm_ver; u16 nvm_calib_ver; u16 rx_with_siso_diversity:1, @@ -472,6 +473,10 @@ extern const struct iwl_cfg iwl9260_2ac_cfg; extern const struct iwl_cfg iwl9270_2ac_cfg; extern const struct iwl_cfg iwl9460_2ac_cfg; extern const struct iwl_cfg iwl9560_2ac_cfg; +extern const struct iwl_cfg iwl9460_2ac_cfg_soc; +extern const struct iwl_cfg iwl9461_2ac_cfg_soc; +extern const struct iwl_cfg iwl9462_2ac_cfg_soc; +extern const struct iwl_cfg iwl9560_2ac_cfg_soc; extern const struct iwl_cfg iwla000_2ac_cfg_hr; extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwla000_2ac_cfg_jf; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 0e18c5066f04..4575595ab022 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1142,6 +1142,12 @@ static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm) IWL_UCODE_TLV_CAPA_D0I3_SUPPORT); } +static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm) +{ + return fw_has_api(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_API_ADAPTIVE_DWELL); +} + static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm) { /* For now we only use this mode to differentiate between diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 774122fed454..e4fd476e9ccb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -130,6 +130,19 @@ struct iwl_mvm_scan_params { u32 measurement_dwell; }; +static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm) +{ + struct iwl_scan_req_umac *cmd = mvm->scan_cmd; + + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) + return (void *)&cmd->v7.data; + + if (iwl_mvm_has_new_tx_api(mvm)) + return (void *)&cmd->v6.data; + + return (void *)&cmd->v1.data; +} + static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm) { if (mvm->scan_rx_ant != ANT_NONE) @@ -1075,25 +1088,57 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, { struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type]; + if 
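Editor's aside on the three IWL_SCAN_REQ_UMAC_SIZE_* macros above: they encode how much of the v1/v6/v7 union each firmware generation actually sends. v7 is the full structure; v6 is 4 bytes shorter (v7 trades the extended_dwell byte for the adaptive-dwell fields, a net growth of two u8 plus a __le16); v1 is a further 8 bytes shorter because it carries only one LMAC's max_out_time/suspend_time. Hypothetical compile-time checks, not part of the patch, spelling that relationship out:

        BUILD_BUG_ON(IWL_SCAN_REQ_UMAC_SIZE_V6 !=
                     IWL_SCAN_REQ_UMAC_SIZE_V7 - 2 * sizeof(u8) - sizeof(__le16));
        BUILD_BUG_ON(IWL_SCAN_REQ_UMAC_SIZE_V1 !=
                     IWL_SCAN_REQ_UMAC_SIZE_V6 - 2 * sizeof(__le32));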
(iwl_mvm_is_regular_scan(params)) + cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + else + cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); + + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) { + if (params->measurement_dwell) { + cmd->v7.active_dwell = params->measurement_dwell; + cmd->v7.passive_dwell = params->measurement_dwell; + } else { + cmd->v7.active_dwell = IWL_SCAN_DWELL_ACTIVE; + cmd->v7.passive_dwell = IWL_SCAN_DWELL_PASSIVE; + } + cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; + + cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); + cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->max_out_time); + cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->suspend_time); + if (iwl_mvm_is_cdb_supported(mvm)) { + cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] = + cpu_to_le32(timing->max_out_time); + cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] = + cpu_to_le32(timing->suspend_time); + } + + return; + } + if (params->measurement_dwell) { - cmd->active_dwell = params->measurement_dwell; - cmd->passive_dwell = params->measurement_dwell; - cmd->extended_dwell = params->measurement_dwell; + cmd->v1.active_dwell = params->measurement_dwell; + cmd->v1.passive_dwell = params->measurement_dwell; + cmd->v1.extended_dwell = params->measurement_dwell; } else { - cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE; - cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE; - cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED; + cmd->v1.active_dwell = IWL_SCAN_DWELL_ACTIVE; + cmd->v1.passive_dwell = IWL_SCAN_DWELL_PASSIVE; + cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED; } - cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; + cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED; if (iwl_mvm_has_new_tx_api(mvm)) { cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); - cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time); - cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time); + cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->max_out_time); + cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] = + cpu_to_le32(timing->suspend_time); if (iwl_mvm_is_cdb_supported(mvm)) { - cmd->v6.max_out_time[1] = + cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(timing->max_out_time); - cmd->v6.suspend_time[1] = + cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] = cpu_to_le32(timing->suspend_time); } } else { @@ -1102,11 +1147,6 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm, cmd->v1.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); } - - if (iwl_mvm_is_regular_scan(params)) - cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6); - else - cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2); } static void @@ -1178,8 +1218,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, int type) { struct iwl_scan_req_umac *cmd = mvm->scan_cmd; - void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ? 
- (void *)&cmd->v6.data : (void *)&cmd->v1.data; + void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm); struct iwl_scan_req_umac_tail *sec_part = cmd_data + sizeof(struct iwl_scan_channel_cfg_umac) * mvm->fw->ucode_capa.n_scan_channels; @@ -1216,7 +1255,10 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | IWL_SCAN_CHANNEL_FLAG_CACHE_ADD; - if (iwl_mvm_has_new_tx_api(mvm)) { + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) { + cmd->v7.channel_flags = channel_flags; + cmd->v7.n_channels = params->n_channels; + } else if (iwl_mvm_has_new_tx_api(mvm)) { cmd->v6.channel_flags = channel_flags; cmd->v6.n_channels = params->n_channels; } else { @@ -1661,8 +1703,10 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm) { int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1; - if (iwl_mvm_has_new_tx_api(mvm)) - base_size = IWL_SCAN_REQ_UMAC_SIZE; + if (iwl_mvm_is_adaptive_dwell_supported(mvm)) + base_size = IWL_SCAN_REQ_UMAC_SIZE_V7; + else if (iwl_mvm_has_new_tx_api(mvm)) + base_size = IWL_SCAN_REQ_UMAC_SIZE_V6; if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) return base_size + diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 4a21c12276d7..f21fe59faccf 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -535,47 +535,121 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)}, - {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)}, {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)}, - {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)}, - 
{IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)}, - {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)}, + {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x003C, 
iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)}, /* a000 Series */ {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)}, diff --git a/drivers/ntb/hw/Kconfig b/drivers/ntb/hw/Kconfig index a89243c9fdd3..e51b581fd102 100644 --- a/drivers/ntb/hw/Kconfig +++ b/drivers/ntb/hw/Kconfig @@ -1,3 +1,4 @@ source "drivers/ntb/hw/amd/Kconfig" source "drivers/ntb/hw/idt/Kconfig" source "drivers/ntb/hw/intel/Kconfig" +source "drivers/ntb/hw/mscc/Kconfig" diff --git a/drivers/ntb/hw/Makefile b/drivers/ntb/hw/Makefile index 87332c3905f0..923c442db750 100644 --- a/drivers/ntb/hw/Makefile +++ b/drivers/ntb/hw/Makefile @@ -1,3 +1,4 @@ obj-$(CONFIG_NTB_AMD) += amd/ obj-$(CONFIG_NTB_IDT) += idt/ obj-$(CONFIG_NTB_INTEL) += intel/ +obj-$(CONFIG_NTB_SWITCHTEC) += mscc/ diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c index d44d7ef38fe8..0cd79f367f7c 100644 --- a/drivers/ntb/hw/idt/ntb_hw_idt.c +++ b/drivers/ntb/hw/idt/ntb_hw_idt.c @@ -2628,35 +2628,35 @@ static void idt_pci_remove(struct pci_dev *pdev) /* * IDT PCIe-switch models ports 
configuration structures */ -static struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = { +static const struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = { .name = "89HPES24NT6AG2", .port_cnt = 6, .ports = {0, 2, 4, 6, 8, 12} }; -static struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = { +static const struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = { .name = "89HPES32NT8AG2", .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} }; -static struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = { +static const struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = { .name = "89HPES32NT8BG2", .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} }; -static struct idt_89hpes_cfg idt_89hpes12nt12g2_config = { +static const struct idt_89hpes_cfg idt_89hpes12nt12g2_config = { .name = "89HPES12NT12G2", .port_cnt = 3, .ports = {0, 8, 16} }; -static struct idt_89hpes_cfg idt_89hpes16nt16g2_config = { +static const struct idt_89hpes_cfg idt_89hpes16nt16g2_config = { .name = "89HPES16NT16G2", .port_cnt = 4, .ports = {0, 8, 12, 16} }; -static struct idt_89hpes_cfg idt_89hpes24nt24g2_config = { +static const struct idt_89hpes_cfg idt_89hpes24nt24g2_config = { .name = "89HPES24NT24G2", .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} }; -static struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = { +static const struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = { .name = "89HPES32NT24AG2", .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} }; -static struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = { +static const struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = { .name = "89HPES32NT24BG2", .port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20} }; diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c index 2557e2c05b90..4de074a86073 100644 --- a/drivers/ntb/hw/intel/ntb_hw_intel.c +++ b/drivers/ntb/hw/intel/ntb_hw_intel.c @@ -1742,89 +1742,18 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, { struct pci_dev *pdev; void __iomem *mmio; - resource_size_t bar_size; phys_addr_t bar_addr; - int b2b_bar; - u8 bar_sz; pdev = ndev->ntb.pdev; mmio = ndev->self_mmio; - if (ndev->b2b_idx == UINT_MAX) { - dev_dbg(&pdev->dev, "not using b2b mw\n"); - b2b_bar = 0; - ndev->b2b_off = 0; - } else { - b2b_bar = ndev_mw_to_bar(ndev, ndev->b2b_idx); - if (b2b_bar < 0) - return -EIO; - - dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar); - - bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar); - - dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size); - - if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) { - dev_dbg(&pdev->dev, "b2b using first half of bar\n"); - ndev->b2b_off = bar_size >> 1; - } else if (bar_size >= XEON_B2B_MIN_SIZE) { - dev_dbg(&pdev->dev, "b2b using whole bar\n"); - ndev->b2b_off = 0; - --ndev->mw_count; - } else { - dev_dbg(&pdev->dev, "b2b bar size is too small\n"); - return -EIO; - } - } - - /* - * Reset the secondary bar sizes to match the primary bar sizes, - * except disable or halve the size of the b2b secondary bar. 
- */ - pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz); - dev_dbg(&pdev->dev, "IMBAR1SZ %#x\n", bar_sz); - if (b2b_bar == 1) { - if (ndev->b2b_off) - bar_sz -= 1; - else - bar_sz = 0; - } - - pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz); - pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz); - dev_dbg(&pdev->dev, "EMBAR1SZ %#x\n", bar_sz); - - pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz); - dev_dbg(&pdev->dev, "IMBAR2SZ %#x\n", bar_sz); - if (b2b_bar == 2) { - if (ndev->b2b_off) - bar_sz -= 1; - else - bar_sz = 0; - } - - pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz); - pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz); - dev_dbg(&pdev->dev, "EMBAR2SZ %#x\n", bar_sz); - - /* SBAR01 hit by first part of the b2b bar */ - if (b2b_bar == 0) - bar_addr = addr->bar0_addr; - else if (b2b_bar == 1) - bar_addr = addr->bar2_addr64; - else if (b2b_bar == 2) - bar_addr = addr->bar4_addr64; - else - return -EIO; - /* setup incoming bar limits == base addrs (zero length windows) */ - bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0); + bar_addr = addr->bar2_addr64; iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET); bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET); dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr); - bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); + bar_addr = addr->bar4_addr64; iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET); bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET); dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr); diff --git a/drivers/ntb/hw/mscc/Kconfig b/drivers/ntb/hw/mscc/Kconfig new file mode 100644 index 000000000000..013ed6716438 --- /dev/null +++ b/drivers/ntb/hw/mscc/Kconfig @@ -0,0 +1,9 @@ +config NTB_SWITCHTEC + tristate "MicroSemi Switchtec Non-Transparent Bridge Support" + select PCI_SW_SWITCHTEC + help + Enables NTB support for Switchtec PCI switches. This also + selects the Switchtec management driver as they share the same + hardware interface. + + If unsure, say N. diff --git a/drivers/ntb/hw/mscc/Makefile b/drivers/ntb/hw/mscc/Makefile new file mode 100644 index 000000000000..064686ead1ba --- /dev/null +++ b/drivers/ntb/hw/mscc/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_NTB_SWITCHTEC) += ntb_hw_switchtec.o diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c new file mode 100644 index 000000000000..afe8ed6f3b23 --- /dev/null +++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c @@ -0,0 +1,1216 @@ +/* + * Microsemi Switchtec(tm) PCIe Management Driver + * Copyright (c) 2017, Microsemi Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ + +#include <linux/switchtec.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/kthread.h> +#include <linux/interrupt.h> +#include <linux/ntb.h> + +MODULE_DESCRIPTION("Microsemi Switchtec(tm) NTB Driver"); +MODULE_VERSION("0.1"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Microsemi Corporation"); + +static ulong max_mw_size = SZ_2M; +module_param(max_mw_size, ulong, 0644); +MODULE_PARM_DESC(max_mw_size, + "Max memory window size reported to the upper layer"); + +static bool use_lut_mws; +module_param(use_lut_mws, bool, 0644); +MODULE_PARM_DESC(use_lut_mws, + "Enable the use of the LUT based memory windows"); + +#ifndef ioread64 +#ifdef readq +#define ioread64 readq +#else +#define ioread64 _ioread64 +static inline u64 _ioread64(void __iomem *mmio) +{ + u64 low, high; + + low = ioread32(mmio); + high = ioread32(mmio + sizeof(u32)); + return low | (high << 32); +} +#endif +#endif + +#ifndef iowrite64 +#ifdef writeq +#define iowrite64 writeq +#else +#define iowrite64 _iowrite64 +static inline void _iowrite64(u64 val, void __iomem *mmio) +{ + iowrite32(val, mmio); + iowrite32(val >> 32, mmio + sizeof(u32)); +} +#endif +#endif + +#define SWITCHTEC_NTB_MAGIC 0x45CC0001 +#define MAX_MWS 128 + +struct shared_mw { + u32 magic; + u32 link_sta; + u32 partition_id; + u64 mw_sizes[MAX_MWS]; + u32 spad[128]; +}; + +#define MAX_DIRECT_MW ARRAY_SIZE(((struct ntb_ctrl_regs *)(0))->bar_entry) +#define LUT_SIZE SZ_64K + +struct switchtec_ntb { + struct ntb_dev ntb; + struct switchtec_dev *stdev; + + int self_partition; + int peer_partition; + + int doorbell_irq; + int message_irq; + + struct ntb_info_regs __iomem *mmio_ntb; + struct ntb_ctrl_regs __iomem *mmio_ctrl; + struct ntb_dbmsg_regs __iomem *mmio_dbmsg; + struct ntb_ctrl_regs __iomem *mmio_self_ctrl; + struct ntb_ctrl_regs __iomem *mmio_peer_ctrl; + struct ntb_dbmsg_regs __iomem *mmio_self_dbmsg; + + struct shared_mw *self_shared; + struct shared_mw __iomem *peer_shared; + dma_addr_t self_shared_dma; + + u64 db_mask; + u64 db_valid_mask; + int db_shift; + int db_peer_shift; + + /* synchronize rmw access of db_mask and hw reg */ + spinlock_t db_mask_lock; + + int nr_direct_mw; + int nr_lut_mw; + int direct_mw_to_bar[MAX_DIRECT_MW]; + + int peer_nr_direct_mw; + int peer_nr_lut_mw; + int peer_direct_mw_to_bar[MAX_DIRECT_MW]; + + bool link_is_up; + enum ntb_speed link_speed; + enum ntb_width link_width; +}; + +static struct switchtec_ntb *ntb_sndev(struct ntb_dev *ntb) +{ + return container_of(ntb, struct switchtec_ntb, ntb); +} + +static int switchtec_ntb_part_op(struct switchtec_ntb *sndev, + struct ntb_ctrl_regs __iomem *ctl, + u32 op, int wait_status) +{ + static const char * const op_text[] = { + [NTB_CTRL_PART_OP_LOCK] = "lock", + [NTB_CTRL_PART_OP_CFG] = "configure", + [NTB_CTRL_PART_OP_RESET] = "reset", + }; + + int i; + u32 ps; + int status; + + switch (op) { + case NTB_CTRL_PART_OP_LOCK: + status = NTB_CTRL_PART_STATUS_LOCKING; + break; + case NTB_CTRL_PART_OP_CFG: + status = NTB_CTRL_PART_STATUS_CONFIGURING; + break; + case NTB_CTRL_PART_OP_RESET: + status = NTB_CTRL_PART_STATUS_RESETTING; + break; + default: + return -EINVAL; + } + + iowrite32(op, &ctl->partition_op); + + for (i = 0; i < 1000; i++) { + if (msleep_interruptible(50) != 0) { + iowrite32(NTB_CTRL_PART_OP_RESET, &ctl->partition_op); + return -EINTR; + } + + ps = ioread32(&ctl->partition_status) & 0xFFFF; + + if (ps != status) + break; + } + + if (ps == wait_status) + return 0; + + if (ps == status) { + dev_err(&sndev->stdev->dev, + "Timed out while 
performing %s (%d). (%08x)", + op_text[op], op, + ioread32(&ctl->partition_status)); + + return -ETIMEDOUT; + } + + return -EIO; +} + +static int switchtec_ntb_send_msg(struct switchtec_ntb *sndev, int idx, + u32 val) +{ + if (idx < 0 || idx >= ARRAY_SIZE(sndev->mmio_self_dbmsg->omsg)) + return -EINVAL; + + iowrite32(val, &sndev->mmio_self_dbmsg->omsg[idx].msg); + + return 0; +} + +static int switchtec_ntb_mw_count(struct ntb_dev *ntb, int pidx) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + int nr_direct_mw = sndev->peer_nr_direct_mw; + int nr_lut_mw = sndev->peer_nr_lut_mw - 1; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + if (!use_lut_mws) + nr_lut_mw = 0; + + return nr_direct_mw + nr_lut_mw; +} + +static int lut_index(struct switchtec_ntb *sndev, int mw_idx) +{ + return mw_idx - sndev->nr_direct_mw + 1; +} + +static int peer_lut_index(struct switchtec_ntb *sndev, int mw_idx) +{ + return mw_idx - sndev->peer_nr_direct_mw + 1; +} + +static int switchtec_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, + int widx, resource_size_t *addr_align, + resource_size_t *size_align, + resource_size_t *size_max) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + int lut; + resource_size_t size; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + lut = widx >= sndev->peer_nr_direct_mw; + size = ioread64(&sndev->peer_shared->mw_sizes[widx]); + + if (size == 0) + return -EINVAL; + + if (addr_align) + *addr_align = lut ? size : SZ_4K; + + if (size_align) + *size_align = lut ? size : SZ_4K; + + if (size_max) + *size_max = size; + + return 0; +} + +static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx) +{ + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; + int bar = sndev->peer_direct_mw_to_bar[idx]; + u32 ctl_val; + + ctl_val = ioread32(&ctl->bar_entry[bar].ctl); + ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN; + iowrite32(ctl_val, &ctl->bar_entry[bar].ctl); + iowrite32(0, &ctl->bar_entry[bar].win_size); + iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr); +} + +static void switchtec_ntb_mw_clr_lut(struct switchtec_ntb *sndev, int idx) +{ + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; + + iowrite64(0, &ctl->lut_entry[peer_lut_index(sndev, idx)]); +} + +static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx, + dma_addr_t addr, resource_size_t size) +{ + int xlate_pos = ilog2(size); + int bar = sndev->peer_direct_mw_to_bar[idx]; + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; + u32 ctl_val; + + ctl_val = ioread32(&ctl->bar_entry[bar].ctl); + ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN; + + iowrite32(ctl_val, &ctl->bar_entry[bar].ctl); + iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size); + iowrite64(sndev->self_partition | addr, + &ctl->bar_entry[bar].xlate_addr); +} + +static void switchtec_ntb_mw_set_lut(struct switchtec_ntb *sndev, int idx, + dma_addr_t addr, resource_size_t size) +{ + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; + + iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | addr), + &ctl->lut_entry[peer_lut_index(sndev, idx)]); +} + +static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx, + dma_addr_t addr, resource_size_t size) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; + int xlate_pos = ilog2(size); + int nr_direct_mw = sndev->peer_nr_direct_mw; + int rc; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + dev_dbg(&sndev->stdev->dev, "MW %d: part %d addr %pad size 
%pap", + widx, pidx, &addr, &size); + + if (widx >= switchtec_ntb_mw_count(ntb, pidx)) + return -EINVAL; + + if (xlate_pos < 12) + return -EINVAL; + + rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK, + NTB_CTRL_PART_STATUS_LOCKED); + if (rc) + return rc; + + if (addr == 0 || size == 0) { + if (widx < nr_direct_mw) + switchtec_ntb_mw_clr_direct(sndev, widx); + else + switchtec_ntb_mw_clr_lut(sndev, widx); + } else { + if (widx < nr_direct_mw) + switchtec_ntb_mw_set_direct(sndev, widx, addr, size); + else + switchtec_ntb_mw_set_lut(sndev, widx, addr, size); + } + + rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG, + NTB_CTRL_PART_STATUS_NORMAL); + + if (rc == -EIO) { + dev_err(&sndev->stdev->dev, + "Hardware reported an error configuring mw %d: %08x", + widx, ioread32(&ctl->bar_error)); + + if (widx < nr_direct_mw) + switchtec_ntb_mw_clr_direct(sndev, widx); + else + switchtec_ntb_mw_clr_lut(sndev, widx); + + switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG, + NTB_CTRL_PART_STATUS_NORMAL); + } + + return rc; +} + +static int switchtec_ntb_peer_mw_count(struct ntb_dev *ntb) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + return sndev->nr_direct_mw + (use_lut_mws ? sndev->nr_lut_mw - 1 : 0); +} + +static int switchtec_ntb_direct_get_addr(struct switchtec_ntb *sndev, + int idx, phys_addr_t *base, + resource_size_t *size) +{ + int bar = sndev->direct_mw_to_bar[idx]; + size_t offset = 0; + + if (bar < 0) + return -EINVAL; + + if (idx == 0) { + /* + * This is the direct BAR shared with the LUTs + * which means the actual window will be offset + * by the size of all the LUT entries. + */ + + offset = LUT_SIZE * sndev->nr_lut_mw; + } + + if (base) + *base = pci_resource_start(sndev->ntb.pdev, bar) + offset; + + if (size) { + *size = pci_resource_len(sndev->ntb.pdev, bar) - offset; + if (offset && *size > offset) + *size = offset; + + if (*size > max_mw_size) + *size = max_mw_size; + } + + return 0; +} + +static int switchtec_ntb_lut_get_addr(struct switchtec_ntb *sndev, + int idx, phys_addr_t *base, + resource_size_t *size) +{ + int bar = sndev->direct_mw_to_bar[0]; + int offset; + + offset = LUT_SIZE * lut_index(sndev, idx); + + if (base) + *base = pci_resource_start(sndev->ntb.pdev, bar) + offset; + + if (size) + *size = LUT_SIZE; + + return 0; +} + +static int switchtec_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx, + phys_addr_t *base, + resource_size_t *size) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (idx < sndev->nr_direct_mw) + return switchtec_ntb_direct_get_addr(sndev, idx, base, size); + else if (idx < switchtec_ntb_peer_mw_count(ntb)) + return switchtec_ntb_lut_get_addr(sndev, idx, base, size); + else + return -EINVAL; +} + +static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev, + int partition, + enum ntb_speed *speed, + enum ntb_width *width) +{ + struct switchtec_dev *stdev = sndev->stdev; + + u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id); + u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]); + + if (speed) + *speed = (linksta >> 16) & 0xF; + + if (width) + *width = (linksta >> 20) & 0x3F; +} + +static void switchtec_ntb_set_link_speed(struct switchtec_ntb *sndev) +{ + enum ntb_speed self_speed, peer_speed; + enum ntb_width self_width, peer_width; + + if (!sndev->link_is_up) { + sndev->link_speed = NTB_SPEED_NONE; + sndev->link_width = NTB_WIDTH_NONE; + return; + } + + switchtec_ntb_part_link_speed(sndev, sndev->self_partition, + &self_speed, &self_width); + 
switchtec_ntb_part_link_speed(sndev, sndev->peer_partition, + &peer_speed, &peer_width); + + sndev->link_speed = min(self_speed, peer_speed); + sndev->link_width = min(self_width, peer_width); +} + +enum { + LINK_MESSAGE = 0, + MSG_LINK_UP = 1, + MSG_LINK_DOWN = 2, + MSG_CHECK_LINK = 3, +}; + +static void switchtec_ntb_check_link(struct switchtec_ntb *sndev) +{ + int link_sta; + int old = sndev->link_is_up; + + link_sta = sndev->self_shared->link_sta; + if (link_sta) { + u64 peer = ioread64(&sndev->peer_shared->magic); + + if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC) + link_sta = peer >> 32; + else + link_sta = 0; + } + + sndev->link_is_up = link_sta; + switchtec_ntb_set_link_speed(sndev); + + if (link_sta != old) { + switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_CHECK_LINK); + ntb_link_event(&sndev->ntb); + dev_info(&sndev->stdev->dev, "ntb link %s", + link_sta ? "up" : "down"); + } +} + +static void switchtec_ntb_link_notification(struct switchtec_dev *stdev) +{ + struct switchtec_ntb *sndev = stdev->sndev; + + switchtec_ntb_check_link(sndev); +} + +static u64 switchtec_ntb_link_is_up(struct ntb_dev *ntb, + enum ntb_speed *speed, + enum ntb_width *width) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (speed) + *speed = sndev->link_speed; + if (width) + *width = sndev->link_width; + + return sndev->link_is_up; +} + +static int switchtec_ntb_link_enable(struct ntb_dev *ntb, + enum ntb_speed max_speed, + enum ntb_width max_width) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + dev_dbg(&sndev->stdev->dev, "enabling link"); + + sndev->self_shared->link_sta = 1; + switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP); + + switchtec_ntb_check_link(sndev); + + return 0; +} + +static int switchtec_ntb_link_disable(struct ntb_dev *ntb) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + dev_dbg(&sndev->stdev->dev, "disabling link"); + + sndev->self_shared->link_sta = 0; + switchtec_ntb_send_msg(sndev, LINK_MESSAGE, MSG_LINK_UP); + + switchtec_ntb_check_link(sndev); + + return 0; +} + +static u64 switchtec_ntb_db_valid_mask(struct ntb_dev *ntb) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + return sndev->db_valid_mask; +} + +static int switchtec_ntb_db_vector_count(struct ntb_dev *ntb) +{ + return 1; +} + +static u64 switchtec_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (db_vector < 0 || db_vector > 1) + return 0; + + return sndev->db_valid_mask; +} + +static u64 switchtec_ntb_db_read(struct ntb_dev *ntb) +{ + u64 ret; + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + ret = ioread64(&sndev->mmio_self_dbmsg->idb) >> sndev->db_shift; + + return ret & sndev->db_valid_mask; +} + +static int switchtec_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + iowrite64(db_bits << sndev->db_shift, &sndev->mmio_self_dbmsg->idb); + + return 0; +} + +static int switchtec_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits) +{ + unsigned long irqflags; + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (db_bits & ~sndev->db_valid_mask) + return -EINVAL; + + spin_lock_irqsave(&sndev->db_mask_lock, irqflags); + + sndev->db_mask |= db_bits << sndev->db_shift; + iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask); + + spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags); + + return 0; +} + +static int switchtec_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) +{ + unsigned long irqflags; + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if 
(db_bits & ~sndev->db_valid_mask) + return -EINVAL; + + spin_lock_irqsave(&sndev->db_mask_lock, irqflags); + + sndev->db_mask &= ~(db_bits << sndev->db_shift); + iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask); + + spin_unlock_irqrestore(&sndev->db_mask_lock, irqflags); + + return 0; +} + +static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + return (sndev->db_mask >> sndev->db_shift) & sndev->db_valid_mask; +} + +static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb, + phys_addr_t *db_addr, + resource_size_t *db_size) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + unsigned long offset; + + offset = (unsigned long)sndev->mmio_self_dbmsg->odb - + (unsigned long)sndev->stdev->mmio; + + offset += sndev->db_shift / 8; + + if (db_addr) + *db_addr = pci_resource_start(ntb->pdev, 0) + offset; + if (db_size) + *db_size = sizeof(u32); + + return 0; +} + +static int switchtec_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + iowrite64(db_bits << sndev->db_peer_shift, + &sndev->mmio_self_dbmsg->odb); + + return 0; +} + +static int switchtec_ntb_spad_count(struct ntb_dev *ntb) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + return ARRAY_SIZE(sndev->self_shared->spad); +} + +static u32 switchtec_ntb_spad_read(struct ntb_dev *ntb, int idx) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad)) + return 0; + + if (!sndev->self_shared) + return 0; + + return sndev->self_shared->spad[idx]; +} + +static int switchtec_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (idx < 0 || idx >= ARRAY_SIZE(sndev->self_shared->spad)) + return -EINVAL; + + if (!sndev->self_shared) + return -EIO; + + sndev->self_shared->spad[idx] = val; + + return 0; +} + +static u32 switchtec_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, + int sidx) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad)) + return 0; + + if (!sndev->peer_shared) + return 0; + + return ioread32(&sndev->peer_shared->spad[sidx]); +} + +static int switchtec_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, + int sidx, u32 val) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + if (sidx < 0 || sidx >= ARRAY_SIZE(sndev->peer_shared->spad)) + return -EINVAL; + + if (!sndev->peer_shared) + return -EIO; + + iowrite32(val, &sndev->peer_shared->spad[sidx]); + + return 0; +} + +static int switchtec_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, + int sidx, phys_addr_t *spad_addr) +{ + struct switchtec_ntb *sndev = ntb_sndev(ntb); + unsigned long offset; + + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + offset = (unsigned long)&sndev->peer_shared->spad[sidx] - + (unsigned long)sndev->stdev->mmio; + + if (spad_addr) + *spad_addr = pci_resource_start(ntb->pdev, 0) + offset; + + return 0; +} + +static const struct ntb_dev_ops switchtec_ntb_ops = { + .mw_count = switchtec_ntb_mw_count, + .mw_get_align = switchtec_ntb_mw_get_align, + .mw_set_trans = switchtec_ntb_mw_set_trans, + .peer_mw_count = switchtec_ntb_peer_mw_count, + .peer_mw_get_addr = switchtec_ntb_peer_mw_get_addr, + .link_is_up = switchtec_ntb_link_is_up, + .link_enable = switchtec_ntb_link_enable, + .link_disable = switchtec_ntb_link_disable, + .db_valid_mask = 
switchtec_ntb_db_valid_mask, + .db_vector_count = switchtec_ntb_db_vector_count, + .db_vector_mask = switchtec_ntb_db_vector_mask, + .db_read = switchtec_ntb_db_read, + .db_clear = switchtec_ntb_db_clear, + .db_set_mask = switchtec_ntb_db_set_mask, + .db_clear_mask = switchtec_ntb_db_clear_mask, + .db_read_mask = switchtec_ntb_db_read_mask, + .peer_db_addr = switchtec_ntb_peer_db_addr, + .peer_db_set = switchtec_ntb_peer_db_set, + .spad_count = switchtec_ntb_spad_count, + .spad_read = switchtec_ntb_spad_read, + .spad_write = switchtec_ntb_spad_write, + .peer_spad_read = switchtec_ntb_peer_spad_read, + .peer_spad_write = switchtec_ntb_peer_spad_write, + .peer_spad_addr = switchtec_ntb_peer_spad_addr, +}; + +static void switchtec_ntb_init_sndev(struct switchtec_ntb *sndev) +{ + u64 part_map; + + sndev->ntb.pdev = sndev->stdev->pdev; + sndev->ntb.topo = NTB_TOPO_SWITCH; + sndev->ntb.ops = &switchtec_ntb_ops; + + sndev->self_partition = sndev->stdev->partition; + + sndev->mmio_ntb = sndev->stdev->mmio_ntb; + part_map = ioread64(&sndev->mmio_ntb->ep_map); + part_map &= ~(1 << sndev->self_partition); + sndev->peer_partition = ffs(part_map) - 1; + + dev_dbg(&sndev->stdev->dev, "Partition ID %d of %d (%llx)", + sndev->self_partition, sndev->stdev->partition_count, + part_map); + + sndev->mmio_ctrl = (void * __iomem)sndev->mmio_ntb + + SWITCHTEC_NTB_REG_CTRL_OFFSET; + sndev->mmio_dbmsg = (void * __iomem)sndev->mmio_ntb + + SWITCHTEC_NTB_REG_DBMSG_OFFSET; + + sndev->mmio_self_ctrl = &sndev->mmio_ctrl[sndev->self_partition]; + sndev->mmio_peer_ctrl = &sndev->mmio_ctrl[sndev->peer_partition]; + sndev->mmio_self_dbmsg = &sndev->mmio_dbmsg[sndev->self_partition]; +} + +static int map_bars(int *map, struct ntb_ctrl_regs __iomem *ctrl) +{ + int i; + int cnt = 0; + + for (i = 0; i < ARRAY_SIZE(ctrl->bar_entry); i++) { + u32 r = ioread32(&ctrl->bar_entry[i].ctl); + + if (r & NTB_CTRL_BAR_VALID) + map[cnt++] = i; + } + + return cnt; +} + +static void switchtec_ntb_init_mw(struct switchtec_ntb *sndev) +{ + sndev->nr_direct_mw = map_bars(sndev->direct_mw_to_bar, + sndev->mmio_self_ctrl); + + sndev->nr_lut_mw = ioread16(&sndev->mmio_self_ctrl->lut_table_entries); + sndev->nr_lut_mw = rounddown_pow_of_two(sndev->nr_lut_mw); + + dev_dbg(&sndev->stdev->dev, "MWs: %d direct, %d lut", + sndev->nr_direct_mw, sndev->nr_lut_mw); + + sndev->peer_nr_direct_mw = map_bars(sndev->peer_direct_mw_to_bar, + sndev->mmio_peer_ctrl); + + sndev->peer_nr_lut_mw = + ioread16(&sndev->mmio_peer_ctrl->lut_table_entries); + sndev->peer_nr_lut_mw = rounddown_pow_of_two(sndev->peer_nr_lut_mw); + + dev_dbg(&sndev->stdev->dev, "Peer MWs: %d direct, %d lut", + sndev->peer_nr_direct_mw, sndev->peer_nr_lut_mw); + +} + +/* + * There are 64 doorbells in the switch hardware but this is + * shared among all partitions. So we must split them in half + * (32 for each partition). 
However, the message interrupts are + * also shared with the top 4 doorbells so we just limit this to + * 28 doorbells per partition + */ +static void switchtec_ntb_init_db(struct switchtec_ntb *sndev) +{ + sndev->db_valid_mask = 0x0FFFFFFF; + + if (sndev->self_partition < sndev->peer_partition) { + sndev->db_shift = 0; + sndev->db_peer_shift = 32; + } else { + sndev->db_shift = 32; + sndev->db_peer_shift = 0; + } + + sndev->db_mask = 0x0FFFFFFFFFFFFFFFULL; + iowrite64(~sndev->db_mask, &sndev->mmio_self_dbmsg->idb_mask); + iowrite64(sndev->db_valid_mask << sndev->db_peer_shift, + &sndev->mmio_self_dbmsg->odb_mask); +} + +static void switchtec_ntb_init_msgs(struct switchtec_ntb *sndev) +{ + int i; + u32 msg_map = 0; + + for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) { + int m = i | sndev->peer_partition << 2; + + msg_map |= m << i * 8; + } + + iowrite32(msg_map, &sndev->mmio_self_dbmsg->msg_map); + + for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) + iowrite64(NTB_DBMSG_IMSG_STATUS | NTB_DBMSG_IMSG_MASK, + &sndev->mmio_self_dbmsg->imsg[i]); +} + +static int switchtec_ntb_init_req_id_table(struct switchtec_ntb *sndev) +{ + int rc = 0; + u16 req_id; + u32 error; + + req_id = ioread16(&sndev->mmio_ntb->requester_id); + + if (ioread32(&sndev->mmio_self_ctrl->req_id_table_size) < 2) { + dev_err(&sndev->stdev->dev, + "Not enough requester IDs available."); + return -EFAULT; + } + + rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl, + NTB_CTRL_PART_OP_LOCK, + NTB_CTRL_PART_STATUS_LOCKED); + if (rc) + return rc; + + iowrite32(NTB_PART_CTRL_ID_PROT_DIS, + &sndev->mmio_self_ctrl->partition_ctrl); + + /* + * Root Complex Requester ID (which is 0:00.0) + */ + iowrite32(0 << 16 | NTB_CTRL_REQ_ID_EN, + &sndev->mmio_self_ctrl->req_id_table[0]); + + /* + * Host Bridge Requester ID (as read from the mmap address) + */ + iowrite32(req_id << 16 | NTB_CTRL_REQ_ID_EN, + &sndev->mmio_self_ctrl->req_id_table[1]); + + rc = switchtec_ntb_part_op(sndev, sndev->mmio_self_ctrl, + NTB_CTRL_PART_OP_CFG, + NTB_CTRL_PART_STATUS_NORMAL); + if (rc == -EIO) { + error = ioread32(&sndev->mmio_self_ctrl->req_id_error); + dev_err(&sndev->stdev->dev, + "Error setting up the requester ID table: %08x", + error); + } + + return rc; +} + +static void switchtec_ntb_init_shared(struct switchtec_ntb *sndev) +{ + int i; + + memset(sndev->self_shared, 0, LUT_SIZE); + sndev->self_shared->magic = SWITCHTEC_NTB_MAGIC; + sndev->self_shared->partition_id = sndev->stdev->partition; + + for (i = 0; i < sndev->nr_direct_mw; i++) { + int bar = sndev->direct_mw_to_bar[i]; + resource_size_t sz = pci_resource_len(sndev->stdev->pdev, bar); + + if (i == 0) + sz = min_t(resource_size_t, sz, + LUT_SIZE * sndev->nr_lut_mw); + + sndev->self_shared->mw_sizes[i] = sz; + } + + for (i = 0; i < sndev->nr_lut_mw; i++) { + int idx = sndev->nr_direct_mw + i; + + sndev->self_shared->mw_sizes[idx] = LUT_SIZE; + } +} + +static int switchtec_ntb_init_shared_mw(struct switchtec_ntb *sndev) +{ + struct ntb_ctrl_regs __iomem *ctl = sndev->mmio_peer_ctrl; + int bar = sndev->direct_mw_to_bar[0]; + u32 ctl_val; + int rc; + + sndev->self_shared = dma_zalloc_coherent(&sndev->stdev->pdev->dev, + LUT_SIZE, + &sndev->self_shared_dma, + GFP_KERNEL); + if (!sndev->self_shared) { + dev_err(&sndev->stdev->dev, + "unable to allocate memory for shared mw"); + return -ENOMEM; + } + + switchtec_ntb_init_shared(sndev); + + rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_LOCK, + NTB_CTRL_PART_STATUS_LOCKED); + if (rc) + goto unalloc_and_exit; + 
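Not part of the patch itself, but for context: the 28-bit doorbell split described in the comment above is what an NTB client ultimately observes through ntb_db_valid_mask(). A minimal, hypothetical sketch of a client probe built on the generic include/linux/ntb.h helpers follows; the function name and the scratchpad value are invented for illustration only.

/* Illustrative sketch only -- assumes <linux/ntb.h>; not part of this patch. */
static int example_ntb_client_probe(struct ntb_client *client,
				    struct ntb_dev *ntb)
{
	u64 db_mask = ntb_db_valid_mask(ntb);	/* 0x0FFFFFFF with this driver */
	int rc;

	/* publish a value to the peer through scratchpad 0 */
	rc = ntb_spad_write(ntb, 0, 0x1234);
	if (rc)
		return rc;

	/* ring the peer's lowest valid doorbell bit */
	rc = ntb_peer_db_set(ntb, db_mask & -db_mask);
	if (rc)
		return rc;

	/* bring the logical link up; speed and width are negotiated */
	return ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
}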
+ ctl_val = ioread32(&ctl->bar_entry[bar].ctl); + ctl_val &= 0xFF; + ctl_val |= NTB_CTRL_BAR_LUT_WIN_EN; + ctl_val |= ilog2(LUT_SIZE) << 8; + ctl_val |= (sndev->nr_lut_mw - 1) << 14; + iowrite32(ctl_val, &ctl->bar_entry[bar].ctl); + + iowrite64((NTB_CTRL_LUT_EN | (sndev->self_partition << 1) | + sndev->self_shared_dma), + &ctl->lut_entry[0]); + + rc = switchtec_ntb_part_op(sndev, ctl, NTB_CTRL_PART_OP_CFG, + NTB_CTRL_PART_STATUS_NORMAL); + if (rc) { + u32 bar_error, lut_error; + + bar_error = ioread32(&ctl->bar_error); + lut_error = ioread32(&ctl->lut_error); + dev_err(&sndev->stdev->dev, + "Error setting up shared MW: %08x / %08x", + bar_error, lut_error); + goto unalloc_and_exit; + } + + sndev->peer_shared = pci_iomap(sndev->stdev->pdev, bar, LUT_SIZE); + if (!sndev->peer_shared) { + rc = -ENOMEM; + goto unalloc_and_exit; + } + + dev_dbg(&sndev->stdev->dev, "Shared MW Ready"); + return 0; + +unalloc_and_exit: + dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE, + sndev->self_shared, sndev->self_shared_dma); + + return rc; +} + +static void switchtec_ntb_deinit_shared_mw(struct switchtec_ntb *sndev) +{ + if (sndev->peer_shared) + pci_iounmap(sndev->stdev->pdev, sndev->peer_shared); + + if (sndev->self_shared) + dma_free_coherent(&sndev->stdev->pdev->dev, LUT_SIZE, + sndev->self_shared, + sndev->self_shared_dma); +} + +static irqreturn_t switchtec_ntb_doorbell_isr(int irq, void *dev) +{ + struct switchtec_ntb *sndev = dev; + + dev_dbg(&sndev->stdev->dev, "doorbell\n"); + + ntb_db_event(&sndev->ntb, 0); + + return IRQ_HANDLED; +} + +static irqreturn_t switchtec_ntb_message_isr(int irq, void *dev) +{ + int i; + struct switchtec_ntb *sndev = dev; + + for (i = 0; i < ARRAY_SIZE(sndev->mmio_self_dbmsg->imsg); i++) { + u64 msg = ioread64(&sndev->mmio_self_dbmsg->imsg[i]); + + if (msg & NTB_DBMSG_IMSG_STATUS) { + dev_dbg(&sndev->stdev->dev, "message: %d %08x\n", i, + (u32)msg); + iowrite8(1, &sndev->mmio_self_dbmsg->imsg[i].status); + + if (i == LINK_MESSAGE) + switchtec_ntb_check_link(sndev); + } + } + + return IRQ_HANDLED; +} + +static int switchtec_ntb_init_db_msg_irq(struct switchtec_ntb *sndev) +{ + int i; + int rc; + int doorbell_irq = 0; + int message_irq = 0; + int event_irq; + int idb_vecs = sizeof(sndev->mmio_self_dbmsg->idb_vec_map); + + event_irq = ioread32(&sndev->stdev->mmio_part_cfg->vep_vector_number); + + while (doorbell_irq == event_irq) + doorbell_irq++; + while (message_irq == doorbell_irq || + message_irq == event_irq) + message_irq++; + + dev_dbg(&sndev->stdev->dev, "irqs - event: %d, db: %d, msgs: %d", + event_irq, doorbell_irq, message_irq); + + for (i = 0; i < idb_vecs - 4; i++) + iowrite8(doorbell_irq, + &sndev->mmio_self_dbmsg->idb_vec_map[i]); + + for (; i < idb_vecs; i++) + iowrite8(message_irq, + &sndev->mmio_self_dbmsg->idb_vec_map[i]); + + sndev->doorbell_irq = pci_irq_vector(sndev->stdev->pdev, doorbell_irq); + sndev->message_irq = pci_irq_vector(sndev->stdev->pdev, message_irq); + + rc = request_irq(sndev->doorbell_irq, + switchtec_ntb_doorbell_isr, 0, + "switchtec_ntb_doorbell", sndev); + if (rc) + return rc; + + rc = request_irq(sndev->message_irq, + switchtec_ntb_message_isr, 0, + "switchtec_ntb_message", sndev); + if (rc) { + free_irq(sndev->doorbell_irq, sndev); + return rc; + } + + return 0; +} + +static void switchtec_ntb_deinit_db_msg_irq(struct switchtec_ntb *sndev) +{ + free_irq(sndev->doorbell_irq, sndev); + free_irq(sndev->message_irq, sndev); +} + +static int switchtec_ntb_add(struct device *dev, + struct class_interface *class_intf) +{ + struct 
switchtec_dev *stdev = to_stdev(dev); + struct switchtec_ntb *sndev; + int rc; + + stdev->sndev = NULL; + + if (stdev->pdev->class != MICROSEMI_NTB_CLASSCODE) + return -ENODEV; + + if (stdev->partition_count != 2) + dev_warn(dev, "ntb driver only supports 2 partitions"); + + sndev = kzalloc_node(sizeof(*sndev), GFP_KERNEL, dev_to_node(dev)); + if (!sndev) + return -ENOMEM; + + sndev->stdev = stdev; + switchtec_ntb_init_sndev(sndev); + switchtec_ntb_init_mw(sndev); + switchtec_ntb_init_db(sndev); + switchtec_ntb_init_msgs(sndev); + + rc = switchtec_ntb_init_req_id_table(sndev); + if (rc) + goto free_and_exit; + + rc = switchtec_ntb_init_shared_mw(sndev); + if (rc) + goto free_and_exit; + + rc = switchtec_ntb_init_db_msg_irq(sndev); + if (rc) + goto deinit_shared_and_exit; + + rc = ntb_register_device(&sndev->ntb); + if (rc) + goto deinit_and_exit; + + stdev->sndev = sndev; + stdev->link_notifier = switchtec_ntb_link_notification; + dev_info(dev, "NTB device registered"); + + return 0; + +deinit_and_exit: + switchtec_ntb_deinit_db_msg_irq(sndev); +deinit_shared_and_exit: + switchtec_ntb_deinit_shared_mw(sndev); +free_and_exit: + kfree(sndev); + dev_err(dev, "failed to register ntb device: %d", rc); + return rc; +} + +void switchtec_ntb_remove(struct device *dev, + struct class_interface *class_intf) +{ + struct switchtec_dev *stdev = to_stdev(dev); + struct switchtec_ntb *sndev = stdev->sndev; + + if (!sndev) + return; + + stdev->link_notifier = NULL; + stdev->sndev = NULL; + ntb_unregister_device(&sndev->ntb); + switchtec_ntb_deinit_db_msg_irq(sndev); + switchtec_ntb_deinit_shared_mw(sndev); + kfree(sndev); + dev_info(dev, "ntb device unregistered"); +} + +static struct class_interface switchtec_interface = { + .add_dev = switchtec_ntb_add, + .remove_dev = switchtec_ntb_remove, +}; + +static int __init switchtec_ntb_init(void) +{ + switchtec_interface.class = switchtec_class; + return class_interface_register(&switchtec_interface); +} +module_init(switchtec_ntb_init); + +static void __exit switchtec_ntb_exit(void) +{ + class_interface_unregister(&switchtec_interface); +} +module_exit(switchtec_ntb_exit); diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index f58d8e305323..045e3dd4750e 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -191,8 +191,6 @@ struct ntb_transport_qp { struct ntb_transport_mw { phys_addr_t phys_addr; resource_size_t phys_size; - resource_size_t xlat_align; - resource_size_t xlat_align_size; void __iomem *vbase; size_t xlat_size; size_t buff_size; @@ -687,13 +685,20 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; struct pci_dev *pdev = nt->ndev->pdev; size_t xlat_size, buff_size; + resource_size_t xlat_align; + resource_size_t xlat_align_size; int rc; if (!size) return -EINVAL; - xlat_size = round_up(size, mw->xlat_align_size); - buff_size = round_up(size, mw->xlat_align); + rc = ntb_mw_get_align(nt->ndev, PIDX, num_mw, &xlat_align, + &xlat_align_size, NULL); + if (rc) + return rc; + + xlat_size = round_up(size, xlat_align_size); + buff_size = round_up(size, xlat_align); /* No need to re-setup */ if (mw->xlat_size == xlat_size) @@ -722,7 +727,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, * is a requirement of the hardware. It is recommended to setup CMA * for BAR sizes equal or greater than 4MB. 
*/ - if (!IS_ALIGNED(mw->dma_addr, mw->xlat_align)) { + if (!IS_ALIGNED(mw->dma_addr, xlat_align)) { dev_err(&pdev->dev, "DMA memory %pad is not aligned\n", &mw->dma_addr); ntb_free_mw(nt, num_mw); @@ -1104,11 +1109,6 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) for (i = 0; i < mw_count; i++) { mw = &nt->mw_vec[i]; - rc = ntb_mw_get_align(ndev, PIDX, i, &mw->xlat_align, - &mw->xlat_align_size, NULL); - if (rc) - goto err1; - rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr, &mw->phys_size); if (rc) diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 759f772fa00c..427112cf101a 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -108,8 +108,6 @@ MODULE_PARM_DESC(on_node, "Run threads only on NTB device node (default: true)") struct perf_mw { phys_addr_t phys_addr; resource_size_t phys_size; - resource_size_t xlat_align; - resource_size_t xlat_align_size; void __iomem *vbase; size_t xlat_size; size_t buf_size; @@ -472,13 +470,20 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size) { struct perf_mw *mw = &perf->mw; size_t xlat_size, buf_size; + resource_size_t xlat_align; + resource_size_t xlat_align_size; int rc; if (!size) return -EINVAL; - xlat_size = round_up(size, mw->xlat_align_size); - buf_size = round_up(size, mw->xlat_align); + rc = ntb_mw_get_align(perf->ntb, PIDX, 0, &xlat_align, + &xlat_align_size, NULL); + if (rc) + return rc; + + xlat_size = round_up(size, xlat_align_size); + buf_size = round_up(size, xlat_align); if (mw->xlat_size == xlat_size) return 0; @@ -567,11 +572,6 @@ static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf) mw = &perf->mw; - rc = ntb_mw_get_align(ntb, PIDX, 0, &mw->xlat_align, - &mw->xlat_align_size, NULL); - if (rc) - return rc; - rc = ntb_peer_mw_get_addr(ntb, 0, &mw->phys_addr, &mw->phys_size); if (rc) return rc; diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c index a69815c45ce6..91526a986caa 100644 --- a/drivers/ntb/test/ntb_tool.c +++ b/drivers/ntb/test/ntb_tool.c @@ -753,9 +753,9 @@ static ssize_t tool_peer_mw_trans_read(struct file *filep, phys_addr_t base; resource_size_t mw_size; - resource_size_t align_addr; - resource_size_t align_size; - resource_size_t max_size; + resource_size_t align_addr = 0; + resource_size_t align_size = 0; + resource_size_t max_size = 0; buf_size = min_t(size_t, size, 512); diff --git a/drivers/of/base.c b/drivers/of/base.c index f2e649ff746f..26618ba8f92a 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -761,10 +761,10 @@ EXPORT_SYMBOL(of_find_node_opts_by_path); /** * of_find_node_by_name - Find a node by its "name" property - * @from: The node to start searching from or NULL, the node + * @from: The node to start searching from or NULL; the node * you pass will not be searched, only the next one - * will; typically, you pass what the previous call - * returned. of_node_put() will be called on it + * will. Typically, you pass what the previous call + * returned. of_node_put() will be called on @from. 
* @name: The name string to match against * * Returns a node pointer with refcount incremented, use diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c index e9ec931f5b9a..a7b1cb6c2f65 100644 --- a/drivers/of/of_pci.c +++ b/drivers/of/of_pci.c @@ -374,7 +374,7 @@ int of_pci_map_rid(struct device_node *np, u32 rid, pr_debug("%pOF: %s, using mask %08x, rid-base: %08x, out-base: %08x, length: %08x, rid: %08x -> %08x\n", np, map_name, map_mask, rid_base, out_base, - rid_len, rid, *id_out); + rid_len, rid, masked_rid - rid_base + out_base); return 0; } diff --git a/drivers/of/unittest-data/Makefile b/drivers/of/unittest-data/Makefile index 3031fc2f18f6..32389acfa616 100644 --- a/drivers/of/unittest-data/Makefile +++ b/drivers/of/unittest-data/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 +DTC_FLAGS_testcases := -Wno-interrupts_property obj-y += testcases.dtb.o targets += testcases.dtb testcases.dtb.S diff --git a/drivers/of/unittest-data/testcases.dts b/drivers/of/unittest-data/testcases.dts index ce49463d9d32..55fe0ee20109 100644 --- a/drivers/of/unittest-data/testcases.dts +++ b/drivers/of/unittest-data/testcases.dts @@ -1,5 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /dts-v1/; +/plugin/; + / { testcase-data { changeset { @@ -15,66 +17,3 @@ #include "tests-match.dtsi" #include "tests-platform.dtsi" #include "tests-overlay.dtsi" - -/* - * phandle fixup data - generated by dtc patches that aren't upstream. - * This data must be regenerated whenever phandle references are modified in - * the testdata tree. - * - * The format of this data may be subject to change. For the time being consider - * this a kernel-internal data format. - */ -/ { __local_fixups__ { - testcase-data { - phandle-tests { - consumer-a { - phandle-list = <0x00000000 0x00000008 - 0x00000018 0x00000028 - 0x00000034 0x00000038>; - phandle-list-bad-args = <0x00000000 0x0000000c>; - }; - }; - interrupts { - intmap0 { - interrupt-map = <0x00000004 0x00000010 - 0x00000024 0x00000034>; - }; - intmap1 { - interrupt-map = <0x0000000c>; - }; - interrupts0 { - interrupt-parent = <0x00000000>; - }; - interrupts1 { - interrupt-parent = <0x00000000>; - }; - interrupts-extended0 { - interrupts-extended = <0x00000000 0x00000008 - 0x00000018 0x00000024 - 0x0000002c 0x00000034 - 0x0000003c>; - }; - }; - testcase-device1 { - interrupt-parent = <0x00000000>; - }; - testcase-device2 { - interrupt-parent = <0x00000000>; - }; - overlay2 { - fragment@0 { - target = <0x00000000>; - }; - }; - overlay3 { - fragment@0 { - target = <0x00000000>; - }; - }; - overlay4 { - fragment@0 { - target = <0x00000000>; - }; - }; - }; -}; }; diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index da45dbea20ce..730cc897b94d 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -13,6 +13,7 @@ * */ +#include <linux/switchtec.h> #include <linux/switchtec_ioctl.h> #include <linux/interrupt.h> @@ -20,8 +21,6 @@ #include <linux/fs.h> #include <linux/uaccess.h> #include <linux/poll.h> -#include <linux/pci.h> -#include <linux/cdev.h> #include <linux/wait.h> MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver"); @@ -34,265 +33,10 @@ module_param(max_devices, int, 0644); MODULE_PARM_DESC(max_devices, "max number of switchtec device instances"); static dev_t switchtec_devt; -static struct class *switchtec_class; static DEFINE_IDA(switchtec_minor_ida); -#define MICROSEMI_VENDOR_ID 0x11f8 -#define MICROSEMI_NTB_CLASSCODE 0x068000 -#define MICROSEMI_MGMT_CLASSCODE 0x058000 - -#define 
SWITCHTEC_MRPC_PAYLOAD_SIZE 1024 -#define SWITCHTEC_MAX_PFF_CSR 48 - -#define SWITCHTEC_EVENT_OCCURRED BIT(0) -#define SWITCHTEC_EVENT_CLEAR BIT(0) -#define SWITCHTEC_EVENT_EN_LOG BIT(1) -#define SWITCHTEC_EVENT_EN_CLI BIT(2) -#define SWITCHTEC_EVENT_EN_IRQ BIT(3) -#define SWITCHTEC_EVENT_FATAL BIT(4) - -enum { - SWITCHTEC_GAS_MRPC_OFFSET = 0x0000, - SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000, - SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800, - SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000, - SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200, - SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000, - SWITCHTEC_GAS_NTB_OFFSET = 0x10000, - SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000, -}; - -struct mrpc_regs { - u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; - u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; - u32 cmd; - u32 status; - u32 ret_value; -} __packed; - -enum mrpc_status { - SWITCHTEC_MRPC_STATUS_INPROGRESS = 1, - SWITCHTEC_MRPC_STATUS_DONE = 2, - SWITCHTEC_MRPC_STATUS_ERROR = 0xFF, - SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100, -}; - -struct sw_event_regs { - u64 event_report_ctrl; - u64 reserved1; - u64 part_event_bitmap; - u64 reserved2; - u32 global_summary; - u32 reserved3[3]; - u32 stack_error_event_hdr; - u32 stack_error_event_data; - u32 reserved4[4]; - u32 ppu_error_event_hdr; - u32 ppu_error_event_data; - u32 reserved5[4]; - u32 isp_error_event_hdr; - u32 isp_error_event_data; - u32 reserved6[4]; - u32 sys_reset_event_hdr; - u32 reserved7[5]; - u32 fw_exception_hdr; - u32 reserved8[5]; - u32 fw_nmi_hdr; - u32 reserved9[5]; - u32 fw_non_fatal_hdr; - u32 reserved10[5]; - u32 fw_fatal_hdr; - u32 reserved11[5]; - u32 twi_mrpc_comp_hdr; - u32 twi_mrpc_comp_data; - u32 reserved12[4]; - u32 twi_mrpc_comp_async_hdr; - u32 twi_mrpc_comp_async_data; - u32 reserved13[4]; - u32 cli_mrpc_comp_hdr; - u32 cli_mrpc_comp_data; - u32 reserved14[4]; - u32 cli_mrpc_comp_async_hdr; - u32 cli_mrpc_comp_async_data; - u32 reserved15[4]; - u32 gpio_interrupt_hdr; - u32 gpio_interrupt_data; - u32 reserved16[4]; -} __packed; - -enum { - SWITCHTEC_CFG0_RUNNING = 0x04, - SWITCHTEC_CFG1_RUNNING = 0x05, - SWITCHTEC_IMG0_RUNNING = 0x03, - SWITCHTEC_IMG1_RUNNING = 0x07, -}; - -struct sys_info_regs { - u32 device_id; - u32 device_version; - u32 firmware_version; - u32 reserved1; - u32 vendor_table_revision; - u32 table_format_version; - u32 partition_id; - u32 cfg_file_fmt_version; - u16 cfg_running; - u16 img_running; - u32 reserved2[57]; - char vendor_id[8]; - char product_id[16]; - char product_revision[4]; - char component_vendor[8]; - u16 component_id; - u8 component_revision; -} __packed; - -struct flash_info_regs { - u32 flash_part_map_upd_idx; - - struct active_partition_info { - u32 address; - u32 build_version; - u32 build_string; - } active_img; - - struct active_partition_info active_cfg; - struct active_partition_info inactive_img; - struct active_partition_info inactive_cfg; - - u32 flash_length; - - struct partition_info { - u32 address; - u32 length; - } cfg0; - - struct partition_info cfg1; - struct partition_info img0; - struct partition_info img1; - struct partition_info nvlog; - struct partition_info vendor[8]; -}; - -struct ntb_info_regs { - u8 partition_count; - u8 partition_id; - u16 reserved1; - u64 ep_map; - u16 requester_id; -} __packed; - -struct part_cfg_regs { - u32 status; - u32 state; - u32 port_cnt; - u32 usp_port_mode; - u32 usp_pff_inst_id; - u32 vep_pff_inst_id; - u32 dsp_pff_inst_id[47]; - u32 reserved1[11]; - u16 vep_vector_number; - u16 usp_vector_number; - u32 port_event_bitmap; - u32 reserved2[3]; - u32 
part_event_summary; - u32 reserved3[3]; - u32 part_reset_hdr; - u32 part_reset_data[5]; - u32 mrpc_comp_hdr; - u32 mrpc_comp_data[5]; - u32 mrpc_comp_async_hdr; - u32 mrpc_comp_async_data[5]; - u32 dyn_binding_hdr; - u32 dyn_binding_data[5]; - u32 reserved4[159]; -} __packed; - -enum { - SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0, - SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1, - SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2, - SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3, -}; - -struct pff_csr_regs { - u16 vendor_id; - u16 device_id; - u32 pci_cfg_header[15]; - u32 pci_cap_region[48]; - u32 pcie_cap_region[448]; - u32 indirect_gas_window[128]; - u32 indirect_gas_window_off; - u32 reserved[127]; - u32 pff_event_summary; - u32 reserved2[3]; - u32 aer_in_p2p_hdr; - u32 aer_in_p2p_data[5]; - u32 aer_in_vep_hdr; - u32 aer_in_vep_data[5]; - u32 dpc_hdr; - u32 dpc_data[5]; - u32 cts_hdr; - u32 cts_data[5]; - u32 reserved3[6]; - u32 hotplug_hdr; - u32 hotplug_data[5]; - u32 ier_hdr; - u32 ier_data[5]; - u32 threshold_hdr; - u32 threshold_data[5]; - u32 power_mgmt_hdr; - u32 power_mgmt_data[5]; - u32 tlp_throttling_hdr; - u32 tlp_throttling_data[5]; - u32 force_speed_hdr; - u32 force_speed_data[5]; - u32 credit_timeout_hdr; - u32 credit_timeout_data[5]; - u32 link_state_hdr; - u32 link_state_data[5]; - u32 reserved4[174]; -} __packed; - -struct switchtec_dev { - struct pci_dev *pdev; - struct device dev; - struct cdev cdev; - - int partition; - int partition_count; - int pff_csr_count; - char pff_local[SWITCHTEC_MAX_PFF_CSR]; - - void __iomem *mmio; - struct mrpc_regs __iomem *mmio_mrpc; - struct sw_event_regs __iomem *mmio_sw_event; - struct sys_info_regs __iomem *mmio_sys_info; - struct flash_info_regs __iomem *mmio_flash_info; - struct ntb_info_regs __iomem *mmio_ntb; - struct part_cfg_regs __iomem *mmio_part_cfg; - struct part_cfg_regs __iomem *mmio_part_cfg_all; - struct pff_csr_regs __iomem *mmio_pff_csr; - - /* - * The mrpc mutex must be held when accessing the other - * mrpc_ fields, alive flag and stuser->state field - */ - struct mutex mrpc_mutex; - struct list_head mrpc_queue; - int mrpc_busy; - struct work_struct mrpc_work; - struct delayed_work mrpc_timeout; - bool alive; - - wait_queue_head_t event_wq; - atomic_t event_cnt; -}; - -static struct switchtec_dev *to_stdev(struct device *dev) -{ - return container_of(dev, struct switchtec_dev, dev); -} +struct class *switchtec_class; +EXPORT_SYMBOL_GPL(switchtec_class); enum mrpc_state { MRPC_IDLE = 0, @@ -1234,6 +978,49 @@ static const struct file_operations switchtec_fops = { .compat_ioctl = switchtec_dev_ioctl, }; +static void link_event_work(struct work_struct *work) +{ + struct switchtec_dev *stdev; + + stdev = container_of(work, struct switchtec_dev, link_event_work); + + if (stdev->link_notifier) + stdev->link_notifier(stdev); +} + +static void check_link_state_events(struct switchtec_dev *stdev) +{ + int idx; + u32 reg; + int count; + int occurred = 0; + + for (idx = 0; idx < stdev->pff_csr_count; idx++) { + reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr); + dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg); + count = (reg >> 5) & 0xFF; + + if (count != stdev->link_event_count[idx]) { + occurred = 1; + stdev->link_event_count[idx] = count; + } + } + + if (occurred) + schedule_work(&stdev->link_event_work); +} + +static void enable_link_state_events(struct switchtec_dev *stdev) +{ + int idx; + + for (idx = 0; idx < stdev->pff_csr_count; idx++) { + iowrite32(SWITCHTEC_EVENT_CLEAR | + SWITCHTEC_EVENT_EN_IRQ, + 
&stdev->mmio_pff_csr[idx].link_state_hdr); + } +} + static void stdev_release(struct device *dev) { struct switchtec_dev *stdev = to_stdev(dev); @@ -1286,6 +1073,7 @@ static struct switchtec_dev *stdev_create(struct pci_dev *pdev) stdev->mrpc_busy = 0; INIT_WORK(&stdev->mrpc_work, mrpc_event_work); INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work); + INIT_WORK(&stdev->link_event_work, link_event_work); init_waitqueue_head(&stdev->event_wq); atomic_set(&stdev->event_cnt, 0); @@ -1329,6 +1117,9 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx) if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ)) return 0; + if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE) + return 0; + dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr); hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED); iowrite32(hdr, hdr_reg); @@ -1348,6 +1139,7 @@ static int mask_all_events(struct switchtec_dev *stdev, int eid) for (idx = 0; idx < stdev->pff_csr_count; idx++) { if (!stdev->pff_local[idx]) continue; + count += mask_event(stdev, eid, idx); } } else { @@ -1372,6 +1164,8 @@ static irqreturn_t switchtec_event_isr(int irq, void *dev) iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr); } + check_link_state_events(stdev); + for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++) event_count += mask_all_events(stdev, eid); @@ -1481,6 +1275,9 @@ static int switchtec_pci_probe(struct pci_dev *pdev, struct switchtec_dev *stdev; int rc; + if (pdev->class == MICROSEMI_NTB_CLASSCODE) + request_module_nowait("ntb_hw_switchtec"); + stdev = stdev_create(pdev); if (IS_ERR(stdev)) return PTR_ERR(stdev); @@ -1498,6 +1295,7 @@ static int switchtec_pci_probe(struct pci_dev *pdev, iowrite32(SWITCHTEC_EVENT_CLEAR | SWITCHTEC_EVENT_EN_IRQ, &stdev->mmio_part_cfg->mrpc_comp_hdr); + enable_link_state_events(stdev); rc = cdev_device_add(&stdev->cdev, &stdev->dev); if (rc) diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 09dac11337d1..2c745e8ccad6 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -93,12 +93,31 @@ config ASUS_LAPTOP config DELL_SMBIOS tristate - select DCDBAS + +config DELL_SMBIOS_WMI + tristate "Dell SMBIOS calling interface (WMI implementation)" + depends on ACPI_WMI + select DELL_WMI_DESCRIPTOR + select DELL_SMBIOS + ---help--- + This provides an implementation for the Dell SMBIOS calling interface + communicated over ACPI-WMI. + + If you have a Dell computer from >2007 you should say Y or M here. + If you aren't sure and this module doesn't work for your computer + it just won't load. + +config DELL_SMBIOS_SMM + tristate "Dell SMBIOS calling interface (SMM implementation)" + depends on DCDBAS + select DELL_SMBIOS ---help--- - This module provides common functions for kernel modules using - Dell SMBIOS. + This provides an implementation for the Dell SMBIOS calling interface + communicated over SMI/SMM. - If you have a Dell laptop, say Y or M here. + If you have a Dell computer from <=2017 you should say Y or M here. + If you aren't sure and this module doesn't work for your computer + it just won't load. config DELL_LAPTOP tristate "Dell Laptop Extras" @@ -116,11 +135,12 @@ config DELL_LAPTOP laptops (except for some models covered by the Compal driver). 
config DELL_WMI - tristate "Dell WMI extras" + tristate "Dell WMI notifications" depends on ACPI_WMI depends on DMI depends on INPUT depends on ACPI_VIDEO || ACPI_VIDEO = n + select DELL_WMI_DESCRIPTOR select DELL_SMBIOS select INPUT_SPARSEKMAP ---help--- @@ -129,6 +149,10 @@ config DELL_WMI To compile this driver as a module, choose M here: the module will be called dell-wmi. +config DELL_WMI_DESCRIPTOR + tristate + depends on ACPI_WMI + config DELL_WMI_AIO tristate "WMI Hotkeys for Dell All-In-One series" depends on ACPI_WMI @@ -426,7 +450,6 @@ config THINKPAD_ACPI_ALSA_SUPPORT config THINKPAD_ACPI_DEBUGFACILITIES bool "Maintainer debug facilities" depends on THINKPAD_ACPI - default n ---help--- Enables extra stuff in the thinkpad-acpi which is completely useless for normal use. Read the driver source to find out what it does. @@ -437,7 +460,6 @@ config THINKPAD_ACPI_DEBUGFACILITIES config THINKPAD_ACPI_DEBUG bool "Verbose debug mode" depends on THINKPAD_ACPI - default n ---help--- Enables extra debugging information, at the expense of a slightly increase in driver size. @@ -447,7 +469,6 @@ config THINKPAD_ACPI_DEBUG config THINKPAD_ACPI_UNSAFE_LEDS bool "Allow control of important LEDs (unsafe)" depends on THINKPAD_ACPI - default n ---help--- Overriding LED state on ThinkPads can mask important firmware alerts (like critical battery condition), or misled @@ -515,7 +536,6 @@ config SENSORS_HDAPS tristate "Thinkpad Hard Drive Active Protection System (hdaps)" depends on INPUT select INPUT_POLLDEV - default n help This driver provides support for the IBM Hard Drive Active Protection System (hdaps), which provides an accelerometer and other misc. data. @@ -658,6 +678,18 @@ config WMI_BMOF To compile this driver as a module, choose M here: the module will be called wmi-bmof. +config INTEL_WMI_THUNDERBOLT + tristate "Intel WMI thunderbolt force power driver" + depends on ACPI_WMI + ---help--- + Say Y here if you want to be able to use the WMI interface on select + systems to force the power control of Intel Thunderbolt controllers. + This is useful for updating the firmware when devices are not plugged + into the controller. + + To compile this driver as a module, choose M here: the module will + be called intel-wmi-thunderbolt. + config MSI_WMI tristate "MSI WMI extras" depends on ACPI_WMI @@ -763,7 +795,6 @@ config TOSHIBA_HAPS config TOSHIBA_WMI tristate "Toshiba WMI Hotkeys Driver (EXPERIMENTAL)" - default n depends on ACPI_WMI depends on INPUT select INPUT_SPARSEKMAP @@ -785,7 +816,6 @@ config ACPI_CMPC depends on RFKILL || RFKILL=n select INPUT select BACKLIGHT_CLASS_DEVICE - default n help Support for Intel Classmate PC ACPI devices, including some keys as input device, backlight device, tablet and accelerometer @@ -793,7 +823,7 @@ config ACPI_CMPC config INTEL_CHT_INT33FE tristate "Intel Cherry Trail ACPI INT33FE Driver" - depends on X86 && ACPI && I2C + depends on X86 && ACPI && I2C && REGULATOR ---help--- This driver add support for the INT33FE ACPI device found on some Intel Cherry Trail devices. @@ -804,6 +834,10 @@ config INTEL_CHT_INT33FE This driver instantiates i2c-clients for these, so that standard i2c drivers for these chips can bind to the them. + If you enable this driver it is advised to also select + CONFIG_TYPEC_FUSB302=m, CONFIG_CHARGER_BQ24190=m and + CONFIG_BATTERY_MAX17042=m. 
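DELL_WMI_DESCRIPTOR above is a hidden helper selected by both DELL_WMI and DELL_SMBIOS_WMI; dependent drivers are expected to query it before touching the Dell WMI interfaces, as dell_smbios_wmi_probe() does later in this diff. The sketch below shows that consumer pattern only; the driver name and GUID are placeholders, and nothing beyond the dell_wmi_get_* helpers declared in dell-wmi-descriptor.h is assumed.

#include <linux/device.h>
#include <linux/module.h>
#include <linux/wmi.h>
#include "dell-wmi-descriptor.h"

/* placeholder GUID for illustration only, not a real Dell interface */
#define EXAMPLE_GUID "00000000-0000-0000-0000-000000000000"

static int example_wmi_probe(struct wmi_device *wdev)
{
	u32 version, size, hotfix;
	int ret;

	/* -ENODEV: descriptor GUID absent; -EPROBE_DEFER: descriptor
	 * driver not probed yet; other negative values: malformed. */
	ret = dell_wmi_get_descriptor_valid();
	if (ret)
		return ret;

	/* values are cached by dell-wmi-descriptor once it has probed */
	if (!dell_wmi_get_interface_version(&version) ||
	    !dell_wmi_get_size(&size) ||
	    !dell_wmi_get_hotfix(&hotfix))
		return -EPROBE_DEFER;

	dev_dbg(&wdev->dev, "descriptor v%u, buffer %u bytes, hotfix %u\n",
		version, size, hotfix);
	return 0;
}

static const struct wmi_device_id example_wmi_id_table[] = {
	{ .guid_string = EXAMPLE_GUID },
	{ },
};

static struct wmi_driver example_wmi_driver = {
	.driver = {
		.name = "example-dell-wmi-consumer",
	},
	.probe = example_wmi_probe,
	.id_table = example_wmi_id_table,
};
module_wmi_driver(example_wmi_driver);

MODULE_LICENSE("GPL");

dell-smbios-wmi follows the same sequence in its probe and additionally drops its ioctl filter callback (disabling the userspace interface) when the reported hotfix count is zero.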
+ config INTEL_INT0002_VGPIO tristate "Intel ACPI INT0002 Virtual GPIO driver" depends on GPIOLIB && ACPI @@ -892,7 +926,6 @@ config INTEL_IPS config INTEL_IMR bool "Intel Isolated Memory Region support" - default n depends on X86_INTEL_QUARK && IOSF_MBI ---help--- This option provides a means to manipulate Isolated Memory Regions. @@ -1088,7 +1121,6 @@ config INTEL_PUNIT_IPC config INTEL_TELEMETRY tristate "Intel SoC Telemetry Driver" - default n depends on INTEL_PMC_IPC && INTEL_PUNIT_IPC && X86_64 ---help--- This driver provides interfaces to configure and use @@ -1111,7 +1143,6 @@ config MLX_PLATFORM config MLX_CPLD_PLATFORM tristate "Mellanox platform hotplug driver support" - default n select HWMON select I2C ---help--- diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index f9e3ae683bbe..c32b34a72467 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -13,8 +13,11 @@ obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o obj-$(CONFIG_DELL_SMBIOS) += dell-smbios.o +obj-$(CONFIG_DELL_SMBIOS_WMI) += dell-smbios-wmi.o +obj-$(CONFIG_DELL_SMBIOS_SMM) += dell-smbios-smm.o obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o obj-$(CONFIG_DELL_WMI) += dell-wmi.o +obj-$(CONFIG_DELL_WMI_DESCRIPTOR) += dell-wmi-descriptor.o obj-$(CONFIG_DELL_WMI_AIO) += dell-wmi-aio.o obj-$(CONFIG_DELL_WMI_LED) += dell-wmi-led.o obj-$(CONFIG_DELL_SMO8800) += dell-smo8800.o @@ -40,6 +43,7 @@ obj-$(CONFIG_PEAQ_WMI) += peaq-wmi.o obj-$(CONFIG_SURFACE3_WMI) += surface3-wmi.o obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o obj-$(CONFIG_WMI_BMOF) += wmi-bmof.o +obj-$(CONFIG_INTEL_WMI_THUNDERBOLT) += intel-wmi-thunderbolt.o # toshiba_acpi must link after wmi to ensure that wmi devices are found # before toshiba_acpi initializes diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 48e1541dc8d4..a32c5c00e0e7 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -119,6 +119,7 @@ MODULE_LICENSE("GPL"); #define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012 #define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021 #define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? 
*/ +#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025 /* Misc */ #define ASUS_WMI_DEVID_CAMERA 0x00060013 @@ -148,6 +149,7 @@ MODULE_LICENSE("GPL"); #define ASUS_WMI_DSTS_BIOS_BIT 0x00040000 #define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF #define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00 +#define ASUS_WMI_DSTS_LIGHTBAR_MASK 0x0000000F #define ASUS_FAN_DESC "cpu_fan" #define ASUS_FAN_MFUN 0x13 @@ -222,10 +224,13 @@ struct asus_wmi { int tpd_led_wk; struct led_classdev kbd_led; int kbd_led_wk; + struct led_classdev lightbar_led; + int lightbar_led_wk; struct workqueue_struct *led_workqueue; struct work_struct tpd_led_work; struct work_struct kbd_led_work; struct work_struct wlan_led_work; + struct work_struct lightbar_led_work; struct asus_rfkill wlan; struct asus_rfkill bluetooth; @@ -567,6 +572,48 @@ static enum led_brightness wlan_led_get(struct led_classdev *led_cdev) return result & ASUS_WMI_DSTS_BRIGHTNESS_MASK; } +static void lightbar_led_update(struct work_struct *work) +{ + struct asus_wmi *asus; + int ctrl_param; + + asus = container_of(work, struct asus_wmi, lightbar_led_work); + + ctrl_param = asus->lightbar_led_wk; + asus_wmi_set_devstate(ASUS_WMI_DEVID_LIGHTBAR, ctrl_param, NULL); +} + +static void lightbar_led_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct asus_wmi *asus; + + asus = container_of(led_cdev, struct asus_wmi, lightbar_led); + + asus->lightbar_led_wk = !!value; + queue_work(asus->led_workqueue, &asus->lightbar_led_work); +} + +static enum led_brightness lightbar_led_get(struct led_classdev *led_cdev) +{ + struct asus_wmi *asus; + u32 result; + + asus = container_of(led_cdev, struct asus_wmi, lightbar_led); + asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_LIGHTBAR, &result); + + return result & ASUS_WMI_DSTS_LIGHTBAR_MASK; +} + +static int lightbar_led_presence(struct asus_wmi *asus) +{ + u32 result; + + asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_LIGHTBAR, &result); + + return result & ASUS_WMI_DSTS_PRESENCE_BIT; +} + static void asus_wmi_led_exit(struct asus_wmi *asus) { if (!IS_ERR_OR_NULL(asus->kbd_led.dev)) @@ -575,6 +622,8 @@ static void asus_wmi_led_exit(struct asus_wmi *asus) led_classdev_unregister(&asus->tpd_led); if (!IS_ERR_OR_NULL(asus->wlan_led.dev)) led_classdev_unregister(&asus->wlan_led); + if (!IS_ERR_OR_NULL(asus->lightbar_led.dev)) + led_classdev_unregister(&asus->lightbar_led); if (asus->led_workqueue) destroy_workqueue(asus->led_workqueue); } @@ -630,6 +679,20 @@ static int asus_wmi_led_init(struct asus_wmi *asus) rv = led_classdev_register(&asus->platform_device->dev, &asus->wlan_led); + if (rv) + goto error; + } + + if (lightbar_led_presence(asus)) { + INIT_WORK(&asus->lightbar_led_work, lightbar_led_update); + + asus->lightbar_led.name = "asus::lightbar"; + asus->lightbar_led.brightness_set = lightbar_led_set; + asus->lightbar_led.brightness_get = lightbar_led_get; + asus->lightbar_led.max_brightness = 1; + + rv = led_classdev_register(&asus->platform_device->dev, + &asus->lightbar_led); } error: diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index f42159fd2031..bf897b1832b1 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -35,18 +35,6 @@ #include "dell-rbtn.h" #include "dell-smbios.h" -#define BRIGHTNESS_TOKEN 0x7d -#define KBD_LED_OFF_TOKEN 0x01E1 -#define KBD_LED_ON_TOKEN 0x01E2 -#define KBD_LED_AUTO_TOKEN 0x01E3 -#define KBD_LED_AUTO_25_TOKEN 0x02EA -#define KBD_LED_AUTO_50_TOKEN 0x02EB -#define KBD_LED_AUTO_75_TOKEN 0x02EC -#define 
KBD_LED_AUTO_100_TOKEN 0x02F6 -#define GLOBAL_MIC_MUTE_ENABLE 0x0364 -#define GLOBAL_MIC_MUTE_DISABLE 0x0365 -#define KBD_LED_AC_TOKEN 0x0451 - struct quirk_entry { u8 touchpad_led; @@ -85,6 +73,7 @@ static struct platform_driver platform_driver = { } }; +static struct calling_interface_buffer *buffer; static struct platform_device *platform_device; static struct backlight_device *dell_backlight_device; static struct rfkill *wifi_rfkill; @@ -283,6 +272,27 @@ static const struct dmi_system_id dell_quirks[] __initconst = { { } }; +void dell_set_arguments(u32 arg0, u32 arg1, u32 arg2, u32 arg3) +{ + memset(buffer, 0, sizeof(struct calling_interface_buffer)); + buffer->input[0] = arg0; + buffer->input[1] = arg1; + buffer->input[2] = arg2; + buffer->input[3] = arg3; +} + +int dell_send_request(u16 class, u16 select) +{ + int ret; + + buffer->cmd_class = class; + buffer->cmd_select = select; + ret = dell_smbios_call(buffer); + if (ret != 0) + return ret; + return dell_smbios_error(buffer->output[0]); +} + /* * Derived from information in smbios-wireless-ctl: * @@ -405,7 +415,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = { static int dell_rfkill_set(void *data, bool blocked) { - struct calling_interface_buffer *buffer; int disable = blocked ? 1 : 0; unsigned long radio = (unsigned long)data; int hwswitch_bit = (unsigned long)data - 1; @@ -413,20 +422,16 @@ static int dell_rfkill_set(void *data, bool blocked) int status; int ret; - buffer = dell_smbios_get_buffer(); - - dell_smbios_send_request(17, 11); - ret = buffer->output[0]; + dell_set_arguments(0, 0, 0, 0); + ret = dell_send_request(CLASS_INFO, SELECT_RFKILL); + if (ret) + return ret; status = buffer->output[1]; - if (ret != 0) - goto out; - - dell_smbios_clear_buffer(); - - buffer->input[0] = 0x2; - dell_smbios_send_request(17, 11); - ret = buffer->output[0]; + dell_set_arguments(0x2, 0, 0, 0); + ret = dell_send_request(CLASS_INFO, SELECT_RFKILL); + if (ret) + return ret; hwswitch = buffer->output[1]; /* If the hardware switch controls this radio, and the hardware @@ -435,28 +440,19 @@ static int dell_rfkill_set(void *data, bool blocked) (status & BIT(0)) && !(status & BIT(16))) disable = 1; - dell_smbios_clear_buffer(); - - buffer->input[0] = (1 | (radio<<8) | (disable << 16)); - dell_smbios_send_request(17, 11); - ret = buffer->output[0]; - - out: - dell_smbios_release_buffer(); - return dell_smbios_error(ret); + dell_set_arguments(1 | (radio<<8) | (disable << 16), 0, 0, 0); + ret = dell_send_request(CLASS_INFO, SELECT_RFKILL); + return ret; } -/* Must be called with the buffer held */ static void dell_rfkill_update_sw_state(struct rfkill *rfkill, int radio, - int status, - struct calling_interface_buffer *buffer) + int status) { if (status & BIT(0)) { /* Has hw-switch, sync sw_state to BIOS */ int block = rfkill_blocked(rfkill); - dell_smbios_clear_buffer(); - buffer->input[0] = (1 | (radio << 8) | (block << 16)); - dell_smbios_send_request(17, 11); + dell_set_arguments(1 | (radio << 8) | (block << 16), 0, 0, 0); + dell_send_request(CLASS_INFO, SELECT_RFKILL); } else { /* No hw-switch, sync BIOS state to sw_state */ rfkill_set_sw_state(rfkill, !!(status & BIT(radio + 16))); @@ -472,32 +468,23 @@ static void dell_rfkill_update_hw_state(struct rfkill *rfkill, int radio, static void dell_rfkill_query(struct rfkill *rfkill, void *data) { - struct calling_interface_buffer *buffer; int radio = ((unsigned long)data & 0xF); int hwswitch; int status; int ret; - buffer = dell_smbios_get_buffer(); - - 
dell_smbios_send_request(17, 11); - ret = buffer->output[0]; + dell_set_arguments(0, 0, 0, 0); + ret = dell_send_request(CLASS_INFO, SELECT_RFKILL); status = buffer->output[1]; if (ret != 0 || !(status & BIT(0))) { - dell_smbios_release_buffer(); return; } - dell_smbios_clear_buffer(); - - buffer->input[0] = 0x2; - dell_smbios_send_request(17, 11); - ret = buffer->output[0]; + dell_set_arguments(0, 0x2, 0, 0); + ret = dell_send_request(CLASS_INFO, SELECT_RFKILL); hwswitch = buffer->output[1]; - dell_smbios_release_buffer(); - if (ret != 0) return; @@ -513,27 +500,23 @@ static struct dentry *dell_laptop_dir; static int dell_debugfs_show(struct seq_file *s, void *data) { - struct calling_interface_buffer *buffer; int hwswitch_state; int hwswitch_ret; int status; int ret; - buffer = dell_smbios_get_buffer(); - - dell_smbios_send_request(17, 11); - ret = buffer->output[0]; + dell_set_arguments(0, 0, 0, 0); + ret = dell_send_request(CLASS_INFO, SELECT_RFKILL); + if (ret) + return ret; status = buffer->output[1]; - dell_smbios_clear_buffer(); - - buffer->input[0] = 0x2; - dell_smbios_send_request(17, 11); - hwswitch_ret = buffer->output[0]; + dell_set_arguments(0, 0x2, 0, 0); + hwswitch_ret = dell_send_request(CLASS_INFO, SELECT_RFKILL); + if (hwswitch_ret) + return hwswitch_ret; hwswitch_state = buffer->output[1]; - dell_smbios_release_buffer(); - seq_printf(s, "return:\t%d\n", ret); seq_printf(s, "status:\t0x%X\n", status); seq_printf(s, "Bit 0 : Hardware switch supported: %lu\n", @@ -613,46 +596,36 @@ static const struct file_operations dell_debugfs_fops = { static void dell_update_rfkill(struct work_struct *ignored) { - struct calling_interface_buffer *buffer; int hwswitch = 0; int status; int ret; - buffer = dell_smbios_get_buffer(); - - dell_smbios_send_request(17, 11); - ret = buffer->output[0]; + dell_set_arguments(0, 0, 0, 0); + ret = dell_send_request(CLASS_INFO, SELECT_RFKILL); status = buffer->output[1]; if (ret != 0) - goto out; - - dell_smbios_clear_buffer(); + return; - buffer->input[0] = 0x2; - dell_smbios_send_request(17, 11); - ret = buffer->output[0]; + dell_set_arguments(0, 0x2, 0, 0); + ret = dell_send_request(CLASS_INFO, SELECT_RFKILL); if (ret == 0 && (status & BIT(0))) hwswitch = buffer->output[1]; if (wifi_rfkill) { dell_rfkill_update_hw_state(wifi_rfkill, 1, status, hwswitch); - dell_rfkill_update_sw_state(wifi_rfkill, 1, status, buffer); + dell_rfkill_update_sw_state(wifi_rfkill, 1, status); } if (bluetooth_rfkill) { dell_rfkill_update_hw_state(bluetooth_rfkill, 2, status, hwswitch); - dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status, - buffer); + dell_rfkill_update_sw_state(bluetooth_rfkill, 2, status); } if (wwan_rfkill) { dell_rfkill_update_hw_state(wwan_rfkill, 3, status, hwswitch); - dell_rfkill_update_sw_state(wwan_rfkill, 3, status, buffer); + dell_rfkill_update_sw_state(wwan_rfkill, 3, status); } - - out: - dell_smbios_release_buffer(); } static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill); @@ -696,7 +669,6 @@ static struct notifier_block dell_laptop_rbtn_notifier = { static int __init dell_setup_rfkill(void) { - struct calling_interface_buffer *buffer; int status, ret, whitelisted; const char *product; @@ -712,11 +684,9 @@ static int __init dell_setup_rfkill(void) if (!force_rfkill && !whitelisted) return 0; - buffer = dell_smbios_get_buffer(); - dell_smbios_send_request(17, 11); - ret = buffer->output[0]; + dell_set_arguments(0, 0, 0, 0); + ret = dell_send_request(CLASS_INFO, SELECT_RFKILL); status = buffer->output[1]; - 
dell_smbios_release_buffer(); /* dell wireless info smbios call is not supported */ if (ret != 0) @@ -869,7 +839,6 @@ static void dell_cleanup_rfkill(void) static int dell_send_intensity(struct backlight_device *bd) { - struct calling_interface_buffer *buffer; struct calling_interface_token *token; int ret; @@ -877,24 +846,17 @@ static int dell_send_intensity(struct backlight_device *bd) if (!token) return -ENODEV; - buffer = dell_smbios_get_buffer(); - buffer->input[0] = token->location; - buffer->input[1] = bd->props.brightness; - + dell_set_arguments(token->location, bd->props.brightness, 0, 0); if (power_supply_is_system_supplied() > 0) - dell_smbios_send_request(1, 2); + ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_AC); else - dell_smbios_send_request(1, 1); - - ret = dell_smbios_error(buffer->output[0]); + ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_BAT); - dell_smbios_release_buffer(); return ret; } static int dell_get_intensity(struct backlight_device *bd) { - struct calling_interface_buffer *buffer; struct calling_interface_token *token; int ret; @@ -902,20 +864,14 @@ static int dell_get_intensity(struct backlight_device *bd) if (!token) return -ENODEV; - buffer = dell_smbios_get_buffer(); - buffer->input[0] = token->location; - + dell_set_arguments(token->location, 0, 0, 0); if (power_supply_is_system_supplied() > 0) - dell_smbios_send_request(0, 2); + ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_AC); else - dell_smbios_send_request(0, 1); + ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_BAT); - if (buffer->output[0]) - ret = dell_smbios_error(buffer->output[0]); - else + if (ret == 0) ret = buffer->output[1]; - - dell_smbios_release_buffer(); return ret; } @@ -1179,20 +1135,13 @@ static DEFINE_MUTEX(kbd_led_mutex); static int kbd_get_info(struct kbd_info *info) { - struct calling_interface_buffer *buffer; u8 units; int ret; - buffer = dell_smbios_get_buffer(); - - buffer->input[0] = 0x0; - dell_smbios_send_request(4, 11); - ret = buffer->output[0]; - - if (ret) { - ret = dell_smbios_error(ret); - goto out; - } + dell_set_arguments(0, 0, 0, 0); + ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT); + if (ret) + return ret; info->modes = buffer->output[1] & 0xFFFF; info->type = (buffer->output[1] >> 24) & 0xFF; @@ -1209,8 +1158,6 @@ static int kbd_get_info(struct kbd_info *info) if (units & BIT(3)) info->days = (buffer->output[3] >> 24) & 0xFF; - out: - dell_smbios_release_buffer(); return ret; } @@ -1269,19 +1216,12 @@ static int kbd_set_level(struct kbd_state *state, u8 level) static int kbd_get_state(struct kbd_state *state) { - struct calling_interface_buffer *buffer; int ret; - buffer = dell_smbios_get_buffer(); - - buffer->input[0] = 0x1; - dell_smbios_send_request(4, 11); - ret = buffer->output[0]; - - if (ret) { - ret = dell_smbios_error(ret); - goto out; - } + dell_set_arguments(0x1, 0, 0, 0); + ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT); + if (ret) + return ret; state->mode_bit = ffs(buffer->output[1] & 0xFFFF); if (state->mode_bit != 0) @@ -1296,31 +1236,27 @@ static int kbd_get_state(struct kbd_state *state) state->timeout_value_ac = (buffer->output[2] >> 24) & 0x3F; state->timeout_unit_ac = (buffer->output[2] >> 30) & 0x3; - out: - dell_smbios_release_buffer(); return ret; } static int kbd_set_state(struct kbd_state *state) { - struct calling_interface_buffer *buffer; int ret; + u32 input1; + u32 input2; + + input1 = BIT(state->mode_bit) & 0xFFFF; + input1 |= (state->triggers & 0xFF) << 
16; + input1 |= (state->timeout_value & 0x3F) << 24; + input1 |= (state->timeout_unit & 0x3) << 30; + input2 = state->als_setting & 0xFF; + input2 |= (state->level & 0xFF) << 16; + input2 |= (state->timeout_value_ac & 0x3F) << 24; + input2 |= (state->timeout_unit_ac & 0x3) << 30; + dell_set_arguments(0x2, input1, input2, 0); + ret = dell_send_request(CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT); - buffer = dell_smbios_get_buffer(); - buffer->input[0] = 0x2; - buffer->input[1] = BIT(state->mode_bit) & 0xFFFF; - buffer->input[1] |= (state->triggers & 0xFF) << 16; - buffer->input[1] |= (state->timeout_value & 0x3F) << 24; - buffer->input[1] |= (state->timeout_unit & 0x3) << 30; - buffer->input[2] = state->als_setting & 0xFF; - buffer->input[2] |= (state->level & 0xFF) << 16; - buffer->input[2] |= (state->timeout_value_ac & 0x3F) << 24; - buffer->input[2] |= (state->timeout_unit_ac & 0x3) << 30; - dell_smbios_send_request(4, 11); - ret = buffer->output[0]; - dell_smbios_release_buffer(); - - return dell_smbios_error(ret); + return ret; } static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old) @@ -1345,7 +1281,6 @@ static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old) static int kbd_set_token_bit(u8 bit) { - struct calling_interface_buffer *buffer; struct calling_interface_token *token; int ret; @@ -1356,19 +1291,14 @@ static int kbd_set_token_bit(u8 bit) if (!token) return -EINVAL; - buffer = dell_smbios_get_buffer(); - buffer->input[0] = token->location; - buffer->input[1] = token->value; - dell_smbios_send_request(1, 0); - ret = buffer->output[0]; - dell_smbios_release_buffer(); + dell_set_arguments(token->location, token->value, 0, 0); + ret = dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_STD); - return dell_smbios_error(ret); + return ret; } static int kbd_get_token_bit(u8 bit) { - struct calling_interface_buffer *buffer; struct calling_interface_token *token; int ret; int val; @@ -1380,15 +1310,12 @@ static int kbd_get_token_bit(u8 bit) if (!token) return -EINVAL; - buffer = dell_smbios_get_buffer(); - buffer->input[0] = token->location; - dell_smbios_send_request(0, 0); - ret = buffer->output[0]; + dell_set_arguments(token->location, 0, 0, 0); + ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_STD); val = buffer->output[1]; - dell_smbios_release_buffer(); if (ret) - return dell_smbios_error(ret); + return ret; return (val == token->value); } @@ -2102,7 +2029,6 @@ static struct notifier_block dell_laptop_notifier = { int dell_micmute_led_set(int state) { - struct calling_interface_buffer *buffer; struct calling_interface_token *token; if (state == 0) @@ -2115,11 +2041,8 @@ int dell_micmute_led_set(int state) if (!token) return -ENODEV; - buffer = dell_smbios_get_buffer(); - buffer->input[0] = token->location; - buffer->input[1] = token->value; - dell_smbios_send_request(1, 0); - dell_smbios_release_buffer(); + dell_set_arguments(token->location, token->value, 0, 0); + dell_send_request(CLASS_TOKEN_WRITE, SELECT_TOKEN_STD); return state; } @@ -2127,7 +2050,6 @@ EXPORT_SYMBOL_GPL(dell_micmute_led_set); static int __init dell_init(void) { - struct calling_interface_buffer *buffer; struct calling_interface_token *token; int max_intensity = 0; int ret; @@ -2151,6 +2073,13 @@ static int __init dell_init(void) if (ret) goto fail_platform_device2; + buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL); + if (!buffer) { + ret = -ENOMEM; + goto fail_buffer; + } + + ret = dell_setup_rfkill(); if (ret) { @@ -2175,12 +2104,10 @@ 
static int __init dell_init(void) token = dell_smbios_find_token(BRIGHTNESS_TOKEN); if (token) { - buffer = dell_smbios_get_buffer(); - buffer->input[0] = token->location; - dell_smbios_send_request(0, 2); - if (buffer->output[0] == 0) + dell_set_arguments(token->location, 0, 0, 0); + ret = dell_send_request(CLASS_TOKEN_READ, SELECT_TOKEN_AC); + if (ret) max_intensity = buffer->output[3]; - dell_smbios_release_buffer(); } if (max_intensity) { @@ -2214,6 +2141,8 @@ static int __init dell_init(void) fail_get_brightness: backlight_device_unregister(dell_backlight_device); fail_backlight: + kfree(buffer); +fail_buffer: dell_cleanup_rfkill(); fail_rfkill: platform_device_del(platform_device); @@ -2233,6 +2162,7 @@ static void __exit dell_exit(void) touchpad_led_exit(); kbd_led_exit(); backlight_device_unregister(dell_backlight_device); + kfree(buffer); dell_cleanup_rfkill(); if (platform_device) { platform_device_unregister(platform_device); diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c new file mode 100644 index 000000000000..89f65c4651a0 --- /dev/null +++ b/drivers/platform/x86/dell-smbios-smm.c @@ -0,0 +1,196 @@ +/* + * SMI methods for use with dell-smbios + * + * Copyright (c) Red Hat <[email protected]> + * Copyright (c) 2014 Gabriele Mazzotta <[email protected]> + * Copyright (c) 2014 Pali Rohár <[email protected]> + * Copyright (c) 2017 Dell Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/dmi.h> +#include <linux/gfp.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include "../../firmware/dcdbas.h" +#include "dell-smbios.h" + +static int da_command_address; +static int da_command_code; +static struct calling_interface_buffer *buffer; +struct platform_device *platform_device; +static DEFINE_MUTEX(smm_mutex); + +static const struct dmi_system_id dell_device_table[] __initconst = { + { + .ident = "Dell laptop", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_CHASSIS_TYPE, "8"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_CHASSIS_TYPE, "9"), /*Laptop*/ + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /*Notebook*/ + }, + }, + { + .ident = "Dell Computer Corporation", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"), + DMI_MATCH(DMI_CHASSIS_TYPE, "8"), + }, + }, + { } +}; +MODULE_DEVICE_TABLE(dmi, dell_device_table); + +static void __init parse_da_table(const struct dmi_header *dm) +{ + struct calling_interface_structure *table = + container_of(dm, struct calling_interface_structure, header); + + /* 4 bytes of table header, plus 7 bytes of Dell header, plus at least + * 6 bytes of entry + */ + if (dm->length < 17) + return; + + da_command_address = table->cmdIOAddress; + da_command_code = table->cmdIOCode; +} + +static void __init find_cmd_address(const struct dmi_header *dm, void *dummy) +{ + switch (dm->type) { + case 0xda: /* Calling interface */ + parse_da_table(dm); + break; + } +} + +int dell_smbios_smm_call(struct calling_interface_buffer *input) +{ + struct smi_cmd command; + size_t size; + + size = sizeof(struct calling_interface_buffer); + command.magic = SMI_CMD_MAGIC; + command.command_address = 
da_command_address; + command.command_code = da_command_code; + command.ebx = virt_to_phys(buffer); + command.ecx = 0x42534931; + + mutex_lock(&smm_mutex); + memcpy(buffer, input, size); + dcdbas_smi_request(&command); + memcpy(input, buffer, size); + mutex_unlock(&smm_mutex); + return 0; +} + +/* When enabled this indicates that SMM won't work */ +static bool test_wsmt_enabled(void) +{ + struct calling_interface_token *wsmt; + + /* if token doesn't exist, SMM will work */ + wsmt = dell_smbios_find_token(WSMT_EN_TOKEN); + if (!wsmt) + return false; + + /* If token exists, try to access over SMM but set a dummy return. + * - If WSMT disabled it will be overwritten by SMM + * - If WSMT enabled then dummy value will remain + */ + buffer->cmd_class = CLASS_TOKEN_READ; + buffer->cmd_select = SELECT_TOKEN_STD; + memset(buffer, 0, sizeof(struct calling_interface_buffer)); + buffer->input[0] = wsmt->location; + buffer->output[0] = 99; + dell_smbios_smm_call(buffer); + if (buffer->output[0] == 99) + return true; + + return false; +} + +static int __init dell_smbios_smm_init(void) +{ + int ret; + /* + * Allocate buffer below 4GB for SMI data--only 32-bit physical addr + * is passed to SMI handler. + */ + buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32); + if (!buffer) + return -ENOMEM; + + dmi_walk(find_cmd_address, NULL); + + if (test_wsmt_enabled()) { + pr_debug("Disabling due to WSMT enabled\n"); + ret = -ENODEV; + goto fail_wsmt; + } + + platform_device = platform_device_alloc("dell-smbios", 1); + if (!platform_device) { + ret = -ENOMEM; + goto fail_platform_device_alloc; + } + + ret = platform_device_add(platform_device); + if (ret) + goto fail_platform_device_add; + + ret = dell_smbios_register_device(&platform_device->dev, + &dell_smbios_smm_call); + if (ret) + goto fail_register; + + return 0; + +fail_register: + platform_device_del(platform_device); + +fail_platform_device_add: + platform_device_put(platform_device); + +fail_wsmt: +fail_platform_device_alloc: + free_page((unsigned long)buffer); + return ret; +} + +static void __exit dell_smbios_smm_exit(void) +{ + if (platform_device) { + dell_smbios_unregister_device(&platform_device->dev); + platform_device_unregister(platform_device); + free_page((unsigned long)buffer); + } +} + +subsys_initcall(dell_smbios_smm_init); +module_exit(dell_smbios_smm_exit); + +MODULE_AUTHOR("Matthew Garrett <[email protected]>"); +MODULE_AUTHOR("Gabriele Mazzotta <[email protected]>"); +MODULE_AUTHOR("Pali Rohár <[email protected]>"); +MODULE_AUTHOR("Mario Limonciello <[email protected]>"); +MODULE_DESCRIPTION("Dell SMBIOS communications over SMI"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c new file mode 100644 index 000000000000..609557aa5868 --- /dev/null +++ b/drivers/platform/x86/dell-smbios-wmi.c @@ -0,0 +1,285 @@ +/* + * WMI methods for use with dell-smbios + * + * Copyright (c) 2017 Dell Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/dmi.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/uaccess.h> +#include <linux/wmi.h> +#include "dell-smbios.h" +#include "dell-wmi-descriptor.h" + +static DEFINE_MUTEX(call_mutex); +static DEFINE_MUTEX(list_mutex); +static int wmi_supported; + +struct misc_bios_flags_structure { + struct dmi_header header; + u16 flags0; +} __packed; +#define FLAG_HAS_ACPI_WMI 0x02 + +#define DELL_WMI_SMBIOS_GUID "A80593CE-A997-11DA-B012-B622A1EF5492" + +struct wmi_smbios_priv { + struct dell_wmi_smbios_buffer *buf; + struct list_head list; + struct wmi_device *wdev; + struct device *child; + u32 req_buf_size; +}; +static LIST_HEAD(wmi_list); + +static inline struct wmi_smbios_priv *get_first_smbios_priv(void) +{ + return list_first_entry_or_null(&wmi_list, + struct wmi_smbios_priv, + list); +} + +static int run_smbios_call(struct wmi_device *wdev) +{ + struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; + struct wmi_smbios_priv *priv; + struct acpi_buffer input; + union acpi_object *obj; + acpi_status status; + + priv = dev_get_drvdata(&wdev->dev); + input.length = priv->req_buf_size - sizeof(u64); + input.pointer = &priv->buf->std; + + dev_dbg(&wdev->dev, "evaluating: %u/%u [%x,%x,%x,%x]\n", + priv->buf->std.cmd_class, priv->buf->std.cmd_select, + priv->buf->std.input[0], priv->buf->std.input[1], + priv->buf->std.input[2], priv->buf->std.input[3]); + + status = wmidev_evaluate_method(wdev, 0, 1, &input, &output); + if (ACPI_FAILURE(status)) + return -EIO; + obj = (union acpi_object *)output.pointer; + if (obj->type != ACPI_TYPE_BUFFER) { + dev_dbg(&wdev->dev, "received type: %d\n", obj->type); + if (obj->type == ACPI_TYPE_INTEGER) + dev_dbg(&wdev->dev, "SMBIOS call failed: %llu\n", + obj->integer.value); + return -EIO; + } + memcpy(&priv->buf->std, obj->buffer.pointer, obj->buffer.length); + dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n", + priv->buf->std.output[0], priv->buf->std.output[1], + priv->buf->std.output[2], priv->buf->std.output[3]); + + return 0; +} + +int dell_smbios_wmi_call(struct calling_interface_buffer *buffer) +{ + struct wmi_smbios_priv *priv; + size_t difference; + size_t size; + int ret; + + mutex_lock(&call_mutex); + priv = get_first_smbios_priv(); + if (!priv) { + ret = -ENODEV; + goto out_wmi_call; + } + + size = sizeof(struct calling_interface_buffer); + difference = priv->req_buf_size - sizeof(u64) - size; + + memset(&priv->buf->ext, 0, difference); + memcpy(&priv->buf->std, buffer, size); + ret = run_smbios_call(priv->wdev); + memcpy(buffer, &priv->buf->std, size); +out_wmi_call: + mutex_unlock(&call_mutex); + + return ret; +} + +static long dell_smbios_wmi_filter(struct wmi_device *wdev, unsigned int cmd, + struct wmi_ioctl_buffer *arg) +{ + struct wmi_smbios_priv *priv; + int ret = 0; + + switch (cmd) { + case DELL_WMI_SMBIOS_CMD: + mutex_lock(&call_mutex); + priv = dev_get_drvdata(&wdev->dev); + if (!priv) { + ret = -ENODEV; + goto fail_smbios_cmd; + } + memcpy(priv->buf, arg, priv->req_buf_size); + if (dell_smbios_call_filter(&wdev->dev, &priv->buf->std)) { + dev_err(&wdev->dev, "Invalid call %d/%d:%8x\n", + priv->buf->std.cmd_class, + priv->buf->std.cmd_select, + priv->buf->std.input[0]); + ret = -EFAULT; + goto fail_smbios_cmd; + } + ret = run_smbios_call(priv->wdev); + if (ret) + goto fail_smbios_cmd; + memcpy(arg, priv->buf, priv->req_buf_size); +fail_smbios_cmd: + mutex_unlock(&call_mutex); + break; + default: + ret = -ENOIOCTLCMD; + } 
+ return ret; +} + +static int dell_smbios_wmi_probe(struct wmi_device *wdev) +{ + struct wmi_driver *wdriver = + container_of(wdev->dev.driver, struct wmi_driver, driver); + struct wmi_smbios_priv *priv; + u32 hotfix; + int count; + int ret; + + ret = dell_wmi_get_descriptor_valid(); + if (ret) + return ret; + + priv = devm_kzalloc(&wdev->dev, sizeof(struct wmi_smbios_priv), + GFP_KERNEL); + if (!priv) + return -ENOMEM; + + /* WMI buffer size will be either 4k or 32k depending on machine */ + if (!dell_wmi_get_size(&priv->req_buf_size)) + return -EPROBE_DEFER; + + /* some SMBIOS calls fail unless BIOS contains hotfix */ + if (!dell_wmi_get_hotfix(&hotfix)) + return -EPROBE_DEFER; + if (!hotfix) { + dev_warn(&wdev->dev, + "WMI SMBIOS userspace interface not supported(%u), try upgrading to a newer BIOS\n", + hotfix); + wdriver->filter_callback = NULL; + } + + /* add in the length object we will use internally with ioctl */ + priv->req_buf_size += sizeof(u64); + ret = set_required_buffer_size(wdev, priv->req_buf_size); + if (ret) + return ret; + + count = get_order(priv->req_buf_size); + priv->buf = (void *)__get_free_pages(GFP_KERNEL, count); + if (!priv->buf) + return -ENOMEM; + + /* ID is used by dell-smbios to set priority of drivers */ + wdev->dev.id = 1; + ret = dell_smbios_register_device(&wdev->dev, &dell_smbios_wmi_call); + if (ret) + goto fail_register; + + priv->wdev = wdev; + dev_set_drvdata(&wdev->dev, priv); + mutex_lock(&list_mutex); + list_add_tail(&priv->list, &wmi_list); + mutex_unlock(&list_mutex); + + return 0; + +fail_register: + free_pages((unsigned long)priv->buf, count); + return ret; +} + +static int dell_smbios_wmi_remove(struct wmi_device *wdev) +{ + struct wmi_smbios_priv *priv = dev_get_drvdata(&wdev->dev); + int count; + + mutex_lock(&call_mutex); + mutex_lock(&list_mutex); + list_del(&priv->list); + mutex_unlock(&list_mutex); + dell_smbios_unregister_device(&wdev->dev); + count = get_order(priv->req_buf_size); + free_pages((unsigned long)priv->buf, count); + mutex_unlock(&call_mutex); + return 0; +} + +static const struct wmi_device_id dell_smbios_wmi_id_table[] = { + { .guid_string = DELL_WMI_SMBIOS_GUID }, + { }, +}; + +static void __init parse_b1_table(const struct dmi_header *dm) +{ + struct misc_bios_flags_structure *flags = + container_of(dm, struct misc_bios_flags_structure, header); + + /* 4 bytes header, 8 bytes flags */ + if (dm->length < 12) + return; + if (dm->handle != 0xb100) + return; + if ((flags->flags0 & FLAG_HAS_ACPI_WMI)) + wmi_supported = 1; +} + +static void __init find_b1(const struct dmi_header *dm, void *dummy) +{ + switch (dm->type) { + case 0xb1: /* misc bios flags */ + parse_b1_table(dm); + break; + } +} + +static struct wmi_driver dell_smbios_wmi_driver = { + .driver = { + .name = "dell-smbios", + }, + .probe = dell_smbios_wmi_probe, + .remove = dell_smbios_wmi_remove, + .id_table = dell_smbios_wmi_id_table, + .filter_callback = dell_smbios_wmi_filter, +}; + +static int __init init_dell_smbios_wmi(void) +{ + dmi_walk(find_b1, NULL); + + if (!wmi_supported) + return -ENODEV; + + return wmi_driver_register(&dell_smbios_wmi_driver); +} + +static void __exit exit_dell_smbios_wmi(void) +{ + wmi_driver_unregister(&dell_smbios_wmi_driver); +} + +module_init(init_dell_smbios_wmi); +module_exit(exit_dell_smbios_wmi); + +MODULE_ALIAS("wmi:" DELL_WMI_SMBIOS_GUID); +MODULE_AUTHOR("Mario Limonciello <[email protected]>"); +MODULE_DESCRIPTION("Dell SMBIOS communications over WMI"); +MODULE_LICENSE("GPL"); diff --git 
a/drivers/platform/x86/dell-smbios.c b/drivers/platform/x86/dell-smbios.c index 0a5723468bff..6a60db515bda 100644 --- a/drivers/platform/x86/dell-smbios.c +++ b/drivers/platform/x86/dell-smbios.c @@ -12,33 +12,119 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> +#include <linux/capability.h> #include <linux/dmi.h> #include <linux/err.h> -#include <linux/gfp.h> #include <linux/mutex.h> +#include <linux/platform_device.h> #include <linux/slab.h> -#include <linux/io.h> -#include "../../firmware/dcdbas.h" #include "dell-smbios.h" -struct calling_interface_structure { - struct dmi_header header; - u16 cmdIOAddress; - u8 cmdIOCode; - u32 supportedCmds; - struct calling_interface_token tokens[]; -} __packed; - -static struct calling_interface_buffer *buffer; -static DEFINE_MUTEX(buffer_mutex); - -static int da_command_address; -static int da_command_code; +static u32 da_supported_commands; static int da_num_tokens; +static struct platform_device *platform_device; static struct calling_interface_token *da_tokens; +static struct device_attribute *token_location_attrs; +static struct device_attribute *token_value_attrs; +static struct attribute **token_attrs; +static DEFINE_MUTEX(smbios_mutex); + +struct smbios_device { + struct list_head list; + struct device *device; + int (*call_fn)(struct calling_interface_buffer *); +}; + +struct smbios_call { + u32 need_capability; + int cmd_class; + int cmd_select; +}; + +/* calls that are whitelisted for given capabilities */ +static struct smbios_call call_whitelist[] = { + /* generally tokens are allowed, but may be further filtered or + * restricted by token blacklist or whitelist + */ + {CAP_SYS_ADMIN, CLASS_TOKEN_READ, SELECT_TOKEN_STD}, + {CAP_SYS_ADMIN, CLASS_TOKEN_READ, SELECT_TOKEN_AC}, + {CAP_SYS_ADMIN, CLASS_TOKEN_READ, SELECT_TOKEN_BAT}, + {CAP_SYS_ADMIN, CLASS_TOKEN_WRITE, SELECT_TOKEN_STD}, + {CAP_SYS_ADMIN, CLASS_TOKEN_WRITE, SELECT_TOKEN_AC}, + {CAP_SYS_ADMIN, CLASS_TOKEN_WRITE, SELECT_TOKEN_BAT}, + /* used by userspace: fwupdate */ + {CAP_SYS_ADMIN, CLASS_ADMIN_PROP, SELECT_ADMIN_PROP}, + /* used by userspace: fwupd */ + {CAP_SYS_ADMIN, CLASS_INFO, SELECT_DOCK}, + {CAP_SYS_ADMIN, CLASS_FLASH_INTERFACE, SELECT_FLASH_INTERFACE}, +}; + +/* calls that are explicitly blacklisted */ +static struct smbios_call call_blacklist[] = { + {0x0000, 01, 07}, /* manufacturing use */ + {0x0000, 06, 05}, /* manufacturing use */ + {0x0000, 11, 03}, /* write once */ + {0x0000, 11, 07}, /* write once */ + {0x0000, 11, 11}, /* write once */ + {0x0000, 19, -1}, /* diagnostics */ + /* handled by kernel: dell-laptop */ + {0x0000, CLASS_INFO, SELECT_RFKILL}, + {0x0000, CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT}, +}; + +struct token_range { + u32 need_capability; + u16 min; + u16 max; +}; + +/* tokens that are whitelisted for given capabilities */ +static struct token_range token_whitelist[] = { + /* used by userspace: fwupdate */ + {CAP_SYS_ADMIN, CAPSULE_EN_TOKEN, CAPSULE_DIS_TOKEN}, + /* can indicate to userspace that WMI is needed */ + {0x0000, WSMT_EN_TOKEN, WSMT_DIS_TOKEN} +}; + +/* tokens that are explicitly blacklisted */ +static struct token_range token_blacklist[] = { + {0x0000, 0x0058, 0x0059}, /* ME use */ + {0x0000, 0x00CD, 0x00D0}, /* raid shadow copy */ + {0x0000, 0x013A, 0x01FF}, /* sata shadow copy */ + {0x0000, 0x0175, 0x0176}, /* write once */ + {0x0000, 0x0195, 0x0197}, /* 
diagnostics */ + {0x0000, 0x01DC, 0x01DD}, /* manufacturing use */ + {0x0000, 0x027D, 0x0284}, /* diagnostics */ + {0x0000, 0x02E3, 0x02E3}, /* manufacturing use */ + {0x0000, 0x02FF, 0x02FF}, /* manufacturing use */ + {0x0000, 0x0300, 0x0302}, /* manufacturing use */ + {0x0000, 0x0325, 0x0326}, /* manufacturing use */ + {0x0000, 0x0332, 0x0335}, /* fan control */ + {0x0000, 0x0350, 0x0350}, /* manufacturing use */ + {0x0000, 0x0363, 0x0363}, /* manufacturing use */ + {0x0000, 0x0368, 0x0368}, /* manufacturing use */ + {0x0000, 0x03F6, 0x03F7}, /* manufacturing use */ + {0x0000, 0x049E, 0x049F}, /* manufacturing use */ + {0x0000, 0x04A0, 0x04A3}, /* disagnostics */ + {0x0000, 0x04E6, 0x04E7}, /* manufacturing use */ + {0x0000, 0x4000, 0x7FFF}, /* internal BIOS use */ + {0x0000, 0x9000, 0x9001}, /* internal BIOS use */ + {0x0000, 0xA000, 0xBFFF}, /* write only */ + {0x0000, 0xEFF0, 0xEFFF}, /* internal BIOS use */ + /* handled by kernel: dell-laptop */ + {0x0000, BRIGHTNESS_TOKEN, BRIGHTNESS_TOKEN}, + {0x0000, KBD_LED_OFF_TOKEN, KBD_LED_AUTO_TOKEN}, + {0x0000, KBD_LED_AC_TOKEN, KBD_LED_AC_TOKEN}, + {0x0000, KBD_LED_AUTO_25_TOKEN, KBD_LED_AUTO_75_TOKEN}, + {0x0000, KBD_LED_AUTO_100_TOKEN, KBD_LED_AUTO_100_TOKEN}, + {0x0000, GLOBAL_MIC_MUTE_ENABLE, GLOBAL_MIC_MUTE_DISABLE}, +}; + +static LIST_HEAD(smbios_device_list); int dell_smbios_error(int value) { @@ -55,42 +141,175 @@ int dell_smbios_error(int value) } EXPORT_SYMBOL_GPL(dell_smbios_error); -struct calling_interface_buffer *dell_smbios_get_buffer(void) +int dell_smbios_register_device(struct device *d, void *call_fn) { - mutex_lock(&buffer_mutex); - dell_smbios_clear_buffer(); - return buffer; + struct smbios_device *priv; + + priv = devm_kzalloc(d, sizeof(struct smbios_device), GFP_KERNEL); + if (!priv) + return -ENOMEM; + get_device(d); + priv->device = d; + priv->call_fn = call_fn; + mutex_lock(&smbios_mutex); + list_add_tail(&priv->list, &smbios_device_list); + mutex_unlock(&smbios_mutex); + dev_dbg(d, "Added device: %s\n", d->driver->name); + return 0; } -EXPORT_SYMBOL_GPL(dell_smbios_get_buffer); +EXPORT_SYMBOL_GPL(dell_smbios_register_device); -void dell_smbios_clear_buffer(void) +void dell_smbios_unregister_device(struct device *d) { - memset(buffer, 0, sizeof(struct calling_interface_buffer)); + struct smbios_device *priv; + + mutex_lock(&smbios_mutex); + list_for_each_entry(priv, &smbios_device_list, list) { + if (priv->device == d) { + list_del(&priv->list); + put_device(d); + break; + } + } + mutex_unlock(&smbios_mutex); + dev_dbg(d, "Remove device: %s\n", d->driver->name); } -EXPORT_SYMBOL_GPL(dell_smbios_clear_buffer); +EXPORT_SYMBOL_GPL(dell_smbios_unregister_device); -void dell_smbios_release_buffer(void) +int dell_smbios_call_filter(struct device *d, + struct calling_interface_buffer *buffer) { - mutex_unlock(&buffer_mutex); + u16 t = 0; + int i; + + /* can't make calls over 30 */ + if (buffer->cmd_class > 30) { + dev_dbg(d, "class too big: %u\n", buffer->cmd_class); + return -EINVAL; + } + + /* supported calls on the particular system */ + if (!(da_supported_commands & (1 << buffer->cmd_class))) { + dev_dbg(d, "invalid command, supported commands: 0x%8x\n", + da_supported_commands); + return -EINVAL; + } + + /* match against call blacklist */ + for (i = 0; i < ARRAY_SIZE(call_blacklist); i++) { + if (buffer->cmd_class != call_blacklist[i].cmd_class) + continue; + if (buffer->cmd_select != call_blacklist[i].cmd_select && + call_blacklist[i].cmd_select != -1) + continue; + dev_dbg(d, "blacklisted command: %u/%u\n", + 
buffer->cmd_class, buffer->cmd_select); + return -EINVAL; + } + + /* if a token call, find token ID */ + + if ((buffer->cmd_class == CLASS_TOKEN_READ || + buffer->cmd_class == CLASS_TOKEN_WRITE) && + buffer->cmd_select < 3) { + /* find the matching token ID */ + for (i = 0; i < da_num_tokens; i++) { + if (da_tokens[i].location != buffer->input[0]) + continue; + t = da_tokens[i].tokenID; + break; + } + + /* token call; but token didn't exist */ + if (!t) { + dev_dbg(d, "token at location %04x doesn't exist\n", + buffer->input[0]); + return -EINVAL; + } + + /* match against token blacklist */ + for (i = 0; i < ARRAY_SIZE(token_blacklist); i++) { + if (!token_blacklist[i].min || !token_blacklist[i].max) + continue; + if (t >= token_blacklist[i].min && + t <= token_blacklist[i].max) + return -EINVAL; + } + + /* match against token whitelist */ + for (i = 0; i < ARRAY_SIZE(token_whitelist); i++) { + if (!token_whitelist[i].min || !token_whitelist[i].max) + continue; + if (t < token_whitelist[i].min || + t > token_whitelist[i].max) + continue; + if (!token_whitelist[i].need_capability || + capable(token_whitelist[i].need_capability)) { + dev_dbg(d, "whitelisted token: %x\n", t); + return 0; + } + + } + } + /* match against call whitelist */ + for (i = 0; i < ARRAY_SIZE(call_whitelist); i++) { + if (buffer->cmd_class != call_whitelist[i].cmd_class) + continue; + if (buffer->cmd_select != call_whitelist[i].cmd_select) + continue; + if (!call_whitelist[i].need_capability || + capable(call_whitelist[i].need_capability)) { + dev_dbg(d, "whitelisted capable command: %u/%u\n", + buffer->cmd_class, buffer->cmd_select); + return 0; + } + dev_dbg(d, "missing capability %d for %u/%u\n", + call_whitelist[i].need_capability, + buffer->cmd_class, buffer->cmd_select); + + } + + /* not in a whitelist, only allow processes with capabilities */ + if (capable(CAP_SYS_RAWIO)) { + dev_dbg(d, "Allowing %u/%u due to CAP_SYS_RAWIO\n", + buffer->cmd_class, buffer->cmd_select); + return 0; + } + + return -EACCES; } -EXPORT_SYMBOL_GPL(dell_smbios_release_buffer); +EXPORT_SYMBOL_GPL(dell_smbios_call_filter); -void dell_smbios_send_request(int class, int select) +int dell_smbios_call(struct calling_interface_buffer *buffer) { - struct smi_cmd command; + int (*call_fn)(struct calling_interface_buffer *) = NULL; + struct device *selected_dev = NULL; + struct smbios_device *priv; + int ret; - command.magic = SMI_CMD_MAGIC; - command.command_address = da_command_address; - command.command_code = da_command_code; - command.ebx = virt_to_phys(buffer); - command.ecx = 0x42534931; + mutex_lock(&smbios_mutex); + list_for_each_entry(priv, &smbios_device_list, list) { + if (!selected_dev || priv->device->id >= selected_dev->id) { + dev_dbg(priv->device, "Trying device ID: %d\n", + priv->device->id); + call_fn = priv->call_fn; + selected_dev = priv->device; + } + } + + if (!selected_dev) { + ret = -ENODEV; + pr_err("No dell-smbios drivers are loaded\n"); + goto out_smbios_call; + } - buffer->class = class; - buffer->select = select; + ret = call_fn(buffer); - dcdbas_smi_request(&command); +out_smbios_call: + mutex_unlock(&smbios_mutex); + return ret; } -EXPORT_SYMBOL_GPL(dell_smbios_send_request); +EXPORT_SYMBOL_GPL(dell_smbios_call); struct calling_interface_token *dell_smbios_find_token(int tokenid) { @@ -139,8 +358,7 @@ static void __init parse_da_table(const struct dmi_header *dm) if (dm->length < 17) return; - da_command_address = table->cmdIOAddress; - da_command_code = table->cmdIOCode; + da_supported_commands = 
table->supportedCmds; new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) * sizeof(struct calling_interface_token), @@ -156,6 +374,27 @@ static void __init parse_da_table(const struct dmi_header *dm) da_num_tokens += tokens; } +static void zero_duplicates(struct device *dev) +{ + int i, j; + + for (i = 0; i < da_num_tokens; i++) { + if (da_tokens[i].tokenID == 0) + continue; + for (j = i+1; j < da_num_tokens; j++) { + if (da_tokens[j].tokenID == 0) + continue; + if (da_tokens[i].tokenID == da_tokens[j].tokenID) { + dev_dbg(dev, "Zeroing dup token ID %x(%x/%x)\n", + da_tokens[j].tokenID, + da_tokens[j].location, + da_tokens[j].value); + da_tokens[j].tokenID = 0; + } + } + } +} + static void __init find_tokens(const struct dmi_header *dm, void *dummy) { switch (dm->type) { @@ -169,10 +408,160 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy) } } +static int match_attribute(struct device *dev, + struct device_attribute *attr) +{ + int i; + + for (i = 0; i < da_num_tokens * 2; i++) { + if (!token_attrs[i]) + continue; + if (strcmp(token_attrs[i]->name, attr->attr.name) == 0) + return i/2; + } + dev_dbg(dev, "couldn't match: %s\n", attr->attr.name); + return -EINVAL; +} + +static ssize_t location_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int i; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + i = match_attribute(dev, attr); + if (i > 0) + return scnprintf(buf, PAGE_SIZE, "%08x", da_tokens[i].location); + return 0; +} + +static ssize_t value_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int i; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + i = match_attribute(dev, attr); + if (i > 0) + return scnprintf(buf, PAGE_SIZE, "%08x", da_tokens[i].value); + return 0; +} + +static struct attribute_group smbios_attribute_group = { + .name = "tokens" +}; + +static struct platform_driver platform_driver = { + .driver = { + .name = "dell-smbios", + }, +}; + +static int build_tokens_sysfs(struct platform_device *dev) +{ + char *location_name; + char *value_name; + size_t size; + int ret; + int i, j; + + /* (number of tokens + 1 for null terminated */ + size = sizeof(struct device_attribute) * (da_num_tokens + 1); + token_location_attrs = kzalloc(size, GFP_KERNEL); + if (!token_location_attrs) + return -ENOMEM; + token_value_attrs = kzalloc(size, GFP_KERNEL); + if (!token_value_attrs) + goto out_allocate_value; + + /* need to store both location and value + terminator*/ + size = sizeof(struct attribute *) * ((2 * da_num_tokens) + 1); + token_attrs = kzalloc(size, GFP_KERNEL); + if (!token_attrs) + goto out_allocate_attrs; + + for (i = 0, j = 0; i < da_num_tokens; i++) { + /* skip empty */ + if (da_tokens[i].tokenID == 0) + continue; + /* add location */ + location_name = kasprintf(GFP_KERNEL, "%04x_location", + da_tokens[i].tokenID); + if (location_name == NULL) + goto out_unwind_strings; + sysfs_attr_init(&token_location_attrs[i].attr); + token_location_attrs[i].attr.name = location_name; + token_location_attrs[i].attr.mode = 0444; + token_location_attrs[i].show = location_show; + token_attrs[j++] = &token_location_attrs[i].attr; + + /* add value */ + value_name = kasprintf(GFP_KERNEL, "%04x_value", + da_tokens[i].tokenID); + if (value_name == NULL) + goto loop_fail_create_value; + sysfs_attr_init(&token_value_attrs[i].attr); + token_value_attrs[i].attr.name = value_name; + token_value_attrs[i].attr.mode = 0444; + token_value_attrs[i].show = value_show; + token_attrs[j++] = &token_value_attrs[i].attr; + 
continue; + +loop_fail_create_value: + kfree(value_name); + goto out_unwind_strings; + } + smbios_attribute_group.attrs = token_attrs; + + ret = sysfs_create_group(&dev->dev.kobj, &smbios_attribute_group); + if (ret) + goto out_unwind_strings; + return 0; + +out_unwind_strings: + for (i = i-1; i > 0; i--) { + kfree(token_location_attrs[i].attr.name); + kfree(token_value_attrs[i].attr.name); + } + kfree(token_attrs); +out_allocate_attrs: + kfree(token_value_attrs); +out_allocate_value: + kfree(token_location_attrs); + + return -ENOMEM; +} + +static void free_group(struct platform_device *pdev) +{ + int i; + + sysfs_remove_group(&pdev->dev.kobj, + &smbios_attribute_group); + for (i = 0; i < da_num_tokens; i++) { + kfree(token_location_attrs[i].attr.name); + kfree(token_value_attrs[i].attr.name); + } + kfree(token_attrs); + kfree(token_value_attrs); + kfree(token_location_attrs); +} + static int __init dell_smbios_init(void) { + const struct dmi_device *valid; int ret; + valid = dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL); + if (!valid) { + pr_err("Unable to run on non-Dell system\n"); + return -ENODEV; + } + dmi_walk(find_tokens, NULL); if (!da_tokens) { @@ -180,27 +569,52 @@ static int __init dell_smbios_init(void) return -ENODEV; } - /* - * Allocate buffer below 4GB for SMI data--only 32-bit physical addr - * is passed to SMI handler. - */ - buffer = (void *)__get_free_page(GFP_KERNEL | GFP_DMA32); - if (!buffer) { + ret = platform_driver_register(&platform_driver); + if (ret) + goto fail_platform_driver; + + platform_device = platform_device_alloc("dell-smbios", 0); + if (!platform_device) { ret = -ENOMEM; - goto fail_buffer; + goto fail_platform_device_alloc; } + ret = platform_device_add(platform_device); + if (ret) + goto fail_platform_device_add; + + /* duplicate tokens will cause problems building sysfs files */ + zero_duplicates(&platform_device->dev); + + ret = build_tokens_sysfs(platform_device); + if (ret) + goto fail_create_group; return 0; -fail_buffer: +fail_create_group: + platform_device_del(platform_device); + +fail_platform_device_add: + platform_device_put(platform_device); + +fail_platform_device_alloc: + platform_driver_unregister(&platform_driver); + +fail_platform_driver: kfree(da_tokens); return ret; } static void __exit dell_smbios_exit(void) { + mutex_lock(&smbios_mutex); + if (platform_device) { + free_group(platform_device); + platform_device_unregister(platform_device); + platform_driver_unregister(&platform_driver); + } kfree(da_tokens); - free_page((unsigned long)buffer); + mutex_unlock(&smbios_mutex); } subsys_initcall(dell_smbios_init); diff --git a/drivers/platform/x86/dell-smbios.h b/drivers/platform/x86/dell-smbios.h index 45cbc2292cd3..138d478d9adc 100644 --- a/drivers/platform/x86/dell-smbios.h +++ b/drivers/platform/x86/dell-smbios.h @@ -16,17 +16,29 @@ #ifndef _DELL_SMBIOS_H_ #define _DELL_SMBIOS_H_ -struct notifier_block; +#include <linux/device.h> +#include <uapi/linux/wmi.h> -/* This structure will be modified by the firmware when we enter - * system management mode, hence the volatiles */ +/* Classes and selects used only in kernel drivers */ +#define CLASS_KBD_BACKLIGHT 4 +#define SELECT_KBD_BACKLIGHT 11 -struct calling_interface_buffer { - u16 class; - u16 select; - volatile u32 input[4]; - volatile u32 output[4]; -} __packed; +/* Tokens used in kernel drivers, any of these + * should be filtered from userspace access + */ +#define BRIGHTNESS_TOKEN 0x007d +#define KBD_LED_AC_TOKEN 0x0451 +#define KBD_LED_OFF_TOKEN 0x01E1 
+#define KBD_LED_ON_TOKEN 0x01E2 +#define KBD_LED_AUTO_TOKEN 0x01E3 +#define KBD_LED_AUTO_25_TOKEN 0x02EA +#define KBD_LED_AUTO_50_TOKEN 0x02EB +#define KBD_LED_AUTO_75_TOKEN 0x02EC +#define KBD_LED_AUTO_100_TOKEN 0x02F6 +#define GLOBAL_MIC_MUTE_ENABLE 0x0364 +#define GLOBAL_MIC_MUTE_DISABLE 0x0365 + +struct notifier_block; struct calling_interface_token { u16 tokenID; @@ -37,12 +49,21 @@ struct calling_interface_token { }; }; -int dell_smbios_error(int value); +struct calling_interface_structure { + struct dmi_header header; + u16 cmdIOAddress; + u8 cmdIOCode; + u32 supportedCmds; + struct calling_interface_token tokens[]; +} __packed; -struct calling_interface_buffer *dell_smbios_get_buffer(void); -void dell_smbios_clear_buffer(void); -void dell_smbios_release_buffer(void); -void dell_smbios_send_request(int class, int select); +int dell_smbios_register_device(struct device *d, void *call_fn); +void dell_smbios_unregister_device(struct device *d); + +int dell_smbios_error(int value); +int dell_smbios_call_filter(struct device *d, + struct calling_interface_buffer *buffer); +int dell_smbios_call(struct calling_interface_buffer *buffer); struct calling_interface_token *dell_smbios_find_token(int tokenid); diff --git a/drivers/platform/x86/dell-smo8800.c b/drivers/platform/x86/dell-smo8800.c index 37e646034ef8..1d87237bc731 100644 --- a/drivers/platform/x86/dell-smo8800.c +++ b/drivers/platform/x86/dell-smo8800.c @@ -90,7 +90,7 @@ static ssize_t smo8800_misc_read(struct file *file, char __user *buf, struct smo8800_device, miscdev); u32 data = 0; - unsigned char byte_data = 0; + unsigned char byte_data; ssize_t retval = 1; if (count < 1) @@ -103,7 +103,6 @@ static ssize_t smo8800_misc_read(struct file *file, char __user *buf, if (retval) return retval; - byte_data = 1; retval = 1; if (data < 255) diff --git a/drivers/platform/x86/dell-wmi-descriptor.c b/drivers/platform/x86/dell-wmi-descriptor.c new file mode 100644 index 000000000000..072821aa47fc --- /dev/null +++ b/drivers/platform/x86/dell-wmi-descriptor.c @@ -0,0 +1,213 @@ +/* + * Dell WMI descriptor driver + * + * Copyright (C) 2017 Dell Inc. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/acpi.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/wmi.h> +#include "dell-wmi-descriptor.h" + +#define DELL_WMI_DESCRIPTOR_GUID "8D9DDCBC-A997-11DA-B012-B622A1EF5492" + +struct descriptor_priv { + struct list_head list; + u32 interface_version; + u32 size; + u32 hotfix; +}; +static int descriptor_valid = -EPROBE_DEFER; +static LIST_HEAD(wmi_list); +static DEFINE_MUTEX(list_mutex); + +int dell_wmi_get_descriptor_valid(void) +{ + if (!wmi_has_guid(DELL_WMI_DESCRIPTOR_GUID)) + return -ENODEV; + + return descriptor_valid; +} +EXPORT_SYMBOL_GPL(dell_wmi_get_descriptor_valid); + +bool dell_wmi_get_interface_version(u32 *version) +{ + struct descriptor_priv *priv; + bool ret = false; + + mutex_lock(&list_mutex); + priv = list_first_entry_or_null(&wmi_list, + struct descriptor_priv, + list); + if (priv) { + *version = priv->interface_version; + ret = true; + } + mutex_unlock(&list_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(dell_wmi_get_interface_version); + +bool dell_wmi_get_size(u32 *size) +{ + struct descriptor_priv *priv; + bool ret = false; + + mutex_lock(&list_mutex); + priv = list_first_entry_or_null(&wmi_list, + struct descriptor_priv, + list); + if (priv) { + *size = priv->size; + ret = true; + } + mutex_unlock(&list_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(dell_wmi_get_size); + +bool dell_wmi_get_hotfix(u32 *hotfix) +{ + struct descriptor_priv *priv; + bool ret = false; + + mutex_lock(&list_mutex); + priv = list_first_entry_or_null(&wmi_list, + struct descriptor_priv, + list); + if (priv) { + *hotfix = priv->hotfix; + ret = true; + } + mutex_unlock(&list_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(dell_wmi_get_hotfix); + +/* + * Descriptor buffer is 128 byte long and contains: + * + * Name Offset Length Value + * Vendor Signature 0 4 "DELL" + * Object Signature 4 4 " WMI" + * WMI Interface Version 8 4 <version> + * WMI buffer length 12 4 <length> + * WMI hotfix number 16 4 <hotfix> + */ +static int dell_wmi_descriptor_probe(struct wmi_device *wdev) +{ + union acpi_object *obj = NULL; + struct descriptor_priv *priv; + u32 *buffer; + int ret; + + obj = wmidev_block_query(wdev, 0); + if (!obj) { + dev_err(&wdev->dev, "failed to read Dell WMI descriptor\n"); + ret = -EIO; + goto out; + } + + if (obj->type != ACPI_TYPE_BUFFER) { + dev_err(&wdev->dev, "Dell descriptor has wrong type\n"); + ret = -EINVAL; + descriptor_valid = ret; + goto out; + } + + /* Although it's not technically a failure, this would lead to + * unexpected behavior + */ + if (obj->buffer.length != 128) { + dev_err(&wdev->dev, + "Dell descriptor buffer has unexpected length (%d)\n", + obj->buffer.length); + ret = -EINVAL; + descriptor_valid = ret; + goto out; + } + + buffer = (u32 *)obj->buffer.pointer; + + if (strncmp(obj->string.pointer, "DELL WMI", 8) != 0) { + dev_err(&wdev->dev, "Dell descriptor buffer has invalid signature (%8ph)\n", + buffer); + ret = -EINVAL; + descriptor_valid = ret; + goto out; + } + descriptor_valid = 0; + + if (buffer[2] != 0 && buffer[2] != 1) + dev_warn(&wdev->dev, "Dell descriptor buffer has unknown version (%lu)\n", + (unsigned long) buffer[2]); + + priv = devm_kzalloc(&wdev->dev, sizeof(struct descriptor_priv), + GFP_KERNEL); + + if (!priv) { + ret = -ENOMEM; + goto out; + } + + priv->interface_version = buffer[2]; + priv->size = buffer[3]; + priv->hotfix = buffer[4]; + ret = 0; + dev_set_drvdata(&wdev->dev, priv); + mutex_lock(&list_mutex); + list_add_tail(&priv->list, &wmi_list); + 
mutex_unlock(&list_mutex); + + dev_dbg(&wdev->dev, "Detected Dell WMI interface version %lu, buffer size %lu, hotfix %lu\n", + (unsigned long) priv->interface_version, + (unsigned long) priv->size, + (unsigned long) priv->hotfix); + +out: + kfree(obj); + return ret; +} + +static int dell_wmi_descriptor_remove(struct wmi_device *wdev) +{ + struct descriptor_priv *priv = dev_get_drvdata(&wdev->dev); + + mutex_lock(&list_mutex); + list_del(&priv->list); + mutex_unlock(&list_mutex); + return 0; +} + +static const struct wmi_device_id dell_wmi_descriptor_id_table[] = { + { .guid_string = DELL_WMI_DESCRIPTOR_GUID }, + { }, +}; + +static struct wmi_driver dell_wmi_descriptor_driver = { + .driver = { + .name = "dell-wmi-descriptor", + }, + .probe = dell_wmi_descriptor_probe, + .remove = dell_wmi_descriptor_remove, + .id_table = dell_wmi_descriptor_id_table, +}; + +module_wmi_driver(dell_wmi_descriptor_driver); + +MODULE_ALIAS("wmi:" DELL_WMI_DESCRIPTOR_GUID); +MODULE_AUTHOR("Mario Limonciello <[email protected]>"); +MODULE_DESCRIPTION("Dell WMI descriptor driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/dell-wmi-descriptor.h b/drivers/platform/x86/dell-wmi-descriptor.h new file mode 100644 index 000000000000..a6123a4d06a7 --- /dev/null +++ b/drivers/platform/x86/dell-wmi-descriptor.h @@ -0,0 +1,28 @@ +/* + * Dell WMI descriptor driver + * + * Copyright (c) 2017 Dell Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _DELL_WMI_DESCRIPTOR_H_ +#define _DELL_WMI_DESCRIPTOR_H_ + +#include <linux/wmi.h> + +/* possible return values: + * -ENODEV: Descriptor GUID missing from WMI bus + * -EPROBE_DEFER: probing for dell-wmi-descriptor not yet run + * 0: valid descriptor, successfully probed + * < 0: invalid descriptor, don't probe dependent devices + */ +int dell_wmi_get_descriptor_valid(void); + +bool dell_wmi_get_interface_version(u32 *version); +bool dell_wmi_get_size(u32 *size); +bool dell_wmi_get_hotfix(u32 *hotfix); + +#endif diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index 28d9f8696081..39d2f4518483 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c @@ -39,6 +39,7 @@ #include <linux/wmi.h> #include <acpi/video.h> #include "dell-smbios.h" +#include "dell-wmi-descriptor.h" MODULE_AUTHOR("Matthew Garrett <[email protected]>"); MODULE_AUTHOR("Pali Rohár <[email protected]>"); @@ -46,12 +47,10 @@ MODULE_DESCRIPTION("Dell laptop WMI hotkeys driver"); MODULE_LICENSE("GPL"); #define DELL_EVENT_GUID "9DBB5994-A997-11DA-B012-B622A1EF5492" -#define DELL_DESCRIPTOR_GUID "8D9DDCBC-A997-11DA-B012-B622A1EF5492" static bool wmi_requires_smbios_request; MODULE_ALIAS("wmi:"DELL_EVENT_GUID); -MODULE_ALIAS("wmi:"DELL_DESCRIPTOR_GUID); struct dell_wmi_priv { struct input_dev *input_dev; @@ -619,78 +618,6 @@ static void dell_wmi_input_destroy(struct wmi_device *wdev) } /* - * Descriptor buffer is 128 byte long and contains: - * - * Name Offset Length Value - * Vendor Signature 0 4 "DELL" - * Object Signature 4 4 " WMI" - * WMI Interface Version 8 4 <version> - * WMI buffer length 12 4 4096 - */ -static int dell_wmi_check_descriptor_buffer(struct wmi_device *wdev) -{ - struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev); - union acpi_object *obj = NULL; - struct wmi_device *desc_dev; - u32 *buffer; - int ret; - - desc_dev = wmidev_get_other_guid(wdev, 
DELL_DESCRIPTOR_GUID); - if (!desc_dev) { - dev_err(&wdev->dev, "Dell WMI descriptor does not exist\n"); - return -ENODEV; - } - - obj = wmidev_block_query(desc_dev, 0); - if (!obj) { - dev_err(&wdev->dev, "failed to read Dell WMI descriptor\n"); - ret = -EIO; - goto out; - } - - if (obj->type != ACPI_TYPE_BUFFER) { - dev_err(&wdev->dev, "Dell descriptor has wrong type\n"); - ret = -EINVAL; - goto out; - } - - if (obj->buffer.length != 128) { - dev_err(&wdev->dev, - "Dell descriptor buffer has invalid length (%d)\n", - obj->buffer.length); - if (obj->buffer.length < 16) { - ret = -EINVAL; - goto out; - } - } - - buffer = (u32 *)obj->buffer.pointer; - - if (buffer[0] != 0x4C4C4544 && buffer[1] != 0x494D5720) - dev_warn(&wdev->dev, "Dell descriptor buffer has invalid signature (%*ph)\n", - 8, buffer); - - if (buffer[2] != 0 && buffer[2] != 1) - dev_warn(&wdev->dev, "Dell descriptor buffer has unknown version (%d)\n", - buffer[2]); - - if (buffer[3] != 4096) - dev_warn(&wdev->dev, "Dell descriptor buffer has invalid buffer length (%d)\n", - buffer[3]); - - priv->interface_version = buffer[2]; - ret = 0; - - dev_info(&wdev->dev, "Detected Dell WMI interface version %u\n", - priv->interface_version); - -out: - kfree(obj); - put_device(&desc_dev->dev); - return ret; -} - -/* * According to Dell SMBIOS documentation: * * 17 3 Application Program Registration @@ -711,13 +638,16 @@ static int dell_wmi_events_set_enabled(bool enable) struct calling_interface_buffer *buffer; int ret; - buffer = dell_smbios_get_buffer(); + buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL); + buffer->cmd_class = CLASS_INFO; + buffer->cmd_select = SELECT_APP_REGISTRATION; buffer->input[0] = 0x10000; buffer->input[1] = 0x51534554; buffer->input[3] = enable; - dell_smbios_send_request(17, 3); - ret = buffer->output[0]; - dell_smbios_release_buffer(); + ret = dell_smbios_call(buffer); + if (ret == 0) + ret = buffer->output[0]; + kfree(buffer); return dell_smbios_error(ret); } @@ -725,7 +655,11 @@ static int dell_wmi_events_set_enabled(bool enable) static int dell_wmi_probe(struct wmi_device *wdev) { struct dell_wmi_priv *priv; - int err; + int ret; + + ret = dell_wmi_get_descriptor_valid(); + if (ret) + return ret; priv = devm_kzalloc( &wdev->dev, sizeof(struct dell_wmi_priv), GFP_KERNEL); @@ -733,9 +667,8 @@ static int dell_wmi_probe(struct wmi_device *wdev) return -ENOMEM; dev_set_drvdata(&wdev->dev, priv); - err = dell_wmi_check_descriptor_buffer(wdev); - if (err) - return err; + if (!dell_wmi_get_interface_version(&priv->interface_version)) + return -EPROBE_DEFER; return dell_wmi_input_setup(wdev); } diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 56a8195096a2..2cfbd3fa5136 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -691,6 +691,7 @@ static enum led_brightness eco_led_get(struct led_classdev *cdev) static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device) { + struct fujitsu_laptop *priv = acpi_driver_data(device); struct led_classdev *led; int result; @@ -724,12 +725,15 @@ static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device) } /* - * BTNI bit 24 seems to indicate the presence of a radio toggle - * button in place of a slide switch, and all such machines appear - * to also have an RF LED. Therefore use bit 24 as an indicator - * that an RF LED is present. 
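The dell_wmi_events_set_enabled() conversion in the dell-wmi.c hunk above shows the reworked dell-smbios calling convention: the caller now owns its calling_interface_buffer, fills cmd_class, cmd_select and the input words, and hands it to dell_smbios_call(), which dispatches to whichever backend registered through dell_smbios_register_device(). A minimal kernel-side sketch of that pattern follows; it is not part of the patch, the helper name is hypothetical, and it assumes only the symbols declared in the dell-smbios.h hunk above. Token-based requests would first look up their token with dell_smbios_find_token().

/* Sketch only: perform one SMBIOS request and map the firmware status
 * word in output[0] to an errno via dell_smbios_error(). */
#include <linux/slab.h>
#include "dell-smbios.h"

static int example_smbios_query(u16 cmd_class, u16 cmd_select, u32 arg)
{
        struct calling_interface_buffer *buffer;
        int ret;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        buffer->cmd_class = cmd_class;
        buffer->cmd_select = cmd_select;
        buffer->input[0] = arg;

        ret = dell_smbios_call(buffer);         /* 0 on success, -errno otherwise */
        if (!ret)
                ret = dell_smbios_error(buffer->output[0]);

        kfree(buffer);
        return ret;
}

A keyboard-backlight request, for instance, would pass the CLASS_KBD_BACKLIGHT/SELECT_KBD_BACKLIGHT pair defined in dell-smbios.h above.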
+ * Some Fujitsu laptops have a radio toggle button in place of a slide + * switch and all such machines appear to also have an RF LED. Based on + * comparing DSDT tables of four Fujitsu Lifebook models (E744, E751, + * S7110, S8420; the first one has a radio toggle button, the other + * three have slide switches), bit 17 of flags_supported (the value + * returned by method S000 of ACPI device FUJ02E3) seems to indicate + * whether given model has a radio toggle button. */ - if (call_fext_func(device, FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) { + if (priv->flags_supported & BIT(17)) { led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL); if (!led) return -ENOMEM; diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index b4ed3dc983d5..b4224389febe 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -297,7 +297,7 @@ static int hp_wmi_hw_state(int mask) if (state < 0) return state; - return state & 0x1; + return !!(state & mask); } static int __init hp_wmi_bios_2008_later(void) diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c index 493d8910a74e..7b12abe86b94 100644 --- a/drivers/platform/x86/hp_accel.c +++ b/drivers/platform/x86/hp_accel.c @@ -240,6 +240,7 @@ static const struct dmi_system_id lis3lv02d_dmi_ids[] = { AXIS_DMI_MATCH("HDX18", "HP HDX 18", x_inverted), AXIS_DMI_MATCH("HPB432x", "HP ProBook 432", xy_rotated_left), AXIS_DMI_MATCH("HPB440G3", "HP ProBook 440 G3", x_inverted_usd), + AXIS_DMI_MATCH("HPB440G4", "HP ProBook 440 G4", x_inverted), AXIS_DMI_MATCH("HPB442x", "HP ProBook 442", xy_rotated_left), AXIS_DMI_MATCH("HPB452x", "HP ProBook 452", y_inverted), AXIS_DMI_MATCH("HPB522x", "HP ProBook 522", xy_swap), diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index fe98d4ac0df3..53ab4e0f8962 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -1166,6 +1166,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 910-13IKB"), }, }, + { + .ident = "Lenovo YOGA 920-13IKB", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920-13IKB"), + }, + }, {} }; diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index e34fd70b67af..f470279c4c10 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -226,6 +226,24 @@ wakeup: return; } + /* + * Needed for suspend to work on some platforms that don't expose + * the 5-button array, but still send notifies with power button + * event code to this device object on power button actions. + * + * Report the power button press; catch and ignore the button release. + */ + if (!priv->array) { + if (event == 0xce) { + input_report_key(priv->input_dev, KEY_POWER, 1); + input_sync(priv->input_dev); + return; + } + + if (event == 0xcf) + return; + } + /* 0xC0 is for HID events, other values are for 5 button array */ if (event != 0xc0) { if (!priv->array || diff --git a/drivers/platform/x86/intel-wmi-thunderbolt.c b/drivers/platform/x86/intel-wmi-thunderbolt.c new file mode 100644 index 000000000000..c2257bd06f18 --- /dev/null +++ b/drivers/platform/x86/intel-wmi-thunderbolt.c @@ -0,0 +1,98 @@ +/* + * WMI Thunderbolt driver + * + * Copyright (C) 2017 Dell Inc. All Rights Reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/sysfs.h> +#include <linux/types.h> +#include <linux/wmi.h> + +#define INTEL_WMI_THUNDERBOLT_GUID "86CCFD48-205E-4A77-9C48-2021CBEDE341" + +static ssize_t force_power_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct acpi_buffer input; + acpi_status status; + u8 mode; + + input.length = sizeof(u8); + input.pointer = &mode; + mode = hex_to_bin(buf[0]); + if (mode == 0 || mode == 1) { + status = wmi_evaluate_method(INTEL_WMI_THUNDERBOLT_GUID, 0, 1, + &input, NULL); + if (ACPI_FAILURE(status)) + return -ENODEV; + } else { + return -EINVAL; + } + return count; +} + +static DEVICE_ATTR_WO(force_power); + +static struct attribute *tbt_attrs[] = { + &dev_attr_force_power.attr, + NULL +}; + +static const struct attribute_group tbt_attribute_group = { + .attrs = tbt_attrs, +}; + +static int intel_wmi_thunderbolt_probe(struct wmi_device *wdev) +{ + int ret; + + ret = sysfs_create_group(&wdev->dev.kobj, &tbt_attribute_group); + kobject_uevent(&wdev->dev.kobj, KOBJ_CHANGE); + return ret; +} + +static int intel_wmi_thunderbolt_remove(struct wmi_device *wdev) +{ + sysfs_remove_group(&wdev->dev.kobj, &tbt_attribute_group); + kobject_uevent(&wdev->dev.kobj, KOBJ_CHANGE); + return 0; +} + +static const struct wmi_device_id intel_wmi_thunderbolt_id_table[] = { + { .guid_string = INTEL_WMI_THUNDERBOLT_GUID }, + { }, +}; + +static struct wmi_driver intel_wmi_thunderbolt_driver = { + .driver = { + .name = "intel-wmi-thunderbolt", + }, + .probe = intel_wmi_thunderbolt_probe, + .remove = intel_wmi_thunderbolt_remove, + .id_table = intel_wmi_thunderbolt_id_table, +}; + +module_wmi_driver(intel_wmi_thunderbolt_driver); + +MODULE_ALIAS("wmi:" INTEL_WMI_THUNDERBOLT_GUID); +MODULE_AUTHOR("Mario Limonciello <[email protected]>"); +MODULE_DESCRIPTION("Intel WMI Thunderbolt force power driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/intel_cht_int33fe.c b/drivers/platform/x86/intel_cht_int33fe.c index da706e2c4232..380ef7ec094f 100644 --- a/drivers/platform/x86/intel_cht_int33fe.c +++ b/drivers/platform/x86/intel_cht_int33fe.c @@ -24,6 +24,7 @@ #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/module.h> +#include <linux/regulator/consumer.h> #include <linux/slab.h> #define EXPECTED_PTYPE 4 @@ -34,6 +35,42 @@ struct cht_int33fe_data { struct i2c_client *pi3usb30532; }; +/* + * Grrr I severly dislike buggy BIOS-es. At least one BIOS enumerates + * the max17047 both through the INT33FE ACPI device (it is right there + * in the resources table) as well as through a separate MAX17047 device. + * + * These helpers are used to work around this by checking if an i2c-client + * for the max17047 has already been registered. 
+ */ +static int cht_int33fe_check_for_max17047(struct device *dev, void *data) +{ + struct i2c_client **max17047 = data; + struct acpi_device *adev; + const char *hid; + + adev = ACPI_COMPANION(dev); + if (!adev) + return 0; + + hid = acpi_device_hid(adev); + + /* The MAX17047 ACPI node doesn't have an UID, so we don't check that */ + if (strcmp(hid, "MAX17047")) + return 0; + + *max17047 = to_i2c_client(dev); + return 1; +} + +static struct i2c_client *cht_int33fe_find_max17047(void) +{ + struct i2c_client *max17047 = NULL; + + i2c_for_each_dev(&max17047, cht_int33fe_check_for_max17047); + return max17047; +} + static const char * const max17047_suppliers[] = { "bq24190-charger" }; static const struct property_entry max17047_props[] = { @@ -41,14 +78,25 @@ static const struct property_entry max17047_props[] = { { } }; +static const struct property_entry fusb302_props[] = { + PROPERTY_ENTRY_STRING("fcs,extcon-name", "cht_wcove_pwrsrc"), + PROPERTY_ENTRY_U32("fcs,max-sink-microvolt", 12000000), + PROPERTY_ENTRY_U32("fcs,max-sink-microamp", 3000000), + PROPERTY_ENTRY_U32("fcs,max-sink-microwatt", 36000000), + { } +}; + static int cht_int33fe_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct i2c_board_info board_info; struct cht_int33fe_data *data; + struct i2c_client *max17047; + struct regulator *regulator; unsigned long long ptyp; acpi_status status; int fusb302_irq; + int ret; status = acpi_evaluate_integer(ACPI_HANDLE(dev), "PTYP", NULL, &ptyp); if (ACPI_FAILURE(status)) { @@ -63,6 +111,34 @@ static int cht_int33fe_probe(struct i2c_client *client) if (ptyp != EXPECTED_PTYPE) return -ENODEV; + /* Check presence of INT34D3 (hardware-rev 3) expected for ptype == 4 */ + if (!acpi_dev_present("INT34D3", "1", 3)) { + dev_err(dev, "Error PTYPE == %d, but no INT34D3 device\n", + EXPECTED_PTYPE); + return -ENODEV; + } + + /* + * We expect the WC PMIC to be paired with a TI bq24292i charger-IC. + * We check for the bq24292i vbus regulator here, this has 2 purposes: + * 1) The bq24292i allows charging with up to 12V, setting the fusb302's + * max-snk voltage to 12V with another charger-IC is not good. + * 2) For the fusb302 driver to get the bq24292i vbus regulator, the + * regulator-map, which is part of the bq24292i regulator_init_data, + * must be registered before the fusb302 is instantiated, otherwise + * it will end up with a dummy-regulator. + * Note "cht_wc_usb_typec_vbus" comes from the regulator_init_data + * which is defined in i2c-cht-wc.c from where the bq24292i i2c-client + * gets instantiated. We use regulator_get_optional here so that we + * don't end up getting a dummy-regulator ourselves. + */ + regulator = regulator_get_optional(dev, "cht_wc_usb_typec_vbus"); + if (IS_ERR(regulator)) { + ret = PTR_ERR(regulator); + return (ret == -ENODEV) ? 
-EPROBE_DEFER : ret; + } + regulator_put(regulator); + /* The FUSB302 uses the irq at index 1 and is the only irq user */ fusb302_irq = acpi_dev_gpio_irq_get(ACPI_COMPANION(dev), 1); if (fusb302_irq < 0) { @@ -75,16 +151,31 @@ static int cht_int33fe_probe(struct i2c_client *client) if (!data) return -ENOMEM; - memset(&board_info, 0, sizeof(board_info)); - strlcpy(board_info.type, "max17047", I2C_NAME_SIZE); - board_info.properties = max17047_props; - - data->max17047 = i2c_acpi_new_device(dev, 1, &board_info); - if (!data->max17047) - return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */ + /* Work around BIOS bug, see comment on cht_int33fe_find_max17047 */ + max17047 = cht_int33fe_find_max17047(); + if (max17047) { + /* Pre-existing i2c-client for the max17047, add device-props */ + ret = device_add_properties(&max17047->dev, max17047_props); + if (ret) + return ret; + /* And re-probe to get the new device-props applied. */ + ret = device_reprobe(&max17047->dev); + if (ret) + dev_warn(dev, "Reprobing max17047 error: %d\n", ret); + } else { + memset(&board_info, 0, sizeof(board_info)); + strlcpy(board_info.type, "max17047", I2C_NAME_SIZE); + board_info.dev_name = "max17047"; + board_info.properties = max17047_props; + data->max17047 = i2c_acpi_new_device(dev, 1, &board_info); + if (!data->max17047) + return -EPROBE_DEFER; /* Wait for i2c-adapter to load */ + } memset(&board_info, 0, sizeof(board_info)); - strlcpy(board_info.type, "fusb302", I2C_NAME_SIZE); + strlcpy(board_info.type, "typec_fusb302", I2C_NAME_SIZE); + board_info.dev_name = "fusb302"; + board_info.properties = fusb302_props; board_info.irq = fusb302_irq; data->fusb302 = i2c_acpi_new_device(dev, 2, &board_info); @@ -92,6 +183,7 @@ static int cht_int33fe_probe(struct i2c_client *client) goto out_unregister_max17047; memset(&board_info, 0, sizeof(board_info)); + board_info.dev_name = "pi3usb30532"; strlcpy(board_info.type, "pi3usb30532", I2C_NAME_SIZE); data->pi3usb30532 = i2c_acpi_new_device(dev, 3, &board_info); @@ -106,7 +198,8 @@ out_unregister_fusb302: i2c_unregister_device(data->fusb302); out_unregister_max17047: - i2c_unregister_device(data->max17047); + if (data->max17047) + i2c_unregister_device(data->max17047); return -EPROBE_DEFER; /* Wait for the i2c-adapter to load */ } @@ -117,7 +210,8 @@ static int cht_int33fe_remove(struct i2c_client *i2c) i2c_unregister_device(data->pi3usb30532); i2c_unregister_device(data->fusb302); - i2c_unregister_device(data->max17047); + if (data->max17047) + i2c_unregister_device(data->max17047); return 0; } diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index 58dcee562d64..a0c95853fd3f 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c @@ -10,10 +10,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * @@ -259,8 +255,6 @@ static const int IPS_SAMPLE_WINDOW = 5000; /* 5s moving window of samples */ /* Per-SKU limits */ struct ips_mcp_limits { - int cpu_family; - int cpu_model; /* includes extended model... 
*/ int mcp_power_limit; /* mW units */ int core_power_limit; int mch_power_limit; @@ -295,11 +289,14 @@ static struct ips_mcp_limits ips_ulv_limits = { }; struct ips_driver { - struct pci_dev *dev; - void *regmap; + struct device *dev; + void __iomem *regmap; + int irq; + struct task_struct *monitor; struct task_struct *adjust; struct dentry *debug_root; + struct timer_list timer; /* Average CPU core temps (all averages in .01 degrees C for precision) */ u16 ctv1_avg_temp; @@ -594,7 +591,7 @@ static void ips_disable_gpu_turbo(struct ips_driver *ips) return; if (!ips->gpu_turbo_disable()) - dev_err(&ips->dev->dev, "failed to disable graphics turbo\n"); + dev_err(ips->dev, "failed to disable graphics turbo\n"); else ips->__gpu_turbo_on = false; } @@ -649,8 +646,7 @@ static bool cpu_exceeded(struct ips_driver *ips, int cpu) spin_unlock_irqrestore(&ips->turbo_status_lock, flags); if (ret) - dev_info(&ips->dev->dev, - "CPU power or thermal limit exceeded\n"); + dev_info(ips->dev, "CPU power or thermal limit exceeded\n"); return ret; } @@ -769,7 +765,7 @@ static int ips_adjust(void *data) struct ips_driver *ips = data; unsigned long flags; - dev_dbg(&ips->dev->dev, "starting ips-adjust thread\n"); + dev_dbg(ips->dev, "starting ips-adjust thread\n"); /* * Adjust CPU and GPU clamps every 5s if needed. Doing it more @@ -816,7 +812,7 @@ sleep: schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD)); } while (!kthread_should_stop()); - dev_dbg(&ips->dev->dev, "ips-adjust thread stopped\n"); + dev_dbg(ips->dev, "ips-adjust thread stopped\n"); return 0; } @@ -942,9 +938,10 @@ static u32 calc_avg_power(struct ips_driver *ips, u32 *array) return avg; } -static void monitor_timeout(unsigned long arg) +static void monitor_timeout(struct timer_list *t) { - wake_up_process((struct task_struct *)arg); + struct ips_driver *ips = from_timer(ips, t, timer); + wake_up_process(ips->monitor); } /** @@ -961,7 +958,6 @@ static void monitor_timeout(unsigned long arg) static int ips_monitor(void *data) { struct ips_driver *ips = data; - struct timer_list timer; unsigned long seqno_timestamp, expire, last_msecs, last_sample_period; int i; u32 *cpu_samples, *mchp_samples, old_cpu_power; @@ -976,7 +972,7 @@ static int ips_monitor(void *data) mchp_samples = kzalloc(sizeof(u32) * IPS_SAMPLE_COUNT, GFP_KERNEL); if (!mcp_samples || !ctv1_samples || !ctv2_samples || !mch_samples || !cpu_samples || !mchp_samples) { - dev_err(&ips->dev->dev, + dev_err(ips->dev, "failed to allocate sample array, ips disabled\n"); kfree(mcp_samples); kfree(ctv1_samples); @@ -1049,8 +1045,7 @@ static int ips_monitor(void *data) schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); last_sample_period = IPS_SAMPLE_PERIOD; - setup_deferrable_timer_on_stack(&timer, monitor_timeout, - (unsigned long)current); + timer_setup(&ips->timer, monitor_timeout, TIMER_DEFERRABLE); do { u32 cpu_val, mch_val; u16 val; @@ -1097,7 +1092,8 @@ static int ips_monitor(void *data) ITV_ME_SEQNO_SHIFT; if (cur_seqno == last_seqno && time_after(jiffies, seqno_timestamp + HZ)) { - dev_warn(&ips->dev->dev, "ME failed to update for more than 1s, likely hung\n"); + dev_warn(ips->dev, + "ME failed to update for more than 1s, likely hung\n"); } else { seqno_timestamp = get_jiffies_64(); last_seqno = cur_seqno; @@ -1107,7 +1103,7 @@ static int ips_monitor(void *data) expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD); __set_current_state(TASK_INTERRUPTIBLE); - mod_timer(&timer, expire); + mod_timer(&ips->timer, expire); schedule(); /* Calculate actual 
sample period for power averaging */ @@ -1116,10 +1112,9 @@ static int ips_monitor(void *data) last_sample_period = 1; } while (!kthread_should_stop()); - del_timer_sync(&timer); - destroy_timer_on_stack(&timer); + del_timer_sync(&ips->timer); - dev_dbg(&ips->dev->dev, "ips-monitor thread stopped\n"); + dev_dbg(ips->dev, "ips-monitor thread stopped\n"); return 0; } @@ -1128,17 +1123,17 @@ static int ips_monitor(void *data) #define THM_DUMPW(reg) \ { \ u16 val = thm_readw(reg); \ - dev_dbg(&ips->dev->dev, #reg ": 0x%04x\n", val); \ + dev_dbg(ips->dev, #reg ": 0x%04x\n", val); \ } #define THM_DUMPL(reg) \ { \ u32 val = thm_readl(reg); \ - dev_dbg(&ips->dev->dev, #reg ": 0x%08x\n", val); \ + dev_dbg(ips->dev, #reg ": 0x%08x\n", val); \ } #define THM_DUMPQ(reg) \ { \ u64 val = thm_readq(reg); \ - dev_dbg(&ips->dev->dev, #reg ": 0x%016x\n", val); \ + dev_dbg(ips->dev, #reg ": 0x%016x\n", val); \ } static void dump_thermal_info(struct ips_driver *ips) @@ -1146,7 +1141,7 @@ static void dump_thermal_info(struct ips_driver *ips) u16 ptl; ptl = thm_readw(THM_PTL); - dev_dbg(&ips->dev->dev, "Processor temp limit: %d\n", ptl); + dev_dbg(ips->dev, "Processor temp limit: %d\n", ptl); THM_DUMPW(THM_CTA); THM_DUMPW(THM_TRC); @@ -1175,8 +1170,8 @@ static irqreturn_t ips_irq_handler(int irq, void *arg) if (!tses && !tes) return IRQ_NONE; - dev_info(&ips->dev->dev, "TSES: 0x%02x\n", tses); - dev_info(&ips->dev->dev, "TES: 0x%02x\n", tes); + dev_info(ips->dev, "TSES: 0x%02x\n", tses); + dev_info(ips->dev, "TES: 0x%02x\n", tes); /* STS update from EC? */ if (tes & 1) { @@ -1214,8 +1209,8 @@ static irqreturn_t ips_irq_handler(int irq, void *arg) /* Thermal trip */ if (tses) { - dev_warn(&ips->dev->dev, - "thermal trip occurred, tses: 0x%04x\n", tses); + dev_warn(ips->dev, "thermal trip occurred, tses: 0x%04x\n", + tses); thm_writeb(THM_TSES, tses); } @@ -1330,8 +1325,7 @@ static void ips_debugfs_init(struct ips_driver *ips) ips->debug_root = debugfs_create_dir("ips", NULL); if (!ips->debug_root) { - dev_err(&ips->dev->dev, - "failed to create debugfs entries: %ld\n", + dev_err(ips->dev, "failed to create debugfs entries: %ld\n", PTR_ERR(ips->debug_root)); return; } @@ -1345,8 +1339,7 @@ static void ips_debugfs_init(struct ips_driver *ips) ips->debug_root, node, &ips_debugfs_ops); if (!ent) { - dev_err(&ips->dev->dev, - "failed to create debug file: %ld\n", + dev_err(ips->dev, "failed to create debug file: %ld\n", PTR_ERR(ent)); goto err_cleanup; } @@ -1373,8 +1366,8 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips) u16 tdp; if (!(boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 37)) { - dev_info(&ips->dev->dev, "Non-IPS CPU detected.\n"); - goto out; + dev_info(ips->dev, "Non-IPS CPU detected.\n"); + return NULL; } rdmsrl(IA32_MISC_ENABLE, misc_en); @@ -1395,8 +1388,8 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips) else if (strstr(boot_cpu_data.x86_model_id, "CPU U")) limits = &ips_ulv_limits; else { - dev_info(&ips->dev->dev, "No CPUID match found.\n"); - goto out; + dev_info(ips->dev, "No CPUID match found.\n"); + return NULL; } rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power); @@ -1404,12 +1397,12 @@ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips) /* Sanity check TDP against CPU */ if (limits->core_power_limit != (tdp / 8) * 1000) { - dev_info(&ips->dev->dev, "CPU TDP doesn't match expected value (found %d, expected %d)\n", + dev_info(ips->dev, + "CPU TDP doesn't match expected value (found %d, expected %d)\n", tdp / 8, 
limits->core_power_limit / 1000); limits->core_power_limit = (tdp / 8) * 1000; } -out: return limits; } @@ -1459,7 +1452,7 @@ ips_gpu_turbo_enabled(struct ips_driver *ips) { if (!ips->gpu_busy && late_i915_load) { if (ips_get_i915_syms(ips)) { - dev_info(&ips->dev->dev, + dev_info(ips->dev, "i915 driver attached, reenabling gpu turbo\n"); ips->gpu_turbo_enabled = !(thm_readl(THM_HTS) & HTS_GTD_DIS); } @@ -1480,8 +1473,7 @@ ips_link_to_i915_driver(void) EXPORT_SYMBOL_GPL(ips_link_to_i915_driver); static const struct pci_device_id ips_id_table[] = { - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), }, { 0, } }; @@ -1517,62 +1509,45 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) if (dmi_check_system(ips_blacklist)) return -ENODEV; - ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL); + ips = devm_kzalloc(&dev->dev, sizeof(*ips), GFP_KERNEL); if (!ips) return -ENOMEM; - pci_set_drvdata(dev, ips); - ips->dev = dev; + spin_lock_init(&ips->turbo_status_lock); + ips->dev = &dev->dev; ips->limits = ips_detect_cpu(ips); if (!ips->limits) { dev_info(&dev->dev, "IPS not supported on this CPU\n"); - ret = -ENXIO; - goto error_free; + return -ENXIO; } - spin_lock_init(&ips->turbo_status_lock); - - ret = pci_enable_device(dev); + ret = pcim_enable_device(dev); if (ret) { dev_err(&dev->dev, "can't enable PCI device, aborting\n"); - goto error_free; + return ret; } - if (!pci_resource_start(dev, 0)) { - dev_err(&dev->dev, "TBAR not assigned, aborting\n"); - ret = -ENXIO; - goto error_free; - } - - ret = pci_request_regions(dev, "ips thermal sensor"); + ret = pcim_iomap_regions(dev, 1 << 0, pci_name(dev)); if (ret) { - dev_err(&dev->dev, "thermal resource busy, aborting\n"); - goto error_free; - } - - - ips->regmap = ioremap(pci_resource_start(dev, 0), - pci_resource_len(dev, 0)); - if (!ips->regmap) { dev_err(&dev->dev, "failed to map thermal regs, aborting\n"); - ret = -EBUSY; - goto error_release; + return ret; } + ips->regmap = pcim_iomap_table(dev)[0]; + + pci_set_drvdata(dev, ips); tse = thm_readb(THM_TSE); if (tse != TSE_EN) { dev_err(&dev->dev, "thermal device not enabled (0x%02x), aborting\n", tse); - ret = -ENXIO; - goto error_unmap; + return -ENXIO; } trc = thm_readw(THM_TRC); trc_required_mask = TRC_CORE1_EN | TRC_CORE_PWR | TRC_MCH_EN; if ((trc & trc_required_mask) != trc_required_mask) { dev_err(&dev->dev, "thermal reporting for required devices not enabled, aborting\n"); - ret = -ENXIO; - goto error_unmap; + return -ENXIO; } if (trc & TRC_CORE2_EN) @@ -1602,20 +1577,23 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) rdmsrl(PLATFORM_INFO, platform_info); if (!(platform_info & PLATFORM_TDP)) { dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n"); - ret = -ENODEV; - goto error_unmap; + return -ENODEV; } /* * IRQ handler for ME interaction * Note: don't use MSI here as the PCH has bugs. 
*/ - pci_disable_msi(dev); - ret = request_irq(dev->irq, ips_irq_handler, IRQF_SHARED, "ips", - ips); + ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY); + if (ret < 0) + return ret; + + ips->irq = pci_irq_vector(dev, 0); + + ret = request_irq(ips->irq, ips_irq_handler, IRQF_SHARED, "ips", ips); if (ret) { dev_err(&dev->dev, "request irq failed, aborting\n"); - goto error_unmap; + return ret; } /* Enable aux, hot & critical interrupts */ @@ -1672,13 +1650,8 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) error_thread_cleanup: kthread_stop(ips->adjust); error_free_irq: - free_irq(ips->dev->irq, ips); -error_unmap: - iounmap(ips->regmap); -error_release: - pci_release_regions(dev); -error_free: - kfree(ips); + free_irq(ips->irq, ips); + pci_free_irq_vectors(dev); return ret; } @@ -1709,27 +1682,20 @@ static void ips_remove(struct pci_dev *dev) wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit); - free_irq(ips->dev->irq, ips); + free_irq(ips->irq, ips); + pci_free_irq_vectors(dev); if (ips->adjust) kthread_stop(ips->adjust); if (ips->monitor) kthread_stop(ips->monitor); - iounmap(ips->regmap); - pci_release_regions(dev); - kfree(ips); dev_dbg(&dev->dev, "IPS driver removed\n"); } -static void ips_shutdown(struct pci_dev *dev) -{ -} - static struct pci_driver ips_pci_driver = { .name = "intel ips", .id_table = ips_id_table, .probe = ips_probe, .remove = ips_remove, - .shutdown = ips_shutdown, }; module_pci_driver(ips_pci_driver); diff --git a/drivers/platform/x86/intel_ips.h b/drivers/platform/x86/intel_ips.h index 73299beff5b3..60f4e3ddbe9f 100644 --- a/drivers/platform/x86/intel_ips.h +++ b/drivers/platform/x86/intel_ips.h @@ -10,10 +10,6 @@ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * * The full GNU General Public License is included in this distribution in * the file called "COPYING". 
*/ diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c index a47a41fc10ad..b5b890127479 100644 --- a/drivers/platform/x86/intel_punit_ipc.c +++ b/drivers/platform/x86/intel_punit_ipc.c @@ -252,28 +252,28 @@ static int intel_punit_get_bars(struct platform_device *pdev) * - GTDRIVER_IPC BASE_IFACE */ res = platform_get_resource(pdev, IORESOURCE_MEM, 2); - if (res) { + if (res && resource_size(res) > 1) { addr = devm_ioremap_resource(&pdev->dev, res); if (!IS_ERR(addr)) punit_ipcdev->base[ISPDRIVER_IPC][BASE_DATA] = addr; } res = platform_get_resource(pdev, IORESOURCE_MEM, 3); - if (res) { + if (res && resource_size(res) > 1) { addr = devm_ioremap_resource(&pdev->dev, res); if (!IS_ERR(addr)) punit_ipcdev->base[ISPDRIVER_IPC][BASE_IFACE] = addr; } res = platform_get_resource(pdev, IORESOURCE_MEM, 4); - if (res) { + if (res && resource_size(res) > 1) { addr = devm_ioremap_resource(&pdev->dev, res); if (!IS_ERR(addr)) punit_ipcdev->base[GTDRIVER_IPC][BASE_DATA] = addr; } res = platform_get_resource(pdev, IORESOURCE_MEM, 5); - if (res) { + if (res && resource_size(res) > 1) { addr = devm_ioremap_resource(&pdev->dev, res); if (!IS_ERR(addr)) punit_ipcdev->base[GTDRIVER_IPC][BASE_IFACE] = addr; diff --git a/drivers/platform/x86/intel_telemetry_core.c b/drivers/platform/x86/intel_telemetry_core.c index 0d4c3808a6d8..f378621b5fe9 100644 --- a/drivers/platform/x86/intel_telemetry_core.c +++ b/drivers/platform/x86/intel_telemetry_core.c @@ -15,9 +15,8 @@ * Telemetry Framework provides platform related PM and performance statistics. * This file provides the core telemetry API implementation. */ -#include <linux/module.h> -#include <linux/init.h> #include <linux/device.h> +#include <linux/module.h> #include <asm/intel_telemetry.h> diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c index d4fc42b4cbeb..4249e8267bbc 100644 --- a/drivers/platform/x86/intel_telemetry_debugfs.c +++ b/drivers/platform/x86/intel_telemetry_debugfs.c @@ -21,14 +21,12 @@ * /sys/kernel/debug/telemetry/ioss_race_verbosity: Write and Change Tracing * Verbosity via firmware */ -#include <linux/module.h> -#include <linux/init.h> -#include <linux/device.h> #include <linux/debugfs.h> -#include <linux/seq_file.h> +#include <linux/device.h> #include <linux/io.h> -#include <linux/uaccess.h> +#include <linux/module.h> #include <linux/pci.h> +#include <linux/seq_file.h> #include <linux/suspend.h> #include <asm/cpu_device_id.h> @@ -76,8 +74,6 @@ #define TELEM_IOSS_DX_D0IX_EVTS 25 #define TELEM_IOSS_PG_EVTS 30 -#define TELEM_EVT_LEN(x) (sizeof(x)/sizeof((x)[0])) - #define TELEM_DEBUGFS_CPU(model, data) \ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&data} @@ -304,13 +300,13 @@ static struct telemetry_debugfs_conf telem_apl_debugfs_conf = { .ioss_d0ix_data = telem_apl_ioss_d0ix_data, .ioss_pg_data = telem_apl_ioss_pg_data, - .pss_idle_evts = TELEM_EVT_LEN(telem_apl_pss_idle_data), - .pcs_idle_blkd_evts = TELEM_EVT_LEN(telem_apl_pcs_idle_blkd_data), - .pcs_s0ix_blkd_evts = TELEM_EVT_LEN(telem_apl_pcs_s0ix_blkd_data), - .pss_ltr_evts = TELEM_EVT_LEN(telem_apl_pss_ltr_data), - .pss_wakeup_evts = TELEM_EVT_LEN(telem_apl_pss_wakeup), - .ioss_d0ix_evts = TELEM_EVT_LEN(telem_apl_ioss_d0ix_data), - .ioss_pg_evts = TELEM_EVT_LEN(telem_apl_ioss_pg_data), + .pss_idle_evts = ARRAY_SIZE(telem_apl_pss_idle_data), + .pcs_idle_blkd_evts = ARRAY_SIZE(telem_apl_pcs_idle_blkd_data), + .pcs_s0ix_blkd_evts = ARRAY_SIZE(telem_apl_pcs_s0ix_blkd_data), + 
.pss_ltr_evts = ARRAY_SIZE(telem_apl_pss_ltr_data), + .pss_wakeup_evts = ARRAY_SIZE(telem_apl_pss_wakeup), + .ioss_d0ix_evts = ARRAY_SIZE(telem_apl_ioss_d0ix_data), + .ioss_pg_evts = ARRAY_SIZE(telem_apl_ioss_pg_data), .pstates_id = TELEM_APL_PSS_PSTATES_ID, .pss_idle_id = TELEM_APL_PSS_IDLE_ID, diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c index e0424d5a795a..2f889d6c270e 100644 --- a/drivers/platform/x86/intel_telemetry_pltdrv.c +++ b/drivers/platform/x86/intel_telemetry_pltdrv.c @@ -16,15 +16,9 @@ * It used the PUNIT and PMC IPC interfaces for configuring the counters. * The accumulated results are fetched from SRAM. */ -#include <linux/module.h> -#include <linux/init.h> -#include <linux/device.h> -#include <linux/debugfs.h> -#include <linux/seq_file.h> + #include <linux/io.h> -#include <linux/uaccess.h> -#include <linux/pci.h> -#include <linux/suspend.h> +#include <linux/module.h> #include <linux/platform_device.h> #include <asm/cpu_device_id.h> @@ -256,7 +250,7 @@ static int telemetry_check_evtid(enum telemetry_unit telem_unit, break; default: - pr_err("Unknown Telemetry action Specified %d\n", action); + pr_err("Unknown Telemetry action specified %d\n", action); return -EINVAL; } @@ -659,7 +653,7 @@ static int telemetry_setup(struct platform_device *pdev) ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig, TELEM_RESET); if (ret) { - dev_err(&pdev->dev, "TELEMTRY Setup Failed\n"); + dev_err(&pdev->dev, "TELEMETRY Setup Failed\n"); return ret; } return 0; @@ -685,7 +679,7 @@ static int telemetry_plt_update_events(struct telemetry_evtconfig pss_evtconfig, ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig, TELEM_UPDATE); if (ret) - pr_err("TELEMTRY Config Failed\n"); + pr_err("TELEMETRY Config Failed\n"); return ret; } @@ -822,7 +816,7 @@ static int telemetry_plt_reset_events(void) ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig, TELEM_RESET); if (ret) - pr_err("TELEMTRY Reset Failed\n"); + pr_err("TELEMETRY Reset Failed\n"); return ret; } @@ -885,7 +879,7 @@ static int telemetry_plt_add_events(u8 num_pss_evts, u8 num_ioss_evts, ret = telemetry_setup_evtconfig(pss_evtconfig, ioss_evtconfig, TELEM_ADD); if (ret) - pr_err("TELEMTRY ADD Failed\n"); + pr_err("TELEMETRY ADD Failed\n"); return ret; } @@ -1195,7 +1189,7 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev) ret = telemetry_set_pltdata(&telm_pltops, telm_conf); if (ret) { - dev_err(&pdev->dev, "TELEMTRY Set Pltops Failed.\n"); + dev_err(&pdev->dev, "TELEMETRY Set Pltops Failed.\n"); goto out; } @@ -1210,7 +1204,7 @@ out: iounmap(telm_conf->pss_config.regmap); if (telm_conf->ioss_config.regmap) iounmap(telm_conf->ioss_config.regmap); - dev_err(&pdev->dev, "TELEMTRY Setup Failed.\n"); + dev_err(&pdev->dev, "TELEMETRY Setup Failed.\n"); return ret; } @@ -1234,7 +1228,6 @@ static struct platform_driver telemetry_soc_driver = { static int __init telemetry_module_init(void) { - pr_info(DRIVER_NAME ": version %s loaded\n", DRIVER_VERSION); return platform_driver_register(&telemetry_soc_driver); } diff --git a/drivers/platform/x86/intel_turbo_max_3.c b/drivers/platform/x86/intel_turbo_max_3.c index 4f60d8e32a0a..d4ea01805879 100644 --- a/drivers/platform/x86/intel_turbo_max_3.c +++ b/drivers/platform/x86/intel_turbo_max_3.c @@ -125,6 +125,7 @@ static int itmt_legacy_cpu_online(unsigned int cpu) static const struct x86_cpu_id itmt_legacy_cpu_ids[] = { ICPU(INTEL_FAM6_BROADWELL_X), + ICPU(INTEL_FAM6_SKYLAKE_X), {} }; diff --git 
a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index 4f3de2a8c4df..504256c3660d 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -216,8 +216,8 @@ static struct resource mlxplat_mlxcpld_resources[] = { [0] = DEFINE_RES_IRQ_NAMED(17, "mlxcpld-hotplug"), }; -struct platform_device *mlxplat_dev; -struct mlxcpld_hotplug_platform_data *mlxplat_hotplug; +static struct platform_device *mlxplat_dev; +static struct mlxcpld_hotplug_platform_data *mlxplat_hotplug; static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi) { diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c index bc98ef95514a..9b9e1f39bbfb 100644 --- a/drivers/platform/x86/peaq-wmi.c +++ b/drivers/platform/x86/peaq-wmi.c @@ -8,6 +8,7 @@ */ #include <linux/acpi.h> +#include <linux/dmi.h> #include <linux/input-polldev.h> #include <linux/kernel.h> #include <linux/module.h> @@ -64,8 +65,23 @@ static void peaq_wmi_poll(struct input_polled_dev *dev) } } +/* Some other devices (Shuttle XS35) use the same WMI GUID for other purposes */ +static const struct dmi_system_id peaq_dmi_table[] __initconst = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"), + DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"), + }, + }, + {} +}; + static int __init peaq_wmi_init(void) { + /* WMI GUID is not unique, also check for a DMI match */ + if (!dmi_check_system(peaq_dmi_table)) + return -ENODEV; + if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID)) return -ENODEV; @@ -86,9 +102,6 @@ static int __init peaq_wmi_init(void) static void __exit peaq_wmi_exit(void) { - if (!wmi_has_guid(PEAQ_DOLBY_BUTTON_GUID)) - return; - input_unregister_polled_device(peaq_poll_dev); } diff --git a/drivers/platform/x86/silead_dmi.c b/drivers/platform/x86/silead_dmi.c index 1157a7b646d6..266535c2a72f 100644 --- a/drivers/platform/x86/silead_dmi.c +++ b/drivers/platform/x86/silead_dmi.c @@ -58,6 +58,7 @@ static const struct property_entry dexp_ursus_7w_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 630), PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-dexp-ursus-7w.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), + PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -72,6 +73,7 @@ static const struct property_entry surftab_wintron70_st70416_6_props[] = { PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), + PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -83,6 +85,8 @@ static const struct silead_ts_dmi_data surftab_wintron70_st70416_6_data = { static const struct property_entry gp_electronic_t701_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 960), PROPERTY_ENTRY_U32("touchscreen-size-y", 640), + PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-gp-electronic-t701.fw"), { } @@ -114,6 +118,7 @@ static const struct property_entry pov_mobii_wintab_p800w_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"), + PROPERTY_ENTRY_BOOL("silead,home-button"), { } }; @@ -136,6 +141,36 @@ static const struct silead_ts_dmi_data itworks_tw891_data = { .properties = itworks_tw891_props, }; +static const struct property_entry chuwi_hi8_pro_props[] = { + PROPERTY_ENTRY_U32("touchscreen-size-x", 1728), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1148), + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), + 
PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-chuwi-hi8-pro.fw"), + PROPERTY_ENTRY_BOOL("silead,home-button"), + { } +}; + +static const struct silead_ts_dmi_data chuwi_hi8_pro_data = { + .acpi_name = "MSSL1680:00", + .properties = chuwi_hi8_pro_props, +}; + +static const struct property_entry digma_citi_e200_props[] = { + PROPERTY_ENTRY_U32("touchscreen-size-x", 1980), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1500), + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), + PROPERTY_ENTRY_STRING("firmware-name", + "gsl1686-digma_citi_e200.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + PROPERTY_ENTRY_BOOL("silead,home-button"), + { } +}; + +static const struct silead_ts_dmi_data digma_citi_e200_data = { + .acpi_name = "MSSL1680:00", + .properties = digma_citi_e200_props, +}; + static const struct dmi_system_id silead_ts_dmi_table[] = { { /* CUBE iwork8 Air */ @@ -219,6 +254,23 @@ static const struct dmi_system_id silead_ts_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "TW891"), }, }, + { + /* Chuwi Hi8 Pro */ + .driver_data = (void *)&chuwi_hi8_pro_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Hampoo"), + DMI_MATCH(DMI_PRODUCT_NAME, "X1D3_C806N"), + }, + }, + { + /* Digma Citi E200 */ + .driver_data = (void *)&digma_citi_e200_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Digma"), + DMI_MATCH(DMI_PRODUCT_NAME, "CITI E200"), + DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"), + }, + }, { }, }; diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index a16cea2be9c3..62aa2c37b8d2 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -1627,7 +1627,7 @@ static const struct rfkill_ops sony_rfkill_ops = { static int sony_nc_setup_rfkill(struct acpi_device *device, enum sony_nc_rfkill nc_type) { - int err = 0; + int err; struct rfkill *rfk; enum rfkill_type type; const char *name; @@ -1660,17 +1660,19 @@ static int sony_nc_setup_rfkill(struct acpi_device *device, if (!rfk) return -ENOMEM; - if (sony_call_snc_handle(sony_rfkill_handle, 0x200, &result) < 0) { + err = sony_call_snc_handle(sony_rfkill_handle, 0x200, &result); + if (err < 0) { rfkill_destroy(rfk); - return -1; + return err; } hwblock = !(result & 0x1); - if (sony_call_snc_handle(sony_rfkill_handle, - sony_rfkill_address[nc_type], - &result) < 0) { + err = sony_call_snc_handle(sony_rfkill_handle, + sony_rfkill_address[nc_type], + &result); + if (err < 0) { rfkill_destroy(rfk); - return -1; + return err; } swblock = !(result & 0x2); diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 3887dfeafc96..117be48ff4de 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -310,8 +310,7 @@ static struct { enum { TP_HOTKEY_TABLET_NONE = 0, TP_HOTKEY_TABLET_USES_MHKG, - /* X1 Yoga 2016, seen on BIOS N1FET44W */ - TP_HOTKEY_TABLET_USES_CMMD, + TP_HOTKEY_TABLET_USES_GMMS, } hotkey_tablet; u32 kbdlight:1; u32 light:1; @@ -2044,8 +2043,28 @@ static void hotkey_poll_setup(const bool may_warn); /* HKEY.MHKG() return bits */ #define TP_HOTKEY_TABLET_MASK (1 << 3) -/* ThinkPad X1 Yoga (2016) */ -#define TP_EC_CMMD_TABLET_MODE 0x6 +enum { + TP_ACPI_MULTI_MODE_INVALID = 0, + TP_ACPI_MULTI_MODE_UNKNOWN = 1 << 0, + TP_ACPI_MULTI_MODE_LAPTOP = 1 << 1, + TP_ACPI_MULTI_MODE_TABLET = 1 << 2, + TP_ACPI_MULTI_MODE_FLAT = 1 << 3, + TP_ACPI_MULTI_MODE_STAND = 1 << 4, + TP_ACPI_MULTI_MODE_TENT = 1 << 5, + TP_ACPI_MULTI_MODE_STAND_TENT = 1 << 6, +}; + +enum { + /* The following modes are considered tablet mode 
for the purpose of + * reporting the status to userspace. i.e. in all these modes it makes + * sense to disable the laptop input devices such as touchpad and + * keyboard. + */ + TP_ACPI_MULTI_MODE_TABLET_LIKE = TP_ACPI_MULTI_MODE_TABLET | + TP_ACPI_MULTI_MODE_STAND | + TP_ACPI_MULTI_MODE_TENT | + TP_ACPI_MULTI_MODE_STAND_TENT, +}; static int hotkey_get_wlsw(void) { @@ -2066,6 +2085,90 @@ static int hotkey_get_wlsw(void) return (status) ? TPACPI_RFK_RADIO_ON : TPACPI_RFK_RADIO_OFF; } +static int hotkey_gmms_get_tablet_mode(int s, int *has_tablet_mode) +{ + int type = (s >> 16) & 0xffff; + int value = s & 0xffff; + int mode = TP_ACPI_MULTI_MODE_INVALID; + int valid_modes = 0; + + if (has_tablet_mode) + *has_tablet_mode = 0; + + switch (type) { + case 1: + valid_modes = TP_ACPI_MULTI_MODE_LAPTOP | + TP_ACPI_MULTI_MODE_TABLET | + TP_ACPI_MULTI_MODE_STAND_TENT; + break; + case 2: + valid_modes = TP_ACPI_MULTI_MODE_LAPTOP | + TP_ACPI_MULTI_MODE_FLAT | + TP_ACPI_MULTI_MODE_TABLET | + TP_ACPI_MULTI_MODE_STAND | + TP_ACPI_MULTI_MODE_TENT; + break; + case 3: + valid_modes = TP_ACPI_MULTI_MODE_LAPTOP | + TP_ACPI_MULTI_MODE_FLAT; + break; + case 4: + valid_modes = TP_ACPI_MULTI_MODE_LAPTOP | + TP_ACPI_MULTI_MODE_TABLET | + TP_ACPI_MULTI_MODE_STAND | + TP_ACPI_MULTI_MODE_TENT; + break; + case 5: + valid_modes = TP_ACPI_MULTI_MODE_LAPTOP | + TP_ACPI_MULTI_MODE_FLAT | + TP_ACPI_MULTI_MODE_TABLET | + TP_ACPI_MULTI_MODE_STAND | + TP_ACPI_MULTI_MODE_TENT; + break; + default: + pr_err("Unknown multi mode status type %d with value 0x%04X, please report this to %s\n", + type, value, TPACPI_MAIL); + return 0; + } + + if (has_tablet_mode && (valid_modes & TP_ACPI_MULTI_MODE_TABLET_LIKE)) + *has_tablet_mode = 1; + + switch (value) { + case 1: + mode = TP_ACPI_MULTI_MODE_LAPTOP; + break; + case 2: + mode = TP_ACPI_MULTI_MODE_FLAT; + break; + case 3: + mode = TP_ACPI_MULTI_MODE_TABLET; + break; + case 4: + if (type == 1) + mode = TP_ACPI_MULTI_MODE_STAND_TENT; + else + mode = TP_ACPI_MULTI_MODE_STAND; + break; + case 5: + mode = TP_ACPI_MULTI_MODE_TENT; + break; + default: + if (type == 5 && value == 0xffff) { + pr_warn("Multi mode status is undetected, assuming laptop\n"); + return 0; + } + } + + if (!(mode & valid_modes)) { + pr_err("Unknown/reserved multi mode value 0x%04X for type %d, please report this to %s\n", + value, type, TPACPI_MAIL); + return 0; + } + + return !!(mode & TP_ACPI_MULTI_MODE_TABLET_LIKE); +} + static int hotkey_get_tablet_mode(int *status) { int s; @@ -2077,11 +2180,11 @@ static int hotkey_get_tablet_mode(int *status) *status = ((s & TP_HOTKEY_TABLET_MASK) != 0); break; - case TP_HOTKEY_TABLET_USES_CMMD: - if (!acpi_evalf(ec_handle, &s, "CMMD", "d")) + case TP_HOTKEY_TABLET_USES_GMMS: + if (!acpi_evalf(hkey_handle, &s, "GMMS", "dd", 0)) return -EIO; - *status = (s == TP_EC_CMMD_TABLET_MODE); + *status = hotkey_gmms_get_tablet_mode(s, NULL); break; default: break; @@ -3113,16 +3216,19 @@ static int hotkey_init_tablet_mode(void) int in_tablet_mode = 0, res; char *type = NULL; - if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) { + if (acpi_evalf(hkey_handle, &res, "GMMS", "qdd", 0)) { + int has_tablet_mode; + + in_tablet_mode = hotkey_gmms_get_tablet_mode(res, + &has_tablet_mode); + if (has_tablet_mode) + tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_GMMS; + type = "GMMS"; + } else if (acpi_evalf(hkey_handle, &res, "MHKG", "qd")) { /* For X41t, X60t, X61t Tablets... 
*/ tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_MHKG; in_tablet_mode = !!(res & TP_HOTKEY_TABLET_MASK); type = "MHKG"; - } else if (acpi_evalf(ec_handle, &res, "CMMD", "qd")) { - /* For X1 Yoga (2016) */ - tp_features.hotkey_tablet = TP_HOTKEY_TABLET_USES_CMMD; - in_tablet_mode = res == TP_EC_CMMD_TABLET_MODE; - type = "CMMD"; } if (!tp_features.hotkey_tablet) diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 0765b1797d4c..791449a2370f 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c @@ -33,17 +33,20 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/types.h> +#include <linux/acpi.h> #include <linux/device.h> +#include <linux/init.h> +#include <linux/kernel.h> #include <linux/list.h> -#include <linux/acpi.h> -#include <linux/slab.h> +#include <linux/miscdevice.h> #include <linux/module.h> #include <linux/platform_device.h> -#include <linux/wmi.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/uaccess.h> #include <linux/uuid.h> +#include <linux/wmi.h> +#include <uapi/linux/wmi.h> ACPI_MODULE_NAME("wmi"); MODULE_AUTHOR("Carlos Corbacho"); @@ -69,9 +72,12 @@ struct wmi_block { struct wmi_device dev; struct list_head list; struct guid_block gblock; + struct miscdevice char_dev; + struct mutex char_mutex; struct acpi_device *acpi_device; wmi_notify_handler handler; void *handler_data; + u64 req_buf_size; bool read_takes_no_args; }; @@ -188,6 +194,25 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable) /* * Exported WMI functions */ + +/** + * set_required_buffer_size - Sets the buffer size needed for performing IOCTL + * @wdev: A wmi bus device from a driver + * @instance: Instance index + * + * Allocates memory needed for buffer, stores the buffer size in that memory + */ +int set_required_buffer_size(struct wmi_device *wdev, u64 length) +{ + struct wmi_block *wblock; + + wblock = container_of(wdev, struct wmi_block, dev); + wblock->req_buf_size = length; + + return 0; +} +EXPORT_SYMBOL_GPL(set_required_buffer_size); + /** * wmi_evaluate_method - Evaluate a WMI method * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba @@ -201,6 +226,28 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable) acpi_status wmi_evaluate_method(const char *guid_string, u8 instance, u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out) { + struct wmi_block *wblock = NULL; + + if (!find_guid(guid_string, &wblock)) + return AE_ERROR; + return wmidev_evaluate_method(&wblock->dev, instance, method_id, + in, out); +} +EXPORT_SYMBOL_GPL(wmi_evaluate_method); + +/** + * wmidev_evaluate_method - Evaluate a WMI method + * @wdev: A wmi bus device from a driver + * @instance: Instance index + * @method_id: Method ID to call + * &in: Buffer containing input for the method call + * &out: Empty buffer to return the method results + * + * Call an ACPI-WMI method + */ +acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance, + u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out) +{ struct guid_block *block = NULL; struct wmi_block *wblock = NULL; acpi_handle handle; @@ -209,9 +256,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out) union acpi_object params[3]; char method[5] = "WM"; - if (!find_guid(guid_string, &wblock)) - return AE_ERROR; - + wblock = container_of(wdev, struct wmi_block, dev); block = &wblock->gblock; handle = 
wblock->acpi_device->handle; @@ -246,7 +291,7 @@ u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out) return status; } -EXPORT_SYMBOL_GPL(wmi_evaluate_method); +EXPORT_SYMBOL_GPL(wmidev_evaluate_method); static acpi_status __query_block(struct wmi_block *wblock, u8 instance, struct acpi_buffer *out) @@ -348,23 +393,6 @@ union acpi_object *wmidev_block_query(struct wmi_device *wdev, u8 instance) } EXPORT_SYMBOL_GPL(wmidev_block_query); -struct wmi_device *wmidev_get_other_guid(struct wmi_device *wdev, - const char *guid_string) -{ - struct wmi_block *this_wb = container_of(wdev, struct wmi_block, dev); - struct wmi_block *other_wb; - - if (!find_guid(guid_string, &other_wb)) - return NULL; - - if (other_wb->acpi_device != this_wb->acpi_device) - return NULL; - - get_device(&other_wb->dev.dev); - return &other_wb->dev; -} -EXPORT_SYMBOL_GPL(wmidev_get_other_guid); - /** * wmi_set_block - Write to a WMI block * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba @@ -761,6 +789,113 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver) return 0; } +static int wmi_char_open(struct inode *inode, struct file *filp) +{ + const char *driver_name = filp->f_path.dentry->d_iname; + struct wmi_block *wblock = NULL; + struct wmi_block *next = NULL; + + list_for_each_entry_safe(wblock, next, &wmi_block_list, list) { + if (!wblock->dev.dev.driver) + continue; + if (strcmp(driver_name, wblock->dev.dev.driver->name) == 0) { + filp->private_data = wblock; + break; + } + } + + if (!filp->private_data) + return -ENODEV; + + return nonseekable_open(inode, filp); +} + +static ssize_t wmi_char_read(struct file *filp, char __user *buffer, + size_t length, loff_t *offset) +{ + struct wmi_block *wblock = filp->private_data; + + return simple_read_from_buffer(buffer, length, offset, + &wblock->req_buf_size, + sizeof(wblock->req_buf_size)); +} + +static long wmi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct wmi_ioctl_buffer __user *input = + (struct wmi_ioctl_buffer __user *) arg; + struct wmi_block *wblock = filp->private_data; + struct wmi_ioctl_buffer *buf = NULL; + struct wmi_driver *wdriver = NULL; + int ret; + + if (_IOC_TYPE(cmd) != WMI_IOC) + return -ENOTTY; + + /* make sure we're not calling a higher instance than exists*/ + if (_IOC_NR(cmd) >= wblock->gblock.instance_count) + return -EINVAL; + + mutex_lock(&wblock->char_mutex); + buf = wblock->handler_data; + if (get_user(buf->length, &input->length)) { + dev_dbg(&wblock->dev.dev, "Read length from user failed\n"); + ret = -EFAULT; + goto out_ioctl; + } + /* if it's too small, abort */ + if (buf->length < wblock->req_buf_size) { + dev_err(&wblock->dev.dev, + "Buffer %lld too small, need at least %lld\n", + buf->length, wblock->req_buf_size); + ret = -EINVAL; + goto out_ioctl; + } + /* if it's too big, warn, driver will only use what is needed */ + if (buf->length > wblock->req_buf_size) + dev_warn(&wblock->dev.dev, + "Buffer %lld is bigger than required %lld\n", + buf->length, wblock->req_buf_size); + + /* copy the structure from userspace */ + if (copy_from_user(buf, input, wblock->req_buf_size)) { + dev_dbg(&wblock->dev.dev, "Copy %llu from user failed\n", + wblock->req_buf_size); + ret = -EFAULT; + goto out_ioctl; + } + + /* let the driver do any filtering and do the call */ + wdriver = container_of(wblock->dev.dev.driver, + struct wmi_driver, driver); + if (!try_module_get(wdriver->driver.owner)) { + ret = -EBUSY; + goto out_ioctl; + } + ret = 
wdriver->filter_callback(&wblock->dev, cmd, buf); + module_put(wdriver->driver.owner); + if (ret) + goto out_ioctl; + + /* return the result (only up to our internal buffer size) */ + if (copy_to_user(input, buf, wblock->req_buf_size)) { + dev_dbg(&wblock->dev.dev, "Copy %llu to user failed\n", + wblock->req_buf_size); + ret = -EFAULT; + } + +out_ioctl: + mutex_unlock(&wblock->char_mutex); + return ret; +} + +static const struct file_operations wmi_fops = { + .owner = THIS_MODULE, + .read = wmi_char_read, + .open = wmi_char_open, + .unlocked_ioctl = wmi_ioctl, + .compat_ioctl = wmi_ioctl, +}; static int wmi_dev_probe(struct device *dev) { @@ -768,16 +903,63 @@ static int wmi_dev_probe(struct device *dev) struct wmi_driver *wdriver = container_of(dev->driver, struct wmi_driver, driver); int ret = 0; + int count; + char *buf; if (ACPI_FAILURE(wmi_method_enable(wblock, 1))) dev_warn(dev, "failed to enable device -- probing anyway\n"); if (wdriver->probe) { ret = wdriver->probe(dev_to_wdev(dev)); - if (ret != 0 && ACPI_FAILURE(wmi_method_enable(wblock, 0))) - dev_warn(dev, "failed to disable device\n"); + if (ret != 0) + goto probe_failure; + } + + /* driver wants a character device made */ + if (wdriver->filter_callback) { + /* check that required buffer size declared by driver or MOF */ + if (!wblock->req_buf_size) { + dev_err(&wblock->dev.dev, + "Required buffer size not set\n"); + ret = -EINVAL; + goto probe_failure; + } + + count = get_order(wblock->req_buf_size); + wblock->handler_data = (void *)__get_free_pages(GFP_KERNEL, + count); + if (!wblock->handler_data) { + ret = -ENOMEM; + goto probe_failure; + } + + buf = kmalloc(strlen(wdriver->driver.name) + 4, GFP_KERNEL); + if (!buf) { + ret = -ENOMEM; + goto probe_string_failure; + } + sprintf(buf, "wmi/%s", wdriver->driver.name); + wblock->char_dev.minor = MISC_DYNAMIC_MINOR; + wblock->char_dev.name = buf; + wblock->char_dev.fops = &wmi_fops; + wblock->char_dev.mode = 0444; + ret = misc_register(&wblock->char_dev); + if (ret) { + dev_warn(dev, "failed to register char dev: %d", ret); + ret = -ENOMEM; + goto probe_misc_failure; + } } + return 0; + +probe_misc_failure: + kfree(buf); +probe_string_failure: + kfree(wblock->handler_data); +probe_failure: + if (ACPI_FAILURE(wmi_method_enable(wblock, 0))) + dev_warn(dev, "failed to disable device\n"); return ret; } @@ -788,6 +970,13 @@ static int wmi_dev_remove(struct device *dev) container_of(dev->driver, struct wmi_driver, driver); int ret = 0; + if (wdriver->filter_callback) { + misc_deregister(&wblock->char_dev); + kfree(wblock->char_dev.name); + free_pages((unsigned long)wblock->handler_data, + get_order(wblock->req_buf_size)); + } + if (wdriver->remove) ret = wdriver->remove(dev_to_wdev(dev)); @@ -844,6 +1033,7 @@ static int wmi_create_device(struct device *wmi_bus_dev, if (gblock->flags & ACPI_WMI_METHOD) { wblock->dev.dev.type = &wmi_type_method; + mutex_init(&wblock->char_mutex); goto out_init; } @@ -1145,7 +1335,7 @@ static int acpi_wmi_remove(struct platform_device *device) acpi_remove_address_space_handler(acpi_device->handle, ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler); wmi_free_devices(acpi_device); - device_unregister((struct device *)dev_get_drvdata(&device->dev)); + device_destroy(&wmi_bus_class, MKDEV(0, 0)); return 0; } @@ -1199,7 +1389,7 @@ static int acpi_wmi_probe(struct platform_device *device) return 0; err_remove_busdev: - device_unregister(wmi_bus_dev); + device_destroy(&wmi_bus_class, MKDEV(0, 0)); err_remove_notify_handler: 
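A quick sizing check for the allocation above, assuming 4 KiB pages: a required buffer of 32 KiB gives get_order(32768) = 3, so __get_free_pages() returns a contiguous block of 2^3 = 8 pages (exactly 32 KiB), while a 4 KiB requirement gives order 0, a single page. Sizes that are not an exact power-of-two multiple of the page size are rounded up to the next order.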
acpi_remove_notify_handler(acpi_device->handle, ACPI_DEVICE_NOTIFY, @@ -1264,8 +1454,8 @@ err_unreg_class: static void __exit acpi_wmi_exit(void) { platform_driver_unregister(&acpi_wmi_driver); - class_unregister(&wmi_bus_class); bus_unregister(&wmi_bus_type); + class_unregister(&wmi_bus_class); } subsys_initcall(acpi_wmi_init); diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c index 75db585a2a94..acd3ce8ecf3f 100644 --- a/drivers/pwm/pwm-atmel-tcb.c +++ b/drivers/pwm/pwm-atmel-tcb.c @@ -37,11 +37,20 @@ struct atmel_tcb_pwm_device { unsigned period; /* PWM period expressed in clk cycles */ }; +struct atmel_tcb_channel { + u32 enabled; + u32 cmr; + u32 ra; + u32 rb; + u32 rc; +}; + struct atmel_tcb_pwm_chip { struct pwm_chip chip; spinlock_t lock; struct atmel_tc *tc; struct atmel_tcb_pwm_device *pwms[NPWM]; + struct atmel_tcb_channel bkup[NPWM / 2]; }; static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip) @@ -175,12 +184,15 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) * Use software trigger to apply the new setting. * If both PWM devices in this group are disabled we stop the clock. */ - if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC))) + if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC))) { __raw_writel(ATMEL_TC_SWTRG | ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(group, CCR)); - else + tcbpwmc->bkup[group].enabled = 1; + } else { __raw_writel(ATMEL_TC_SWTRG, regs + ATMEL_TC_REG(group, CCR)); + tcbpwmc->bkup[group].enabled = 0; + } spin_unlock(&tcbpwmc->lock); } @@ -263,6 +275,7 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) /* Use software trigger to apply the new setting */ __raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs + ATMEL_TC_REG(group, CCR)); + tcbpwmc->bkup[group].enabled = 1; spin_unlock(&tcbpwmc->lock); return 0; } @@ -445,10 +458,56 @@ static const struct of_device_id atmel_tcb_pwm_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids); +#ifdef CONFIG_PM_SLEEP +static int atmel_tcb_pwm_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev); + void __iomem *base = tcbpwm->tc->regs; + int i; + + for (i = 0; i < (NPWM / 2); i++) { + struct atmel_tcb_channel *chan = &tcbpwm->bkup[i]; + + chan->cmr = readl(base + ATMEL_TC_REG(i, CMR)); + chan->ra = readl(base + ATMEL_TC_REG(i, RA)); + chan->rb = readl(base + ATMEL_TC_REG(i, RB)); + chan->rc = readl(base + ATMEL_TC_REG(i, RC)); + } + return 0; +} + +static int atmel_tcb_pwm_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev); + void __iomem *base = tcbpwm->tc->regs; + int i; + + for (i = 0; i < (NPWM / 2); i++) { + struct atmel_tcb_channel *chan = &tcbpwm->bkup[i]; + + writel(chan->cmr, base + ATMEL_TC_REG(i, CMR)); + writel(chan->ra, base + ATMEL_TC_REG(i, RA)); + writel(chan->rb, base + ATMEL_TC_REG(i, RB)); + writel(chan->rc, base + ATMEL_TC_REG(i, RC)); + if (chan->enabled) { + writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, + base + ATMEL_TC_REG(i, CCR)); + } + } + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(atmel_tcb_pwm_pm_ops, atmel_tcb_pwm_suspend, + atmel_tcb_pwm_resume); + static struct platform_driver atmel_tcb_pwm_driver = { .driver = { .name = "atmel-tcb-pwm", .of_match_table = atmel_tcb_pwm_dt_ids, + .pm = &atmel_tcb_pwm_pm_ops, }, .probe = atmel_tcb_pwm_probe, .remove = atmel_tcb_pwm_remove, diff --git 
a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c index 2fb30deee345..815f5333bb8f 100644 --- a/drivers/pwm/pwm-img.c +++ b/drivers/pwm/pwm-img.c @@ -18,6 +18,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/pwm.h> #include <linux/regmap.h> #include <linux/slab.h> @@ -39,6 +40,8 @@ #define PERIP_PWM_PDM_CONTROL_CH_MASK 0x1 #define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch) ((ch) * 4) +#define IMG_PWM_PM_TIMEOUT 1000 /* ms */ + /* * PWM period is specified with a timebase register, * in number of step periods. The PWM duty cycle is also @@ -52,6 +55,8 @@ */ #define MIN_TMBASE_STEPS 16 +#define IMG_PWM_NPWM 4 + struct img_pwm_soc_data { u32 max_timebase; }; @@ -66,6 +71,8 @@ struct img_pwm_chip { int max_period_ns; int min_period_ns; const struct img_pwm_soc_data *data; + u32 suspend_ctrl_cfg; + u32 suspend_ch_cfg[IMG_PWM_NPWM]; }; static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip) @@ -92,6 +99,7 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, unsigned long mul, output_clk_hz, input_clk_hz; struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip); unsigned int max_timebase = pwm_chip->data->max_timebase; + int ret; if (period_ns < pwm_chip->min_period_ns || period_ns > pwm_chip->max_period_ns) { @@ -123,6 +131,10 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, duty = DIV_ROUND_UP(timebase * duty_ns, period_ns); + ret = pm_runtime_get_sync(chip->dev); + if (ret < 0) + return ret; + val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm)); val |= (div & PWM_CTRL_CFG_DIV_MASK) << @@ -133,6 +145,9 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, (timebase << PWM_CH_CFG_TMBASE_SHIFT); img_pwm_writel(pwm_chip, PWM_CH_CFG(pwm->hwpwm), val); + pm_runtime_mark_last_busy(chip->dev); + pm_runtime_put_autosuspend(chip->dev); + return 0; } @@ -140,6 +155,11 @@ static int img_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) { u32 val; struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip); + int ret; + + ret = pm_runtime_get_sync(chip->dev); + if (ret < 0) + return ret; val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); val |= BIT(pwm->hwpwm); @@ -160,6 +180,9 @@ static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); val &= ~BIT(pwm->hwpwm); img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val); + + pm_runtime_mark_last_busy(chip->dev); + pm_runtime_put_autosuspend(chip->dev); } static const struct pwm_ops img_pwm_ops = { @@ -182,6 +205,37 @@ static const struct of_device_id img_pwm_of_match[] = { }; MODULE_DEVICE_TABLE(of, img_pwm_of_match); +static int img_pwm_runtime_suspend(struct device *dev) +{ + struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev); + + clk_disable_unprepare(pwm_chip->pwm_clk); + clk_disable_unprepare(pwm_chip->sys_clk); + + return 0; +} + +static int img_pwm_runtime_resume(struct device *dev) +{ + struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(pwm_chip->sys_clk); + if (ret < 0) { + dev_err(dev, "could not prepare or enable sys clock\n"); + return ret; + } + + ret = clk_prepare_enable(pwm_chip->pwm_clk); + if (ret < 0) { + dev_err(dev, "could not prepare or enable pwm clock\n"); + clk_disable_unprepare(pwm_chip->sys_clk); + return ret; + } + + return 0; +} + static int img_pwm_probe(struct platform_device *pdev) { int ret; @@ -224,23 +278,20 @@ 
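The changes to img_pwm_config(), img_pwm_enable() and img_pwm_disable() follow the standard runtime PM autosuspend idiom: take a reference around every register access and let the core power the block down IMG_PWM_PM_TIMEOUT milliseconds after the last user. Stripped of the PWM specifics, the pattern looks roughly like this; the callback name is a placeholder, not code from this driver:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_touch_hw(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev);         /* resume the device if needed */
        if (ret < 0) {
                pm_runtime_put_noidle(dev);     /* keep the usage count balanced */
                return ret;
        }

        /* ... read or write hardware registers here ... */

        pm_runtime_mark_last_busy(dev);         /* restart the autosuspend timer */
        pm_runtime_put_autosuspend(dev);        /* idle after the configured delay */
        return 0;
}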
static int img_pwm_probe(struct platform_device *pdev) return PTR_ERR(pwm->pwm_clk); } - ret = clk_prepare_enable(pwm->sys_clk); - if (ret < 0) { - dev_err(&pdev->dev, "could not prepare or enable sys clock\n"); - return ret; - } - - ret = clk_prepare_enable(pwm->pwm_clk); - if (ret < 0) { - dev_err(&pdev->dev, "could not prepare or enable pwm clock\n"); - goto disable_sysclk; + pm_runtime_set_autosuspend_delay(&pdev->dev, IMG_PWM_PM_TIMEOUT); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_enable(&pdev->dev); + if (!pm_runtime_enabled(&pdev->dev)) { + ret = img_pwm_runtime_resume(&pdev->dev); + if (ret) + goto err_pm_disable; } clk_rate = clk_get_rate(pwm->pwm_clk); if (!clk_rate) { dev_err(&pdev->dev, "pwm clock has no frequency\n"); ret = -EINVAL; - goto disable_pwmclk; + goto err_suspend; } /* The maximum input clock divider is 512 */ @@ -255,21 +306,23 @@ static int img_pwm_probe(struct platform_device *pdev) pwm->chip.dev = &pdev->dev; pwm->chip.ops = &img_pwm_ops; pwm->chip.base = -1; - pwm->chip.npwm = 4; + pwm->chip.npwm = IMG_PWM_NPWM; ret = pwmchip_add(&pwm->chip); if (ret < 0) { dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret); - goto disable_pwmclk; + goto err_suspend; } platform_set_drvdata(pdev, pwm); return 0; -disable_pwmclk: - clk_disable_unprepare(pwm->pwm_clk); -disable_sysclk: - clk_disable_unprepare(pwm->sys_clk); +err_suspend: + if (!pm_runtime_enabled(&pdev->dev)) + img_pwm_runtime_suspend(&pdev->dev); +err_pm_disable: + pm_runtime_disable(&pdev->dev); + pm_runtime_dont_use_autosuspend(&pdev->dev); return ret; } @@ -278,6 +331,11 @@ static int img_pwm_remove(struct platform_device *pdev) struct img_pwm_chip *pwm_chip = platform_get_drvdata(pdev); u32 val; unsigned int i; + int ret; + + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) + return ret; for (i = 0; i < pwm_chip->chip.npwm; i++) { val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); @@ -285,15 +343,79 @@ static int img_pwm_remove(struct platform_device *pdev) img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val); } - clk_disable_unprepare(pwm_chip->pwm_clk); - clk_disable_unprepare(pwm_chip->sys_clk); + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); + if (!pm_runtime_status_suspended(&pdev->dev)) + img_pwm_runtime_suspend(&pdev->dev); return pwmchip_remove(&pwm_chip->chip); } +#ifdef CONFIG_PM_SLEEP +static int img_pwm_suspend(struct device *dev) +{ + struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev); + int i, ret; + + if (pm_runtime_status_suspended(dev)) { + ret = img_pwm_runtime_resume(dev); + if (ret) + return ret; + } + + for (i = 0; i < pwm_chip->chip.npwm; i++) + pwm_chip->suspend_ch_cfg[i] = img_pwm_readl(pwm_chip, + PWM_CH_CFG(i)); + + pwm_chip->suspend_ctrl_cfg = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); + + img_pwm_runtime_suspend(dev); + + return 0; +} + +static int img_pwm_resume(struct device *dev) +{ + struct img_pwm_chip *pwm_chip = dev_get_drvdata(dev); + int ret; + int i; + + ret = img_pwm_runtime_resume(dev); + if (ret) + return ret; + + for (i = 0; i < pwm_chip->chip.npwm; i++) + img_pwm_writel(pwm_chip, PWM_CH_CFG(i), + pwm_chip->suspend_ch_cfg[i]); + + img_pwm_writel(pwm_chip, PWM_CTRL_CFG, pwm_chip->suspend_ctrl_cfg); + + for (i = 0; i < pwm_chip->chip.npwm; i++) + if (pwm_chip->suspend_ctrl_cfg & BIT(i)) + regmap_update_bits(pwm_chip->periph_regs, + PERIP_PWM_PDM_CONTROL, + PERIP_PWM_PDM_CONTROL_CH_MASK << + PERIP_PWM_PDM_CONTROL_CH_SHIFT(i), + 0); + + if (pm_runtime_status_suspended(dev)) + img_pwm_runtime_suspend(dev); + + return 0; +} +#endif /* CONFIG_PM */ + 
+static const struct dev_pm_ops img_pwm_pm_ops = { + SET_RUNTIME_PM_OPS(img_pwm_runtime_suspend, + img_pwm_runtime_resume, + NULL) + SET_SYSTEM_SLEEP_PM_OPS(img_pwm_suspend, img_pwm_resume) +}; + static struct platform_driver img_pwm_driver = { .driver = { .name = "img-pwm", + .pm = &img_pwm_pm_ops, .of_match_table = img_pwm_of_match, }, .probe = img_pwm_probe, diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c index b52f3afb2ba1..f5d97e0ad52b 100644 --- a/drivers/pwm/pwm-mediatek.c +++ b/drivers/pwm/pwm-mediatek.c @@ -16,6 +16,7 @@ #include <linux/module.h> #include <linux/clk.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pwm.h> #include <linux/slab.h> @@ -40,11 +41,19 @@ enum { MTK_CLK_PWM3, MTK_CLK_PWM4, MTK_CLK_PWM5, + MTK_CLK_PWM6, + MTK_CLK_PWM7, + MTK_CLK_PWM8, MTK_CLK_MAX, }; -static const char * const mtk_pwm_clk_name[] = { - "main", "top", "pwm1", "pwm2", "pwm3", "pwm4", "pwm5" +static const char * const mtk_pwm_clk_name[MTK_CLK_MAX] = { + "main", "top", "pwm1", "pwm2", "pwm3", "pwm4", "pwm5", "pwm6", "pwm7", + "pwm8" +}; + +struct mtk_pwm_platform_data { + unsigned int num_pwms; }; /** @@ -59,6 +68,10 @@ struct mtk_pwm_chip { struct clk *clks[MTK_CLK_MAX]; }; +static const unsigned int mtk_pwm_reg_offset[] = { + 0x0010, 0x0050, 0x0090, 0x00d0, 0x0110, 0x0150, 0x0190, 0x0220 +}; + static inline struct mtk_pwm_chip *to_mtk_pwm_chip(struct pwm_chip *chip) { return container_of(chip, struct mtk_pwm_chip, chip); @@ -103,14 +116,14 @@ static void mtk_pwm_clk_disable(struct pwm_chip *chip, struct pwm_device *pwm) static inline u32 mtk_pwm_readl(struct mtk_pwm_chip *chip, unsigned int num, unsigned int offset) { - return readl(chip->regs + 0x10 + (num * 0x40) + offset); + return readl(chip->regs + mtk_pwm_reg_offset[num] + offset); } static inline void mtk_pwm_writel(struct mtk_pwm_chip *chip, unsigned int num, unsigned int offset, u32 value) { - writel(value, chip->regs + 0x10 + (num * 0x40) + offset); + writel(value, chip->regs + mtk_pwm_reg_offset[num] + offset); } static int mtk_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, @@ -185,6 +198,7 @@ static const struct pwm_ops mtk_pwm_ops = { static int mtk_pwm_probe(struct platform_device *pdev) { + const struct mtk_pwm_platform_data *data; struct mtk_pwm_chip *pc; struct resource *res; unsigned int i; @@ -194,15 +208,22 @@ static int mtk_pwm_probe(struct platform_device *pdev) if (!pc) return -ENOMEM; + data = of_device_get_match_data(&pdev->dev); + if (data == NULL) + return -EINVAL; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pc->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pc->regs)) return PTR_ERR(pc->regs); - for (i = 0; i < MTK_CLK_MAX; i++) { + for (i = 0; i < data->num_pwms + 2; i++) { pc->clks[i] = devm_clk_get(&pdev->dev, mtk_pwm_clk_name[i]); - if (IS_ERR(pc->clks[i])) + if (IS_ERR(pc->clks[i])) { + dev_err(&pdev->dev, "clock: %s fail: %ld\n", + mtk_pwm_clk_name[i], PTR_ERR(pc->clks[i])); return PTR_ERR(pc->clks[i]); + } } platform_set_drvdata(pdev, pc); @@ -210,7 +231,7 @@ static int mtk_pwm_probe(struct platform_device *pdev) pc->chip.dev = &pdev->dev; pc->chip.ops = &mtk_pwm_ops; pc->chip.base = -1; - pc->chip.npwm = 5; + pc->chip.npwm = data->num_pwms; ret = pwmchip_add(&pc->chip); if (ret < 0) { @@ -228,9 +249,23 @@ static int mtk_pwm_remove(struct platform_device *pdev) return pwmchip_remove(&pc->chip); } +static const struct mtk_pwm_platform_data mt2712_pwm_data = { + .num_pwms = 8, +}; + +static const 
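The mtk_pwm_reg_offset[] table replaces the old 0x10 + hwpwm * 0x40 arithmetic because the newly supported eighth channel breaks the pattern: indexes 0 through 6 still land on 0x0010, 0x0050, ... 0x0190 exactly as the formula would, but PWM8 sits at 0x0220 rather than the 0x01d0 the formula predicts, so a per-channel lookup is the simplest way to cover all supported SoCs.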
struct mtk_pwm_platform_data mt7622_pwm_data = { + .num_pwms = 6, +}; + +static const struct mtk_pwm_platform_data mt7623_pwm_data = { + .num_pwms = 5, +}; + static const struct of_device_id mtk_pwm_of_match[] = { - { .compatible = "mediatek,mt7623-pwm" }, - { } + { .compatible = "mediatek,mt2712-pwm", .data = &mt2712_pwm_data }, + { .compatible = "mediatek,mt7622-pwm", .data = &mt7622_pwm_data }, + { .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data }, + { }, }; MODULE_DEVICE_TABLE(of, mtk_pwm_of_match); diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c index 9793b296108f..1ac9e4384142 100644 --- a/drivers/pwm/pwm-stm32-lp.c +++ b/drivers/pwm/pwm-stm32-lp.c @@ -219,8 +219,7 @@ static int stm32_pwm_lp_remove(struct platform_device *pdev) unsigned int i; for (i = 0; i < priv->chip.npwm; i++) - if (pwm_is_enabled(&priv->chip.pwms[i])) - pwm_disable(&priv->chip.pwms[i]); + pwm_disable(&priv->chip.pwms[i]); return pwmchip_remove(&priv->chip); } diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c index 6d23f1d1c9b7..334199c58f1d 100644 --- a/drivers/pwm/pwm-sun4i.c +++ b/drivers/pwm/pwm-sun4i.c @@ -368,14 +368,15 @@ static int sun4i_pwm_probe(struct platform_device *pdev) struct sun4i_pwm_chip *pwm; struct resource *res; int ret; - const struct of_device_id *match; - - match = of_match_device(sun4i_pwm_dt_ids, &pdev->dev); pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL); if (!pwm) return -ENOMEM; + pwm->data = of_device_get_match_data(&pdev->dev); + if (!pwm->data) + return -ENODEV; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pwm->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(pwm->base)) @@ -385,7 +386,6 @@ static int sun4i_pwm_probe(struct platform_device *pdev) if (IS_ERR(pwm->clk)) return PTR_ERR(pwm->clk); - pwm->data = match->data; pwm->chip.dev = &pdev->dev; pwm->chip.ops = &sun4i_pwm_ops; pwm->chip.base = -1; diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index e0e58f3b1420..b59a31b079a5 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -433,6 +433,19 @@ config RTC_DRV_PCF85063 This driver can also be built as a module. If so, the module will be called rtc-pcf85063. +config RTC_DRV_PCF85363 + tristate "NXP PCF85363" + depends on I2C + select REGMAP_I2C + help + If you say yes here you get support for the PCF85363 RTC chip. + + This driver can also be built as a module. If so, the module + will be called rtc-pcf85363. + + The nvmem interface will be named pcf85363-#, where # is the + zero-based instance number. + config RTC_DRV_PCF8563 tristate "Philips PCF8563/Epson RTC8564" help @@ -1174,6 +1187,17 @@ config RTC_DRV_WM8350 This driver can also be built as a module. If so, the module will be called "rtc-wm8350". +config RTC_DRV_SC27XX + tristate "Spreadtrum SC27xx RTC" + depends on MFD_SC27XX_PMIC || COMPILE_TEST + help + If you say Y here you will get support for the RTC subsystem + of the Spreadtrum SC27xx series PMICs. The SC27xx series PMICs + includes the SC2720, SC2721, SC2723, SC2730 and SC2731 chips. + + This driver can also be built as a module. If so, the module + will be called rtc-sc27xx. + config RTC_DRV_SPEAR tristate "SPEAR ST RTC" depends on PLAT_SPEAR || COMPILE_TEST @@ -1706,14 +1730,24 @@ config RTC_DRV_MOXART will be called rtc-moxart config RTC_DRV_MT6397 - tristate "Mediatek Real Time Clock driver" + tristate "MediaTek PMIC based RTC" depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN) help - This selects the Mediatek(R) RTC driver. 
RTC is part of Mediatek + This selects the MediaTek(R) RTC driver. RTC is part of MediaTek MT6397 PMIC. You should enable MT6397 PMIC MFD before select - Mediatek(R) RTC driver. + MediaTek(R) RTC driver. + + If you want to use MediaTek(R) RTC interface, select Y or M here. - If you want to use Mediatek(R) RTC interface, select Y or M here. +config RTC_DRV_MT7622 + tristate "MediaTek SoC based RTC" + depends on ARCH_MEDIATEK || COMPILE_TEST + help + This enables support for the real time clock built in the MediaTek + SoCs. + + This drive can also be built as a module. If so, the module + will be called rtc-mt7622. config RTC_DRV_XGENE tristate "APM X-Gene RTC" diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 0bf1fc02b82c..f2f50c11dc38 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -103,6 +103,7 @@ obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o obj-$(CONFIG_RTC_DRV_VRTC) += rtc-mrst.o obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o obj-$(CONFIG_RTC_DRV_MT6397) += rtc-mt6397.o +obj-$(CONFIG_RTC_DRV_MT7622) += rtc-mt7622.o obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o @@ -114,6 +115,7 @@ obj-$(CONFIG_RTC_DRV_PCF2123) += rtc-pcf2123.o obj-$(CONFIG_RTC_DRV_PCF2127) += rtc-pcf2127.o obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o obj-$(CONFIG_RTC_DRV_PCF85063) += rtc-pcf85063.o +obj-$(CONFIG_RTC_DRV_PCF85363) += rtc-pcf85363.o obj-$(CONFIG_RTC_DRV_PCF8523) += rtc-pcf8523.o obj-$(CONFIG_RTC_DRV_PCF8563) += rtc-pcf8563.o obj-$(CONFIG_RTC_DRV_PCF8583) += rtc-pcf8583.o @@ -144,6 +146,7 @@ obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o obj-$(CONFIG_RTC_DRV_S5M) += rtc-s5m.o obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o +obj-$(CONFIG_RTC_DRV_SC27XX) += rtc-sc27xx.o obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o obj-$(CONFIG_RTC_DRV_SIRFSOC) += rtc-sirfsoc.o obj-$(CONFIG_RTC_DRV_SNVS) += rtc-snvs.o diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 8cec9a02c0b8..672b192f8153 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -779,7 +779,7 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) } timerqueue_add(&rtc->timerqueue, &timer->node); - if (!next) { + if (!next || ktime_before(timer->node.expires, next->expires)) { struct rtc_wkalrm alarm; int err; alarm.time = rtc_ktime_to_tm(timer->node.expires); @@ -1004,6 +1004,10 @@ int rtc_read_offset(struct rtc_device *rtc, long *offset) * to compensate for differences in the actual clock rate due to temperature, * the crystal, capacitor, etc. * + * The adjustment applied is as follows: + * t = t0 * (1 + offset * 1e-9) + * where t0 is the measured length of 1 RTC second with offset = 0 + * * Kernel interface to adjust an rtc clock offset. * Return 0 on success, or a negative number on error. 
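A worked example of the adjustment formula documented above: for an RTC that gains roughly 10 ppm (each of its seconds, t0, is about 10 microseconds short of nominal), the correction is offset = (t / t0 - 1) * 1e9, which is approximately +10000 ppb, stretching each RTC second back to 1 s; a clock that loses about 10 ppm needs roughly -10000. Userspace typically drives this through the RTC's sysfs offset attribute, which ends up in this rtc_set_offset() interface.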
* If the rtc offset is not setable (or not implemented), return -EINVAL diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c index fea9a60b06cf..b033bc556f5d 100644 --- a/drivers/rtc/rtc-abx80x.c +++ b/drivers/rtc/rtc-abx80x.c @@ -614,12 +614,12 @@ static int abx80x_probe(struct i2c_client *client, if (err) return err; - rtc = devm_rtc_device_register(&client->dev, "abx8xx", - &abx80x_rtc_ops, THIS_MODULE); - + rtc = devm_rtc_allocate_device(&client->dev); if (IS_ERR(rtc)) return PTR_ERR(rtc); + rtc->ops = &abx80x_rtc_ops; + i2c_set_clientdata(client, rtc); if (client->irq > 0) { @@ -646,10 +646,14 @@ static int abx80x_probe(struct i2c_client *client, err = devm_add_action_or_reset(&client->dev, rtc_calib_remove_sysfs_group, &client->dev); - if (err) + if (err) { dev_err(&client->dev, "Failed to add sysfs cleanup action: %d\n", err); + return err; + } + + err = rtc_register_device(rtc); return err; } diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c index 21f355c37eab..1e4978c96ffd 100644 --- a/drivers/rtc/rtc-armada38x.c +++ b/drivers/rtc/rtc-armada38x.c @@ -28,6 +28,8 @@ #define RTC_IRQ_AL_EN BIT(0) #define RTC_IRQ_FREQ_EN BIT(1) #define RTC_IRQ_FREQ_1HZ BIT(2) +#define RTC_CCR 0x18 +#define RTC_CCR_MODE BIT(15) #define RTC_TIME 0xC #define RTC_ALARM1 0x10 @@ -343,18 +345,117 @@ static irqreturn_t armada38x_rtc_alarm_irq(int irq, void *data) return IRQ_HANDLED; } +/* + * The information given in the Armada 388 functional spec is complex. + * They give two different formulas for calculating the offset value, + * but when considering "Offset" as an 8-bit signed integer, they both + * reduce down to (we shall rename "Offset" as "val" here): + * + * val = (f_ideal / f_measured - 1) / resolution where f_ideal = 32768 + * + * Converting to time, f = 1/t: + * val = (t_measured / t_ideal - 1) / resolution where t_ideal = 1/32768 + * + * => t_measured / t_ideal = val * resolution + 1 + * + * "offset" in the RTC interface is defined as: + * t = t0 * (1 + offset * 1e-9) + * where t is the desired period, t0 is the measured period with a zero + * offset, which is t_measured above. With t0 = t_measured and t = t_ideal, + * offset = (t_ideal / t_measured - 1) / 1e-9 + * + * => t_ideal / t_measured = offset * 1e-9 + 1 + * + * so: + * + * offset * 1e-9 + 1 = 1 / (val * resolution + 1) + * + * We want "resolution" to be an integer, so resolution = R * 1e-9, giving + * offset = 1e18 / (val * R + 1e9) - 1e9 + * val = (1e18 / (offset + 1e9) - 1e9) / R + * with a common transformation: + * f(x) = 1e18 / (x + 1e9) - 1e9 + * offset = f(val * R) + * val = f(offset) / R + * + * Armada 38x supports two modes, fine mode (954ppb) and coarse mode (3815ppb). + */ +static long armada38x_ppb_convert(long ppb) +{ + long div = ppb + 1000000000L; + + return div_s64(1000000000000000000LL + div / 2, div) - 1000000000L; +} + +static int armada38x_rtc_read_offset(struct device *dev, long *offset) +{ + struct armada38x_rtc *rtc = dev_get_drvdata(dev); + unsigned long ccr, flags; + long ppb_cor; + + spin_lock_irqsave(&rtc->lock, flags); + ccr = rtc->data->read_rtc_reg(rtc, RTC_CCR); + spin_unlock_irqrestore(&rtc->lock, flags); + + ppb_cor = (ccr & RTC_CCR_MODE ? 
3815 : 954) * (s8)ccr; + /* ppb_cor + 1000000000L can never be zero */ + *offset = armada38x_ppb_convert(ppb_cor); + + return 0; +} + +static int armada38x_rtc_set_offset(struct device *dev, long offset) +{ + struct armada38x_rtc *rtc = dev_get_drvdata(dev); + unsigned long ccr = 0; + long ppb_cor, off; + + /* + * The maximum ppb_cor is -128 * 3815 .. 127 * 3815, but we + * need to clamp the input. This equates to -484270 .. 488558. + * Not only is this to stop out of range "off" but also to + * avoid the division by zero in armada38x_ppb_convert(). + */ + offset = clamp(offset, -484270L, 488558L); + + ppb_cor = armada38x_ppb_convert(offset); + + /* + * Use low update mode where possible, which gives a better + * resolution of correction. + */ + off = DIV_ROUND_CLOSEST(ppb_cor, 954); + if (off > 127 || off < -128) { + ccr = RTC_CCR_MODE; + off = DIV_ROUND_CLOSEST(ppb_cor, 3815); + } + + /* + * Armada 388 requires a bit pattern in bits 14..8 depending on + * the sign bit: { 0, ~S, S, S, S, S, S } + */ + ccr |= (off & 0x3fff) ^ 0x2000; + rtc_delayed_write(ccr, rtc, RTC_CCR); + + return 0; +} + static const struct rtc_class_ops armada38x_rtc_ops = { .read_time = armada38x_rtc_read_time, .set_time = armada38x_rtc_set_time, .read_alarm = armada38x_rtc_read_alarm, .set_alarm = armada38x_rtc_set_alarm, .alarm_irq_enable = armada38x_rtc_alarm_irq_enable, + .read_offset = armada38x_rtc_read_offset, + .set_offset = armada38x_rtc_set_offset, }; static const struct rtc_class_ops armada38x_rtc_ops_noirq = { .read_time = armada38x_rtc_read_time, .set_time = armada38x_rtc_set_time, .read_alarm = armada38x_rtc_read_alarm, + .read_offset = armada38x_rtc_read_offset, + .set_offset = armada38x_rtc_set_offset, }; static const struct armada38x_rtc_data armada38x_data = { diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index e221b78b6f10..de81ecedd571 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c @@ -42,8 +42,6 @@ #define at91_rtc_write(field, val) \ writel_relaxed((val), at91_rtc_regs + field) -#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */ - struct at91_rtc_config { bool use_shadow_imr; }; @@ -51,7 +49,6 @@ struct at91_rtc_config { static const struct at91_rtc_config *at91_rtc_config; static DECLARE_COMPLETION(at91_rtc_updated); static DECLARE_COMPLETION(at91_rtc_upd_rdy); -static unsigned int at91_alarm_year = AT91_RTC_EPOCH; static void __iomem *at91_rtc_regs; static int irq; static DEFINE_SPINLOCK(at91_rtc_lock); @@ -131,8 +128,7 @@ static void at91_rtc_decodetime(unsigned int timereg, unsigned int calreg, /* * The Calendar Alarm register does not have a field for - * the year - so these will return an invalid value. When an - * alarm is set, at91_alarm_year will store the current year. + * the year - so these will return an invalid value. */ tm->tm_year = bcd2bin(date & AT91_RTC_CENT) * 100; /* century */ tm->tm_year += bcd2bin((date & AT91_RTC_YEAR) >> 8); /* year */ @@ -208,15 +204,14 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) struct rtc_time *tm = &alrm->time; at91_rtc_decodetime(AT91_RTC_TIMALR, AT91_RTC_CALALR, tm); - tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); - tm->tm_year = at91_alarm_year - 1900; + tm->tm_year = -1; alrm->enabled = (at91_rtc_read_imr() & AT91_RTC_ALARM) ? 
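The clamp range quoted in armada38x_rtc_set_offset() follows directly from the conversion: in coarse mode the hardware value is an s8 scaled by 3815 ppb, so val = 127 gives ppb_cor = 484505 and offset = 1e18 / (484505 + 1e9) - 1e9, about -484270, while val = -128 gives ppb_cor = -488320 and an offset of about +488558. Requests outside [-484270, 488558] therefore cannot be represented and are clamped before the conversion, which also keeps the divisor away from zero.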
1 : 0; - dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, - 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, - tm->tm_hour, tm->tm_min, tm->tm_sec); + dev_dbg(dev, "%s(): %02d-%02d %02d:%02d:%02d %sabled\n", __func__, + tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec, + alrm->enabled ? "en" : "dis"); return 0; } @@ -230,8 +225,6 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) at91_rtc_decodetime(AT91_RTC_TIMR, AT91_RTC_CALR, &tm); - at91_alarm_year = tm.tm_year; - tm.tm_mon = alrm->time.tm_mon; tm.tm_mday = alrm->time.tm_mday; tm.tm_hour = alrm->time.tm_hour; @@ -255,7 +248,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) } dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, - at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour, + tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); return 0; diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c index 72b22935eb62..d8df2e9e14ad 100644 --- a/drivers/rtc/rtc-ds1305.c +++ b/drivers/rtc/rtc-ds1305.c @@ -514,56 +514,43 @@ static void msg_init(struct spi_message *m, struct spi_transfer *x, spi_message_add_tail(x, m); } -static ssize_t -ds1305_nvram_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, size_t count) +static int ds1305_nvram_read(void *priv, unsigned int off, void *buf, + size_t count) { - struct spi_device *spi; + struct ds1305 *ds1305 = priv; + struct spi_device *spi = ds1305->spi; u8 addr; struct spi_message m; struct spi_transfer x[2]; - int status; - - spi = to_spi_device(kobj_to_dev(kobj)); addr = DS1305_NVRAM + off; msg_init(&m, x, &addr, count, NULL, buf); - status = spi_sync(spi, &m); - if (status < 0) - dev_err(&spi->dev, "nvram %s error %d\n", "read", status); - return (status < 0) ? status : count; + return spi_sync(spi, &m); } -static ssize_t -ds1305_nvram_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, size_t count) +static int ds1305_nvram_write(void *priv, unsigned int off, void *buf, + size_t count) { - struct spi_device *spi; + struct ds1305 *ds1305 = priv; + struct spi_device *spi = ds1305->spi; u8 addr; struct spi_message m; struct spi_transfer x[2]; - int status; - - spi = to_spi_device(kobj_to_dev(kobj)); addr = (DS1305_WRITE | DS1305_NVRAM) + off; msg_init(&m, x, &addr, count, buf, NULL); - status = spi_sync(spi, &m); - if (status < 0) - dev_err(&spi->dev, "nvram %s error %d\n", "write", status); - return (status < 0) ? status : count; + return spi_sync(spi, &m); } -static struct bin_attribute nvram = { - .attr.name = "nvram", - .attr.mode = S_IRUGO | S_IWUSR, - .read = ds1305_nvram_read, - .write = ds1305_nvram_write, - .size = DS1305_NVRAM_LEN, +static struct nvmem_config ds1305_nvmem_cfg = { + .name = "ds1305_nvram", + .word_size = 1, + .stride = 1, + .size = DS1305_NVRAM_LEN, + .reg_read = ds1305_nvram_read, + .reg_write = ds1305_nvram_write, }; /*----------------------------------------------------------------------*/ @@ -708,10 +695,19 @@ static int ds1305_probe(struct spi_device *spi) dev_dbg(&spi->dev, "AM/PM\n"); /* register RTC ... 
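Converting the hand-rolled sysfs bin_attribute into an nvmem_config means the RTC core now owns the userspace interface for this memory: with nvram_old_abi set the legacy nvram attribute is preserved for existing tools, while the same bytes are also exposed through the nvmem subsystem, typically as a /sys/bus/nvmem/devices/ds1305_nvram*/nvmem node (the exact node name here is an assumption based on the config name, not something stated in this patch).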
from here on, ds1305->ctrl needs locking */ - ds1305->rtc = devm_rtc_device_register(&spi->dev, "ds1305", - &ds1305_ops, THIS_MODULE); + ds1305->rtc = devm_rtc_allocate_device(&spi->dev); if (IS_ERR(ds1305->rtc)) { - status = PTR_ERR(ds1305->rtc); + return PTR_ERR(ds1305->rtc); + } + + ds1305->rtc->ops = &ds1305_ops; + + ds1305_nvmem_cfg.priv = ds1305; + ds1305->rtc->nvmem_config = &ds1305_nvmem_cfg; + ds1305->rtc->nvram_old_abi = true; + + status = rtc_register_device(ds1305->rtc); + if (status) { dev_dbg(&spi->dev, "register rtc --> %d\n", status); return status; } @@ -734,12 +730,6 @@ static int ds1305_probe(struct spi_device *spi) } } - /* export NVRAM */ - status = sysfs_create_bin_file(&spi->dev.kobj, &nvram); - if (status < 0) { - dev_err(&spi->dev, "register nvram --> %d\n", status); - } - return 0; } @@ -747,8 +737,6 @@ static int ds1305_remove(struct spi_device *spi) { struct ds1305 *ds1305 = spi_get_drvdata(spi); - sysfs_remove_bin_file(&spi->dev.kobj, &nvram); - /* carefully shut down irq and workqueue, if present */ if (spi->irq) { set_bit(FLAG_EXITING, &ds1305->flags); diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index e7d9215c9201..923dde912f60 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -325,6 +325,10 @@ static const struct of_device_id ds1307_of_match[] = { .compatible = "isil,isl12057", .data = (void *)ds_1337 }, + { + .compatible = "epson,rx8130", + .data = (void *)rx_8130 + }, { } }; MODULE_DEVICE_TABLE(of, ds1307_of_match); @@ -348,6 +352,7 @@ static const struct acpi_device_id ds1307_acpi_ids[] = { { .id = "PT7C4338", .driver_data = ds_1307 }, { .id = "RX8025", .driver_data = rx_8025 }, { .id = "ISL12057", .driver_data = ds_1337 }, + { .id = "RX8130", .driver_data = rx_8130 }, { } }; MODULE_DEVICE_TABLE(acpi, ds1307_acpi_ids); @@ -787,8 +792,6 @@ static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled) * Alarm support for mcp794xx devices. 
*/ -#define MCP794XX_REG_WEEKDAY 0x3 -#define MCP794XX_REG_WEEKDAY_WDAY_MASK 0x7 #define MCP794XX_REG_CONTROL 0x07 # define MCP794XX_BIT_ALM0_EN 0x10 # define MCP794XX_BIT_ALM1_EN 0x20 @@ -877,15 +880,38 @@ static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t) return 0; } +/* + * We may have a random RTC weekday, therefore calculate alarm weekday based + * on current weekday we read from the RTC timekeeping regs + */ +static int mcp794xx_alm_weekday(struct device *dev, struct rtc_time *tm_alarm) +{ + struct rtc_time tm_now; + int days_now, days_alarm, ret; + + ret = ds1307_get_time(dev, &tm_now); + if (ret) + return ret; + + days_now = div_s64(rtc_tm_to_time64(&tm_now), 24 * 60 * 60); + days_alarm = div_s64(rtc_tm_to_time64(tm_alarm), 24 * 60 * 60); + + return (tm_now.tm_wday + days_alarm - days_now) % 7 + 1; +} + static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t) { struct ds1307 *ds1307 = dev_get_drvdata(dev); unsigned char regs[10]; - int ret; + int wday, ret; if (!test_bit(HAS_ALARM, &ds1307->flags)) return -EINVAL; + wday = mcp794xx_alm_weekday(dev, &t->time); + if (wday < 0) + return wday; + dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d " "enabled=%d pending=%d\n", __func__, t->time.tm_sec, t->time.tm_min, t->time.tm_hour, @@ -902,7 +928,7 @@ static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t) regs[3] = bin2bcd(t->time.tm_sec); regs[4] = bin2bcd(t->time.tm_min); regs[5] = bin2bcd(t->time.tm_hour); - regs[6] = bin2bcd(t->time.tm_wday + 1); + regs[6] = wday; regs[7] = bin2bcd(t->time.tm_mday); regs[8] = bin2bcd(t->time.tm_mon + 1); @@ -1354,14 +1380,12 @@ static int ds1307_probe(struct i2c_client *client, { struct ds1307 *ds1307; int err = -ENODEV; - int tmp, wday; + int tmp; const struct chip_desc *chip; bool want_irq; bool ds1307_can_wakeup_device = false; unsigned char regs[8]; struct ds1307_platform_data *pdata = dev_get_platdata(&client->dev); - struct rtc_time tm; - unsigned long timestamp; u8 trickle_charger_setup = 0; ds1307 = devm_kzalloc(&client->dev, sizeof(struct ds1307), GFP_KERNEL); @@ -1641,25 +1665,6 @@ read_rtc: bin2bcd(tmp)); } - /* - * Some IPs have weekday reset value = 0x1 which might not correct - * hence compute the wday using the current date/month/year values - */ - ds1307_get_time(ds1307->dev, &tm); - wday = tm.tm_wday; - timestamp = rtc_tm_to_time64(&tm); - rtc_time64_to_tm(timestamp, &tm); - - /* - * Check if reset wday is different from the computed wday - * If different then set the wday which we computed using - * timestamp - */ - if (wday != tm.tm_wday) - regmap_update_bits(ds1307->regmap, MCP794XX_REG_WEEKDAY, - MCP794XX_REG_WEEKDAY_WDAY_MASK, - tm.tm_wday + 1); - if (want_irq || ds1307_can_wakeup_device) { device_set_wakeup_capable(ds1307->dev, true); set_bit(HAS_ALARM, &ds1307->flags); diff --git a/drivers/rtc/rtc-ds1390.c b/drivers/rtc/rtc-ds1390.c index aa0d2c6f1edc..4d5b007d7fc6 100644 --- a/drivers/rtc/rtc-ds1390.c +++ b/drivers/rtc/rtc-ds1390.c @@ -216,9 +216,16 @@ static int ds1390_probe(struct spi_device *spi) return res; } +static const struct of_device_id ds1390_of_match[] = { + { .compatible = "dallas,ds1390" }, + {} +}; +MODULE_DEVICE_TABLE(of, ds1390_of_match); + static struct spi_driver ds1390_driver = { .driver = { .name = "rtc-ds1390", + .of_match_table = of_match_ptr(ds1390_of_match), }, .probe = ds1390_probe, }; diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c index 1b2dcb58c0ab..1e95312a6f2e 100644 --- a/drivers/rtc/rtc-ds1511.c +++ 
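A worked pass through mcp794xx_alm_weekday(): if the RTC currently reads Wednesday (tm_wday = 3) and the requested alarm falls two calendar days later, then days_alarm - days_now = 2 and the function returns (3 + 2) % 7 + 1 = 6, which is Friday in the chip's 1-based weekday encoding. Deriving the alarm weekday from the current date this way keeps the alarm consistent even when the weekday register was never initialised to match the calendar, which is why the probe-time weekday fixup is dropped in the same patch.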
b/drivers/rtc/rtc-ds1511.c @@ -398,42 +398,37 @@ static const struct rtc_class_ops ds1511_rtc_ops = { .alarm_irq_enable = ds1511_rtc_alarm_irq_enable, }; -static ssize_t -ds1511_nvram_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *ba, - char *buf, loff_t pos, size_t size) +static int ds1511_nvram_read(void *priv, unsigned int pos, void *buf, + size_t size) { - ssize_t count; + int i; rtc_write(pos, DS1511_RAMADDR_LSB); - for (count = 0; count < size; count++) - *buf++ = rtc_read(DS1511_RAMDATA); + for (i = 0; i < size; i++) + *(char *)buf++ = rtc_read(DS1511_RAMDATA); - return count; + return 0; } -static ssize_t -ds1511_nvram_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *bin_attr, - char *buf, loff_t pos, size_t size) +static int ds1511_nvram_write(void *priv, unsigned int pos, void *buf, + size_t size) { - ssize_t count; + int i; rtc_write(pos, DS1511_RAMADDR_LSB); - for (count = 0; count < size; count++) - rtc_write(*buf++, DS1511_RAMDATA); + for (i = 0; i < size; i++) + rtc_write(*(char *)buf++, DS1511_RAMDATA); - return count; + return 0; } -static struct bin_attribute ds1511_nvram_attr = { - .attr = { - .name = "nvram", - .mode = S_IRUGO | S_IWUSR, - }, +static struct nvmem_config ds1511_nvmem_cfg = { + .name = "ds1511_nvram", + .word_size = 1, + .stride = 1, .size = DS1511_RAM_MAX, - .read = ds1511_nvram_read, - .write = ds1511_nvram_write, + .reg_read = ds1511_nvram_read, + .reg_write = ds1511_nvram_write, }; static int ds1511_rtc_probe(struct platform_device *pdev) @@ -477,11 +472,20 @@ static int ds1511_rtc_probe(struct platform_device *pdev) spin_lock_init(&pdata->lock); platform_set_drvdata(pdev, pdata); - pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, - &ds1511_rtc_ops, THIS_MODULE); + pdata->rtc = devm_rtc_allocate_device(&pdev->dev); if (IS_ERR(pdata->rtc)) return PTR_ERR(pdata->rtc); + pdata->rtc->ops = &ds1511_rtc_ops; + + ds1511_nvmem_cfg.priv = &pdev->dev; + pdata->rtc->nvmem_config = &ds1511_nvmem_cfg; + pdata->rtc->nvram_old_abi = true; + + ret = rtc_register_device(pdata->rtc); + if (ret) + return ret; + /* * if the platform has an interrupt in mind for this device, * then by all means, set it @@ -496,26 +500,6 @@ static int ds1511_rtc_probe(struct platform_device *pdev) } } - ret = sysfs_create_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr); - if (ret) - dev_err(&pdev->dev, "Unable to create sysfs entry: %s\n", - ds1511_nvram_attr.attr.name); - - return 0; -} - -static int ds1511_rtc_remove(struct platform_device *pdev) -{ - struct rtc_plat_data *pdata = platform_get_drvdata(pdev); - - sysfs_remove_bin_file(&pdev->dev.kobj, &ds1511_nvram_attr); - if (pdata->irq > 0) { - /* - * disable the alarm interrupt - */ - rtc_write(rtc_read(RTC_CMD) & ~RTC_TIE, RTC_CMD); - rtc_read(RTC_CMD1); - } return 0; } @@ -524,7 +508,6 @@ MODULE_ALIAS("platform:ds1511"); static struct platform_driver ds1511_rtc_driver = { .probe = ds1511_rtc_probe, - .remove = ds1511_rtc_remove, .driver = { .name = "ds1511", }, diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c index 64989afffa3d..ff65a7d2b9c9 100644 --- a/drivers/rtc/rtc-jz4740.c +++ b/drivers/rtc/rtc-jz4740.c @@ -82,7 +82,7 @@ static inline uint32_t jz4740_rtc_reg_read(struct jz4740_rtc *rtc, size_t reg) static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc) { uint32_t ctrl; - int timeout = 1000; + int timeout = 10000; do { ctrl = jz4740_rtc_reg_read(rtc, JZ_REG_RTC_CTRL); @@ -94,7 +94,7 @@ static int jz4740_rtc_wait_write_ready(struct jz4740_rtc *rtc) 
static inline int jz4780_rtc_enable_write(struct jz4740_rtc *rtc) { uint32_t ctrl; - int ret, timeout = 1000; + int ret, timeout = 10000; ret = jz4740_rtc_wait_write_ready(rtc); if (ret != 0) @@ -368,7 +368,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev) ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SCRATCHPAD, 0x12345678); ret = jz4740_rtc_reg_write(rtc, JZ_REG_RTC_SEC, 0); if (ret) { - dev_err(&pdev->dev, "Could not write write to RTC registers\n"); + dev_err(&pdev->dev, "Could not write to RTC registers\n"); return ret; } } diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index f4c070ea8384..c90fba3ed861 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -154,6 +154,8 @@ struct m41t80_data { struct rtc_device *rtc; #ifdef CONFIG_COMMON_CLK struct clk_hw sqw; + unsigned long freq; + unsigned int sqwe; #endif }; @@ -443,43 +445,40 @@ static SIMPLE_DEV_PM_OPS(m41t80_pm, m41t80_suspend, m41t80_resume); #ifdef CONFIG_COMMON_CLK #define sqw_to_m41t80_data(_hw) container_of(_hw, struct m41t80_data, sqw) -static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) +static unsigned long m41t80_decode_freq(int setting) +{ + return (setting == 0) ? 0 : (setting == 1) ? M41T80_SQW_MAX_FREQ : + M41T80_SQW_MAX_FREQ >> setting; +} + +static unsigned long m41t80_get_freq(struct m41t80_data *m41t80) { - struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw); struct i2c_client *client = m41t80->client; int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ? M41T80_REG_WDAY : M41T80_REG_SQW; int ret = i2c_smbus_read_byte_data(client, reg_sqw); - unsigned long val = M41T80_SQW_MAX_FREQ; if (ret < 0) return 0; + return m41t80_decode_freq(ret >> 4); +} - ret >>= 4; - if (ret == 0) - val = 0; - else if (ret > 1) - val = val / (1 << ret); - - return val; +static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return sqw_to_m41t80_data(hw)->freq; } static long m41t80_sqw_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) { - int i, freq = M41T80_SQW_MAX_FREQ; - - if (freq <= rate) - return freq; - - for (i = 2; i <= ilog2(M41T80_SQW_MAX_FREQ); i++) { - freq /= 1 << i; - if (freq <= rate) - return freq; - } - - return 0; + if (rate >= M41T80_SQW_MAX_FREQ) + return M41T80_SQW_MAX_FREQ; + if (rate >= M41T80_SQW_MAX_FREQ / 4) + return M41T80_SQW_MAX_FREQ / 4; + if (!rate) + return 0; + return 1 << ilog2(rate); } static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate, @@ -491,17 +490,12 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate, M41T80_REG_WDAY : M41T80_REG_SQW; int reg, ret, val = 0; - if (rate) { - if (!is_power_of_2(rate)) - return -EINVAL; - val = ilog2(rate); - if (val == ilog2(M41T80_SQW_MAX_FREQ)) - val = 1; - else if (val < (ilog2(M41T80_SQW_MAX_FREQ) - 1)) - val = ilog2(M41T80_SQW_MAX_FREQ) - val; - else - return -EINVAL; - } + if (rate >= M41T80_SQW_MAX_FREQ) + val = 1; + else if (rate >= M41T80_SQW_MAX_FREQ / 4) + val = 2; + else if (rate) + val = 15 - ilog2(rate); reg = i2c_smbus_read_byte_data(client, reg_sqw); if (reg < 0) @@ -510,10 +504,9 @@ static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate, reg = (reg & 0x0f) | (val << 4); ret = i2c_smbus_write_byte_data(client, reg_sqw, reg); - if (ret < 0) - return ret; - - return -EINVAL; + if (!ret) + m41t80->freq = m41t80_decode_freq(val); + return ret; } static int m41t80_sqw_control(struct clk_hw *hw, bool enable) @@ -530,7 +523,10 @@ static int 
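The new m41t80 encode/decode pair round-trips cleanly. Taking the maximum square-wave rate as 32768 Hz (implied by the 15 - ilog2(rate) term), a requested 1 Hz encodes to val = 15 and m41t80_decode_freq(15) = 32768 >> 15 = 1 Hz; 4096 Hz encodes to val = 3 and decodes to 32768 >> 3 = 4096 Hz; the special cases val = 1 and val = 2 decode to 32768 Hz and 8192 Hz, matching the >= MAX and >= MAX/4 branches in m41t80_sqw_set_rate(). Caching the decoded frequency in m41t80->freq also lets recalc_rate report the rate without touching the I2C bus.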
m41t80_sqw_control(struct clk_hw *hw, bool enable) else ret &= ~M41T80_ALMON_SQWE; - return i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret); + ret = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret); + if (!ret) + m41t80->sqwe = enable; + return ret; } static int m41t80_sqw_prepare(struct clk_hw *hw) @@ -545,14 +541,7 @@ static void m41t80_sqw_unprepare(struct clk_hw *hw) static int m41t80_sqw_is_prepared(struct clk_hw *hw) { - struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw); - struct i2c_client *client = m41t80->client; - int ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); - - if (ret < 0) - return ret; - - return !!(ret & M41T80_ALMON_SQWE); + return sqw_to_m41t80_data(hw)->sqwe; } static const struct clk_ops m41t80_sqw_ops = { @@ -587,6 +576,7 @@ static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80) init.parent_names = NULL; init.num_parents = 0; m41t80->sqw.init = &init; + m41t80->freq = m41t80_get_freq(m41t80); /* optional override of the clockname */ of_property_read_string(node, "clock-output-names", &init.name); diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c index 02af045305dd..d9aea9b6d9cd 100644 --- a/drivers/rtc/rtc-m48t86.c +++ b/drivers/rtc/rtc-m48t86.c @@ -163,35 +163,30 @@ static const struct rtc_class_ops m48t86_rtc_ops = { .proc = m48t86_rtc_proc, }; -static ssize_t m48t86_nvram_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, size_t count) +static int m48t86_nvram_read(void *priv, unsigned int off, void *buf, + size_t count) { - struct device *dev = kobj_to_dev(kobj); + struct device *dev = priv; unsigned int i; for (i = 0; i < count; i++) - buf[i] = m48t86_readb(dev, M48T86_NVRAM(off + i)); + ((u8 *)buf)[i] = m48t86_readb(dev, M48T86_NVRAM(off + i)); - return count; + return 0; } -static ssize_t m48t86_nvram_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, size_t count) +static int m48t86_nvram_write(void *priv, unsigned int off, void *buf, + size_t count) { - struct device *dev = kobj_to_dev(kobj); + struct device *dev = priv; unsigned int i; for (i = 0; i < count; i++) - m48t86_writeb(dev, buf[i], M48T86_NVRAM(off + i)); + m48t86_writeb(dev, ((u8 *)buf)[i], M48T86_NVRAM(off + i)); - return count; + return 0; } -static BIN_ATTR(nvram, 0644, m48t86_nvram_read, m48t86_nvram_write, - M48T86_NVRAM_LEN); - /* * The RTC is an optional feature at purchase time on some Technologic Systems * boards. 
Verify that it actually exists by checking if the last two bytes @@ -223,11 +218,21 @@ static bool m48t86_verify_chip(struct platform_device *pdev) return false; } +static struct nvmem_config m48t86_nvmem_cfg = { + .name = "m48t86_nvram", + .word_size = 1, + .stride = 1, + .size = M48T86_NVRAM_LEN, + .reg_read = m48t86_nvram_read, + .reg_write = m48t86_nvram_write, +}; + static int m48t86_rtc_probe(struct platform_device *pdev) { struct m48t86_rtc_info *info; struct resource *res; unsigned char reg; + int err; info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL); if (!info) @@ -254,25 +259,25 @@ static int m48t86_rtc_probe(struct platform_device *pdev) return -ENODEV; } - info->rtc = devm_rtc_device_register(&pdev->dev, "m48t86", - &m48t86_rtc_ops, THIS_MODULE); + info->rtc = devm_rtc_allocate_device(&pdev->dev); if (IS_ERR(info->rtc)) return PTR_ERR(info->rtc); + info->rtc->ops = &m48t86_rtc_ops; + + m48t86_nvmem_cfg.priv = &pdev->dev; + info->rtc->nvmem_config = &m48t86_nvmem_cfg; + info->rtc->nvram_old_abi = true; + + err = rtc_register_device(info->rtc); + if (err) + return err; + /* read battery status */ reg = m48t86_readb(&pdev->dev, M48T86_D); dev_info(&pdev->dev, "battery %s\n", (reg & M48T86_D_VRT) ? "ok" : "exhausted"); - if (device_create_bin_file(&pdev->dev, &bin_attr_nvram)) - dev_err(&pdev->dev, "failed to create nvram sysfs entry\n"); - - return 0; -} - -static int m48t86_rtc_remove(struct platform_device *pdev) -{ - device_remove_bin_file(&pdev->dev, &bin_attr_nvram); return 0; } @@ -281,7 +286,6 @@ static struct platform_driver m48t86_rtc_platform_driver = { .name = "rtc-m48t86", }, .probe = m48t86_rtc_probe, - .remove = m48t86_rtc_remove, }; module_platform_driver(m48t86_rtc_platform_driver); diff --git a/drivers/rtc/rtc-mt7622.c b/drivers/rtc/rtc-mt7622.c new file mode 100644 index 000000000000..d79b9ae4d237 --- /dev/null +++ b/drivers/rtc/rtc-mt7622.c @@ -0,0 +1,422 @@ +/* + * Driver for MediaTek SoC based RTC + * + * Copyright (C) 2017 Sean Wang <[email protected]> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/rtc.h> + +#define MTK_RTC_DEV KBUILD_MODNAME + +#define MTK_RTC_PWRCHK1 0x4 +#define RTC_PWRCHK1_MAGIC 0xc6 + +#define MTK_RTC_PWRCHK2 0x8 +#define RTC_PWRCHK2_MAGIC 0x9a + +#define MTK_RTC_KEY 0xc +#define RTC_KEY_MAGIC 0x59 + +#define MTK_RTC_PROT1 0x10 +#define RTC_PROT1_MAGIC 0xa3 + +#define MTK_RTC_PROT2 0x14 +#define RTC_PROT2_MAGIC 0x57 + +#define MTK_RTC_PROT3 0x18 +#define RTC_PROT3_MAGIC 0x67 + +#define MTK_RTC_PROT4 0x1c +#define RTC_PROT4_MAGIC 0xd2 + +#define MTK_RTC_CTL 0x20 +#define RTC_RC_STOP BIT(0) + +#define MTK_RTC_DEBNCE 0x2c +#define RTC_DEBNCE_MASK GENMASK(2, 0) + +#define MTK_RTC_INT 0x30 +#define RTC_INT_AL_STA BIT(4) + +/* + * Ranges from 0x40 to 0x78 provide RTC time setup for year, month, + * day of month, day of week, hour, minute and second. 
+ */ +#define MTK_RTC_TREG(_t, _f) (0x40 + (0x4 * (_f)) + ((_t) * 0x20)) + +#define MTK_RTC_AL_CTL 0x7c +#define RTC_AL_EN BIT(0) +#define RTC_AL_ALL GENMASK(7, 0) + +/* + * The offset is used in the translation for the year between in struct + * rtc_time and in hardware register MTK_RTC_TREG(x,MTK_YEA) + */ +#define MTK_RTC_TM_YR_OFFSET 100 + +/* + * The lowest value for the valid tm_year. RTC hardware would take incorrectly + * tm_year 100 as not a leap year and thus it is also required being excluded + * from the valid options. + */ +#define MTK_RTC_TM_YR_L (MTK_RTC_TM_YR_OFFSET + 1) + +/* + * The most year the RTC can hold is 99 and the next to 99 in year register + * would be wraparound to 0, for MT7622. + */ +#define MTK_RTC_HW_YR_LIMIT 99 + +/* The highest value for the valid tm_year */ +#define MTK_RTC_TM_YR_H (MTK_RTC_TM_YR_OFFSET + MTK_RTC_HW_YR_LIMIT) + +/* Simple macro helps to check whether the hardware supports the tm_year */ +#define MTK_RTC_TM_YR_VALID(_y) ((_y) >= MTK_RTC_TM_YR_L && \ + (_y) <= MTK_RTC_TM_YR_H) + +/* Types of the function the RTC provides are time counter and alarm. */ +enum { + MTK_TC, + MTK_AL, +}; + +/* Indexes are used for the pointer to relevant registers in MTK_RTC_TREG */ +enum { + MTK_YEA, + MTK_MON, + MTK_DOM, + MTK_DOW, + MTK_HOU, + MTK_MIN, + MTK_SEC +}; + +struct mtk_rtc { + struct rtc_device *rtc; + void __iomem *base; + int irq; + struct clk *clk; +}; + +static void mtk_w32(struct mtk_rtc *rtc, u32 reg, u32 val) +{ + writel_relaxed(val, rtc->base + reg); +} + +static u32 mtk_r32(struct mtk_rtc *rtc, u32 reg) +{ + return readl_relaxed(rtc->base + reg); +} + +static void mtk_rmw(struct mtk_rtc *rtc, u32 reg, u32 mask, u32 set) +{ + u32 val; + + val = mtk_r32(rtc, reg); + val &= ~mask; + val |= set; + mtk_w32(rtc, reg, val); +} + +static void mtk_set(struct mtk_rtc *rtc, u32 reg, u32 val) +{ + mtk_rmw(rtc, reg, 0, val); +} + +static void mtk_clr(struct mtk_rtc *rtc, u32 reg, u32 val) +{ + mtk_rmw(rtc, reg, val, 0); +} + +static void mtk_rtc_hw_init(struct mtk_rtc *hw) +{ + /* The setup of the init sequence is for allowing RTC got to work */ + mtk_w32(hw, MTK_RTC_PWRCHK1, RTC_PWRCHK1_MAGIC); + mtk_w32(hw, MTK_RTC_PWRCHK2, RTC_PWRCHK2_MAGIC); + mtk_w32(hw, MTK_RTC_KEY, RTC_KEY_MAGIC); + mtk_w32(hw, MTK_RTC_PROT1, RTC_PROT1_MAGIC); + mtk_w32(hw, MTK_RTC_PROT2, RTC_PROT2_MAGIC); + mtk_w32(hw, MTK_RTC_PROT3, RTC_PROT3_MAGIC); + mtk_w32(hw, MTK_RTC_PROT4, RTC_PROT4_MAGIC); + mtk_rmw(hw, MTK_RTC_DEBNCE, RTC_DEBNCE_MASK, 0); + mtk_clr(hw, MTK_RTC_CTL, RTC_RC_STOP); +} + +static void mtk_rtc_get_alarm_or_time(struct mtk_rtc *hw, struct rtc_time *tm, + int time_alarm) +{ + u32 year, mon, mday, wday, hour, min, sec; + + /* + * Read again until the field of the second is not changed which + * ensures all fields in the consistent state. Note that MTK_SEC must + * be read first. In this way, it guarantees the others remain not + * changed when the results for two MTK_SEC consecutive reads are same. 
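Put in calendar terms, the validity check above accepts tm_year values 101 through 199, that is the years 2001 through 2099: the hardware year register only holds 0 to 99, the driver stores tm_year - 100, and tm_year = 100 (the year 2000) is excluded because, per the comment, the hardware would treat it incorrectly as a non-leap year.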
+ */ + do { + sec = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC)); + min = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_MIN)); + hour = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_HOU)); + wday = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_DOW)); + mday = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_DOM)); + mon = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_MON)); + year = mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_YEA)); + } while (sec != mtk_r32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC))); + + tm->tm_sec = sec; + tm->tm_min = min; + tm->tm_hour = hour; + tm->tm_wday = wday; + tm->tm_mday = mday; + tm->tm_mon = mon - 1; + + /* Rebase to the absolute year which userspace queries */ + tm->tm_year = year + MTK_RTC_TM_YR_OFFSET; +} + +static void mtk_rtc_set_alarm_or_time(struct mtk_rtc *hw, struct rtc_time *tm, + int time_alarm) +{ + u32 year; + + /* Rebase to the relative year which the RTC hardware requires */ + year = tm->tm_year - MTK_RTC_TM_YR_OFFSET; + + mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_YEA), year); + mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_MON), tm->tm_mon + 1); + mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_DOW), tm->tm_wday); + mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_DOM), tm->tm_mday); + mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_HOU), tm->tm_hour); + mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_MIN), tm->tm_min); + mtk_w32(hw, MTK_RTC_TREG(time_alarm, MTK_SEC), tm->tm_sec); +} + +static irqreturn_t mtk_rtc_alarmirq(int irq, void *id) +{ + struct mtk_rtc *hw = (struct mtk_rtc *)id; + u32 irq_sta; + + irq_sta = mtk_r32(hw, MTK_RTC_INT); + if (irq_sta & RTC_INT_AL_STA) { + /* Stopping the alarm also implicitly disables the alarm interrupt */ + mtk_w32(hw, MTK_RTC_AL_CTL, 0); + rtc_update_irq(hw->rtc, 1, RTC_IRQF | RTC_AF); + + /* Ack the alarm interrupt status */ + mtk_w32(hw, MTK_RTC_INT, RTC_INT_AL_STA); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static int mtk_rtc_gettime(struct device *dev, struct rtc_time *tm) +{ + struct mtk_rtc *hw = dev_get_drvdata(dev); + + mtk_rtc_get_alarm_or_time(hw, tm, MTK_TC); + + return rtc_valid_tm(tm); +} + +static int mtk_rtc_settime(struct device *dev, struct rtc_time *tm) +{ + struct mtk_rtc *hw = dev_get_drvdata(dev); + + if (!MTK_RTC_TM_YR_VALID(tm->tm_year)) + return -EINVAL; + + /* Stop the time counter before setting a new time */ + mtk_set(hw, MTK_RTC_CTL, RTC_RC_STOP); + + mtk_rtc_set_alarm_or_time(hw, tm, MTK_TC); + + /* Restart the time counter */ + mtk_clr(hw, MTK_RTC_CTL, RTC_RC_STOP); + + return 0; +} + +static int mtk_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm) +{ + struct mtk_rtc *hw = dev_get_drvdata(dev); + struct rtc_time *alrm_tm = &wkalrm->time; + + mtk_rtc_get_alarm_or_time(hw, alrm_tm, MTK_AL); + + wkalrm->enabled = !!(mtk_r32(hw, MTK_RTC_AL_CTL) & RTC_AL_EN); + wkalrm->pending = !!(mtk_r32(hw, MTK_RTC_INT) & RTC_INT_AL_STA); + + return 0; +} + +static int mtk_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm) +{ + struct mtk_rtc *hw = dev_get_drvdata(dev); + struct rtc_time *alrm_tm = &wkalrm->time; + + if (!MTK_RTC_TM_YR_VALID(alrm_tm->tm_year)) + return -EINVAL; + + /* + * Stopping the alarm also implicitly disables its interrupt before a + * new one is set. + */ + mtk_clr(hw, MTK_RTC_AL_CTL, RTC_AL_EN); + + /* + * Avoid contention between mtk_rtc_setalarm() and the IRQ handler: + * disable the interrupt and wait for any pending handler to + * complete.
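+ * synchronize_irq() below returns only once any handler still running on another CPU has finished.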
+ */ + synchronize_irq(hw->irq); + + mtk_rtc_set_alarm_or_time(hw, alrm_tm, MTK_AL); + + /* Restart the alarm with the new setup */ + mtk_w32(hw, MTK_RTC_AL_CTL, RTC_AL_ALL); + + return 0; +} + +static const struct rtc_class_ops mtk_rtc_ops = { + .read_time = mtk_rtc_gettime, + .set_time = mtk_rtc_settime, + .read_alarm = mtk_rtc_getalarm, + .set_alarm = mtk_rtc_setalarm, +}; + +static const struct of_device_id mtk_rtc_match[] = { + { .compatible = "mediatek,mt7622-rtc" }, + { .compatible = "mediatek,soc-rtc" }, + {}, +}; + +static int mtk_rtc_probe(struct platform_device *pdev) +{ + struct mtk_rtc *hw; + struct resource *res; + int ret; + + hw = devm_kzalloc(&pdev->dev, sizeof(*hw), GFP_KERNEL); + if (!hw) + return -ENOMEM; + + platform_set_drvdata(pdev, hw); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + hw->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(hw->base)) + return PTR_ERR(hw->base); + + hw->clk = devm_clk_get(&pdev->dev, "rtc"); + if (IS_ERR(hw->clk)) { + dev_err(&pdev->dev, "No clock\n"); + return PTR_ERR(hw->clk); + } + + ret = clk_prepare_enable(hw->clk); + if (ret) + return ret; + + hw->irq = platform_get_irq(pdev, 0); + if (hw->irq < 0) { + dev_err(&pdev->dev, "No IRQ resource\n"); + ret = hw->irq; + goto err; + } + + ret = devm_request_irq(&pdev->dev, hw->irq, mtk_rtc_alarmirq, + 0, dev_name(&pdev->dev), hw); + if (ret) { + dev_err(&pdev->dev, "Can't request IRQ\n"); + goto err; + } + + mtk_rtc_hw_init(hw); + + device_init_wakeup(&pdev->dev, true); + + hw->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, + &mtk_rtc_ops, THIS_MODULE); + if (IS_ERR(hw->rtc)) { + ret = PTR_ERR(hw->rtc); + dev_err(&pdev->dev, "Unable to register device\n"); + goto err; + } + + return 0; +err: + clk_disable_unprepare(hw->clk); + + return ret; +} + +static int mtk_rtc_remove(struct platform_device *pdev) +{ + struct mtk_rtc *hw = platform_get_drvdata(pdev); + + clk_disable_unprepare(hw->clk); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int mtk_rtc_suspend(struct device *dev) +{ + struct mtk_rtc *hw = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + enable_irq_wake(hw->irq); + + return 0; +} + +static int mtk_rtc_resume(struct device *dev) +{ + struct mtk_rtc *hw = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + disable_irq_wake(hw->irq); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(mtk_rtc_pm_ops, mtk_rtc_suspend, mtk_rtc_resume); + +#define MTK_RTC_PM_OPS (&mtk_rtc_pm_ops) +#else /* CONFIG_PM */ +#define MTK_RTC_PM_OPS NULL +#endif /* CONFIG_PM */ + +static struct platform_driver mtk_rtc_driver = { + .probe = mtk_rtc_probe, + .remove = mtk_rtc_remove, + .driver = { + .name = MTK_RTC_DEV, + .of_match_table = mtk_rtc_match, + .pm = MTK_RTC_PM_OPS, + }, +}; + +module_platform_driver(mtk_rtc_driver); + +MODULE_DESCRIPTION("MediaTek SoC based RTC Driver"); +MODULE_AUTHOR("Sean Wang <[email protected]>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index 13f7cd11c07e..1d666ac9ef70 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -70,6 +70,10 @@ #define OMAP_RTC_COMP_MSB_REG 0x50 #define OMAP_RTC_OSC_REG 0x54 +#define OMAP_RTC_SCRATCH0_REG 0x60 +#define OMAP_RTC_SCRATCH1_REG 0x64 +#define OMAP_RTC_SCRATCH2_REG 0x68 + #define OMAP_RTC_KICK0_REG 0x6c #define OMAP_RTC_KICK1_REG 0x70 @@ -667,6 +671,45 @@ static struct pinctrl_desc rtc_pinctrl_desc = { .owner = THIS_MODULE, }; +static int omap_rtc_scratch_read(void *priv, unsigned int offset, void *_val, + size_t bytes) +{ + struct omap_rtc 
*rtc = priv; + u32 *val = _val; + int i; + + for (i = 0; i < bytes / 4; i++) + val[i] = rtc_readl(rtc, + OMAP_RTC_SCRATCH0_REG + offset + (i * 4)); + + return 0; +} + +static int omap_rtc_scratch_write(void *priv, unsigned int offset, void *_val, + size_t bytes) +{ + struct omap_rtc *rtc = priv; + u32 *val = _val; + int i; + + rtc->type->unlock(rtc); + for (i = 0; i < bytes / 4; i++) + rtc_writel(rtc, + OMAP_RTC_SCRATCH0_REG + offset + (i * 4), val[i]); + rtc->type->lock(rtc); + + return 0; +} + +static struct nvmem_config omap_rtc_nvmem_config = { + .name = "omap_rtc_scratch", + .word_size = 4, + .stride = 4, + .size = OMAP_RTC_KICK0_REG - OMAP_RTC_SCRATCH0_REG, + .reg_read = omap_rtc_scratch_read, + .reg_write = omap_rtc_scratch_write, +}; + static int omap_rtc_probe(struct platform_device *pdev) { struct omap_rtc *rtc; @@ -797,13 +840,16 @@ static int omap_rtc_probe(struct platform_device *pdev) device_init_wakeup(&pdev->dev, true); - rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, - &omap_rtc_ops, THIS_MODULE); + rtc->rtc = devm_rtc_allocate_device(&pdev->dev); if (IS_ERR(rtc->rtc)) { ret = PTR_ERR(rtc->rtc); goto err; } + rtc->rtc->ops = &omap_rtc_ops; + omap_rtc_nvmem_config.priv = rtc; + rtc->rtc->nvmem_config = &omap_rtc_nvmem_config; + /* handle periodic and alarm irqs */ ret = devm_request_irq(&pdev->dev, rtc->irq_timer, rtc_irq, 0, dev_name(&rtc->rtc->dev), rtc); @@ -830,9 +876,14 @@ static int omap_rtc_probe(struct platform_device *pdev) rtc->pctldev = pinctrl_register(&rtc_pinctrl_desc, &pdev->dev, rtc); if (IS_ERR(rtc->pctldev)) { dev_err(&pdev->dev, "Couldn't register pinctrl driver\n"); - return PTR_ERR(rtc->pctldev); + ret = PTR_ERR(rtc->pctldev); + goto err; } + ret = rtc_register_device(rtc->rtc); + if (ret) + goto err; + return 0; err: diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c index 28c48b3c1946..c312af0db729 100644 --- a/drivers/rtc/rtc-pcf8523.c +++ b/drivers/rtc/rtc-pcf8523.c @@ -35,6 +35,9 @@ #define REG_MONTHS 0x08 #define REG_YEARS 0x09 +#define REG_OFFSET 0x0e +#define REG_OFFSET_MODE BIT(7) + struct pcf8523 { struct rtc_device *rtc; }; @@ -272,10 +275,47 @@ static int pcf8523_rtc_ioctl(struct device *dev, unsigned int cmd, #define pcf8523_rtc_ioctl NULL #endif +static int pcf8523_rtc_read_offset(struct device *dev, long *offset) +{ + struct i2c_client *client = to_i2c_client(dev); + int err; + u8 value; + s8 val; + + err = pcf8523_read(client, REG_OFFSET, &value); + if (err < 0) + return err; + + /* sign extend the 7-bit offset value */ + val = value << 1; + *offset = (value & REG_OFFSET_MODE ? 
4069 : 4340) * (val >> 1); + + return 0; +} + +static int pcf8523_rtc_set_offset(struct device *dev, long offset) +{ + struct i2c_client *client = to_i2c_client(dev); + long reg_m0, reg_m1; + u8 value; + + reg_m0 = clamp(DIV_ROUND_CLOSEST(offset, 4340), -64L, 63L); + reg_m1 = clamp(DIV_ROUND_CLOSEST(offset, 4069), -64L, 63L); + + if (abs(reg_m0 * 4340 - offset) < abs(reg_m1 * 4069 - offset)) + value = reg_m0 & 0x7f; + else + value = (reg_m1 & 0x7f) | REG_OFFSET_MODE; + + return pcf8523_write(client, REG_OFFSET, value); +} + static const struct rtc_class_ops pcf8523_rtc_ops = { .read_time = pcf8523_rtc_read_time, .set_time = pcf8523_rtc_set_time, .ioctl = pcf8523_rtc_ioctl, + .read_offset = pcf8523_rtc_read_offset, + .set_offset = pcf8523_rtc_set_offset, }; static int pcf8523_probe(struct i2c_client *client, diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c new file mode 100644 index 000000000000..ea04e9f0930b --- /dev/null +++ b/drivers/rtc/rtc-pcf85363.c @@ -0,0 +1,220 @@ +/* + * drivers/rtc/rtc-pcf85363.c + * + * Driver for NXP PCF85363 real-time clock. + * + * Copyright (C) 2017 Eric Nelson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Based loosely on rtc-8583 by Russell King, Wolfram Sang and Juergen Beisert + */ +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/slab.h> +#include <linux/rtc.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/bcd.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/regmap.h> + +/* + * Date/Time registers + */ +#define DT_100THS 0x00 +#define DT_SECS 0x01 +#define DT_MINUTES 0x02 +#define DT_HOURS 0x03 +#define DT_DAYS 0x04 +#define DT_WEEKDAYS 0x05 +#define DT_MONTHS 0x06 +#define DT_YEARS 0x07 + +/* + * Alarm registers + */ +#define DT_SECOND_ALM1 0x08 +#define DT_MINUTE_ALM1 0x09 +#define DT_HOUR_ALM1 0x0a +#define DT_DAY_ALM1 0x0b +#define DT_MONTH_ALM1 0x0c +#define DT_MINUTE_ALM2 0x0d +#define DT_HOUR_ALM2 0x0e +#define DT_WEEKDAY_ALM2 0x0f +#define DT_ALARM_EN 0x10 + +/* + * Time stamp registers + */ +#define DT_TIMESTAMP1 0x11 +#define DT_TIMESTAMP2 0x17 +#define DT_TIMESTAMP3 0x1d +#define DT_TS_MODE 0x23 + +/* + * control registers + */ +#define CTRL_OFFSET 0x24 +#define CTRL_OSCILLATOR 0x25 +#define CTRL_BATTERY 0x26 +#define CTRL_PIN_IO 0x27 +#define CTRL_FUNCTION 0x28 +#define CTRL_INTA_EN 0x29 +#define CTRL_INTB_EN 0x2a +#define CTRL_FLAGS 0x2b +#define CTRL_RAMBYTE 0x2c +#define CTRL_WDOG 0x2d +#define CTRL_STOP_EN 0x2e +#define CTRL_RESETS 0x2f +#define CTRL_RAM 0x40 + +#define NVRAM_SIZE 0x40 + +static struct i2c_driver pcf85363_driver; + +struct pcf85363 { + struct device *dev; + struct rtc_device *rtc; + struct nvmem_config nvmem_cfg; + struct regmap *regmap; +}; + +static int pcf85363_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct pcf85363 *pcf85363 = dev_get_drvdata(dev); + unsigned char buf[DT_YEARS + 1]; + int ret, len = sizeof(buf); + + /* read the RTC date and time registers all at once */ + ret = regmap_bulk_read(pcf85363->regmap, DT_100THS, buf, len); + if (ret) { + dev_err(dev, "%s: error %d\n", __func__, ret); + return ret; + } + + tm->tm_year = bcd2bin(buf[DT_YEARS]); + /* adjust for 1900 base of rtc_time */ + tm->tm_year += 100; + + tm->tm_wday = buf[DT_WEEKDAYS] & 7; + buf[DT_SECS] &= 0x7F; + tm->tm_sec = bcd2bin(buf[DT_SECS]); + buf[DT_MINUTES] &= 
0x7F; + tm->tm_min = bcd2bin(buf[DT_MINUTES]); + tm->tm_hour = bcd2bin(buf[DT_HOURS]); + tm->tm_mday = bcd2bin(buf[DT_DAYS]); + tm->tm_mon = bcd2bin(buf[DT_MONTHS]) - 1; + + return 0; +} + +static int pcf85363_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct pcf85363 *pcf85363 = dev_get_drvdata(dev); + unsigned char buf[DT_YEARS + 1]; + int len = sizeof(buf); + + buf[DT_100THS] = 0; + buf[DT_SECS] = bin2bcd(tm->tm_sec); + buf[DT_MINUTES] = bin2bcd(tm->tm_min); + buf[DT_HOURS] = bin2bcd(tm->tm_hour); + buf[DT_DAYS] = bin2bcd(tm->tm_mday); + buf[DT_WEEKDAYS] = tm->tm_wday; + buf[DT_MONTHS] = bin2bcd(tm->tm_mon + 1); + buf[DT_YEARS] = bin2bcd(tm->tm_year % 100); + + return regmap_bulk_write(pcf85363->regmap, DT_100THS, + buf, len); +} + +static const struct rtc_class_ops rtc_ops = { + .read_time = pcf85363_rtc_read_time, + .set_time = pcf85363_rtc_set_time, +}; + +static int pcf85363_nvram_read(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + struct pcf85363 *pcf85363 = priv; + + return regmap_bulk_read(pcf85363->regmap, CTRL_RAM + offset, + val, bytes); +} + +static int pcf85363_nvram_write(void *priv, unsigned int offset, void *val, + size_t bytes) +{ + struct pcf85363 *pcf85363 = priv; + + return regmap_bulk_write(pcf85363->regmap, CTRL_RAM + offset, + val, bytes); +} + +static const struct regmap_config regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + +static int pcf85363_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct pcf85363 *pcf85363; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + return -ENODEV; + + pcf85363 = devm_kzalloc(&client->dev, sizeof(struct pcf85363), + GFP_KERNEL); + if (!pcf85363) + return -ENOMEM; + + pcf85363->regmap = devm_regmap_init_i2c(client, ®map_config); + if (IS_ERR(pcf85363->regmap)) { + dev_err(&client->dev, "regmap allocation failed\n"); + return PTR_ERR(pcf85363->regmap); + } + + pcf85363->dev = &client->dev; + i2c_set_clientdata(client, pcf85363); + + pcf85363->rtc = devm_rtc_allocate_device(pcf85363->dev); + if (IS_ERR(pcf85363->rtc)) + return PTR_ERR(pcf85363->rtc); + + pcf85363->nvmem_cfg.name = "pcf85363-"; + pcf85363->nvmem_cfg.word_size = 1; + pcf85363->nvmem_cfg.stride = 1; + pcf85363->nvmem_cfg.size = NVRAM_SIZE; + pcf85363->nvmem_cfg.reg_read = pcf85363_nvram_read; + pcf85363->nvmem_cfg.reg_write = pcf85363_nvram_write; + pcf85363->nvmem_cfg.priv = pcf85363; + pcf85363->rtc->nvmem_config = &pcf85363->nvmem_cfg; + pcf85363->rtc->ops = &rtc_ops; + + return rtc_register_device(pcf85363->rtc); +} + +static const struct of_device_id dev_ids[] = { + { .compatible = "nxp,pcf85363" }, + {} +}; +MODULE_DEVICE_TABLE(of, dev_ids); + +static struct i2c_driver pcf85363_driver = { + .driver = { + .name = "pcf85363", + .of_match_table = of_match_ptr(dev_ids), + }, + .probe = pcf85363_probe, +}; + +module_i2c_driver(pcf85363_driver); + +MODULE_AUTHOR("Eric Nelson"); +MODULE_DESCRIPTION("pcf85363 I2C RTC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c index cea6ea4df970..3efc86c25d27 100644 --- a/drivers/rtc/rtc-pcf8563.c +++ b/drivers/rtc/rtc-pcf8563.c @@ -387,7 +387,7 @@ static int pcf8563_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *tm) if (err) return err; - return pcf8563_set_alarm_mode(client, 1); + return pcf8563_set_alarm_mode(client, !!tm->enabled); } static int pcf8563_irq_enable(struct device *dev, unsigned int enabled) @@ -422,7 +422,7 @@ static unsigned long pcf8563_clkout_recalc_rate(struct clk_hw 
*hw, return 0; buf &= PCF8563_REG_CLKO_F_MASK; - return clkout_rates[ret]; + return clkout_rates[buf]; } static long pcf8563_clkout_round_rate(struct clk_hw *hw, unsigned long rate, diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index e1687e19c59f..82eb7da2c478 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -308,10 +308,9 @@ static int pl031_remove(struct amba_device *adev) dev_pm_clear_wake_irq(&adev->dev); device_init_wakeup(&adev->dev, false); - free_irq(adev->irq[0], ldata); + if (adev->irq[0]) + free_irq(adev->irq[0], ldata); rtc_device_unregister(ldata->rtc); - iounmap(ldata->base); - kfree(ldata); amba_release_regions(adev); return 0; @@ -322,25 +321,28 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) int ret; struct pl031_local *ldata; struct pl031_vendor_data *vendor = id->data; - struct rtc_class_ops *ops = &vendor->ops; + struct rtc_class_ops *ops; unsigned long time, data; ret = amba_request_regions(adev, NULL); if (ret) goto err_req; - ldata = kzalloc(sizeof(struct pl031_local), GFP_KERNEL); - if (!ldata) { + ldata = devm_kzalloc(&adev->dev, sizeof(struct pl031_local), + GFP_KERNEL); + ops = devm_kmemdup(&adev->dev, &vendor->ops, sizeof(vendor->ops), + GFP_KERNEL); + if (!ldata || !ops) { ret = -ENOMEM; goto out; } - ldata->vendor = vendor; - - ldata->base = ioremap(adev->res.start, resource_size(&adev->res)); + ldata->vendor = vendor; + ldata->base = devm_ioremap(&adev->dev, adev->res.start, + resource_size(&adev->res)); if (!ldata->base) { ret = -ENOMEM; - goto out_no_remap; + goto out; } amba_set_drvdata(adev, ldata); @@ -373,28 +375,32 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) } } + if (!adev->irq[0]) { + /* When there's no interrupt, no point in exposing the alarm */ + ops->read_alarm = NULL; + ops->set_alarm = NULL; + ops->alarm_irq_enable = NULL; + } + device_init_wakeup(&adev->dev, true); ldata->rtc = rtc_device_register("pl031", &adev->dev, ops, THIS_MODULE); if (IS_ERR(ldata->rtc)) { ret = PTR_ERR(ldata->rtc); - goto out_no_rtc; + goto out; } - if (request_irq(adev->irq[0], pl031_interrupt, - vendor->irqflags, "rtc-pl031", ldata)) { - ret = -EIO; - goto out_no_irq; + if (adev->irq[0]) { + ret = request_irq(adev->irq[0], pl031_interrupt, + vendor->irqflags, "rtc-pl031", ldata); + if (ret) + goto out_no_irq; + dev_pm_set_wake_irq(&adev->dev, adev->irq[0]); } - dev_pm_set_wake_irq(&adev->dev, adev->irq[0]); return 0; out_no_irq: rtc_device_unregister(ldata->rtc); -out_no_rtc: - iounmap(ldata->base); -out_no_remap: - kfree(ldata); out: amba_release_regions(adev); err_req: @@ -446,7 +452,7 @@ static struct pl031_vendor_data stv2_pl031 = { .irqflags = IRQF_SHARED | IRQF_COND_SUSPEND, }; -static struct amba_id pl031_ids[] = { +static const struct amba_id pl031_ids[] = { { .id = 0x00041031, .mask = 0x000fffff, diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c index aa09771de04f..3d6174eb32f6 100644 --- a/drivers/rtc/rtc-rv3029c2.c +++ b/drivers/rtc/rtc-rv3029c2.c @@ -282,13 +282,13 @@ static int rv3029_eeprom_read(struct device *dev, u8 reg, static int rv3029_eeprom_write(struct device *dev, u8 reg, u8 const buf[], size_t len) { - int ret, err; + int ret; size_t i; u8 tmp; - err = rv3029_eeprom_enter(dev); - if (err < 0) - return err; + ret = rv3029_eeprom_enter(dev); + if (ret < 0) + return ret; for (i = 0; i < len; i++, reg++) { ret = rv3029_read_regs(dev, reg, &tmp, 1); @@ -304,11 +304,11 @@ static int rv3029_eeprom_write(struct device *dev, u8 reg, 
break; } - err = rv3029_eeprom_exit(dev); - if (err < 0) - return err; + ret = rv3029_eeprom_exit(dev); + if (ret < 0) + return ret; - return ret; + return 0; } static int rv3029_eeprom_update_bits(struct device *dev, @@ -876,6 +876,8 @@ static const struct i2c_device_id rv3029_id[] = { MODULE_DEVICE_TABLE(i2c, rv3029_id); static const struct of_device_id rv3029_of_match[] = { + { .compatible = "microcrystal,rv3029" }, + /* Backward compatibility only, do not use compatibles below: */ { .compatible = "rv3029" }, { .compatible = "rv3029c2" }, { .compatible = "mc,rv3029c2" }, diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c index 1ed3403ff8ac..5c5938ab3d86 100644 --- a/drivers/rtc/rtc-rx8010.c +++ b/drivers/rtc/rtc-rx8010.c @@ -24,7 +24,6 @@ #define RX8010_MDAY 0x14 #define RX8010_MONTH 0x15 #define RX8010_YEAR 0x16 -#define RX8010_YEAR 0x16 #define RX8010_RESV17 0x17 #define RX8010_ALMIN 0x18 #define RX8010_ALHOUR 0x19 @@ -36,7 +35,7 @@ #define RX8010_CTRL 0x1F /* 0x20 to 0x2F are user registers */ #define RX8010_RESV30 0x30 -#define RX8010_RESV31 0x32 +#define RX8010_RESV31 0x31 #define RX8010_IRQ 0x32 #define RX8010_EXT_WADA BIT(3) @@ -248,7 +247,7 @@ static int rx8010_init_client(struct i2c_client *client) rx8010->ctrlreg = (ctrl[1] & ~RX8010_CTRL_TEST); - return err; + return 0; } static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t) @@ -277,7 +276,7 @@ static int rx8010_read_alarm(struct device *dev, struct rtc_wkalrm *t) t->enabled = !!(rx8010->ctrlreg & RX8010_CTRL_AIE); t->pending = (flagreg & RX8010_FLAG_AF) && t->enabled; - return err; + return 0; } static int rx8010_set_alarm(struct device *dev, struct rtc_wkalrm *t) diff --git a/drivers/rtc/rtc-sc27xx.c b/drivers/rtc/rtc-sc27xx.c new file mode 100644 index 000000000000..d544d5268757 --- /dev/null +++ b/drivers/rtc/rtc-sc27xx.c @@ -0,0 +1,662 @@ +/* + * Copyright (C) 2017 Spreadtrum Communications Inc. 
+ * + * SPDX-License-Identifier: GPL-2.0 + */ + +#include <linux/bitops.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/rtc.h> + +#define SPRD_RTC_SEC_CNT_VALUE 0x0 +#define SPRD_RTC_MIN_CNT_VALUE 0x4 +#define SPRD_RTC_HOUR_CNT_VALUE 0x8 +#define SPRD_RTC_DAY_CNT_VALUE 0xc +#define SPRD_RTC_SEC_CNT_UPD 0x10 +#define SPRD_RTC_MIN_CNT_UPD 0x14 +#define SPRD_RTC_HOUR_CNT_UPD 0x18 +#define SPRD_RTC_DAY_CNT_UPD 0x1c +#define SPRD_RTC_SEC_ALM_UPD 0x20 +#define SPRD_RTC_MIN_ALM_UPD 0x24 +#define SPRD_RTC_HOUR_ALM_UPD 0x28 +#define SPRD_RTC_DAY_ALM_UPD 0x2c +#define SPRD_RTC_INT_EN 0x30 +#define SPRD_RTC_INT_RAW_STS 0x34 +#define SPRD_RTC_INT_CLR 0x38 +#define SPRD_RTC_INT_MASK_STS 0x3C +#define SPRD_RTC_SEC_ALM_VALUE 0x40 +#define SPRD_RTC_MIN_ALM_VALUE 0x44 +#define SPRD_RTC_HOUR_ALM_VALUE 0x48 +#define SPRD_RTC_DAY_ALM_VALUE 0x4c +#define SPRD_RTC_SPG_VALUE 0x50 +#define SPRD_RTC_SPG_UPD 0x54 +#define SPRD_RTC_SEC_AUXALM_UPD 0x60 +#define SPRD_RTC_MIN_AUXALM_UPD 0x64 +#define SPRD_RTC_HOUR_AUXALM_UPD 0x68 +#define SPRD_RTC_DAY_AUXALM_UPD 0x6c + +/* BIT & MASK definition for SPRD_RTC_INT_* registers */ +#define SPRD_RTC_SEC_EN BIT(0) +#define SPRD_RTC_MIN_EN BIT(1) +#define SPRD_RTC_HOUR_EN BIT(2) +#define SPRD_RTC_DAY_EN BIT(3) +#define SPRD_RTC_ALARM_EN BIT(4) +#define SPRD_RTC_HRS_FORMAT_EN BIT(5) +#define SPRD_RTC_AUXALM_EN BIT(6) +#define SPRD_RTC_SPG_UPD_EN BIT(7) +#define SPRD_RTC_SEC_UPD_EN BIT(8) +#define SPRD_RTC_MIN_UPD_EN BIT(9) +#define SPRD_RTC_HOUR_UPD_EN BIT(10) +#define SPRD_RTC_DAY_UPD_EN BIT(11) +#define SPRD_RTC_ALMSEC_UPD_EN BIT(12) +#define SPRD_RTC_ALMMIN_UPD_EN BIT(13) +#define SPRD_RTC_ALMHOUR_UPD_EN BIT(14) +#define SPRD_RTC_ALMDAY_UPD_EN BIT(15) +#define SPRD_RTC_INT_MASK GENMASK(15, 0) + +#define SPRD_RTC_TIME_INT_MASK \ + (SPRD_RTC_SEC_UPD_EN | SPRD_RTC_MIN_UPD_EN | \ + SPRD_RTC_HOUR_UPD_EN | SPRD_RTC_DAY_UPD_EN) + +#define SPRD_RTC_ALMTIME_INT_MASK \ + (SPRD_RTC_ALMSEC_UPD_EN | SPRD_RTC_ALMMIN_UPD_EN | \ + SPRD_RTC_ALMHOUR_UPD_EN | SPRD_RTC_ALMDAY_UPD_EN) + +#define SPRD_RTC_ALM_INT_MASK \ + (SPRD_RTC_SEC_EN | SPRD_RTC_MIN_EN | \ + SPRD_RTC_HOUR_EN | SPRD_RTC_DAY_EN | \ + SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN) + +/* second/minute/hour/day values mask definition */ +#define SPRD_RTC_SEC_MASK GENMASK(5, 0) +#define SPRD_RTC_MIN_MASK GENMASK(5, 0) +#define SPRD_RTC_HOUR_MASK GENMASK(4, 0) +#define SPRD_RTC_DAY_MASK GENMASK(15, 0) + +/* alarm lock definition for SPRD_RTC_SPG_UPD register */ +#define SPRD_RTC_ALMLOCK_MASK GENMASK(7, 0) +#define SPRD_RTC_ALM_UNLOCK 0xa5 +#define SPRD_RTC_ALM_LOCK (~SPRD_RTC_ALM_UNLOCK & \ + SPRD_RTC_ALMLOCK_MASK) + +/* SPG values definition for SPRD_RTC_SPG_UPD register */ +#define SPRD_RTC_POWEROFF_ALM_FLAG BIT(8) +#define SPRD_RTC_POWER_RESET_FLAG BIT(9) + +/* timeout of synchronizing time and alarm registers (us) */ +#define SPRD_RTC_POLL_TIMEOUT 200000 +#define SPRD_RTC_POLL_DELAY_US 20000 + +struct sprd_rtc { + struct rtc_device *rtc; + struct regmap *regmap; + struct device *dev; + u32 base; + int irq; + bool valid; +}; + +/* + * The Spreadtrum RTC controller has 3 groups registers, including time, normal + * alarm and auxiliary alarm. The time group registers are used to set RTC time, + * the normal alarm registers are used to set normal alarm, and the auxiliary + * alarm registers are used to set auxiliary alarm. 
Both alarm event and + * auxiliary alarm event can wake up system from deep sleep, but only alarm + * event can power up system from power down status. + */ +enum sprd_rtc_reg_types { + SPRD_RTC_TIME, + SPRD_RTC_ALARM, + SPRD_RTC_AUX_ALARM, +}; + +static int sprd_rtc_clear_alarm_ints(struct sprd_rtc *rtc) +{ + return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR, + SPRD_RTC_ALM_INT_MASK); +} + +static int sprd_rtc_disable_ints(struct sprd_rtc *rtc) +{ + int ret; + + ret = regmap_update_bits(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, + SPRD_RTC_INT_MASK, 0); + if (ret) + return ret; + + return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR, + SPRD_RTC_INT_MASK); +} + +static int sprd_rtc_lock_alarm(struct sprd_rtc *rtc, bool lock) +{ + int ret; + u32 val; + + ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_SPG_VALUE, &val); + if (ret) + return ret; + + val &= ~(SPRD_RTC_ALMLOCK_MASK | SPRD_RTC_POWEROFF_ALM_FLAG); + if (lock) + val |= SPRD_RTC_ALM_LOCK; + else + val |= SPRD_RTC_ALM_UNLOCK | SPRD_RTC_POWEROFF_ALM_FLAG; + + ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_SPG_UPD, val); + if (ret) + return ret; + + /* wait until the SPG value is updated successfully */ + ret = regmap_read_poll_timeout(rtc->regmap, + rtc->base + SPRD_RTC_INT_RAW_STS, val, + (val & SPRD_RTC_SPG_UPD_EN), + SPRD_RTC_POLL_DELAY_US, + SPRD_RTC_POLL_TIMEOUT); + if (ret) { + dev_err(rtc->dev, "failed to update SPG value:%d\n", ret); + return ret; + } + + return 0; +} + +static int sprd_rtc_get_secs(struct sprd_rtc *rtc, enum sprd_rtc_reg_types type, + time64_t *secs) +{ + u32 sec_reg, min_reg, hour_reg, day_reg; + u32 val, sec, min, hour, day; + int ret; + + switch (type) { + case SPRD_RTC_TIME: + sec_reg = SPRD_RTC_SEC_CNT_VALUE; + min_reg = SPRD_RTC_MIN_CNT_VALUE; + hour_reg = SPRD_RTC_HOUR_CNT_VALUE; + day_reg = SPRD_RTC_DAY_CNT_VALUE; + break; + case SPRD_RTC_ALARM: + sec_reg = SPRD_RTC_SEC_ALM_VALUE; + min_reg = SPRD_RTC_MIN_ALM_VALUE; + hour_reg = SPRD_RTC_HOUR_ALM_VALUE; + day_reg = SPRD_RTC_DAY_ALM_VALUE; + break; + case SPRD_RTC_AUX_ALARM: + sec_reg = SPRD_RTC_SEC_AUXALM_UPD; + min_reg = SPRD_RTC_MIN_AUXALM_UPD; + hour_reg = SPRD_RTC_HOUR_AUXALM_UPD; + day_reg = SPRD_RTC_DAY_AUXALM_UPD; + break; + default: + return -EINVAL; + } + + ret = regmap_read(rtc->regmap, rtc->base + sec_reg, &val); + if (ret) + return ret; + + sec = val & SPRD_RTC_SEC_MASK; + + ret = regmap_read(rtc->regmap, rtc->base + min_reg, &val); + if (ret) + return ret; + + min = val & SPRD_RTC_MIN_MASK; + + ret = regmap_read(rtc->regmap, rtc->base + hour_reg, &val); + if (ret) + return ret; + + hour = val & SPRD_RTC_HOUR_MASK; + + ret = regmap_read(rtc->regmap, rtc->base + day_reg, &val); + if (ret) + return ret; + + day = val & SPRD_RTC_DAY_MASK; + *secs = (((time64_t)(day * 24) + hour) * 60 + min) * 60 + sec; + return 0; +} + +static int sprd_rtc_set_secs(struct sprd_rtc *rtc, enum sprd_rtc_reg_types type, + time64_t secs) +{ + u32 sec_reg, min_reg, hour_reg, day_reg, sts_mask; + u32 sec, min, hour, day, val; + int ret, rem; + + /* convert seconds to RTC time format */ + day = div_s64_rem(secs, 86400, &rem); + hour = rem / 3600; + rem -= hour * 3600; + min = rem / 60; + sec = rem - min * 60; + + switch (type) { + case SPRD_RTC_TIME: + sec_reg = SPRD_RTC_SEC_CNT_UPD; + min_reg = SPRD_RTC_MIN_CNT_UPD; + hour_reg = SPRD_RTC_HOUR_CNT_UPD; + day_reg = SPRD_RTC_DAY_CNT_UPD; + sts_mask = SPRD_RTC_TIME_INT_MASK; + break; + case SPRD_RTC_ALARM: + sec_reg = SPRD_RTC_SEC_ALM_UPD; + min_reg = SPRD_RTC_MIN_ALM_UPD; + hour_reg = 
SPRD_RTC_HOUR_ALM_UPD; + day_reg = SPRD_RTC_DAY_ALM_UPD; + sts_mask = SPRD_RTC_ALMTIME_INT_MASK; + break; + case SPRD_RTC_AUX_ALARM: + sec_reg = SPRD_RTC_SEC_AUXALM_UPD; + min_reg = SPRD_RTC_MIN_AUXALM_UPD; + hour_reg = SPRD_RTC_HOUR_AUXALM_UPD; + day_reg = SPRD_RTC_DAY_AUXALM_UPD; + sts_mask = 0; + break; + default: + return -EINVAL; + } + + ret = regmap_write(rtc->regmap, rtc->base + sec_reg, sec); + if (ret) + return ret; + + ret = regmap_write(rtc->regmap, rtc->base + min_reg, min); + if (ret) + return ret; + + ret = regmap_write(rtc->regmap, rtc->base + hour_reg, hour); + if (ret) + return ret; + + ret = regmap_write(rtc->regmap, rtc->base + day_reg, day); + if (ret) + return ret; + + if (type == SPRD_RTC_AUX_ALARM) + return 0; + + /* + * Since the time and normal alarm registers are put in always-power-on + * region supplied by VDDRTC, then these registers changing time will + * be very long, about 125ms. Thus here we should wait until all + * values are updated successfully. + */ + ret = regmap_read_poll_timeout(rtc->regmap, + rtc->base + SPRD_RTC_INT_RAW_STS, val, + ((val & sts_mask) == sts_mask), + SPRD_RTC_POLL_DELAY_US, + SPRD_RTC_POLL_TIMEOUT); + if (ret < 0) { + dev_err(rtc->dev, "set time/alarm values timeout\n"); + return ret; + } + + return regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR, + sts_mask); +} + +static int sprd_rtc_read_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct sprd_rtc *rtc = dev_get_drvdata(dev); + time64_t secs; + u32 val; + int ret; + + ret = sprd_rtc_get_secs(rtc, SPRD_RTC_AUX_ALARM, &secs); + if (ret) + return ret; + + rtc_time64_to_tm(secs, &alrm->time); + + ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, &val); + if (ret) + return ret; + + alrm->enabled = !!(val & SPRD_RTC_AUXALM_EN); + + ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_RAW_STS, &val); + if (ret) + return ret; + + alrm->pending = !!(val & SPRD_RTC_AUXALM_EN); + return 0; +} + +static int sprd_rtc_set_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct sprd_rtc *rtc = dev_get_drvdata(dev); + time64_t secs = rtc_tm_to_time64(&alrm->time); + int ret; + + /* clear the auxiliary alarm interrupt status */ + ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR, + SPRD_RTC_AUXALM_EN); + if (ret) + return ret; + + ret = sprd_rtc_set_secs(rtc, SPRD_RTC_AUX_ALARM, secs); + if (ret) + return ret; + + if (alrm->enabled) { + ret = regmap_update_bits(rtc->regmap, + rtc->base + SPRD_RTC_INT_EN, + SPRD_RTC_AUXALM_EN, + SPRD_RTC_AUXALM_EN); + } else { + ret = regmap_update_bits(rtc->regmap, + rtc->base + SPRD_RTC_INT_EN, + SPRD_RTC_AUXALM_EN, 0); + } + + return ret; +} + +static int sprd_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct sprd_rtc *rtc = dev_get_drvdata(dev); + time64_t secs; + int ret; + + if (!rtc->valid) { + dev_warn(dev, "RTC values are invalid\n"); + return -EINVAL; + } + + ret = sprd_rtc_get_secs(rtc, SPRD_RTC_TIME, &secs); + if (ret) + return ret; + + rtc_time64_to_tm(secs, tm); + return rtc_valid_tm(tm); +} + +static int sprd_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct sprd_rtc *rtc = dev_get_drvdata(dev); + time64_t secs = rtc_tm_to_time64(tm); + u32 val; + int ret; + + ret = sprd_rtc_set_secs(rtc, SPRD_RTC_TIME, secs); + if (ret) + return ret; + + if (!rtc->valid) { + /* + * Set SPRD_RTC_POWER_RESET_FLAG to indicate now RTC has valid + * time values. 
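+ * sprd_rtc_check_power_down() inspects this flag at probe time to decide whether the counter values can be trusted.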
+ */ + ret = regmap_update_bits(rtc->regmap, + rtc->base + SPRD_RTC_SPG_UPD, + SPRD_RTC_POWER_RESET_FLAG, + SPRD_RTC_POWER_RESET_FLAG); + if (ret) + return ret; + + ret = regmap_read_poll_timeout(rtc->regmap, + rtc->base + SPRD_RTC_INT_RAW_STS, + val, (val & SPRD_RTC_SPG_UPD_EN), + SPRD_RTC_POLL_DELAY_US, + SPRD_RTC_POLL_TIMEOUT); + if (ret) { + dev_err(rtc->dev, "failed to update SPG value:%d\n", + ret); + return ret; + } + + rtc->valid = true; + } + + return 0; +} + +static int sprd_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct sprd_rtc *rtc = dev_get_drvdata(dev); + time64_t secs; + int ret; + u32 val; + + /* + * If aie_timer is enabled, we should get the normal alarm time. + * Otherwise we should get auxiliary alarm time. + */ + if (rtc->rtc && rtc->rtc->aie_timer.enabled == 0) + return sprd_rtc_read_aux_alarm(dev, alrm); + + ret = sprd_rtc_get_secs(rtc, SPRD_RTC_ALARM, &secs); + if (ret) + return ret; + + rtc_time64_to_tm(secs, &alrm->time); + + ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, &val); + if (ret) + return ret; + + alrm->enabled = !!(val & SPRD_RTC_ALARM_EN); + + ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_RAW_STS, &val); + if (ret) + return ret; + + alrm->pending = !!(val & SPRD_RTC_ALARM_EN); + return 0; +} + +static int sprd_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct sprd_rtc *rtc = dev_get_drvdata(dev); + time64_t secs = rtc_tm_to_time64(&alrm->time); + struct rtc_time aie_time = + rtc_ktime_to_tm(rtc->rtc->aie_timer.node.expires); + int ret; + + /* + * We have 2 groups alarms: normal alarm and auxiliary alarm. Since + * both normal alarm event and auxiliary alarm event can wake up system + * from deep sleep, but only alarm event can power up system from power + * down status. Moreover we do not need to poll about 125ms when + * updating auxiliary alarm registers. Thus we usually set auxiliary + * alarm when wake up system from deep sleep, and for other scenarios, + * we should set normal alarm with polling status. + * + * So here we check if the alarm time is set by aie_timer, if yes, we + * should set normal alarm, if not, we should set auxiliary alarm which + * means it is just a wake event. + */ + if (!rtc->rtc->aie_timer.enabled || rtc_tm_sub(&aie_time, &alrm->time)) + return sprd_rtc_set_aux_alarm(dev, alrm); + + /* clear the alarm interrupt status firstly */ + ret = regmap_write(rtc->regmap, rtc->base + SPRD_RTC_INT_CLR, + SPRD_RTC_ALARM_EN); + if (ret) + return ret; + + ret = sprd_rtc_set_secs(rtc, SPRD_RTC_ALARM, secs); + if (ret) + return ret; + + if (alrm->enabled) { + ret = regmap_update_bits(rtc->regmap, + rtc->base + SPRD_RTC_INT_EN, + SPRD_RTC_ALARM_EN, + SPRD_RTC_ALARM_EN); + if (ret) + return ret; + + /* unlock the alarm to enable the alarm function. */ + ret = sprd_rtc_lock_alarm(rtc, false); + } else { + regmap_update_bits(rtc->regmap, + rtc->base + SPRD_RTC_INT_EN, + SPRD_RTC_ALARM_EN, 0); + + /* + * Lock the alarm function in case fake alarm event will power + * up systems. 
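+ * sprd_rtc_lock_alarm(rtc, true) below writes SPRD_RTC_ALM_LOCK into the SPG register so that a stale alarm cannot power the system back up.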
+ */ + ret = sprd_rtc_lock_alarm(rtc, true); + } + + return ret; +} + +static int sprd_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct sprd_rtc *rtc = dev_get_drvdata(dev); + int ret; + + if (enabled) { + ret = regmap_update_bits(rtc->regmap, + rtc->base + SPRD_RTC_INT_EN, + SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN, + SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN); + if (ret) + return ret; + + ret = sprd_rtc_lock_alarm(rtc, false); + } else { + regmap_update_bits(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, + SPRD_RTC_ALARM_EN | SPRD_RTC_AUXALM_EN, 0); + + ret = sprd_rtc_lock_alarm(rtc, true); + } + + return ret; +} + +static const struct rtc_class_ops sprd_rtc_ops = { + .read_time = sprd_rtc_read_time, + .set_time = sprd_rtc_set_time, + .read_alarm = sprd_rtc_read_alarm, + .set_alarm = sprd_rtc_set_alarm, + .alarm_irq_enable = sprd_rtc_alarm_irq_enable, +}; + +static irqreturn_t sprd_rtc_handler(int irq, void *dev_id) +{ + struct sprd_rtc *rtc = dev_id; + int ret; + + ret = sprd_rtc_clear_alarm_ints(rtc); + if (ret) + return IRQ_RETVAL(ret); + + rtc_update_irq(rtc->rtc, 1, RTC_AF | RTC_IRQF); + return IRQ_HANDLED; +} + +static int sprd_rtc_check_power_down(struct sprd_rtc *rtc) +{ + u32 val; + int ret; + + ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_SPG_VALUE, &val); + if (ret) + return ret; + + /* + * If the SPRD_RTC_POWER_RESET_FLAG was not set, which means the RTC has + * been powered down, so the RTC time values are invalid. + */ + rtc->valid = (val & SPRD_RTC_POWER_RESET_FLAG) ? true : false; + return 0; +} + +static int sprd_rtc_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + struct sprd_rtc *rtc; + int ret; + + rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); + if (!rtc) + return -ENOMEM; + + rtc->regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!rtc->regmap) + return -ENODEV; + + ret = of_property_read_u32(node, "reg", &rtc->base); + if (ret) { + dev_err(&pdev->dev, "failed to get RTC base address\n"); + return ret; + } + + rtc->irq = platform_get_irq(pdev, 0); + if (rtc->irq < 0) { + dev_err(&pdev->dev, "failed to get RTC irq number\n"); + return rtc->irq; + } + + rtc->dev = &pdev->dev; + platform_set_drvdata(pdev, rtc); + + /* clear all RTC interrupts and disable all RTC interrupts */ + ret = sprd_rtc_disable_ints(rtc); + if (ret) { + dev_err(&pdev->dev, "failed to disable RTC interrupts\n"); + return ret; + } + + /* check if RTC time values are valid */ + ret = sprd_rtc_check_power_down(rtc); + if (ret) { + dev_err(&pdev->dev, "failed to check RTC time values\n"); + return ret; + } + + ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL, + sprd_rtc_handler, + IRQF_ONESHOT | IRQF_EARLY_RESUME, + pdev->name, rtc); + if (ret < 0) { + dev_err(&pdev->dev, "failed to request RTC irq\n"); + return ret; + } + + rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, + &sprd_rtc_ops, THIS_MODULE); + if (IS_ERR(rtc->rtc)) + return PTR_ERR(rtc->rtc); + + device_init_wakeup(&pdev->dev, 1); + return 0; +} + +static int sprd_rtc_remove(struct platform_device *pdev) +{ + device_init_wakeup(&pdev->dev, 0); + return 0; +} + +static const struct of_device_id sprd_rtc_of_match[] = { + { .compatible = "sprd,sc2731-rtc", }, + { }, +}; +MODULE_DEVICE_TABLE(of, sprd_rtc_of_match); + +static struct platform_driver sprd_rtc_driver = { + .driver = { + .name = "sprd-rtc", + .of_match_table = sprd_rtc_of_match, + }, + .probe = sprd_rtc_probe, + .remove = sprd_rtc_remove, +}; +module_platform_driver(sprd_rtc_driver); 
+ +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Spreadtrum RTC Device Driver"); +MODULE_AUTHOR("Baolin Wang <[email protected]>"); diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c index e364550eb9a7..92ff2edb86a6 100644 --- a/drivers/rtc/rtc-sysfs.c +++ b/drivers/rtc/rtc-sysfs.c @@ -72,9 +72,10 @@ since_epoch_show(struct device *dev, struct device_attribute *attr, char *buf) retval = rtc_read_time(to_rtc_device(dev), &tm); if (retval == 0) { - unsigned long time; - rtc_tm_to_time(&tm, &time); - retval = sprintf(buf, "%lu\n", time); + time64_t time; + + time = rtc_tm_to_time64(&tm); + retval = sprintf(buf, "%lld\n", time); } return retval; @@ -132,7 +133,7 @@ static ssize_t wakealarm_show(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t retval; - unsigned long alarm; + time64_t alarm; struct rtc_wkalrm alm; /* Don't show disabled alarms. For uniformity, RTC alarms are @@ -145,8 +146,8 @@ wakealarm_show(struct device *dev, struct device_attribute *attr, char *buf) */ retval = rtc_read_alarm(to_rtc_device(dev), &alm); if (retval == 0 && alm.enabled) { - rtc_tm_to_time(&alm.time, &alarm); - retval = sprintf(buf, "%lu\n", alarm); + alarm = rtc_tm_to_time64(&alm.time); + retval = sprintf(buf, "%lld\n", alarm); } return retval; @@ -157,8 +158,8 @@ wakealarm_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { ssize_t retval; - unsigned long now, alarm; - unsigned long push = 0; + time64_t now, alarm; + time64_t push = 0; struct rtc_wkalrm alm; struct rtc_device *rtc = to_rtc_device(dev); const char *buf_ptr; @@ -170,7 +171,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr, retval = rtc_read_time(rtc, &alm.time); if (retval < 0) return retval; - rtc_tm_to_time(&alm.time, &now); + now = rtc_tm_to_time64(&alm.time); buf_ptr = buf; if (*buf_ptr == '+') { @@ -181,7 +182,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr, } else adjust = 1; } - retval = kstrtoul(buf_ptr, 0, &alarm); + retval = kstrtos64(buf_ptr, 0, &alarm); if (retval) return retval; if (adjust) { @@ -197,7 +198,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr, return retval; if (alm.enabled) { if (push) { - rtc_tm_to_time(&alm.time, &push); + push = rtc_tm_to_time64(&alm.time); alarm += push; } else return -EBUSY; @@ -212,7 +213,7 @@ wakealarm_store(struct device *dev, struct device_attribute *attr, */ alarm = now + 300; } - rtc_time_to_tm(alarm, &alm.time); + rtc_time64_to_tm(alarm, &alm.time); retval = rtc_set_alarm(rtc, &alm); return (retval < 0) ? retval : n; diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c index 65b432a096fe..0c34d3b81279 100644 --- a/drivers/rtc/rtc-xgene.c +++ b/drivers/rtc/rtc-xgene.c @@ -52,6 +52,7 @@ struct xgene_rtc_dev { void __iomem *csr_base; struct clk *clk; unsigned int irq_wake; + unsigned int irq_enabled; }; static int xgene_rtc_read_time(struct device *dev, struct rtc_time *tm) @@ -104,15 +105,19 @@ static int xgene_rtc_alarm_irq_enable(struct device *dev, u32 enabled) return 0; } +static int xgene_rtc_alarm_irq_enabled(struct device *dev) +{ + struct xgene_rtc_dev *pdata = dev_get_drvdata(dev); + + return readl(pdata->csr_base + RTC_CCR) & RTC_CCR_IE ? 
1 : 0; +} + static int xgene_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct xgene_rtc_dev *pdata = dev_get_drvdata(dev); - unsigned long rtc_time; unsigned long alarm_time; - rtc_time = readl(pdata->csr_base + RTC_CCVR); rtc_tm_to_time(&alrm->time, &alarm_time); - pdata->alarm_time = alarm_time; writel((u32) pdata->alarm_time, pdata->csr_base + RTC_CMR); @@ -180,12 +185,18 @@ static int xgene_rtc_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Couldn't get the clock for RTC\n"); return -ENODEV; } - clk_prepare_enable(pdata->clk); + ret = clk_prepare_enable(pdata->clk); + if (ret) + return ret; /* Turn on the clock and the crystal */ writel(RTC_CCR_EN, pdata->csr_base + RTC_CCR); - device_init_wakeup(&pdev->dev, 1); + ret = device_init_wakeup(&pdev->dev, 1); + if (ret) { + clk_disable_unprepare(pdata->clk); + return ret; + } pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &xgene_rtc_ops, THIS_MODULE); @@ -210,45 +221,55 @@ static int xgene_rtc_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int xgene_rtc_suspend(struct device *dev) +static int __maybe_unused xgene_rtc_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev); int irq; irq = platform_get_irq(pdev, 0); + + /* + * If this RTC alarm will be used for waking the system up, + * don't disable it of course. Else we just disable the alarm + * and await suspension. + */ if (device_may_wakeup(&pdev->dev)) { if (!enable_irq_wake(irq)) pdata->irq_wake = 1; } else { + pdata->irq_enabled = xgene_rtc_alarm_irq_enabled(dev); xgene_rtc_alarm_irq_enable(dev, 0); - clk_disable(pdata->clk); + clk_disable_unprepare(pdata->clk); } - return 0; } -static int xgene_rtc_resume(struct device *dev) +static int __maybe_unused xgene_rtc_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct xgene_rtc_dev *pdata = platform_get_drvdata(pdev); int irq; + int rc; irq = platform_get_irq(pdev, 0); + if (device_may_wakeup(&pdev->dev)) { if (pdata->irq_wake) { disable_irq_wake(irq); pdata->irq_wake = 0; } } else { - clk_enable(pdata->clk); - xgene_rtc_alarm_irq_enable(dev, 1); + rc = clk_prepare_enable(pdata->clk); + if (rc) { + dev_err(dev, "Unable to enable clock error %d\n", rc); + return rc; + } + xgene_rtc_alarm_irq_enable(dev, pdata->irq_enabled); } return 0; } -#endif static SIMPLE_DEV_PM_OPS(xgene_rtc_pm_ops, xgene_rtc_suspend, xgene_rtc_resume); diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 5b6153f23f01..8e2f767147cb 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1084,24 +1084,35 @@ static int bnx2fc_abts_cleanup(struct bnx2fc_cmd *io_req) { struct bnx2fc_rport *tgt = io_req->tgt; int rc = SUCCESS; + unsigned int time_left; io_req->wait_for_comp = 1; bnx2fc_initiate_cleanup(io_req); spin_unlock_bh(&tgt->tgt_lock); - wait_for_completion(&io_req->tm_done); - + /* + * Can't wait forever on cleanup response lest we let the SCSI error + * handler wait forever + */ + time_left = wait_for_completion_timeout(&io_req->tm_done, + BNX2FC_FW_TIMEOUT); io_req->wait_for_comp = 0; + if (!time_left) + BNX2FC_IO_DBG(io_req, "%s(): Wait for cleanup timed out.\n", + __func__); + /* - * release the reference taken in eh_abort to allow the - * target to re-login after flushing IOs + * Release reference held by SCSI command the cleanup completion + * hits the BNX2FC_CLEANUP case in 
bnx2fc_process_cq_compl() and + * thus the SCSI command is not returnedi by bnx2fc_scsi_done(). */ kref_put(&io_req->refcount, bnx2fc_cmd_release); spin_lock_bh(&tgt->tgt_lock); return rc; } + /** * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding * SCSI command @@ -1118,6 +1129,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) struct fc_lport *lport; struct bnx2fc_rport *tgt; int rc; + unsigned int time_left; rc = fc_block_scsi_eh(sc_cmd); if (rc) @@ -1194,6 +1206,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) if (cancel_delayed_work(&io_req->timeout_work)) kref_put(&io_req->refcount, bnx2fc_cmd_release); /* drop timer hold */ + /* + * We don't want to hold off the upper layer timer so simply + * cleanup the command and return that I/O was successfully + * aborted. + */ rc = bnx2fc_abts_cleanup(io_req); /* This only occurs when an task abort was requested while ABTS is in progress. Setting the IO_CLEANUP flag will skip the @@ -1201,7 +1218,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) was a result from the ABTS request rather than the CLEANUP request */ set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags); - goto out; + goto done; } /* Cancel the current timer running on this io_req */ @@ -1221,7 +1238,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) } spin_unlock_bh(&tgt->tgt_lock); - wait_for_completion(&io_req->tm_done); + /* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */ + time_left = wait_for_completion_timeout(&io_req->tm_done, + (2 * rp->r_a_tov + 1) * HZ); + if (time_left) + BNX2FC_IO_DBG(io_req, "Timed out in eh_abort waiting for tm_done"); spin_lock_bh(&tgt->tgt_lock); io_req->wait_for_comp = 0; @@ -1233,8 +1254,12 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) /* Let the scsi-ml try to recover this command */ printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", io_req->xid); + /* + * Cleanup firmware residuals before returning control back + * to SCSI ML. + */ rc = bnx2fc_abts_cleanup(io_req); - goto out; + goto done; } else { /* * We come here even when there was a race condition @@ -1249,7 +1274,6 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) done: /* release the reference taken in eh_abort */ kref_put(&io_req->refcount, bnx2fc_cmd_release); -out: spin_unlock_bh(&tgt->tgt_lock); return rc; } diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index fe5a9ea27b5e..78d4aa8df675 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -22,7 +22,7 @@ struct scsi_dev_info_list { struct list_head dev_info_list; char vendor[8]; char model[16]; - unsigned flags; + blist_flags_t flags; unsigned compatible; /* for use with scsi_static_device_list entries */ }; @@ -35,7 +35,7 @@ struct scsi_dev_info_list_table { static const char spaces[] = " "; /* 16 of them */ -static unsigned scsi_default_dev_flags; +static blist_flags_t scsi_default_dev_flags; static LIST_HEAD(scsi_dev_info_list); static char scsi_dev_flags[256]; @@ -52,7 +52,7 @@ static struct { char *vendor; char *model; char *revision; /* revision known to be bad, unused */ - unsigned flags; + blist_flags_t flags; } scsi_static_device_list[] __initdata = { /* * The following devices are known not to tolerate a lun != 0 scan @@ -335,7 +335,7 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length, * Returns: 0 OK, -error on failure. 
**/ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model, - char *strflags, int flags) + char *strflags, blist_flags_t flags) { return scsi_dev_info_list_add_keyed(compatible, vendor, model, strflags, flags, @@ -361,7 +361,7 @@ static int scsi_dev_info_list_add(int compatible, char *vendor, char *model, * Returns: 0 OK, -error on failure. **/ int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model, - char *strflags, int flags, int key) + char *strflags, blist_flags_t flags, int key) { struct scsi_dev_info_list *devinfo; struct scsi_dev_info_list_table *devinfo_table = @@ -571,9 +571,9 @@ static int scsi_dev_info_list_add_str(char *dev_list) * matching flags value, else return the host or global default * settings. Called during scan time. **/ -int scsi_get_device_flags(struct scsi_device *sdev, - const unsigned char *vendor, - const unsigned char *model) +blist_flags_t scsi_get_device_flags(struct scsi_device *sdev, + const unsigned char *vendor, + const unsigned char *model) { return scsi_get_device_flags_keyed(sdev, vendor, model, SCSI_DEVINFO_GLOBAL); @@ -593,7 +593,7 @@ int scsi_get_device_flags(struct scsi_device *sdev, * flags value, else return the host or global default settings. * Called during scan time. **/ -int scsi_get_device_flags_keyed(struct scsi_device *sdev, +blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev, const unsigned char *vendor, const unsigned char *model, int key) diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index df1368aea9a3..a5946cd64caa 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -50,15 +50,16 @@ enum { SCSI_DEVINFO_SPI, }; -extern int scsi_get_device_flags(struct scsi_device *sdev, - const unsigned char *vendor, - const unsigned char *model); -extern int scsi_get_device_flags_keyed(struct scsi_device *sdev, - const unsigned char *vendor, - const unsigned char *model, int key); +extern blist_flags_t scsi_get_device_flags(struct scsi_device *sdev, + const unsigned char *vendor, + const unsigned char *model); +extern blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev, + const unsigned char *vendor, + const unsigned char *model, + int key); extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model, char *strflags, - int flags, int key); + blist_flags_t flags, int key); extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key); extern int scsi_dev_info_add_list(int key, const char *name); extern int scsi_dev_info_remove_list(int key); diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index a0f2a20ea9e9..be5e919db0e8 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -566,7 +566,7 @@ EXPORT_SYMBOL(scsi_sanitize_inquiry_string); * are copied to the scsi_device any flags value is stored in *@bflags. **/ static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result, - int result_len, int *bflags) + int result_len, blist_flags_t *bflags) { unsigned char scsi_cmd[MAX_COMMAND_SIZE]; int first_inquiry_len, try_inquiry_len, next_inquiry_len; diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index 5e58f5ec0a28..2f615b7f1c9f 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -905,16 +905,6 @@ config FB_LEO This is the frame buffer device driver for the SBUS-based Sun ZX (leo) frame buffer cards. 
-config FB_IGA - bool "IGA 168x display support" - depends on (FB = y) && SPARC32 - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT - help - This is the framebuffer device for the INTERGRAPHICS 1680 and - successor frame buffer cards. - config FB_XVR500 bool "Sun XVR-500 3DLABS Wildcat support" depends on (FB = y) && PCI && SPARC64 diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile index 8895536a20d6..115961e0721b 100644 --- a/drivers/video/fbdev/Makefile +++ b/drivers/video/fbdev/Makefile @@ -65,7 +65,6 @@ obj-$(CONFIG_FB_HGA) += hgafb.o obj-$(CONFIG_FB_XVR500) += sunxvr500.o obj-$(CONFIG_FB_XVR2500) += sunxvr2500.o obj-$(CONFIG_FB_XVR1000) += sunxvr1000.o -obj-$(CONFIG_FB_IGA) += igafb.o obj-$(CONFIG_FB_APOLLO) += dnfb.o obj-$(CONFIG_FB_Q40) += q40fb.o obj-$(CONFIG_FB_TGA) += tgafb.o diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index 3ec72f19114b..a9a8272f7a6e 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -2272,10 +2272,10 @@ static void aty_bl_exit(struct backlight_device *bd) static void aty_calc_mem_refresh(struct atyfb_par *par, int xclk) { - const int ragepro_tbl[] = { + static const int ragepro_tbl[] = { 44, 50, 55, 66, 75, 80, 100 }; - const int ragexl_tbl[] = { + static const int ragexl_tbl[] = { 50, 66, 75, 83, 90, 95, 100, 105, 110, 115, 120, 125, 133, 143, 166 }; diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c index 1e2ec360f8c1..4d77daeecf99 100644 --- a/drivers/video/fbdev/aty/radeon_base.c +++ b/drivers/video/fbdev/aty/radeon_base.c @@ -1454,9 +1454,9 @@ static void radeon_write_pll_regs(struct radeonfb_info *rinfo, struct radeon_reg /* * Timer function for delayed LVDS panel power up/down */ -static void radeon_lvds_timer_func(unsigned long data) +static void radeon_lvds_timer_func(struct timer_list *t) { - struct radeonfb_info *rinfo = (struct radeonfb_info *)data; + struct radeonfb_info *rinfo = from_timer(rinfo, t, lvds_timer); radeon_engine_idle(); @@ -1534,7 +1534,7 @@ void radeon_write_mode (struct radeonfb_info *rinfo, struct radeon_regs *mode, static void radeon_calc_pll_regs(struct radeonfb_info *rinfo, struct radeon_regs *regs, unsigned long freq) { - const struct { + static const struct { int divider; int bitvalue; } *post_div, @@ -2291,9 +2291,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev, rinfo->pdev = pdev; spin_lock_init(&rinfo->reg_lock); - init_timer(&rinfo->lvds_timer); - rinfo->lvds_timer.function = radeon_lvds_timer_func; - rinfo->lvds_timer.data = (unsigned long)rinfo; + timer_setup(&rinfo->lvds_timer, radeon_lvds_timer_func, 0); c1 = ent->device >> 8; c2 = ent->device & 0xff; diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c index f7c253dd5899..7137c12cbcee 100644 --- a/drivers/video/fbdev/aty/radeon_pm.c +++ b/drivers/video/fbdev/aty/radeon_pm.c @@ -1208,9 +1208,11 @@ static void radeon_pm_enable_dll_m10(struct radeonfb_info *rinfo) case 1: if (mc & 0x4) break; + /* fall through */ case 2: dll_sleep_mask |= MDLL_R300_RDCK__MRDCKB_SLEEP; dll_reset_mask |= MDLL_R300_RDCK__MRDCKB_RESET; + /* fall through */ case 0: dll_sleep_mask |= MDLL_R300_RDCK__MRDCKA_SLEEP; dll_reset_mask |= MDLL_R300_RDCK__MRDCKA_RESET; @@ -1219,6 +1221,7 @@ static void radeon_pm_enable_dll_m10(struct radeonfb_info *rinfo) case 1: if (!(mc & 0x4)) break; + /* fall through */ case 2: dll_sleep_mask |= MDLL_R300_RDCK__MRDCKD_SLEEP; dll_reset_mask |= 
MDLL_R300_RDCK__MRDCKD_RESET; diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c index 5f04b4096c42..87d5a62bf6ca 100644 --- a/drivers/video/fbdev/au1200fb.c +++ b/drivers/video/fbdev/au1200fb.c @@ -1518,7 +1518,7 @@ static irqreturn_t au1200fb_handle_irq(int irq, void* dev_id) static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev) { struct fb_info *fbi = fbdev->fb_info; - int bpp; + int bpp, ret; fbi->fbops = &au1200fb_fb_ops; @@ -1546,15 +1546,14 @@ static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev) } fbi->pseudo_palette = kcalloc(16, sizeof(u32), GFP_KERNEL); - if (!fbi->pseudo_palette) { + if (!fbi->pseudo_palette) return -ENOMEM; - } - if (fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0) < 0) { + ret = fb_alloc_cmap(&fbi->cmap, AU1200_LCD_NBR_PALETTE_ENTRIES, 0); + if (ret < 0) { print_err("Fail to allocate colormap (%d entries)", - AU1200_LCD_NBR_PALETTE_ENTRIES); - kfree(fbi->pseudo_palette); - return -EFAULT; + AU1200_LCD_NBR_PALETTE_ENTRIES); + return ret; } strncpy(fbi->fix.id, "AU1200", sizeof(fbi->fix.id)); @@ -1668,10 +1667,6 @@ static int au1200fb_drv_probe(struct platform_device *dev) printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name); printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name); - /* shut gcc up */ - ret = 0; - fbdev = NULL; - for (plane = 0; plane < device_count; ++plane) { bpp = winbpp(win->w[plane].mode_winctrl1); if (win->w[plane].xres == 0) @@ -1681,8 +1676,10 @@ static int au1200fb_drv_probe(struct platform_device *dev) fbi = framebuffer_alloc(sizeof(struct au1200fb_device), &dev->dev); - if (!fbi) + if (!fbi) { + ret = -ENOMEM; goto failed; + } _au1200fb_infos[plane] = fbi; fbdev = fbi->par; @@ -1701,7 +1698,8 @@ static int au1200fb_drv_probe(struct platform_device *dev) if (!fbdev->fb_mem) { print_err("fail to allocate frambuffer (size: %dK))", fbdev->fb_len / 1024); - return -ENOMEM; + ret = -ENOMEM; + goto failed; } /* @@ -1718,7 +1716,8 @@ static int au1200fb_drv_probe(struct platform_device *dev) print_dbg("phys=0x%08x, size=%dK", fbdev->fb_phys, fbdev->fb_len / 1024); /* Init FB data */ - if ((ret = au1200fb_init_fbinfo(fbdev)) < 0) + ret = au1200fb_init_fbinfo(fbdev); + if (ret < 0) goto failed; /* Register new framebuffer */ @@ -1758,21 +1757,26 @@ static int au1200fb_drv_probe(struct platform_device *dev) return 0; failed: - /* NOTE: This only does the current plane/window that failed; others are still active */ - if (fbi) { + for (plane = 0; plane < device_count; ++plane) { + fbi = _au1200fb_infos[plane]; + if (!fbi) + break; + + /* Clean up all probe data */ + unregister_framebuffer(fbi); if (fbi->cmap.len != 0) fb_dealloc_cmap(&fbi->cmap); kfree(fbi->pseudo_palette); + + framebuffer_release(fbi); + _au1200fb_infos[plane] = NULL; } - if (plane == 0) - free_irq(AU1200_LCD_INT, (void*)dev); return ret; } static int au1200fb_drv_remove(struct platform_device *dev) { struct au1200fb_platdata *pd = platform_get_drvdata(dev); - struct au1200fb_device *fbdev; struct fb_info *fbi; int plane; @@ -1781,7 +1785,6 @@ static int au1200fb_drv_remove(struct platform_device *dev) for (plane = 0; plane < device_count; ++plane) { fbi = _au1200fb_infos[plane]; - fbdev = fbi->par; /* Clean up all probe data */ unregister_framebuffer(fbi); diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c index d992aa5eb3f0..b3be06dd2908 100644 --- a/drivers/video/fbdev/cirrusfb.c +++ b/drivers/video/fbdev/cirrusfb.c @@ -1477,10 +1477,12 @@ static void init_vgachip(struct fb_info 
*info) mdelay(100); /* mode */ vga_wgfx(cinfo->regbase, CL_GR31, 0x00); - case BT_GD5480: /* fall through */ + /* fall through */ + case BT_GD5480: /* from Klaus' NetBSD driver: */ vga_wgfx(cinfo->regbase, CL_GR2F, 0x00); - case BT_ALPINE: /* fall through */ + /* fall through */ + case BT_ALPINE: /* put blitter into 542x compat */ vga_wgfx(cinfo->regbase, CL_GR33, 0x00); break; diff --git a/drivers/video/fbdev/controlfb.h b/drivers/video/fbdev/controlfb.h index 6026c60fc100..261522fabdac 100644 --- a/drivers/video/fbdev/controlfb.h +++ b/drivers/video/fbdev/controlfb.h @@ -141,5 +141,7 @@ static struct max_cmodes control_mac_modes[] = { {{ 1, 2}}, /* 1152x870, 75Hz */ {{ 0, 1}}, /* 1280x960, 75Hz */ {{ 0, 1}}, /* 1280x1024, 75Hz */ + {{ 1, 2}}, /* 1152x768, 60Hz */ + {{ 0, 1}}, /* 1600x1024, 60Hz */ }; diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 04612f938bab..929ca472c524 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -395,10 +395,10 @@ static void fb_flashcursor(struct work_struct *work) console_unlock(); } -static void cursor_timer_handler(unsigned long dev_addr) +static void cursor_timer_handler(struct timer_list *t) { - struct fb_info *info = (struct fb_info *) dev_addr; - struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_ops *ops = from_timer(ops, t, cursor_timer); + struct fb_info *info = ops->info; queue_work(system_power_efficient_wq, &info->queue); mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies); @@ -414,8 +414,7 @@ static void fbcon_add_cursor_timer(struct fb_info *info) if (!info->queue.func) INIT_WORK(&info->queue, fb_flashcursor); - setup_timer(&ops->cursor_timer, cursor_timer_handler, - (unsigned long) info); + timer_setup(&ops->cursor_timer, cursor_timer_handler, 0); mod_timer(&ops->cursor_timer, jiffies + ops->cur_blink_jiffies); ops->flags |= FBCON_FLAGS_CURSOR_TIMER; } @@ -714,6 +713,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info, if (!err) { ops->cur_blink_jiffies = HZ / 5; + ops->info = info; info->fbcon_par = ops; if (vc) @@ -962,6 +962,7 @@ static const char *fbcon_startup(void) ops->graphics = 1; ops->cur_rotate = -1; ops->cur_blink_jiffies = HZ / 5; + ops->info = info; info->fbcon_par = ops; if (initial_rotation != -1) p->con_rotate = initial_rotation; diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index 18f3ac144237..9f7744fbc962 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -69,6 +69,7 @@ struct fbcon_ops { struct timer_list cursor_timer; /* Cursor timer */ struct fb_cursor cursor_state; struct display *p; + struct fb_info *info; int currcon; /* Current VC. 
*/ int cur_blink_jiffies; int cursor_flash; diff --git a/drivers/video/fbdev/dnfb.c b/drivers/video/fbdev/dnfb.c index 7b1492d34e98..5505fa00c634 100644 --- a/drivers/video/fbdev/dnfb.c +++ b/drivers/video/fbdev/dnfb.c @@ -115,7 +115,7 @@ static struct fb_ops dn_fb_ops = { .fb_imageblit = cfb_imageblit, }; -struct fb_var_screeninfo dnfb_var = { +static const struct fb_var_screeninfo dnfb_var = { .xres = 1280, .yres = 1024, .xres_virtual = 2048, @@ -242,16 +242,13 @@ static int dnfb_probe(struct platform_device *dev) info->screen_base = (u_char *) info->fix.smem_start; err = fb_alloc_cmap(&info->cmap, 2, 0); - if (err < 0) { - framebuffer_release(info); - return err; - } + if (err < 0) + goto release_framebuffer; err = register_framebuffer(info); if (err < 0) { fb_dealloc_cmap(&info->cmap); - framebuffer_release(info); - return err; + goto release_framebuffer; } platform_set_drvdata(dev, info); @@ -265,6 +262,10 @@ static int dnfb_probe(struct platform_device *dev) printk("apollo frame buffer alive and kicking !\n"); return err; + +release_framebuffer: + framebuffer_release(info); + return err; } static struct platform_driver dnfb_driver = { diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c index 7f6c9e6cfc6c..3b70044773b6 100644 --- a/drivers/video/fbdev/goldfishfb.c +++ b/drivers/video/fbdev/goldfishfb.c @@ -304,12 +304,18 @@ static int goldfish_fb_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id goldfish_fb_of_match[] = { + { .compatible = "google,goldfish-fb", }, + {}, +}; +MODULE_DEVICE_TABLE(of, goldfish_fb_of_match); static struct platform_driver goldfish_fb_driver = { .probe = goldfish_fb_probe, .remove = goldfish_fb_remove, .driver = { - .name = "goldfish_fb" + .name = "goldfish_fb", + .of_match_table = goldfish_fb_of_match, } }; diff --git a/drivers/video/fbdev/igafb.c b/drivers/video/fbdev/igafb.c deleted file mode 100644 index 486f18897414..000000000000 --- a/drivers/video/fbdev/igafb.c +++ /dev/null @@ -1,579 +0,0 @@ -/* - * linux/drivers/video/igafb.c -- Frame buffer device for IGA 1682 - * - * Copyright (C) 1998 Vladimir Roganov and Gleb Raiko - * - * This driver is partly based on the Frame buffer device for ATI Mach64 - * and partially on VESA-related code. - * - * Copyright (C) 1997-1998 Geert Uytterhoeven - * Copyright (C) 1998 Bernd Harries - * Copyright (C) 1998 Eddie C. Dost ([email protected]) - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file COPYING in the main directory of this archive for - * more details. - */ - -/****************************************************************************** - - TODO: - Despite of IGA Card has advanced graphic acceleration, - initial version is almost dummy and does not support it. - Support for video modes and acceleration must be added - together with accelerated X-Windows driver implementation. - - Most important thing at this moment is that we have working - JavaEngine1 console & X with new console interface. 
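/*
 * A minimal sketch of the device-tree binding pattern the goldfishfb hunk
 * above adds: a const of_device_id table plus MODULE_DEVICE_TABLE() so the
 * platform driver can be matched by "compatible" and autoloaded.  The
 * "vendor,example-fb" string and the exfb_* names are illustrative
 * placeholders, not part of the patch.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int exfb_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static int exfb_remove(struct platform_device *pdev)
{
	return 0;
}

static const struct of_device_id exfb_of_match[] = {
	{ .compatible = "vendor,example-fb", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, exfb_of_match);

static struct platform_driver exfb_driver = {
	.probe	= exfb_probe,
	.remove	= exfb_remove,
	.driver	= {
		.name		= "example_fb",
		.of_match_table	= exfb_of_match,
	},
};
module_platform_driver(exfb_driver);
MODULE_LICENSE("GPL");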
- -******************************************************************************/ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/string.h> -#include <linux/mm.h> -#include <linux/slab.h> -#include <linux/vmalloc.h> -#include <linux/delay.h> -#include <linux/interrupt.h> -#include <linux/fb.h> -#include <linux/init.h> -#include <linux/pci.h> -#include <linux/nvram.h> - -#include <asm/io.h> - -#ifdef CONFIG_SPARC -#include <asm/prom.h> -#include <asm/pcic.h> -#endif - -#include <video/iga.h> - -struct pci_mmap_map { - unsigned long voff; - unsigned long poff; - unsigned long size; - unsigned long prot_flag; - unsigned long prot_mask; -}; - -struct iga_par { - struct pci_mmap_map *mmap_map; - unsigned long frame_buffer_phys; - unsigned long io_base; -}; - -struct fb_info fb_info; - -struct fb_fix_screeninfo igafb_fix __initdata = { - .id = "IGA 1682", - .type = FB_TYPE_PACKED_PIXELS, - .mmio_len = 1000 -}; - -struct fb_var_screeninfo default_var = { - /* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */ - .xres = 640, - .yres = 480, - .xres_virtual = 640, - .yres_virtual = 480, - .bits_per_pixel = 8, - .red = {0, 8, 0 }, - .green = {0, 8, 0 }, - .blue = {0, 8, 0 }, - .height = -1, - .width = -1, - .accel_flags = FB_ACCEL_NONE, - .pixclock = 39722, - .left_margin = 48, - .right_margin = 16, - .upper_margin = 33, - .lower_margin = 10, - .hsync_len = 96, - .vsync_len = 2, - .vmode = FB_VMODE_NONINTERLACED -}; - -#ifdef CONFIG_SPARC -struct fb_var_screeninfo default_var_1024x768 __initdata = { - /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */ - .xres = 1024, - .yres = 768, - .xres_virtual = 1024, - .yres_virtual = 768, - .bits_per_pixel = 8, - .red = {0, 8, 0 }, - .green = {0, 8, 0 }, - .blue = {0, 8, 0 }, - .height = -1, - .width = -1, - .accel_flags = FB_ACCEL_NONE, - .pixclock = 12699, - .left_margin = 176, - .right_margin = 16, - .upper_margin = 28, - .lower_margin = 1, - .hsync_len = 96, - .vsync_len = 3, - .vmode = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED -}; - -struct fb_var_screeninfo default_var_1152x900 __initdata = { - /* 1152x900, 76 Hz, Non-Interlaced (110.0 MHz dotclock) */ - .xres = 1152, - .yres = 900, - .xres_virtual = 1152, - .yres_virtual = 900, - .bits_per_pixel = 8, - .red = { 0, 8, 0 }, - .green = { 0, 8, 0 }, - .blue = { 0, 8, 0 }, - .height = -1, - .width = -1, - .accel_flags = FB_ACCEL_NONE, - .pixclock = 9091, - .left_margin = 234, - .right_margin = 24, - .upper_margin = 34, - .lower_margin = 3, - .hsync_len = 100, - .vsync_len = 3, - .vmode = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED -}; - -struct fb_var_screeninfo default_var_1280x1024 __initdata = { - /* 1280x1024, 75 Hz, Non-Interlaced (135.00 MHz dotclock) */ - .xres = 1280, - .yres = 1024, - .xres_virtual = 1280, - .yres_virtual = 1024, - .bits_per_pixel = 8, - .red = {0, 8, 0 }, - .green = {0, 8, 0 }, - .blue = {0, 8, 0 }, - .height = -1, - .width = -1, - .accel_flags = 0, - .pixclock = 7408, - .left_margin = 248, - .right_margin = 16, - .upper_margin = 38, - .lower_margin = 1, - .hsync_len = 144, - .vsync_len = 3, - .vmode = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED -}; - -/* - * Memory-mapped I/O functions for Sparc PCI - * - * On sparc we happen to access I/O with memory mapped functions too. 
- */ -#define pci_inb(par, reg) readb(par->io_base+(reg)) -#define pci_outb(par, val, reg) writeb(val, par->io_base+(reg)) - -static inline unsigned int iga_inb(struct iga_par *par, unsigned int reg, - unsigned int idx) -{ - pci_outb(par, idx, reg); - return pci_inb(par, reg + 1); -} - -static inline void iga_outb(struct iga_par *par, unsigned char val, - unsigned int reg, unsigned int idx ) -{ - pci_outb(par, idx, reg); - pci_outb(par, val, reg+1); -} - -#endif /* CONFIG_SPARC */ - -/* - * Very important functionality for the JavaEngine1 computer: - * make screen border black (usign special IGA registers) - */ -static void iga_blank_border(struct iga_par *par) -{ - int i; -#if 0 - /* - * PROM does this for us, so keep this code as a reminder - * about required read from 0x3DA and writing of 0x20 in the end. - */ - (void) pci_inb(par, 0x3DA); /* required for every access */ - pci_outb(par, IGA_IDX_VGA_OVERSCAN, IGA_ATTR_CTL); - (void) pci_inb(par, IGA_ATTR_CTL+1); - pci_outb(par, 0x38, IGA_ATTR_CTL); - pci_outb(par, 0x20, IGA_ATTR_CTL); /* re-enable visual */ -#endif - /* - * This does not work as it was designed because the overscan - * color is looked up in the palette. Therefore, under X11 - * overscan changes color. - */ - for (i=0; i < 3; i++) - iga_outb(par, 0, IGA_EXT_CNTRL, IGA_IDX_OVERSCAN_COLOR + i); -} - -#ifdef CONFIG_SPARC -static int igafb_mmap(struct fb_info *info, - struct vm_area_struct *vma) -{ - struct iga_par *par = (struct iga_par *)info->par; - unsigned int size, page, map_size = 0; - unsigned long map_offset = 0; - int i; - - if (!par->mmap_map) - return -ENXIO; - - size = vma->vm_end - vma->vm_start; - - /* Each page, see which map applies */ - for (page = 0; page < size; ) { - map_size = 0; - for (i = 0; par->mmap_map[i].size; i++) { - unsigned long start = par->mmap_map[i].voff; - unsigned long end = start + par->mmap_map[i].size; - unsigned long offset = (vma->vm_pgoff << PAGE_SHIFT) + page; - - if (start > offset) - continue; - if (offset >= end) - continue; - - map_size = par->mmap_map[i].size - (offset - start); - map_offset = par->mmap_map[i].poff + (offset - start); - break; - } - if (!map_size) { - page += PAGE_SIZE; - continue; - } - if (page + map_size > size) - map_size = size - page; - - pgprot_val(vma->vm_page_prot) &= ~(par->mmap_map[i].prot_mask); - pgprot_val(vma->vm_page_prot) |= par->mmap_map[i].prot_flag; - - if (remap_pfn_range(vma, vma->vm_start + page, - map_offset >> PAGE_SHIFT, map_size, vma->vm_page_prot)) - return -EAGAIN; - - page += map_size; - } - - if (!map_size) - return -EINVAL; - - vma->vm_flags |= VM_IO; - return 0; -} -#endif /* CONFIG_SPARC */ - -static int igafb_setcolreg(unsigned regno, unsigned red, unsigned green, - unsigned blue, unsigned transp, - struct fb_info *info) -{ - /* - * Set a single color register. The values supplied are - * already rounded down to the hardware's capabilities - * (according to the entries in the `var' structure). Return - * != 0 for invalid regno. 
- */ - struct iga_par *par = (struct iga_par *)info->par; - - if (regno >= info->cmap.len) - return 1; - - pci_outb(par, regno, DAC_W_INDEX); - pci_outb(par, red, DAC_DATA); - pci_outb(par, green, DAC_DATA); - pci_outb(par, blue, DAC_DATA); - - if (regno < 16) { - switch (info->var.bits_per_pixel) { - case 16: - ((u16*)(info->pseudo_palette))[regno] = - (regno << 10) | (regno << 5) | regno; - break; - case 24: - ((u32*)(info->pseudo_palette))[regno] = - (regno << 16) | (regno << 8) | regno; - break; - case 32: - { int i; - i = (regno << 8) | regno; - ((u32*)(info->pseudo_palette))[regno] = (i << 16) | i; - } - break; - } - } - return 0; -} - -/* - * Framebuffer option structure - */ -static struct fb_ops igafb_ops = { - .owner = THIS_MODULE, - .fb_setcolreg = igafb_setcolreg, - .fb_fillrect = cfb_fillrect, - .fb_copyarea = cfb_copyarea, - .fb_imageblit = cfb_imageblit, -#ifdef CONFIG_SPARC - .fb_mmap = igafb_mmap, -#endif -}; - -static int __init iga_init(struct fb_info *info, struct iga_par *par) -{ - char vramsz = iga_inb(par, IGA_EXT_CNTRL, IGA_IDX_EXT_BUS_CNTL) - & MEM_SIZE_ALIAS; - int video_cmap_len; - - switch (vramsz) { - case MEM_SIZE_1M: - info->fix.smem_len = 0x100000; - break; - case MEM_SIZE_2M: - info->fix.smem_len = 0x200000; - break; - case MEM_SIZE_4M: - case MEM_SIZE_RESERVED: - info->fix.smem_len = 0x400000; - break; - } - - if (info->var.bits_per_pixel > 8) - video_cmap_len = 16; - else - video_cmap_len = 256; - - info->fbops = &igafb_ops; - info->flags = FBINFO_DEFAULT; - - fb_alloc_cmap(&info->cmap, video_cmap_len, 0); - - if (register_framebuffer(info) < 0) - return 0; - - fb_info(info, "%s frame buffer device at 0x%08lx [%dMB VRAM]\n", - info->fix.id, par->frame_buffer_phys, info->fix.smem_len >> 20); - - iga_blank_border(par); - return 1; -} - -static int __init igafb_init(void) -{ - struct fb_info *info; - struct pci_dev *pdev; - struct iga_par *par; - unsigned long addr; - int size, iga2000 = 0; - - if (fb_get_options("igafb", NULL)) - return -ENODEV; - - pdev = pci_get_device(PCI_VENDOR_ID_INTERG, - PCI_DEVICE_ID_INTERG_1682, 0); - if (pdev == NULL) { - /* - * XXX We tried to use cyber2000fb.c for IGS 2000. - * But it does not initialize the chip in JavaStation-E, alas. - */ - pdev = pci_get_device(PCI_VENDOR_ID_INTERG, 0x2000, 0); - if(pdev == NULL) { - return -ENXIO; - } - iga2000 = 1; - } - /* We leak a reference here but as it cannot be unloaded this is - fine. If you write unload code remember to free it in unload */ - - size = sizeof(struct iga_par) + sizeof(u32)*16; - - info = framebuffer_alloc(size, &pdev->dev); - if (!info) { - printk("igafb_init: can't alloc fb_info\n"); - pci_dev_put(pdev); - return -ENOMEM; - } - - par = info->par; - - if ((addr = pdev->resource[0].start) == 0) { - printk("igafb_init: no memory start\n"); - kfree(info); - pci_dev_put(pdev); - return -ENXIO; - } - - if ((info->screen_base = ioremap(addr, 1024*1024*2)) == 0) { - printk("igafb_init: can't remap %lx[2M]\n", addr); - kfree(info); - pci_dev_put(pdev); - return -ENXIO; - } - - par->frame_buffer_phys = addr & PCI_BASE_ADDRESS_MEM_MASK; - -#ifdef CONFIG_SPARC - /* - * The following is sparc specific and this is why: - * - * IGS2000 has its I/O memory mapped and we want - * to generate memory cycles on PCI, e.g. do ioremap(), - * then readb/writeb() as in Documentation/io-mapping.txt. - * - * IGS1682 is more traditional, it responds to PCI I/O - * cycles, so we want to access it with inb()/outb(). 
- * - * On sparc, PCIC converts CPU memory access within - * phys window 0x3000xxxx into PCI I/O cycles. Therefore - * we may use readb/writeb to access them with IGS1682. - * - * We do not take io_base_phys from resource[n].start - * on IGS1682 because that chip is BROKEN. It does not - * have a base register for I/O. We just "know" what its - * I/O addresses are. - */ - if (iga2000) { - igafb_fix.mmio_start = par->frame_buffer_phys | 0x00800000; - } else { - igafb_fix.mmio_start = 0x30000000; /* XXX */ - } - if ((par->io_base = (int) ioremap(igafb_fix.mmio_start, igafb_fix.smem_len)) == 0) { - printk("igafb_init: can't remap %lx[4K]\n", igafb_fix.mmio_start); - iounmap((void *)info->screen_base); - kfree(info); - pci_dev_put(pdev); - return -ENXIO; - } - - /* - * Figure mmap addresses from PCI config space. - * We need two regions: for video memory and for I/O ports. - * Later one can add region for video coprocessor registers. - * However, mmap routine loops until size != 0, so we put - * one additional region with size == 0. - */ - - par->mmap_map = kzalloc(4 * sizeof(*par->mmap_map), GFP_ATOMIC); - if (!par->mmap_map) { - printk("igafb_init: can't alloc mmap_map\n"); - iounmap((void *)par->io_base); - iounmap(info->screen_base); - kfree(info); - pci_dev_put(pdev); - return -ENOMEM; - } - - /* - * Set default vmode and cmode from PROM properties. - */ - { - struct device_node *dp = pci_device_to_OF_node(pdev); - int node = dp->node; - int width = prom_getintdefault(node, "width", 1024); - int height = prom_getintdefault(node, "height", 768); - int depth = prom_getintdefault(node, "depth", 8); - switch (width) { - case 1024: - if (height == 768) - default_var = default_var_1024x768; - break; - case 1152: - if (height == 900) - default_var = default_var_1152x900; - break; - case 1280: - if (height == 1024) - default_var = default_var_1280x1024; - break; - default: - break; - } - - switch (depth) { - case 8: - default_var.bits_per_pixel = 8; - break; - case 16: - default_var.bits_per_pixel = 16; - break; - case 24: - default_var.bits_per_pixel = 24; - break; - case 32: - default_var.bits_per_pixel = 32; - break; - default: - break; - } - } - -#endif - igafb_fix.smem_start = (unsigned long) info->screen_base; - igafb_fix.line_length = default_var.xres*(default_var.bits_per_pixel/8); - igafb_fix.visual = default_var.bits_per_pixel <= 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; - - info->var = default_var; - info->fix = igafb_fix; - info->pseudo_palette = (void *)(par + 1); - - if (!iga_init(info, par)) { - iounmap((void *)par->io_base); - iounmap(info->screen_base); - kfree(par->mmap_map); - kfree(info); - return -ENODEV; - } - -#ifdef CONFIG_SPARC - /* - * Add /dev/fb mmap values. 
- */ - - /* First region is for video memory */ - par->mmap_map[0].voff = 0x0; - par->mmap_map[0].poff = par->frame_buffer_phys & PAGE_MASK; - par->mmap_map[0].size = info->fix.smem_len & PAGE_MASK; - par->mmap_map[0].prot_mask = SRMMU_CACHE; - par->mmap_map[0].prot_flag = SRMMU_WRITE; - - /* Second region is for I/O ports */ - par->mmap_map[1].voff = par->frame_buffer_phys & PAGE_MASK; - par->mmap_map[1].poff = info->fix.smem_start & PAGE_MASK; - par->mmap_map[1].size = PAGE_SIZE * 2; /* X wants 2 pages */ - par->mmap_map[1].prot_mask = SRMMU_CACHE; - par->mmap_map[1].prot_flag = SRMMU_WRITE; -#endif /* CONFIG_SPARC */ - - return 0; -} - -static int __init igafb_setup(char *options) -{ - char *this_opt; - - if (!options || !*options) - return 0; - - while ((this_opt = strsep(&options, ",")) != NULL) { - } - return 0; -} - -module_init(igafb_init); -MODULE_LICENSE("GPL"); -static struct pci_device_id igafb_pci_tbl[] = { - { PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_1682, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, - { } -}; - -MODULE_DEVICE_TABLE(pci, igafb_pci_tbl); diff --git a/drivers/video/fbdev/intelfb/intelfbhw.c b/drivers/video/fbdev/intelfb/intelfbhw.c index d31ed4e2c46f..83fec573cceb 100644 --- a/drivers/video/fbdev/intelfb/intelfbhw.c +++ b/drivers/video/fbdev/intelfb/intelfbhw.c @@ -937,15 +937,11 @@ static int calc_pll_params(int index, int clock, u32 *retm1, u32 *retm2, { u32 m1, m2, n, p1, p2, n1, testm; u32 f_vco, p, p_best = 0, m, f_out = 0; - u32 err_max, err_target, err_best = 10000000; - u32 n_best = 0, m_best = 0, f_best, f_err; + u32 err_best = 10000000; + u32 n_best = 0, m_best = 0, f_err; u32 p_min, p_max, p_inc, div_max; struct pll_min_max *pll = &plls[index]; - /* Accept 0.5% difference, but aim for 0.1% */ - err_max = 5 * clock / 1000; - err_target = clock / 1000; - DBG_MSG("Clock is %d\n", clock); div_max = pll->max_vco / clock; @@ -992,7 +988,6 @@ static int calc_pll_params(int index, int clock, u32 *retm1, u32 *retm2, m_best = testm; n_best = n; p_best = p; - f_best = f_out; err_best = f_err; } } diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c index b9b284d79631..838869c6490c 100644 --- a/drivers/video/fbdev/matrox/matroxfb_base.c +++ b/drivers/video/fbdev/matrox/matroxfb_base.c @@ -2056,7 +2056,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm minfo = kzalloc(sizeof(*minfo), GFP_KERNEL); if (!minfo) - return -1; + return -ENOMEM; minfo->pcidev = pdev; minfo->dead = 0; diff --git a/drivers/video/fbdev/mxsfb.c b/drivers/video/fbdev/mxsfb.c index 7846f0e8bbbb..79b1dc7f042b 100644 --- a/drivers/video/fbdev/mxsfb.c +++ b/drivers/video/fbdev/mxsfb.c @@ -150,7 +150,7 @@ #define STMLCDIF_24BIT 3 /** pixel data bus to the display is of 24 bit width */ #define MXSFB_SYNC_DATA_ENABLE_HIGH_ACT (1 << 6) -#define MXSFB_SYNC_DOTCLK_FALLING_ACT (1 << 7) /* negtive edge sampling */ +#define MXSFB_SYNC_DOTCLK_FALLING_ACT (1 << 7) /* negative edge sampling */ enum mxsfb_devtype { MXSFB_V3, @@ -788,7 +788,16 @@ static int mxsfb_init_fbinfo_dt(struct mxsfb_info *host, if (vm.flags & DISPLAY_FLAGS_DE_HIGH) host->sync |= MXSFB_SYNC_DATA_ENABLE_HIGH_ACT; - if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE) + + /* + * The PIXDATA flags of the display_flags enum are controller + * centric, e.g. NEGEDGE means drive data on negative edge. + * However, the drivers flag is display centric: Sample the + * data on negative (falling) edge. 
Therefore, check for the + * POSEDGE flag: + * drive on positive edge => sample on negative edge + */ + if (vm.flags & DISPLAY_FLAGS_PIXDATA_POSEDGE) host->sync |= MXSFB_SYNC_DOTCLK_FALLING_ACT; put_display_node: diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c index a4ee65b8f918..6199d4806193 100644 --- a/drivers/video/fbdev/omap/hwa742.c +++ b/drivers/video/fbdev/omap/hwa742.c @@ -474,7 +474,7 @@ static void auto_update_complete(void *data) jiffies + HWA742_AUTO_UPDATE_TIME); } -static void hwa742_update_window_auto(unsigned long arg) +static void hwa742_update_window_auto(struct timer_list *unused) { LIST_HEAD(req_list); struct hwa742_request *last; @@ -1002,9 +1002,7 @@ static int hwa742_init(struct omapfb_device *fbdev, int ext_mode, hwa742.auto_update_window.height = fbdev->panel->y_res; hwa742.auto_update_window.format = 0; - init_timer(&hwa742.auto_update_timer); - hwa742.auto_update_timer.function = hwa742_update_window_auto; - hwa742.auto_update_timer.data = 0; + timer_setup(&hwa742.auto_update_timer, hwa742_update_window_auto, 0); hwa742.prev_color_mode = -1; hwa742.prev_flags = 0; diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c index 30d49f3800b3..8e1d60d48dbb 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c @@ -3988,7 +3988,7 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev) } #ifdef DSI_CATCH_MISSING_TE -static void dsi_te_timeout(unsigned long arg) +static void dsi_te_timeout(struct timer_list *unused) { DSSERR("TE not received for 250ms!\n"); } @@ -5298,9 +5298,7 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) dsi_framedone_timeout_work_callback); #ifdef DSI_CATCH_MISSING_TE - init_timer(&dsi->te_timer); - dsi->te_timer.function = dsi_te_timeout; - dsi->te_timer.data = 0; + timer_setup(&dsi->te_timer, dsi_te_timeout, 0); #endif res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto"); diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c index 1d7c012f09db..e08e5664e330 100644 --- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c @@ -1477,7 +1477,7 @@ static int omapfb_alloc_fbmem_display(struct fb_info *fbi, unsigned long size, static int omapfb_parse_vram_param(const char *param, int max_entries, unsigned long *sizes, unsigned long *paddrs) { - int fbnum; + unsigned int fbnum; unsigned long size; unsigned long paddr = 0; char *p, *start; diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c index 933619da1a94..55fbb432c053 100644 --- a/drivers/video/fbdev/pxa3xx-gcu.c +++ b/drivers/video/fbdev/pxa3xx-gcu.c @@ -512,28 +512,26 @@ pxa3xx_gcu_mmap(struct file *file, struct vm_area_struct *vma) #ifdef PXA3XX_GCU_DEBUG_TIMER static struct timer_list pxa3xx_gcu_debug_timer; +static struct pxa3xx_gcu_priv *debug_timer_priv; -static void pxa3xx_gcu_debug_timedout(unsigned long ptr) +static void pxa3xx_gcu_debug_timedout(struct timer_list *unused) { - struct pxa3xx_gcu_priv *priv = (struct pxa3xx_gcu_priv *) ptr; + struct pxa3xx_gcu_priv *priv = debug_timer_priv; QERROR("Timer DUMP"); - /* init the timer structure */ - init_timer(&pxa3xx_gcu_debug_timer); - pxa3xx_gcu_debug_timer.function = pxa3xx_gcu_debug_timedout; - pxa3xx_gcu_debug_timer.data = ptr; - pxa3xx_gcu_debug_timer.expires = jiffies + 5*HZ; /* one second */ - - 
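/*
 * A minimal sketch of the timer API conversion used in the fbcon, hwa742,
 * dsi and pxa3xx hunks above: the callback now receives the timer_list
 * pointer and recovers its container with from_timer() instead of casting
 * an unsigned long cookie, and timer_setup() replaces init_timer()/
 * setup_timer().  The ex_blink_* names are illustrative, not from the
 * patches.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct ex_blink_state {
	struct timer_list timer;
	struct work_struct work;
	unsigned long period;
};

static void ex_blink_timeout(struct timer_list *t)
{
	/* recover the enclosing state from the embedded timer */
	struct ex_blink_state *st = from_timer(st, t, timer);

	schedule_work(&st->work);
	mod_timer(&st->timer, jiffies + st->period);
}

static void ex_blink_start(struct ex_blink_state *st,
			   void (*fn)(struct work_struct *))
{
	INIT_WORK(&st->work, fn);
	st->period = HZ / 5;
	timer_setup(&st->timer, ex_blink_timeout, 0);
	mod_timer(&st->timer, jiffies + st->period);
}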
add_timer(&pxa3xx_gcu_debug_timer); + mod_timer(&pxa3xx_gcu_debug_timer, jiffies + 5 * HZ); } -static void pxa3xx_gcu_init_debug_timer(void) +static void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv) { - pxa3xx_gcu_debug_timedout((unsigned long) &pxa3xx_gcu_debug_timer); + /* init the timer structure */ + debug_timer_priv = priv; + timer_setup(&pxa3xx_gcu_debug_timer, pxa3xx_gcu_debug_timedout, 0); + pxa3xx_gcu_debug_timedout(NULL); } #else -static inline void pxa3xx_gcu_init_debug_timer(void) {} +static inline void pxa3xx_gcu_init_debug_timer(struct pxa3xx_gcu_priv *priv) {} #endif static int @@ -670,7 +668,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, priv); priv->resource_mem = r; pxa3xx_gcu_reset(priv); - pxa3xx_gcu_init_debug_timer(); + pxa3xx_gcu_init_debug_timer(priv); dev_info(dev, "registered @0x%p, DMA 0x%p (%d bytes), IRQ %d\n", (void *) r->start, (void *) priv->shared_phys, diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c index fc2aaa5aca23..15ae50063296 100644 --- a/drivers/video/fbdev/sa1100fb.c +++ b/drivers/video/fbdev/sa1100fb.c @@ -323,13 +323,11 @@ sa1100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, * according to the RGB bitfield information. */ if (regno < 16) { - u32 *pal = fbi->fb.pseudo_palette; - val = chan_to_field(red, &fbi->fb.var.red); val |= chan_to_field(green, &fbi->fb.var.green); val |= chan_to_field(blue, &fbi->fb.var.blue); - pal[regno] = val; + fbi->pseudo_palette[regno] = val; ret = 0; } break; @@ -1132,12 +1130,10 @@ static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev) struct sa1100fb_info *fbi; unsigned i; - fbi = kmalloc(sizeof(struct sa1100fb_info) + sizeof(u32) * 16, - GFP_KERNEL); + fbi = devm_kzalloc(dev, sizeof(struct sa1100fb_info), GFP_KERNEL); if (!fbi) return NULL; - memset(fbi, 0, sizeof(struct sa1100fb_info)); fbi->dev = dev; strcpy(fbi->fb.fix.id, SA1100_NAME); @@ -1159,7 +1155,7 @@ static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev) fbi->fb.fbops = &sa1100fb_ops; fbi->fb.flags = FBINFO_DEFAULT; fbi->fb.monspecs = monspecs; - fbi->fb.pseudo_palette = (fbi + 1); + fbi->fb.pseudo_palette = fbi->pseudo_palette; fbi->rgb[RGB_4] = &rgb_4; fbi->rgb[RGB_8] = &rgb_8; @@ -1218,48 +1214,42 @@ static int sa1100fb_probe(struct platform_device *pdev) return -EINVAL; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if (irq < 0 || !res) + if (irq < 0) return -EINVAL; - if (!request_mem_region(res->start, resource_size(res), "LCD")) - return -EBUSY; - fbi = sa1100fb_init_fbinfo(&pdev->dev); - ret = -ENOMEM; if (!fbi) - goto failed; - - fbi->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(fbi->clk)) { - ret = PTR_ERR(fbi->clk); - fbi->clk = NULL; - goto failed; - } + return -ENOMEM; - fbi->base = ioremap(res->start, resource_size(res)); - if (!fbi->base) - goto failed; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + fbi->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(fbi->base)) + return PTR_ERR(fbi->base); - /* Initialize video memory */ - ret = sa1100fb_map_video_memory(fbi); - if (ret) - goto failed; + fbi->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(fbi->clk)) + return PTR_ERR(fbi->clk); - ret = request_irq(irq, sa1100fb_handle_irq, 0, "LCD", fbi); + ret = devm_request_irq(&pdev->dev, irq, sa1100fb_handle_irq, 0, + "LCD", fbi); if (ret) { dev_err(&pdev->dev, "request_irq failed: %d\n", ret); - goto failed; + return ret; } if (machine_is_shannon()) 
{ - ret = gpio_request_one(SHANNON_GPIO_DISP_EN, + ret = devm_gpio_request_one(&pdev->dev, SHANNON_GPIO_DISP_EN, GPIOF_OUT_INIT_LOW, "display enable"); if (ret) - goto err_free_irq; + return ret; } + /* Initialize video memory */ + ret = sa1100fb_map_video_memory(fbi); + if (ret) + return ret; + /* * This makes sure that our colour bitfield * descriptors are correctly initialised. @@ -1269,8 +1259,11 @@ static int sa1100fb_probe(struct platform_device *pdev) platform_set_drvdata(pdev, fbi); ret = register_framebuffer(&fbi->fb); - if (ret < 0) - goto err_reg_fb; + if (ret < 0) { + dma_free_wc(fbi->dev, fbi->map_size, fbi->map_cpu, + fbi->map_dma); + return ret; + } #ifdef CONFIG_CPU_FREQ fbi->freq_transition.notifier_call = sa1100fb_freq_transition; @@ -1281,20 +1274,6 @@ static int sa1100fb_probe(struct platform_device *pdev) /* This driver cannot be unloaded at the moment */ return 0; - - err_reg_fb: - if (machine_is_shannon()) - gpio_free(SHANNON_GPIO_DISP_EN); - err_free_irq: - free_irq(irq, fbi); - failed: - if (fbi) - iounmap(fbi->base); - if (fbi->clk) - clk_put(fbi->clk); - kfree(fbi); - release_mem_region(res->start, resource_size(res)); - return ret; } static struct platform_driver sa1100fb_driver = { diff --git a/drivers/video/fbdev/sa1100fb.h b/drivers/video/fbdev/sa1100fb.h index 0139d13377a5..7a1a9ca33cec 100644 --- a/drivers/video/fbdev/sa1100fb.h +++ b/drivers/video/fbdev/sa1100fb.h @@ -69,6 +69,8 @@ struct sa1100fb_info { const struct sa1100fb_mach_info *inf; struct clk *clk; + + u32 pseudo_palette[16]; }; #define TO_INF(ptr,member) container_of(ptr,struct sa1100fb_info,member) diff --git a/drivers/video/fbdev/sis/init301.c b/drivers/video/fbdev/sis/init301.c index 1ec9c3e0e1d8..02ee752d5000 100644 --- a/drivers/video/fbdev/sis/init301.c +++ b/drivers/video/fbdev/sis/init301.c @@ -6486,7 +6486,7 @@ SiS_SetTVSpecial(struct SiS_Private *SiS_Pr, unsigned short ModeNo) if(!(SiS_Pr->SiS_TVMode & TVSetPAL)) { if(SiS_Pr->SiS_TVMode & TVSetNTSC1024) { - const unsigned char specialtv[] = { + static const unsigned char specialtv[] = { 0xa7,0x07,0xf2,0x6e,0x17,0x8b,0x73,0x53, 0x13,0x40,0x34,0xf4,0x63,0xbb,0xcc,0x7a, 0x58,0xe4,0x73,0xda,0x13 diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c index e92303823a4b..ecdd054d8951 100644 --- a/drivers/video/fbdev/sis/sis_main.c +++ b/drivers/video/fbdev/sis/sis_main.c @@ -1702,6 +1702,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, if(ivideo->warncount++ < 10) printk(KERN_INFO "sisfb: Deprecated ioctl call received - update your application!\n"); + /* fall through */ case SISFB_GET_INFO: /* For communication with X driver */ ivideo->sisfb_infoblock.sisfb_id = SISFB_ID; ivideo->sisfb_infoblock.sisfb_version = VER_MAJOR; @@ -1755,6 +1756,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, if(ivideo->warncount++ < 10) printk(KERN_INFO "sisfb: Deprecated ioctl call received - update your application!\n"); + /* fall through */ case SISFB_GET_VBRSTATUS: if(sisfb_CheckVBRetrace(ivideo)) return put_user((u32)1, argp); @@ -1765,6 +1767,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, if(ivideo->warncount++ < 10) printk(KERN_INFO "sisfb: Deprecated ioctl call received - update your application!\n"); + /* fall through */ case SISFB_GET_AUTOMAXIMIZE: if(ivideo->sisfb_max) return put_user((u32)1, argp); @@ -1775,6 +1778,7 @@ static int sisfb_ioctl(struct fb_info *info, unsigned int cmd, if(ivideo->warncount++ < 10) printk(KERN_INFO "sisfb: Deprecated ioctl call 
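/*
 * A minimal sketch of the devm conversion applied to sa1100fb above:
 * ioremap/clk_get/request_irq become device-managed variants, so the
 * explicit error-unwind labels and the remove-time frees disappear.
 * exdrv/exdrv_priv are illustrative names, not from the patch.
 */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

struct exdrv_priv {
	void __iomem *base;
	struct clk *clk;
};

static irqreturn_t exdrv_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int exdrv_probe(struct platform_device *pdev)
{
	struct exdrv_priv *priv;
	struct resource *res;
	int irq, ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, exdrv_irq, 0,
			       dev_name(&pdev->dev), priv);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, priv);
	/* all devm resources are released automatically on unbind/failure */
	return 0;
}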
received - update your application!\n"); + /* fall through */ case SISFB_SET_AUTOMAXIMIZE: if(get_user(gpu32, argp)) return -EFAULT; diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c index 076dd2711630..6f0a19501c6a 100644 --- a/drivers/video/fbdev/sm501fb.c +++ b/drivers/video/fbdev/sm501fb.c @@ -1008,6 +1008,7 @@ static int sm501fb_blank_crt(int blank_mode, struct fb_info *info) case FB_BLANK_POWERDOWN: ctrl &= ~SM501_DC_CRT_CONTROL_ENABLE; sm501_misc_control(fbi->dev->parent, SM501_MISC_DAC_POWER, 0); + /* fall through */ case FB_BLANK_NORMAL: ctrl |= SM501_DC_CRT_CONTROL_BLANK; @@ -1889,6 +1890,9 @@ static void sm501_free_init_fb(struct sm501fb_info *info, { struct fb_info *fbi = info->fb[head]; + if (!fbi) + return; + fb_dealloc_cmap(&fbi->cmap); } @@ -2076,8 +2080,10 @@ static int sm501fb_remove(struct platform_device *pdev) sm501_free_init_fb(info, HEAD_CRT); sm501_free_init_fb(info, HEAD_PANEL); - unregister_framebuffer(fbinfo_crt); - unregister_framebuffer(fbinfo_pnl); + if (fbinfo_crt) + unregister_framebuffer(fbinfo_crt); + if (fbinfo_pnl) + unregister_framebuffer(fbinfo_pnl); sm501fb_stop(info); kfree(info); @@ -2094,8 +2100,12 @@ static int sm501fb_suspend_fb(struct sm501fb_info *info, enum sm501_controller head) { struct fb_info *fbi = info->fb[head]; - struct sm501fb_par *par = fbi->par; + struct sm501fb_par *par; + + if (!fbi) + return 0; + par = fbi->par; if (par->screen.size == 0) return 0; @@ -2141,8 +2151,12 @@ static void sm501fb_resume_fb(struct sm501fb_info *info, enum sm501_controller head) { struct fb_info *fbi = info->fb[head]; - struct sm501fb_par *par = fbi->par; + struct sm501fb_par *par; + + if (!fbi) + return; + par = fbi->par; if (par->screen.size == 0) return; diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index ef08a104fb42..d44f14242016 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -769,11 +769,11 @@ static int dlfb_get_edid(struct dlfb_data *dev, char *edid, int len) for (i = 0; i < len; i++) { ret = usb_control_msg(dev->udev, - usb_rcvctrlpipe(dev->udev, 0), (0x02), - (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2, - HZ); - if (ret < 1) { - pr_err("Read EDID byte %d failed err %x\n", i, ret); + usb_rcvctrlpipe(dev->udev, 0), 0x02, + (0x80 | (0x02 << 5)), i << 8, 0xA1, + rbuf, 2, USB_CTRL_GET_TIMEOUT); + if (ret < 2) { + pr_err("Read EDID byte %d failed: %d\n", i, ret); i--; break; } diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 2a5de610dd8f..bdabb2765d1b 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -483,6 +483,9 @@ static int v9fs_test_inode(struct inode *inode, void *data) if (v9inode->qid.type != st->qid.type) return 0; + + if (v9inode->qid.path != st->qid.path) + return 0; return 1; } diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index 70f9887c59a9..7f6ae21a27b3 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -87,6 +87,9 @@ static int v9fs_test_inode_dotl(struct inode *inode, void *data) if (v9inode->qid.type != st->qid.type) return 0; + + if (v9inode->qid.path != st->qid.path) + return 0; return 1; } diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index ff5d32cf9578..a14b2c974c9e 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1160,7 +1160,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, struct ceph_inode_info *ci = cap->ci; struct inode *inode = &ci->vfs_inode; struct cap_msg_args arg; - int held, revoking, dropping; + int held, revoking; int wake = 0; int delayed = 0; int ret; @@ 
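/*
 * A minimal sketch of the switch fall-through annotation added in the
 * cirrusfb, sisfb and sm501fb hunks above: a bare fall-through comment
 * placed immediately before the next case label tells both reviewers and
 * -Wimplicit-fallthrough that the missing break is intentional.  The
 * ex_blank_* names are illustrative, not from the patches.
 */
enum ex_blank_mode { EX_BLANK_POWERDOWN, EX_BLANK_NORMAL, EX_BLANK_NONE };

static unsigned int ex_blank_ctrl(enum ex_blank_mode mode, unsigned int ctrl)
{
	switch (mode) {
	case EX_BLANK_POWERDOWN:
		ctrl &= ~0x1;	/* powerdown also blanks, like NORMAL */
		/* fall through */
	case EX_BLANK_NORMAL:
		ctrl |= 0x2;
		break;
	case EX_BLANK_NONE:
		ctrl |= 0x1;
		ctrl &= ~0x2;
		break;
	}
	return ctrl;
}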
-1168,7 +1168,6 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap, held = cap->issued | cap->implemented; revoking = cap->implemented & ~cap->issued; retain &= ~revoking; - dropping = cap->issued & ~retain; dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n", inode, cap, cap->session, @@ -1712,7 +1711,7 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags, /* if we are unmounting, flush any unused caps immediately. */ if (mdsc->stopping) - is_delayed = 1; + is_delayed = true; spin_lock(&ci->i_ceph_lock); @@ -3189,8 +3188,8 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid, int dirty = le32_to_cpu(m->dirty); int cleaned = 0; bool drop = false; - bool wake_ci = 0; - bool wake_mdsc = 0; + bool wake_ci = false; + bool wake_mdsc = false; list_for_each_entry_safe(cf, tmp_cf, &ci->i_cap_flush_list, i_list) { if (cf->tid == flush_tid) diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index f2550a076edc..ab81652198c4 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -493,6 +493,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb) ci->i_wb_ref = 0; ci->i_wrbuffer_ref = 0; ci->i_wrbuffer_ref_head = 0; + atomic_set(&ci->i_filelock_ref, 0); ci->i_shared_gen = 0; ci->i_rdcache_gen = 0; ci->i_rdcache_revoking = 0; @@ -786,7 +787,6 @@ static int fill_inode(struct inode *inode, struct page *locked_page, /* update inode */ ci->i_version = le64_to_cpu(info->version); - inode->i_version++; inode->i_rdev = le32_to_cpu(info->rdev); inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; @@ -1185,6 +1185,7 @@ retry_lookup: ceph_snap(d_inode(dn)) != tvino.snap)) { dout(" dn %p points to wrong inode %p\n", dn, d_inode(dn)); + ceph_dir_clear_ordered(dir); d_delete(dn); dput(dn); goto retry_lookup; @@ -1322,6 +1323,7 @@ retry_lookup: dout(" %p links to %p %llx.%llx, not %llx.%llx\n", dn, d_inode(dn), ceph_vinop(d_inode(dn)), ceph_vinop(in)); + ceph_dir_clear_ordered(dir); d_invalidate(dn); have_lease = false; } @@ -1573,6 +1575,7 @@ retry_lookup: ceph_snap(d_inode(dn)) != tvino.snap)) { dout(" dn %p points to wrong inode %p\n", dn, d_inode(dn)); + __ceph_dir_clear_ordered(ci); d_delete(dn); dput(dn); goto retry_lookup; @@ -1597,7 +1600,9 @@ retry_lookup: &req->r_caps_reservation); if (ret < 0) { pr_err("fill_inode badness on %p\n", in); - if (d_really_is_negative(dn)) + if (d_really_is_positive(dn)) + __ceph_dir_clear_ordered(ci); + else iput(in); d_drop(dn); err = ret; diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c index e7cce412f2cf..9e66f69ee8a5 100644 --- a/fs/ceph/locks.c +++ b/fs/ceph/locks.c @@ -30,19 +30,52 @@ void __init ceph_flock_init(void) get_random_bytes(&lock_secret, sizeof(lock_secret)); } +static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src) +{ + struct inode *inode = file_inode(src->fl_file); + atomic_inc(&ceph_inode(inode)->i_filelock_ref); +} + +static void ceph_fl_release_lock(struct file_lock *fl) +{ + struct inode *inode = file_inode(fl->fl_file); + struct ceph_inode_info *ci = ceph_inode(inode); + if (atomic_dec_and_test(&ci->i_filelock_ref)) { + /* clear error when all locks are released */ + spin_lock(&ci->i_ceph_lock); + ci->i_ceph_flags &= ~CEPH_I_ERROR_FILELOCK; + spin_unlock(&ci->i_ceph_lock); + } +} + +static const struct file_lock_operations ceph_fl_lock_ops = { + .fl_copy_lock = ceph_fl_copy_lock, + .fl_release_private = ceph_fl_release_lock, +}; + /** * Implement fcntl and flock locking functions. 
*/ -static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file, +static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode, int cmd, u8 wait, struct file_lock *fl) { - struct inode *inode = file_inode(file); struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; struct ceph_mds_request *req; int err; u64 length = 0; u64 owner; + if (operation == CEPH_MDS_OP_SETFILELOCK) { + /* + * increasing i_filelock_ref closes race window between + * handling request reply and adding file_lock struct to + * inode. Otherwise, auth caps may get trimmed in the + * window. Caller function will decrease the counter. + */ + fl->fl_ops = &ceph_fl_lock_ops; + atomic_inc(&ceph_inode(inode)->i_filelock_ref); + } + if (operation != CEPH_MDS_OP_SETFILELOCK || cmd == CEPH_LOCK_UNLOCK) wait = 0; @@ -180,10 +213,12 @@ static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc, */ int ceph_lock(struct file *file, int cmd, struct file_lock *fl) { - u8 lock_cmd; - int err; - u8 wait = 0; + struct inode *inode = file_inode(file); + struct ceph_inode_info *ci = ceph_inode(inode); + int err = 0; u16 op = CEPH_MDS_OP_SETFILELOCK; + u8 wait = 0; + u8 lock_cmd; if (!(fl->fl_flags & FL_POSIX)) return -ENOLCK; @@ -199,6 +234,26 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl) else if (IS_SETLKW(cmd)) wait = 1; + spin_lock(&ci->i_ceph_lock); + if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) { + err = -EIO; + } else if (op == CEPH_MDS_OP_SETFILELOCK) { + /* + * increasing i_filelock_ref closes race window between + * handling request reply and adding file_lock struct to + * inode. Otherwise, i_auth_cap may get trimmed in the + * window. Caller function will decrease the counter. + */ + fl->fl_ops = &ceph_fl_lock_ops; + atomic_inc(&ci->i_filelock_ref); + } + spin_unlock(&ci->i_ceph_lock); + if (err < 0) { + if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type) + posix_lock_file(file, fl, NULL); + return err; + } + if (F_RDLCK == fl->fl_type) lock_cmd = CEPH_LOCK_SHARED; else if (F_WRLCK == fl->fl_type) @@ -206,16 +261,16 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl) else lock_cmd = CEPH_LOCK_UNLOCK; - err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file, lock_cmd, wait, fl); + err = ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, lock_cmd, wait, fl); if (!err) { - if (op != CEPH_MDS_OP_GETFILELOCK) { + if (op == CEPH_MDS_OP_SETFILELOCK) { dout("mds locked, locking locally"); err = posix_lock_file(file, fl, NULL); - if (err && (CEPH_MDS_OP_SETFILELOCK == op)) { + if (err) { /* undo! This should only happen if * the kernel detects local * deadlock. 
*/ - ceph_lock_message(CEPH_LOCK_FCNTL, op, file, + ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, CEPH_LOCK_UNLOCK, 0, fl); dout("got %d on posix_lock_file, undid lock", err); @@ -227,9 +282,11 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl) int ceph_flock(struct file *file, int cmd, struct file_lock *fl) { - u8 lock_cmd; - int err; + struct inode *inode = file_inode(file); + struct ceph_inode_info *ci = ceph_inode(inode); + int err = 0; u8 wait = 0; + u8 lock_cmd; if (!(fl->fl_flags & FL_FLOCK)) return -ENOLCK; @@ -239,6 +296,21 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl) dout("ceph_flock, fl_file: %p", fl->fl_file); + spin_lock(&ci->i_ceph_lock); + if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) { + err = -EIO; + } else { + /* see comment in ceph_lock */ + fl->fl_ops = &ceph_fl_lock_ops; + atomic_inc(&ci->i_filelock_ref); + } + spin_unlock(&ci->i_ceph_lock); + if (err < 0) { + if (F_UNLCK == fl->fl_type) + locks_lock_file_wait(file, fl); + return err; + } + if (IS_SETLKW(cmd)) wait = 1; @@ -250,13 +322,13 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl) lock_cmd = CEPH_LOCK_UNLOCK; err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK, - file, lock_cmd, wait, fl); + inode, lock_cmd, wait, fl); if (!err) { err = locks_lock_file_wait(file, fl); if (err) { ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK, - file, CEPH_LOCK_UNLOCK, 0, fl); + inode, CEPH_LOCK_UNLOCK, 0, fl); dout("got %d on locks_lock_file_wait, undid lock", err); } } @@ -288,6 +360,37 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count) *flock_count, *fcntl_count); } +/* + * Given a pointer to a lock, convert it to a ceph filelock + */ +static int lock_to_ceph_filelock(struct file_lock *lock, + struct ceph_filelock *cephlock) +{ + int err = 0; + cephlock->start = cpu_to_le64(lock->fl_start); + cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1); + cephlock->client = cpu_to_le64(0); + cephlock->pid = cpu_to_le64((u64)lock->fl_pid); + cephlock->owner = cpu_to_le64(secure_addr(lock->fl_owner)); + + switch (lock->fl_type) { + case F_RDLCK: + cephlock->type = CEPH_LOCK_SHARED; + break; + case F_WRLCK: + cephlock->type = CEPH_LOCK_EXCL; + break; + case F_UNLCK: + cephlock->type = CEPH_LOCK_UNLOCK; + break; + default: + dout("Have unknown lock type %d", lock->fl_type); + err = -EINVAL; + } + + return err; +} + /** * Encode the flock and fcntl locks for the given inode into the ceph_filelock * array. Must be called with inode->i_lock already held. 
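/*
 * A distilled sketch of the pattern the ceph locks hunks above introduce:
 * attach file_lock_operations so a per-inode counter is taken whenever the
 * lock core copies the lock and dropped when the last private copy is
 * released, closing the race between the lock request reply and the
 * file_lock being added to the inode.  ex_inode_info and the exfs_* names
 * are illustrative, not from the patch.
 */
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/kernel.h>

struct ex_inode_info {
	atomic_t lock_ref;
	struct inode vfs_inode;
};

static inline struct ex_inode_info *EX_I(struct inode *inode)
{
	return container_of(inode, struct ex_inode_info, vfs_inode);
}

static void exfs_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	atomic_inc(&EX_I(file_inode(src->fl_file))->lock_ref);
}

static void exfs_fl_release_lock(struct file_lock *fl)
{
	atomic_dec(&EX_I(file_inode(fl->fl_file))->lock_ref);
}

static const struct file_lock_operations exfs_fl_ops = {
	.fl_copy_lock		= exfs_fl_copy_lock,
	.fl_release_private	= exfs_fl_release_lock,
};

/* in the ->lock() handler, before handing fl to the lock core: */
static void exfs_arm_lock(struct file_lock *fl)
{
	fl->fl_ops = &exfs_fl_ops;
	atomic_inc(&EX_I(file_inode(fl->fl_file))->lock_ref);
}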
@@ -356,50 +459,22 @@ int ceph_locks_to_pagelist(struct ceph_filelock *flocks, if (err) goto out_fail; - err = ceph_pagelist_append(pagelist, flocks, - num_fcntl_locks * sizeof(*flocks)); - if (err) - goto out_fail; + if (num_fcntl_locks > 0) { + err = ceph_pagelist_append(pagelist, flocks, + num_fcntl_locks * sizeof(*flocks)); + if (err) + goto out_fail; + } nlocks = cpu_to_le32(num_flock_locks); err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks)); if (err) goto out_fail; - err = ceph_pagelist_append(pagelist, - &flocks[num_fcntl_locks], - num_flock_locks * sizeof(*flocks)); -out_fail: - return err; -} - -/* - * Given a pointer to a lock, convert it to a ceph filelock - */ -int lock_to_ceph_filelock(struct file_lock *lock, - struct ceph_filelock *cephlock) -{ - int err = 0; - cephlock->start = cpu_to_le64(lock->fl_start); - cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1); - cephlock->client = cpu_to_le64(0); - cephlock->pid = cpu_to_le64((u64)lock->fl_pid); - cephlock->owner = cpu_to_le64(secure_addr(lock->fl_owner)); - - switch (lock->fl_type) { - case F_RDLCK: - cephlock->type = CEPH_LOCK_SHARED; - break; - case F_WRLCK: - cephlock->type = CEPH_LOCK_EXCL; - break; - case F_UNLCK: - cephlock->type = CEPH_LOCK_UNLOCK; - break; - default: - dout("Have unknown lock type %d", lock->fl_type); - err = -EINVAL; + if (num_flock_locks > 0) { + err = ceph_pagelist_append(pagelist, &flocks[num_fcntl_locks], + num_flock_locks * sizeof(*flocks)); } - +out_fail: return err; } diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 0687ab3c3267..ab69dcb70e8a 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1039,22 +1039,23 @@ void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc, * session caps */ -/* caller holds s_cap_lock, we drop it */ -static void cleanup_cap_releases(struct ceph_mds_client *mdsc, - struct ceph_mds_session *session) - __releases(session->s_cap_lock) +static void detach_cap_releases(struct ceph_mds_session *session, + struct list_head *target) { - LIST_HEAD(tmp_list); - list_splice_init(&session->s_cap_releases, &tmp_list); + lockdep_assert_held(&session->s_cap_lock); + + list_splice_init(&session->s_cap_releases, target); session->s_num_cap_releases = 0; - spin_unlock(&session->s_cap_lock); + dout("dispose_cap_releases mds%d\n", session->s_mds); +} - dout("cleanup_cap_releases mds%d\n", session->s_mds); - while (!list_empty(&tmp_list)) { +static void dispose_cap_releases(struct ceph_mds_client *mdsc, + struct list_head *dispose) +{ + while (!list_empty(dispose)) { struct ceph_cap *cap; /* zero out the in-progress message */ - cap = list_first_entry(&tmp_list, - struct ceph_cap, session_caps); + cap = list_first_entry(dispose, struct ceph_cap, session_caps); list_del(&cap->session_caps); ceph_put_cap(mdsc, cap); } @@ -1215,6 +1216,13 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, } spin_unlock(&mdsc->cap_dirty_lock); + if (atomic_read(&ci->i_filelock_ref) > 0) { + /* make further file lock syscall return -EIO */ + ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK; + pr_warn_ratelimited(" dropping file locks for %p %lld\n", + inode, ceph_ino(inode)); + } + if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) { list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove); ci->i_prealloc_cap_flush = NULL; @@ -1244,6 +1252,8 @@ static void remove_session_caps(struct ceph_mds_session *session) { struct ceph_fs_client *fsc = session->s_mdsc->fsc; struct super_block *sb = fsc->sb; + LIST_HEAD(dispose); 
+ dout("remove_session_caps on %p\n", session); iterate_session_caps(session, remove_session_caps_cb, fsc); @@ -1278,10 +1288,12 @@ static void remove_session_caps(struct ceph_mds_session *session) } // drop cap expires and unlock s_cap_lock - cleanup_cap_releases(session->s_mdsc, session); + detach_cap_releases(session, &dispose); BUG_ON(session->s_nr_caps > 0); BUG_ON(!list_empty(&session->s_cap_flushing)); + spin_unlock(&session->s_cap_lock); + dispose_cap_releases(session->s_mdsc, &dispose); } /* @@ -1462,6 +1474,11 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) goto out; if ((used | wanted) & CEPH_CAP_ANY_WR) goto out; + /* Note: it's possible that i_filelock_ref becomes non-zero + * after dropping auth caps. It doesn't hurt because reply + * of lock mds request will re-add auth caps. */ + if (atomic_read(&ci->i_filelock_ref) > 0) + goto out; } /* The inode has cached pages, but it's no longer used. * we can safely drop it */ @@ -2827,7 +2844,7 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, struct ceph_mds_cap_reconnect v2; struct ceph_mds_cap_reconnect_v1 v1; } rec; - struct ceph_inode_info *ci; + struct ceph_inode_info *ci = cap->ci; struct ceph_reconnect_state *recon_state = arg; struct ceph_pagelist *pagelist = recon_state->pagelist; char *path; @@ -2836,8 +2853,6 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, u64 snap_follows; struct dentry *dentry; - ci = cap->ci; - dout(" adding %p ino %llx.%llx cap %p %lld %s\n", inode, ceph_vinop(inode), cap, cap->cap_id, ceph_cap_string(cap->issued)); @@ -2870,7 +2885,8 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, rec.v2.issued = cpu_to_le32(cap->issued); rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); rec.v2.pathbase = cpu_to_le64(pathbase); - rec.v2.flock_len = 0; + rec.v2.flock_len = (__force __le32) + ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 
0 : 1); } else { rec.v1.cap_id = cpu_to_le64(cap->cap_id); rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); @@ -2894,26 +2910,37 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, if (recon_state->msg_version >= 2) { int num_fcntl_locks, num_flock_locks; - struct ceph_filelock *flocks; + struct ceph_filelock *flocks = NULL; size_t struct_len, total_len = 0; u8 struct_v = 0; encode_again: - ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); - flocks = kmalloc((num_fcntl_locks+num_flock_locks) * - sizeof(struct ceph_filelock), GFP_NOFS); - if (!flocks) { - err = -ENOMEM; - goto out_free; + if (rec.v2.flock_len) { + ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); + } else { + num_fcntl_locks = 0; + num_flock_locks = 0; } - err = ceph_encode_locks_to_buffer(inode, flocks, - num_fcntl_locks, - num_flock_locks); - if (err) { + if (num_fcntl_locks + num_flock_locks > 0) { + flocks = kmalloc((num_fcntl_locks + num_flock_locks) * + sizeof(struct ceph_filelock), GFP_NOFS); + if (!flocks) { + err = -ENOMEM; + goto out_free; + } + err = ceph_encode_locks_to_buffer(inode, flocks, + num_fcntl_locks, + num_flock_locks); + if (err) { + kfree(flocks); + flocks = NULL; + if (err == -ENOSPC) + goto encode_again; + goto out_free; + } + } else { kfree(flocks); - if (err == -ENOSPC) - goto encode_again; - goto out_free; + flocks = NULL; } if (recon_state->msg_version >= 3) { @@ -2993,6 +3020,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, int s_nr_caps; struct ceph_pagelist *pagelist; struct ceph_reconnect_state recon_state; + LIST_HEAD(dispose); pr_info("mds%d reconnect start\n", mds); @@ -3026,7 +3054,9 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, */ session->s_cap_reconnect = 1; /* drop old cap expires; we're about to reestablish that state */ - cleanup_cap_releases(mdsc, session); + detach_cap_releases(session, &dispose); + spin_unlock(&session->s_cap_lock); + dispose_cap_releases(mdsc, &dispose); /* trim unused caps to reduce MDS's cache rejoin time */ if (mdsc->fsc->sb->s_root) @@ -3857,14 +3887,14 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg) goto err_out; } return; + bad: pr_err("error decoding fsmap\n"); err_out: mutex_lock(&mdsc->mutex); - mdsc->mdsmap_err = -ENOENT; + mdsc->mdsmap_err = err; __wake_requests(mdsc, &mdsc->waiting_for_map); mutex_unlock(&mdsc->mutex); - return; } /* diff --git a/fs/ceph/super.c b/fs/ceph/super.c index e4082afedcb1..fe9fbb3f13f7 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -84,8 +84,9 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_ffree = -1; buf->f_namelen = NAME_MAX; - /* leave fsid little-endian, regardless of host endianness */ - fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1); + /* Must convert the fsid, for consistent values across arches */ + fsid = le64_to_cpu(*(__le64 *)(&monmap->fsid)) ^ + le64_to_cpu(*((__le64 *)&monmap->fsid + 1)); buf->f_fsid.val[0] = fsid & 0xffffffff; buf->f_fsid.val[1] = fsid >> 32; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 3e27a28aa44a..2beeec07fa76 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -352,6 +352,7 @@ struct ceph_inode_info { int i_pin_ref; int i_rd_ref, i_rdcache_ref, i_wr_ref, i_wb_ref; int i_wrbuffer_ref, i_wrbuffer_ref_head; + atomic_t i_filelock_ref; u32 i_shared_gen; /* increment each time we get FILE_SHARED */ u32 i_rdcache_gen; /* incremented each time we get FILE_CACHE. 
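/*
 * A small sketch of the endianness fix in the ceph_statfs hunk above:
 * folding a 16-byte fsid into a 64-bit value through fixed little-endian
 * loads, so the reported f_fsid is identical on big- and little-endian
 * hosts.  ex_fsid_to_u64() is an illustrative helper, not part of the
 * patch.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

static u64 ex_fsid_to_u64(const void *fsid /* 16 bytes */)
{
	const __le64 *p = fsid;

	return le64_to_cpu(p[0]) ^ le64_to_cpu(p[1]);
}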
*/ u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */ @@ -487,6 +488,8 @@ static inline struct inode *ceph_find_inode(struct super_block *sb, #define CEPH_I_KICK_FLUSH (1 << 9) /* kick flushing caps */ #define CEPH_I_FLUSH_SNAPS (1 << 10) /* need flush snapss */ #define CEPH_I_ERROR_WRITE (1 << 11) /* have seen write errors */ +#define CEPH_I_ERROR_FILELOCK (1 << 12) /* have seen file lock errors */ + /* * We set the ERROR_WRITE bit when we start seeing write errors on an inode @@ -1011,7 +1014,6 @@ extern int ceph_encode_locks_to_buffer(struct inode *inode, extern int ceph_locks_to_pagelist(struct ceph_filelock *flocks, struct ceph_pagelist *pagelist, int num_fcntl_locks, int num_flock_locks); -extern int lock_to_ceph_filelock(struct file_lock *fl, struct ceph_filelock *c); /* debugfs.c */ extern int ceph_fs_debugfs_init(struct ceph_fs_client *client); diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index b837fb7e290a..a8e3777c94dc 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -369,6 +369,7 @@ static int lockd_start_svc(struct svc_serv *serv) printk(KERN_WARNING "lockd_up: svc_rqst allocation failed, error=%d\n", error); + lockd_unregister_notifiers(); goto out_rqst; } @@ -459,13 +460,16 @@ int lockd_up(struct net *net) } error = lockd_up_net(serv, net); - if (error < 0) - goto err_net; + if (error < 0) { + lockd_unregister_notifiers(); + goto err_put; + } error = lockd_start_svc(serv); - if (error < 0) - goto err_start; - + if (error < 0) { + lockd_down_net(serv, net); + goto err_put; + } nlmsvc_users++; /* * Note: svc_serv structures have an initial use count of 1, @@ -476,12 +480,6 @@ err_put: err_create: mutex_unlock(&nlmsvc_mutex); return error; - -err_start: - lockd_down_net(serv, net); -err_net: - lockd_unregister_notifiers(); - goto err_put; } EXPORT_SYMBOL_GPL(lockd_up); diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c index 420d3a0ab258..897b299db55e 100644 --- a/fs/nfs_common/grace.c +++ b/fs/nfs_common/grace.c @@ -55,14 +55,7 @@ locks_end_grace(struct lock_manager *lm) } EXPORT_SYMBOL_GPL(locks_end_grace); -/** - * locks_in_grace - * - * Lock managers call this function to determine when it is OK for them - * to answer ordinary lock requests, and when they should accept only - * lock reclaims. - */ -int +static bool __state_in_grace(struct net *net, bool open) { struct list_head *grace_list = net_generic(net, grace_net_id); @@ -78,15 +71,22 @@ __state_in_grace(struct net *net, bool open) return false; } -int locks_in_grace(struct net *net) +/** + * locks_in_grace + * + * Lock managers call this function to determine when it is OK for them + * to answer ordinary lock requests, and when they should accept only + * lock reclaims. 
+ */ +bool locks_in_grace(struct net *net) { - return __state_in_grace(net, 0); + return __state_in_grace(net, false); } EXPORT_SYMBOL_GPL(locks_in_grace); -int opens_in_grace(struct net *net) +bool opens_in_grace(struct net *net) { - return __state_in_grace(net, 1); + return __state_in_grace(net, true); } EXPORT_SYMBOL_GPL(opens_in_grace); diff --git a/fs/nfsd/fault_inject.c b/fs/nfsd/fault_inject.c index 6dfede6d172a..84831253203d 100644 --- a/fs/nfsd/fault_inject.c +++ b/fs/nfsd/fault_inject.c @@ -12,6 +12,7 @@ #include <linux/nsproxy.h> #include <linux/sunrpc/addr.h> #include <linux/uaccess.h> +#include <linux/kernel.h> #include "state.h" #include "netns.h" @@ -126,8 +127,6 @@ static struct nfsd_fault_inject_op inject_ops[] = { }, }; -#define NUM_INJECT_OPS (sizeof(inject_ops)/sizeof(struct nfsd_fault_inject_op)) - int nfsd_fault_inject_init(void) { unsigned int i; @@ -138,7 +137,7 @@ int nfsd_fault_inject_init(void) if (!debug_dir) goto fail; - for (i = 0; i < NUM_INJECT_OPS; i++) { + for (i = 0; i < ARRAY_SIZE(inject_ops); i++) { op = &inject_ops[i]; if (!debugfs_create_file(op->file, mode, debug_dir, op, &fops_nfsd)) goto fail; diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index 3714231a9d0f..1c91391f4805 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h @@ -107,7 +107,7 @@ struct nfsd_net { bool lockd_up; /* Time of server startup */ - struct timeval nfssvc_boot; + struct timespec64 nfssvc_boot; /* * Max number of connections this nfsd container will allow. Defaults diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index f38acd905441..2758480555fa 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c @@ -748,8 +748,9 @@ nfs3svc_encode_writeres(struct svc_rqst *rqstp, __be32 *p) if (resp->status == 0) { *p++ = htonl(resp->count); *p++ = htonl(resp->committed); - *p++ = htonl(nn->nfssvc_boot.tv_sec); - *p++ = htonl(nn->nfssvc_boot.tv_usec); + /* unique identifier, y2038 overflow can be ignored */ + *p++ = htonl((u32)nn->nfssvc_boot.tv_sec); + *p++ = htonl(nn->nfssvc_boot.tv_nsec); } return xdr_ressize_check(rqstp, p); } @@ -1119,8 +1120,9 @@ nfs3svc_encode_commitres(struct svc_rqst *rqstp, __be32 *p) p = encode_wcc_data(rqstp, p, &resp->fh); /* Write verifier */ if (resp->status == 0) { - *p++ = htonl(nn->nfssvc_boot.tv_sec); - *p++ = htonl(nn->nfssvc_boot.tv_usec); + /* unique identifier, y2038 overflow can be ignored */ + *p++ = htonl((u32)nn->nfssvc_boot.tv_sec); + *p++ = htonl(nn->nfssvc_boot.tv_nsec); } return xdr_ressize_check(rqstp, p); } diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index ea45d954e8d7..7d888369f85a 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -336,7 +336,7 @@ nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls) trace_layout_recall(&ls->ls_stid.sc_stateid); - atomic_inc(&ls->ls_stid.sc_count); + refcount_inc(&ls->ls_stid.sc_count); nfsd4_run_cb(&ls->ls_recall); out_unlock: @@ -441,7 +441,7 @@ nfsd4_insert_layout(struct nfsd4_layoutget *lgp, struct nfs4_layout_stateid *ls) goto done; } - atomic_inc(&ls->ls_stid.sc_count); + refcount_inc(&ls->ls_stid.sc_count); list_add_tail(&new->lo_perstate, &ls->ls_layouts); new = NULL; done: diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 8487486ec496..008ea0b627d0 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -485,9 +485,6 @@ static __be32 nfsd4_getfh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { - if (!cstate->current_fh.fh_dentry) - return nfserr_nofilehandle; - u->getfh = &cstate->current_fh; return nfs_ok; } @@ 
-535,9 +532,6 @@ static __be32 nfsd4_savefh(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { - if (!cstate->current_fh.fh_dentry) - return nfserr_nofilehandle; - fh_dup2(&cstate->save_fh, &cstate->current_fh); if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG)) { memcpy(&cstate->save_stateid, &cstate->current_stateid, sizeof(stateid_t)); @@ -570,10 +564,11 @@ static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net) /* * This is opaque to client, so no need to byte-swap. Use - * __force to keep sparse happy + * __force to keep sparse happy. y2038 time_t overflow is + * irrelevant in this usage. */ verf[0] = (__force __be32)nn->nfssvc_boot.tv_sec; - verf[1] = (__force __be32)nn->nfssvc_boot.tv_usec; + verf[1] = (__force __be32)nn->nfssvc_boot.tv_nsec; memcpy(verifier->data, verf, sizeof(verifier->data)); } @@ -703,10 +698,8 @@ nfsd4_link(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_link *link = &u->link; - __be32 status = nfserr_nofilehandle; + __be32 status; - if (!cstate->save_fh.fh_dentry) - return status; status = nfsd_link(rqstp, &cstate->current_fh, link->li_name, link->li_namelen, &cstate->save_fh); if (!status) @@ -850,10 +843,8 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_rename *rename = &u->rename; - __be32 status = nfserr_nofilehandle; + __be32 status; - if (!cstate->save_fh.fh_dentry) - return status; if (opens_in_grace(SVC_NET(rqstp)) && !(cstate->save_fh.fh_export->ex_flags & NFSEXP_NOSUBTREECHECK)) return nfserr_grace; diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 0c04f81aa63b..b82817767b9d 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -359,7 +359,7 @@ put_nfs4_file(struct nfs4_file *fi) { might_lock(&state_lock); - if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) { + if (refcount_dec_and_lock(&fi->fi_ref, &state_lock)) { hlist_del_rcu(&fi->fi_hash); spin_unlock(&state_lock); WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate)); @@ -568,7 +568,7 @@ alloc_clnt_odstate(struct nfs4_client *clp) co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL); if (co) { co->co_client = clp; - atomic_set(&co->co_odcount, 1); + refcount_set(&co->co_odcount, 1); } return co; } @@ -586,7 +586,7 @@ static inline void get_clnt_odstate(struct nfs4_clnt_odstate *co) { if (co) - atomic_inc(&co->co_odcount); + refcount_inc(&co->co_odcount); } static void @@ -598,7 +598,7 @@ put_clnt_odstate(struct nfs4_clnt_odstate *co) return; fp = co->co_file; - if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) { + if (refcount_dec_and_lock(&co->co_odcount, &fp->fi_lock)) { list_del(&co->co_perfile); spin_unlock(&fp->fi_lock); @@ -656,7 +656,7 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *sla stid->sc_stateid.si_opaque.so_id = new_id; stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid; /* Will be incremented before return to client: */ - atomic_set(&stid->sc_count, 1); + refcount_set(&stid->sc_count, 1); spin_lock_init(&stid->sc_lock); /* @@ -813,7 +813,7 @@ nfs4_put_stid(struct nfs4_stid *s) might_lock(&clp->cl_lock); - if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) { + if (!refcount_dec_and_lock(&s->sc_count, &clp->cl_lock)) { wake_up_all(&close_wq); return; } @@ -913,7 +913,7 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp) if (status) return status; ++fp->fi_delegees; - atomic_inc(&dp->dl_stid.sc_count); + refcount_inc(&dp->dl_stid.sc_count); 
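The nfssvc_boot hunks above switch the recorded boot time from a timeval to a timespec64 and build the opaque write verifier from tv_sec/tv_nsec. A small userspace sketch of the same idea, illustrative only and not kernel code; truncating tv_sec to 32 bits is harmless because the verifier is only ever compared for equality:

#define _POSIX_C_SOURCE 200809L
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void)
{
	struct timespec boot;
	uint32_t words[2];
	unsigned char verifier[8];
	int i;

	clock_gettime(CLOCK_REALTIME, &boot);		/* stands in for the recorded boot time */
	words[0] = htonl((uint32_t)boot.tv_sec);	/* deliberate 32-bit truncation */
	words[1] = htonl((uint32_t)boot.tv_nsec);	/* nanoseconds instead of microseconds */
	memcpy(verifier, words, sizeof(verifier));

	for (i = 0; i < 8; i++)
		printf("%02x", verifier[i]);
	printf("\n");
	return 0;
}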
dp->dl_stid.sc_type = NFS4_DELEG_STID; list_add(&dp->dl_perfile, &fp->fi_delegations); list_add(&dp->dl_perclnt, &clp->cl_delegations); @@ -1214,7 +1214,7 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp, WARN_ON_ONCE(!list_empty(&stp->st_locks)); - if (!atomic_dec_and_test(&s->sc_count)) { + if (!refcount_dec_and_test(&s->sc_count)) { wake_up_all(&close_wq); return; } @@ -1439,8 +1439,10 @@ free_session_slots(struct nfsd4_session *ses) { int i; - for (i = 0; i < ses->se_fchannel.maxreqs; i++) + for (i = 0; i < ses->se_fchannel.maxreqs; i++) { + free_svc_cred(&ses->se_slots[i]->sl_cred); kfree(ses->se_slots[i]); + } } /* @@ -1472,6 +1474,11 @@ static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca) spin_lock(&nfsd_drc_lock); avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, nfsd_drc_max_mem - nfsd_drc_mem_used); + /* + * Never use more than a third of the remaining memory, + * unless it's the only way to give this client a slot: + */ + avail = clamp_t(int, avail, slotsize, avail/3); num = min_t(int, num, avail / slotsize); nfsd_drc_mem_used += num * slotsize; spin_unlock(&nfsd_drc_lock); @@ -2072,7 +2079,7 @@ find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask) s = find_stateid_locked(cl, t); if (s != NULL) { if (typemask & s->sc_type) - atomic_inc(&s->sc_count); + refcount_inc(&s->sc_count); else s = NULL; } @@ -2287,14 +2294,18 @@ nfsd4_store_cache_entry(struct nfsd4_compoundres *resp) dprintk("--> %s slot %p\n", __func__, slot); + slot->sl_flags |= NFSD4_SLOT_INITIALIZED; slot->sl_opcnt = resp->opcnt; slot->sl_status = resp->cstate.status; + free_svc_cred(&slot->sl_cred); + copy_cred(&slot->sl_cred, &resp->rqstp->rq_cred); - slot->sl_flags |= NFSD4_SLOT_INITIALIZED; - if (nfsd4_not_cached(resp)) { - slot->sl_datalen = 0; + if (!nfsd4_cache_this(resp)) { + slot->sl_flags &= ~NFSD4_SLOT_CACHED; return; } + slot->sl_flags |= NFSD4_SLOT_CACHED; + base = resp->cstate.data_offset; slot->sl_datalen = buf->len - base; if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen)) @@ -2321,8 +2332,16 @@ nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args, op = &args->ops[resp->opcnt - 1]; nfsd4_encode_operation(resp, op); - /* Return nfserr_retry_uncached_rep in next operation. */ - if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) { + if (slot->sl_flags & NFSD4_SLOT_CACHED) + return op->status; + if (args->opcnt == 1) { + /* + * The original operation wasn't a solo sequence--we + * always cache those--so this retry must not match the + * original: + */ + op->status = nfserr_seq_false_retry; + } else { op = &args->ops[resp->opcnt++]; op->status = nfserr_retry_uncached_rep; nfsd4_encode_operation(resp, op); @@ -2986,6 +3005,34 @@ static bool nfsd4_request_too_big(struct svc_rqst *rqstp, return xb->len > session->se_fchannel.maxreq_sz; } +static bool replay_matches_cache(struct svc_rqst *rqstp, + struct nfsd4_sequence *seq, struct nfsd4_slot *slot) +{ + struct nfsd4_compoundargs *argp = rqstp->rq_argp; + + if ((bool)(slot->sl_flags & NFSD4_SLOT_CACHETHIS) != + (bool)seq->cachethis) + return false; + /* + * If there's an error than the reply can have fewer ops than + * the call. 
But if we cached a reply with *more* ops than the + * call you're sending us now, then this new call is clearly not + * really a replay of the old one: + */ + if (slot->sl_opcnt < argp->opcnt) + return false; + /* This is the only check explicitly called by spec: */ + if (!same_creds(&rqstp->rq_cred, &slot->sl_cred)) + return false; + /* + * There may be more comparisons we could actually do, but the + * spec doesn't require us to catch every case where the calls + * don't match (that would require caching the call as well as + * the reply), so we don't bother. + */ + return true; +} + __be32 nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) @@ -3045,6 +3092,9 @@ nfsd4_sequence(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, status = nfserr_seq_misordered; if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) goto out_put_session; + status = nfserr_seq_false_retry; + if (!replay_matches_cache(rqstp, seq, slot)) + goto out_put_session; cstate->slot = slot; cstate->session = session; cstate->clp = clp; @@ -3351,7 +3401,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval, { lockdep_assert_held(&state_lock); - atomic_set(&fp->fi_ref, 1); + refcount_set(&fp->fi_ref, 1); spin_lock_init(&fp->fi_lock); INIT_LIST_HEAD(&fp->fi_stateids); INIT_LIST_HEAD(&fp->fi_delegations); @@ -3514,7 +3564,7 @@ nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open) continue; if (local->st_stateowner == &oo->oo_owner) { ret = local; - atomic_inc(&ret->st_stid.sc_count); + refcount_inc(&ret->st_stid.sc_count); break; } } @@ -3573,7 +3623,7 @@ init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open) goto out_unlock; open->op_stp = NULL; - atomic_inc(&stp->st_stid.sc_count); + refcount_inc(&stp->st_stid.sc_count); stp->st_stid.sc_type = NFS4_OPEN_STID; INIT_LIST_HEAD(&stp->st_locks); stp->st_stateowner = nfs4_get_stateowner(&oo->oo_owner); @@ -3621,7 +3671,7 @@ move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net) * there should be no danger of the refcount going back up again at * this point. */ - wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2); + wait_event(close_wq, refcount_read(&s->st_stid.sc_count) == 2); release_all_access(s); if (s->st_stid.sc_file) { @@ -3647,7 +3697,7 @@ find_file_locked(struct knfsd_fh *fh, unsigned int hashval) hlist_for_each_entry_rcu(fp, &file_hashtbl[hashval], fi_hash) { if (fh_match(&fp->fi_fhandle, fh)) { - if (atomic_inc_not_zero(&fp->fi_ref)) + if (refcount_inc_not_zero(&fp->fi_ref)) return fp; } } @@ -3783,7 +3833,7 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp) * lock) we know the server hasn't removed the lease yet, we know * it's safe to take a reference. 
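The surrounding nfs4state.c hunks convert sc_count, fi_ref and co_odcount from atomic_t to refcount_t, which saturates and warns instead of silently wrapping. A minimal sketch of the resulting get/put shape, kernel context assumed; the structure and function names are illustrative, not nfsd symbols:

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct tracked_object {
	refcount_t ref;			/* set to 1 with refcount_set() at allocation */
	struct hlist_node hash;		/* entry in some lookup table */
};

static void tracked_get(struct tracked_object *obj)
{
	refcount_inc(&obj->ref);	/* WARNs on overflow or increment-from-zero */
}

static void tracked_put(struct tracked_object *obj, spinlock_t *table_lock)
{
	/* Take the table lock only when dropping the final reference,
	 * the same shape as put_nfs4_file() above. */
	if (refcount_dec_and_lock(&obj->ref, table_lock)) {
		hlist_del(&obj->hash);
		spin_unlock(table_lock);
		kfree(obj);
	}
}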
*/ - atomic_inc(&dp->dl_stid.sc_count); + refcount_inc(&dp->dl_stid.sc_count); nfsd4_run_cb(&dp->dl_recall); } @@ -3966,7 +4016,8 @@ static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, statei { struct nfs4_stid *ret; - ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID); + ret = find_stateid_by_type(cl, s, + NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID); if (!ret) return NULL; return delegstateid(ret); @@ -3989,6 +4040,12 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open, deleg = find_deleg_stateid(cl, &open->op_delegate_stateid); if (deleg == NULL) goto out; + if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) { + nfs4_put_stid(&deleg->dl_stid); + if (cl->cl_minorversion) + status = nfserr_deleg_revoked; + goto out; + } flags = share_access_to_flags(open->op_share_access); status = nfs4_check_delegmode(deleg, flags); if (status) { @@ -4858,6 +4915,16 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, struct nfs4_stid **s, struct nfsd_net *nn) { __be32 status; + bool return_revoked = false; + + /* + * only return revoked delegations if explicitly asked. + * otherwise we report revoked or bad_stateid status. + */ + if (typemask & NFS4_REVOKED_DELEG_STID) + return_revoked = true; + else if (typemask & NFS4_DELEG_STID) + typemask |= NFS4_REVOKED_DELEG_STID; if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) return nfserr_bad_stateid; @@ -4872,6 +4939,12 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate, *s = find_stateid_by_type(cstate->clp, stateid, typemask); if (!*s) return nfserr_bad_stateid; + if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) { + nfs4_put_stid(*s); + if (cstate->minorversion) + return nfserr_deleg_revoked; + return nfserr_bad_stateid; + } return nfs_ok; } @@ -5071,7 +5144,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, ret = nfserr_locks_held; break; case NFS4_LOCK_STID: - atomic_inc(&s->sc_count); + refcount_inc(&s->sc_count); spin_unlock(&cl->cl_lock); ret = nfsd4_free_lock_stateid(stateid, s); goto out; @@ -5578,7 +5651,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo, lockdep_assert_held(&clp->cl_lock); - atomic_inc(&stp->st_stid.sc_count); + refcount_inc(&stp->st_stid.sc_count); stp->st_stid.sc_type = NFS4_LOCK_STID; stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner); get_nfs4_file(fp); @@ -5604,7 +5677,7 @@ find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp) list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) { if (lst->st_stid.sc_file == fp) { - atomic_inc(&lst->st_stid.sc_count); + refcount_inc(&lst->st_stid.sc_count); return lst; } } @@ -7006,8 +7079,8 @@ nfs4_state_start_net(struct net *net) nn->nfsd4_manager.block_opens = true; locks_start_grace(net, &nn->nfsd4_manager); nfsd4_client_tracking_init(net); - printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n", - nn->nfsd4_grace, net); + printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n", + nn->nfsd4_grace, net->ns.inum); queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ); return 0; } diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index e02bd2783124..33117d4ffce0 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -447,7 +447,7 @@ void nfsd_reset_versions(void) */ static void set_max_drc(void) { - #define NFSD_DRC_SIZE_SHIFT 10 + #define NFSD_DRC_SIZE_SHIFT 7 nfsd_drc_max_mem = (nr_free_buffer_pages() >> NFSD_DRC_SIZE_SHIFT) * PAGE_SIZE; nfsd_drc_mem_used = 0; @@ -517,7 
+517,7 @@ int nfsd_create_serv(struct net *net) register_inet6addr_notifier(&nfsd_inet6addr_notifier); #endif } - do_gettimeofday(&nn->nfssvc_boot); /* record boot time */ + ktime_get_real_ts64(&nn->nfssvc_boot); /* record boot time */ return 0; } diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 005c911b34ac..f3772ea8ba0d 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h @@ -36,6 +36,7 @@ #define _NFSD4_STATE_H #include <linux/idr.h> +#include <linux/refcount.h> #include <linux/sunrpc/svc_xprt.h> #include "nfsfh.h" @@ -83,7 +84,7 @@ struct nfsd4_callback_ops { * fields that are of general use to any stateid. */ struct nfs4_stid { - atomic_t sc_count; + refcount_t sc_count; #define NFS4_OPEN_STID 1 #define NFS4_LOCK_STID 2 #define NFS4_DELEG_STID 4 @@ -169,11 +170,13 @@ static inline struct nfs4_delegation *delegstateid(struct nfs4_stid *s) struct nfsd4_slot { u32 sl_seqid; __be32 sl_status; + struct svc_cred sl_cred; u32 sl_datalen; u16 sl_opcnt; #define NFSD4_SLOT_INUSE (1 << 0) #define NFSD4_SLOT_CACHETHIS (1 << 1) #define NFSD4_SLOT_INITIALIZED (1 << 2) +#define NFSD4_SLOT_CACHED (1 << 3) u8 sl_flags; char sl_data[]; }; @@ -465,7 +468,7 @@ struct nfs4_clnt_odstate { struct nfs4_client *co_client; struct nfs4_file *co_file; struct list_head co_perfile; - atomic_t co_odcount; + refcount_t co_odcount; }; /* @@ -481,7 +484,7 @@ struct nfs4_clnt_odstate { * the global state_lock spinlock. */ struct nfs4_file { - atomic_t fi_ref; + refcount_t fi_ref; spinlock_t fi_lock; struct hlist_node fi_hash; /* hash on fi_fhandle */ struct list_head fi_stateids; @@ -634,7 +637,7 @@ struct nfs4_file *find_file(struct knfsd_fh *fh); void put_nfs4_file(struct nfs4_file *fi); static inline void get_nfs4_file(struct nfs4_file *fi) { - atomic_inc(&fi->fi_ref); + refcount_inc(&fi->fi_ref); } struct file *find_any_file(struct nfs4_file *f); diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index 1e4edbf70052..bc29511b6405 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -649,9 +649,18 @@ static inline bool nfsd4_is_solo_sequence(struct nfsd4_compoundres *resp) return resp->opcnt == 1 && args->ops[0].opnum == OP_SEQUENCE; } -static inline bool nfsd4_not_cached(struct nfsd4_compoundres *resp) +/* + * The session reply cache only needs to cache replies that the client + * actually asked us to. But it's almost free for us to cache compounds + * consisting of only a SEQUENCE op, so we may as well cache those too. + * Also, the protocol doesn't give us a convenient response in the case + * of a replay of a solo SEQUENCE op that wasn't cached + * (RETRY_UNCACHED_REP can only be returned in the second op of a + * compound). 
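This comment, together with nfsd4_cache_this() just below it and replay_matches_cache() earlier, spells out the new session reply-cache policy: cache when the client sets CACHETHIS or when the compound is a solo SEQUENCE, and accept a retry as a replay only if it matches what was cached. A standalone, illustrative-only model of those rules; every name in this sketch is made up and none of it is nfsd code:

#include <stdbool.h>
#include <stdio.h>

struct cached_slot {
	bool cachethis;		/* client asked for caching on the original call */
	int opcnt;		/* number of ops in the cached reply */
	int cred_id;		/* stand-in for the credential used originally */
};

/* Cache the reply if asked to, or if it is almost free (solo SEQUENCE). */
static bool cache_this_reply(bool client_asked, bool solo_sequence)
{
	return client_asked || solo_sequence;
}

/* A retry only counts as a replay if its shape matches the cached call. */
static bool replay_matches(const struct cached_slot *slot,
			   bool retry_cachethis, int retry_opcnt, int retry_cred_id)
{
	if (slot->cachethis != retry_cachethis)
		return false;
	if (slot->opcnt < retry_opcnt)		/* retry has more ops than we answered */
		return false;
	return slot->cred_id == retry_cred_id;	/* the check the spec explicitly requires */
}

int main(void)
{
	struct cached_slot slot = { .cachethis = true, .opcnt = 3, .cred_id = 42 };

	printf("cache a solo SEQUENCE? %s\n", cache_this_reply(false, true) ? "yes" : "no");
	printf("matching retry is a replay? %s\n",
	       replay_matches(&slot, true, 3, 42) ? "yes" : "no");
	printf("retry with another credential? %s\n",
	       replay_matches(&slot, true, 3, 7) ? "yes" : "no");
	return 0;
}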
+ */ +static inline bool nfsd4_cache_this(struct nfsd4_compoundres *resp) { - return !(resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS) + return (resp->cstate.slot->sl_flags & NFSD4_SLOT_CACHETHIS) || nfsd4_is_solo_sequence(resp); } diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c index c2d8233b1e82..480ea059a680 100644 --- a/fs/orangefs/acl.c +++ b/fs/orangefs/acl.c @@ -155,13 +155,11 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type) int orangefs_init_acl(struct inode *inode, struct inode *dir) { - struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); struct posix_acl *default_acl, *acl; umode_t mode = inode->i_mode; + struct iattr iattr; int error = 0; - ClearModeFlag(orangefs_inode); - error = posix_acl_create(dir, &mode, &default_acl, &acl); if (error) return error; @@ -180,9 +178,11 @@ int orangefs_init_acl(struct inode *inode, struct inode *dir) /* If mode of the inode was changed, then do a forcible ->setattr */ if (mode != inode->i_mode) { - SetModeFlag(orangefs_inode); + memset(&iattr, 0, sizeof iattr); inode->i_mode = mode; - orangefs_flush_inode(inode); + iattr.ia_mode = mode; + iattr.ia_valid |= ATTR_MODE; + orangefs_inode_setattr(inode, &iattr); } return error; diff --git a/fs/orangefs/dir.c b/fs/orangefs/dir.c index a8cc588d6224..e2c2699d8016 100644 --- a/fs/orangefs/dir.c +++ b/fs/orangefs/dir.c @@ -386,7 +386,6 @@ static int orangefs_dir_release(struct inode *inode, struct file *file) { struct orangefs_dir *od = file->private_data; struct orangefs_dir_part *part = od->part; - orangefs_flush_inode(inode); while (part) { struct orangefs_dir_part *next = part->next; vfree(part); diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c index e4a8e6a7eb17..1668fd645c45 100644 --- a/fs/orangefs/file.c +++ b/fs/orangefs/file.c @@ -383,9 +383,15 @@ out: if (type == ORANGEFS_IO_READ) { file_accessed(file); } else { - SetMtimeFlag(orangefs_inode); - inode->i_mtime = current_time(inode); - mark_inode_dirty_sync(inode); + file_update_time(file); + /* + * Must invalidate to ensure write loop doesn't + * prevent kernel from reading updated + * attribute. Size probably changed because of + * the write, and other clients could update + * any other attribute. 
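As a quick arithmetic check of the nfsd4_get_drc_mem() change earlier in these hunks (never hand one client more than a third of the remaining DRC memory, while leaving room for at least one slot), here is a standalone sketch; the slot size, request and remaining memory are example values, not numbers from the patch:

#include <stdio.h>

/* A clamp helper in the spirit of the kernel's clamp_t(): bound val to [lo, hi]. */
static long clamp_long(long val, long lo, long hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

int main(void)
{
	long slotsize  = 2048;		/* bytes needed per cached reply slot (example) */
	long requested = 32;		/* slots the client asked for (example) */
	long remaining = 96 * 1024;	/* DRC memory still unused (example) */

	long avail = clamp_long(remaining, slotsize, remaining / 3);
	long granted = requested < avail / slotsize ? requested : avail / slotsize;

	/* With these numbers: 96 KiB remaining is clamped to 32 KiB, so the
	 * client is granted 16 slots rather than the 32 it asked for. */
	printf("avail = %ld bytes, granted = %ld slots\n", avail, granted);
	return 0;
}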
+ */ + orangefs_inode->getattr_time = jiffies - 1; } } @@ -615,8 +621,6 @@ static int orangefs_file_release(struct inode *inode, struct file *file) "orangefs_file_release: called on %pD\n", file); - orangefs_flush_inode(inode); - /* * remove all associated inode pages from the page cache and * readahead cache (if any); this forces an expensive refresh of @@ -666,8 +670,6 @@ static int orangefs_fsync(struct file *file, ret); op_release(new_op); - - orangefs_flush_inode(file_inode(file)); return ret; } diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index 28825a5b6d09..fe1d705ad91f 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -290,6 +290,22 @@ int orangefs_permission(struct inode *inode, int mask) return generic_permission(inode, mask); } +int orangefs_update_time(struct inode *inode, struct timespec *time, int flags) +{ + struct iattr iattr; + gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_update_time: %pU\n", + get_khandle_from_ino(inode)); + generic_update_time(inode, time, flags); + memset(&iattr, 0, sizeof iattr); + if (flags & S_ATIME) + iattr.ia_valid |= ATTR_ATIME; + if (flags & S_CTIME) + iattr.ia_valid |= ATTR_CTIME; + if (flags & S_MTIME) + iattr.ia_valid |= ATTR_MTIME; + return orangefs_inode_setattr(inode, &iattr); +} + /* ORANGEDS2 implementation of VFS inode operations for files */ const struct inode_operations orangefs_file_inode_operations = { .get_acl = orangefs_get_acl, @@ -298,6 +314,7 @@ const struct inode_operations orangefs_file_inode_operations = { .getattr = orangefs_getattr, .listxattr = orangefs_listxattr, .permission = orangefs_permission, + .update_time = orangefs_update_time, }; static int orangefs_init_iops(struct inode *inode) diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c index 7e9e5d0ea3bc..c98bba2dbc94 100644 --- a/fs/orangefs/namei.c +++ b/fs/orangefs/namei.c @@ -22,7 +22,9 @@ static int orangefs_create(struct inode *dir, { struct orangefs_inode_s *parent = ORANGEFS_I(dir); struct orangefs_kernel_op_s *new_op; + struct orangefs_object_kref ref; struct inode *inode; + struct iattr iattr; int ret; gossip_debug(GOSSIP_NAME_DEBUG, "%s: %pd\n", @@ -55,8 +57,10 @@ static int orangefs_create(struct inode *dir, if (ret < 0) goto out; - inode = orangefs_new_inode(dir->i_sb, dir, S_IFREG | mode, 0, - &new_op->downcall.resp.create.refn); + ref = new_op->downcall.resp.create.refn; + op_release(new_op); + + inode = orangefs_new_inode(dir->i_sb, dir, S_IFREG | mode, 0, &ref); if (IS_ERR(inode)) { gossip_err("%s: Failed to allocate inode for file :%pd:\n", __func__, @@ -82,12 +86,13 @@ static int orangefs_create(struct inode *dir, __func__, dentry); - SetMtimeFlag(parent); dir->i_mtime = dir->i_ctime = current_time(dir); + memset(&iattr, 0, sizeof iattr); + iattr.ia_valid |= ATTR_MTIME; + orangefs_inode_setattr(dir, &iattr); mark_inode_dirty_sync(dir); ret = 0; out: - op_release(new_op); gossip_debug(GOSSIP_NAME_DEBUG, "%s: %pd: returning %d\n", __func__, @@ -221,6 +226,7 @@ static int orangefs_unlink(struct inode *dir, struct dentry *dentry) struct inode *inode = dentry->d_inode; struct orangefs_inode_s *parent = ORANGEFS_I(dir); struct orangefs_kernel_op_s *new_op; + struct iattr iattr; int ret; gossip_debug(GOSSIP_NAME_DEBUG, @@ -253,8 +259,10 @@ static int orangefs_unlink(struct inode *dir, struct dentry *dentry) if (!ret) { drop_nlink(inode); - SetMtimeFlag(parent); dir->i_mtime = dir->i_ctime = current_time(dir); + memset(&iattr, 0, sizeof iattr); + iattr.ia_valid |= ATTR_MTIME; + orangefs_inode_setattr(dir, &iattr); 
mark_inode_dirty_sync(dir); } return ret; @@ -266,7 +274,9 @@ static int orangefs_symlink(struct inode *dir, { struct orangefs_inode_s *parent = ORANGEFS_I(dir); struct orangefs_kernel_op_s *new_op; + struct orangefs_object_kref ref; struct inode *inode; + struct iattr iattr; int mode = 755; int ret; @@ -307,8 +317,10 @@ static int orangefs_symlink(struct inode *dir, goto out; } - inode = orangefs_new_inode(dir->i_sb, dir, S_IFLNK | mode, 0, - &new_op->downcall.resp.sym.refn); + ref = new_op->downcall.resp.sym.refn; + op_release(new_op); + + inode = orangefs_new_inode(dir->i_sb, dir, S_IFLNK | mode, 0, &ref); if (IS_ERR(inode)) { gossip_err ("*** Failed to allocate orangefs symlink inode\n"); @@ -331,12 +343,13 @@ static int orangefs_symlink(struct inode *dir, get_khandle_from_ino(inode), dentry); - SetMtimeFlag(parent); dir->i_mtime = dir->i_ctime = current_time(dir); + memset(&iattr, 0, sizeof iattr); + iattr.ia_valid |= ATTR_MTIME; + orangefs_inode_setattr(dir, &iattr); mark_inode_dirty_sync(dir); ret = 0; out: - op_release(new_op); return ret; } @@ -344,7 +357,9 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode { struct orangefs_inode_s *parent = ORANGEFS_I(dir); struct orangefs_kernel_op_s *new_op; + struct orangefs_object_kref ref; struct inode *inode; + struct iattr iattr; int ret; new_op = op_alloc(ORANGEFS_VFS_OP_MKDIR); @@ -373,8 +388,10 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode goto out; } - inode = orangefs_new_inode(dir->i_sb, dir, S_IFDIR | mode, 0, - &new_op->downcall.resp.mkdir.refn); + ref = new_op->downcall.resp.mkdir.refn; + op_release(new_op); + + inode = orangefs_new_inode(dir->i_sb, dir, S_IFDIR | mode, 0, &ref); if (IS_ERR(inode)) { gossip_err("*** Failed to allocate orangefs dir inode\n"); ret = PTR_ERR(inode); @@ -400,11 +417,12 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode * NOTE: we have no good way to keep nlink consistent for directories * across clients; keep constant at 1. 
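The orangefs hunks above and below all make the same replacement: instead of setting SetMtimeFlag() and relying on the removed orangefs_flush_inode(), the directory timestamps are pushed to the server immediately through orangefs_inode_setattr(). A sketch of that pattern wrapped in a helper; the helper itself is hypothetical and not part of the patch, kernel context assumed:

#include <linux/fs.h>
#include <linux/string.h>

/* Hypothetical helper capturing the repeated namei.c pattern. */
static void orangefs_dir_changed(struct inode *dir)
{
	struct iattr iattr;

	dir->i_mtime = dir->i_ctime = current_time(dir);
	memset(&iattr, 0, sizeof(iattr));
	iattr.ia_valid = ATTR_MTIME;
	orangefs_inode_setattr(dir, &iattr);	/* synchronous update on the server,
						 * declared in orangefs-kernel.h */
	mark_inode_dirty_sync(dir);
}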
*/ - SetMtimeFlag(parent); dir->i_mtime = dir->i_ctime = current_time(dir); + memset(&iattr, 0, sizeof iattr); + iattr.ia_valid |= ATTR_MTIME; + orangefs_inode_setattr(dir, &iattr); mark_inode_dirty_sync(dir); out: - op_release(new_op); return ret; } @@ -470,4 +488,5 @@ const struct inode_operations orangefs_dir_inode_operations = { .getattr = orangefs_getattr, .listxattr = orangefs_listxattr, .permission = orangefs_permission, + .update_time = orangefs_update_time, }; diff --git a/fs/orangefs/orangefs-debug.h b/fs/orangefs/orangefs-debug.h index b6001bb28f5a..c7db56a31b92 100644 --- a/fs/orangefs/orangefs-debug.h +++ b/fs/orangefs/orangefs-debug.h @@ -15,8 +15,10 @@ #ifdef __KERNEL__ #include <linux/types.h> +#include <linux/kernel.h> #else #include <stdint.h> +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #endif #define GOSSIP_NO_DEBUG (__u64)0 @@ -88,6 +90,6 @@ static struct __keyword_mask_s s_kmod_keyword_mask_map[] = { }; static const int num_kmod_keyword_mask_map = (int) - (sizeof(s_kmod_keyword_mask_map) / sizeof(struct __keyword_mask_s)); + (ARRAY_SIZE(s_kmod_keyword_mask_map)); #endif /* __ORANGEFS_DEBUG_H */ diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h index f44d5eb74fcc..97adf7d100b5 100644 --- a/fs/orangefs/orangefs-kernel.h +++ b/fs/orangefs/orangefs-kernel.h @@ -209,37 +209,10 @@ struct orangefs_inode_s { struct inode vfs_inode; sector_t last_failed_block_index_read; - /* - * State of in-memory attributes not yet flushed to disk associated - * with this object - */ - unsigned long pinode_flags; - unsigned long getattr_time; u32 getattr_mask; }; -#define P_ATIME_FLAG 0 -#define P_MTIME_FLAG 1 -#define P_CTIME_FLAG 2 -#define P_MODE_FLAG 3 - -#define ClearAtimeFlag(pinode) clear_bit(P_ATIME_FLAG, &(pinode)->pinode_flags) -#define SetAtimeFlag(pinode) set_bit(P_ATIME_FLAG, &(pinode)->pinode_flags) -#define AtimeFlag(pinode) test_bit(P_ATIME_FLAG, &(pinode)->pinode_flags) - -#define ClearMtimeFlag(pinode) clear_bit(P_MTIME_FLAG, &(pinode)->pinode_flags) -#define SetMtimeFlag(pinode) set_bit(P_MTIME_FLAG, &(pinode)->pinode_flags) -#define MtimeFlag(pinode) test_bit(P_MTIME_FLAG, &(pinode)->pinode_flags) - -#define ClearCtimeFlag(pinode) clear_bit(P_CTIME_FLAG, &(pinode)->pinode_flags) -#define SetCtimeFlag(pinode) set_bit(P_CTIME_FLAG, &(pinode)->pinode_flags) -#define CtimeFlag(pinode) test_bit(P_CTIME_FLAG, &(pinode)->pinode_flags) - -#define ClearModeFlag(pinode) clear_bit(P_MODE_FLAG, &(pinode)->pinode_flags) -#define SetModeFlag(pinode) set_bit(P_MODE_FLAG, &(pinode)->pinode_flags) -#define ModeFlag(pinode) test_bit(P_MODE_FLAG, &(pinode)->pinode_flags) - /* per superblock private orangefs info */ struct orangefs_sb_info_s { struct orangefs_khandle root_khandle; @@ -436,6 +409,8 @@ int orangefs_getattr(const struct path *path, struct kstat *stat, int orangefs_permission(struct inode *inode, int mask); +int orangefs_update_time(struct inode *, struct timespec *, int); + /* * defined in xattr.c */ @@ -478,8 +453,6 @@ bool __is_daemon_in_service(void); */ __s32 fsid_of_op(struct orangefs_kernel_op_s *op); -int orangefs_flush_inode(struct inode *inode); - ssize_t orangefs_inode_getxattr(struct inode *inode, const char *name, void *buffer, diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c index f82336496311..97fe93129f38 100644 --- a/fs/orangefs/orangefs-utils.c +++ b/fs/orangefs/orangefs-utils.c @@ -4,6 +4,7 @@ * * See COPYING in top-level directory. 
*/ +#include <linux/kernel.h> #include "protocol.h" #include "orangefs-kernel.h" #include "orangefs-dev-proto.h" @@ -437,89 +438,8 @@ int orangefs_inode_setattr(struct inode *inode, struct iattr *iattr) op_release(new_op); - /* - * successful setattr should clear the atime, mtime and - * ctime flags. - */ - if (ret == 0) { - ClearAtimeFlag(orangefs_inode); - ClearMtimeFlag(orangefs_inode); - ClearCtimeFlag(orangefs_inode); - ClearModeFlag(orangefs_inode); + if (ret == 0) orangefs_inode->getattr_time = jiffies - 1; - } - - return ret; -} - -int orangefs_flush_inode(struct inode *inode) -{ - /* - * If it is a dirty inode, this function gets called. - * Gather all the information that needs to be setattr'ed - * Right now, this will only be used for mode, atime, mtime - * and/or ctime. - */ - struct iattr wbattr; - int ret; - int mtime_flag; - int ctime_flag; - int atime_flag; - int mode_flag; - struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); - - memset(&wbattr, 0, sizeof(wbattr)); - - /* - * check inode flags up front, and clear them if they are set. This - * will prevent multiple processes from all trying to flush the same - * inode if they call close() simultaneously - */ - mtime_flag = MtimeFlag(orangefs_inode); - ClearMtimeFlag(orangefs_inode); - ctime_flag = CtimeFlag(orangefs_inode); - ClearCtimeFlag(orangefs_inode); - atime_flag = AtimeFlag(orangefs_inode); - ClearAtimeFlag(orangefs_inode); - mode_flag = ModeFlag(orangefs_inode); - ClearModeFlag(orangefs_inode); - - /* -- Lazy atime,mtime and ctime update -- - * Note: all times are dictated by server in the new scheme - * and not by the clients - * - * Also mode updates are being handled now.. - */ - - if (mtime_flag) - wbattr.ia_valid |= ATTR_MTIME; - if (ctime_flag) - wbattr.ia_valid |= ATTR_CTIME; - if (atime_flag) - wbattr.ia_valid |= ATTR_ATIME; - - if (mode_flag) { - wbattr.ia_mode = inode->i_mode; - wbattr.ia_valid |= ATTR_MODE; - } - - gossip_debug(GOSSIP_UTILS_DEBUG, - "*********** orangefs_flush_inode: %pU " - "(ia_valid %d)\n", - get_khandle_from_ino(inode), - wbattr.ia_valid); - if (wbattr.ia_valid == 0) { - gossip_debug(GOSSIP_UTILS_DEBUG, - "orangefs_flush_inode skipping setattr()\n"); - return 0; - } - - gossip_debug(GOSSIP_UTILS_DEBUG, - "orangefs_flush_inode (%pU) writing mode %o\n", - get_khandle_from_ino(inode), - inode->i_mode); - - ret = orangefs_inode_setattr(inode, &wbattr); return ret; } @@ -606,7 +526,7 @@ int orangefs_normalize_to_errno(__s32 error_code) /* Convert ORANGEFS encoded errno values into regular errno values. 
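The orangefs-debug.h and orangefs-utils.c hunks here replace open-coded sizeof divisions with ARRAY_SIZE(), for example in the PINT_errno_mapping bounds check just below. A standalone illustration of why the macro form is preferred: the element count is derived from the array itself, so it cannot go stale if the element type changes. The table contents here are invented for the example:

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const int errno_mapping[] = { 0, 1, 5, 11, 22, 28 };	/* example table */

int main(void)
{
	unsigned int i = 4;	/* example index to validate */

	if (i < ARRAY_SIZE(errno_mapping))
		printf("errno_mapping[%u] = %d\n", i, errno_mapping[i]);
	else
		printf("index %u is out of range\n", i);
	return 0;
}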
*/ } else if ((-error_code) & ORANGEFS_ERROR_BIT) { i = (-error_code) & ~(ORANGEFS_ERROR_BIT|ORANGEFS_ERROR_CLASS_BITS); - if (i < sizeof(PINT_errno_mapping)/sizeof(*PINT_errno_mapping)) + if (i < ARRAY_SIZE(PINT_errno_mapping)) error_code = -PINT_errno_mapping[i]; else error_code = -EINVAL; diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c index 47ebd9bfd1a1..366750eef201 100644 --- a/fs/orangefs/super.c +++ b/fs/orangefs/super.c @@ -99,8 +99,6 @@ static void orangefs_inode_cache_ctor(void *req) inode_init_once(&orangefs_inode->vfs_inode); init_rwsem(&orangefs_inode->xattr_sem); - - orangefs_inode->vfs_inode.i_version = 1; } static struct inode *orangefs_alloc_inode(struct super_block *sb) @@ -119,7 +117,6 @@ static struct inode *orangefs_alloc_inode(struct super_block *sb) orangefs_inode->refn.fs_id = ORANGEFS_FS_ID_NULL; orangefs_inode->last_failed_block_index_read = 0; memset(orangefs_inode->link_target, 0, sizeof(orangefs_inode->link_target)); - orangefs_inode->pinode_flags = 0; gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_alloc_inode: allocated %p\n", @@ -299,21 +296,9 @@ void fsid_key_table_finalize(void) { } -/* Called whenever the VFS dirties the inode in response to atime updates */ -static void orangefs_dirty_inode(struct inode *inode, int flags) -{ - struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); - - gossip_debug(GOSSIP_SUPER_DEBUG, - "orangefs_dirty_inode: %pU\n", - get_khandle_from_ino(inode)); - SetAtimeFlag(orangefs_inode); -} - static const struct super_operations orangefs_s_ops = { .alloc_inode = orangefs_alloc_inode, .destroy_inode = orangefs_destroy_inode, - .dirty_inode = orangefs_dirty_inode, .drop_inode = generic_delete_inode, .statfs = orangefs_statfs, .remount_fs = orangefs_remount_fs, diff --git a/fs/orangefs/symlink.c b/fs/orangefs/symlink.c index d856cdf91763..db107fe91ab3 100644 --- a/fs/orangefs/symlink.c +++ b/fs/orangefs/symlink.c @@ -15,4 +15,5 @@ const struct inode_operations orangefs_symlink_inode_operations = { .getattr = orangefs_getattr, .listxattr = orangefs_listxattr, .permission = orangefs_permission, + .update_time = orangefs_update_time, }; diff --git a/fs/proc/base.c b/fs/proc/base.c index 9d357b2ea6cb..31934cb9dfc8 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1682,7 +1682,7 @@ const struct inode_operations proc_pid_link_inode_operations = { /* building an inode */ -void task_dump_owner(struct task_struct *task, mode_t mode, +void task_dump_owner(struct task_struct *task, umode_t mode, kuid_t *ruid, kgid_t *rgid) { /* Depending on the state of dumpable compute who should own a diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 9aad373cf11d..4a67188c8d74 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -100,7 +100,7 @@ static inline struct task_struct *get_proc_task(struct inode *inode) return get_pid_task(proc_pid(inode), PIDTYPE_PID); } -void task_dump_owner(struct task_struct *task, mode_t mode, +void task_dump_owner(struct task_struct *task, umode_t mode, kuid_t *ruid, kgid_t *rgid); unsigned name_to_int(const struct qstr *qstr); diff --git a/fs/xfs/libxfs/xfs_iext_tree.c b/fs/xfs/libxfs/xfs_iext_tree.c index 19e546a41251..89bf16b4d937 100644 --- a/fs/xfs/libxfs/xfs_iext_tree.c +++ b/fs/xfs/libxfs/xfs_iext_tree.c @@ -850,9 +850,9 @@ static void xfs_iext_free_last_leaf( struct xfs_ifork *ifp) { - ifp->if_u1.if_root = NULL; ifp->if_height--; kmem_free(ifp->if_u1.if_root); + ifp->if_u1.if_root = NULL; } void diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c index 
1c90ec41e9df..c79a1616b79d 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.c +++ b/fs/xfs/libxfs/xfs_inode_fork.c @@ -42,11 +42,6 @@ STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int); STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int); STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int); -static inline dev_t xfs_to_linux_dev_t(xfs_dev_t dev) -{ - return MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev)); -} - /* * Copy inode type and data and attr format specific information from the * on-disk inode to the in-core inode and fork structures. For fifos, devices, @@ -792,7 +787,8 @@ xfs_iflush_fork( case XFS_DINODE_FMT_DEV: if (iip->ili_fields & XFS_ILOG_DEV) { ASSERT(whichfork == XFS_DATA_FORK); - xfs_dinode_put_rdev(dip, sysv_encode_dev(VFS_I(ip)->i_rdev)); + xfs_dinode_put_rdev(dip, + linux_to_xfs_dev_t(VFS_I(ip)->i_rdev)); } break; diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h index 6282bfc1afa9..99562ec0de56 100644 --- a/fs/xfs/xfs_linux.h +++ b/fs/xfs/xfs_linux.h @@ -204,6 +204,16 @@ static inline kgid_t xfs_gid_to_kgid(uint32_t gid) return make_kgid(&init_user_ns, gid); } +static inline dev_t xfs_to_linux_dev_t(xfs_dev_t dev) +{ + return MKDEV(sysv_major(dev) & 0x1ff, sysv_minor(dev)); +} + +static inline xfs_dev_t linux_to_xfs_dev_t(dev_t dev) +{ + return sysv_encode_dev(dev); +} + /* * Various platform dependent calls that don't fit anywhere else */ diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index 7a7140543012..df9807a3caae 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -284,6 +284,11 @@ struct drm_display_info { * @hdmi: advance features of a HDMI sink. */ struct drm_hdmi_info hdmi; + + /** + * @non_desktop: Non desktop display (HMD). + */ + bool non_desktop; }; int drm_display_info_set_bus_formats(struct drm_display_info *info, diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 0b4ac2ebc610..b21e827c5c78 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -728,6 +728,13 @@ struct drm_mode_config { */ struct drm_property *suggested_y_property; + /** + * @non_desktop_property: Optional connector property with a hint + * that device isn't a standard display, and the console/desktop, + * should not be displayed on it. + */ + struct drm_property *non_desktop_property; + /* dumb ioctl parameters */ uint32_t preferred_depth, prefer_shadow; diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h deleted file mode 100644 index a75d304473d5..000000000000 --- a/include/dt-bindings/msm/msm-bus-ids.h +++ /dev/null @@ -1,887 +0,0 @@ -/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 and - * only version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#ifndef __MSM_BUS_IDS_H -#define __MSM_BUS_IDS_H - -/* Aggregation types */ -#define AGG_SCHEME_NONE 0 -#define AGG_SCHEME_LEG 1 -#define AGG_SCHEME_1 2 - -/* Topology related enums */ -#define MSM_BUS_FAB_DEFAULT 0 -#define MSM_BUS_FAB_APPSS 0 -#define MSM_BUS_FAB_SYSTEM 1024 -#define MSM_BUS_FAB_MMSS 2048 -#define MSM_BUS_FAB_SYSTEM_FPB 3072 -#define MSM_BUS_FAB_CPSS_FPB 4096 - -#define MSM_BUS_FAB_BIMC 0 -#define MSM_BUS_FAB_SYS_NOC 1024 -#define MSM_BUS_FAB_MMSS_NOC 2048 -#define MSM_BUS_FAB_OCMEM_NOC 3072 -#define MSM_BUS_FAB_PERIPH_NOC 4096 -#define MSM_BUS_FAB_CONFIG_NOC 5120 -#define MSM_BUS_FAB_OCMEM_VNOC 6144 -#define MSM_BUS_FAB_MMSS_AHB 2049 -#define MSM_BUS_FAB_A0_NOC 6145 -#define MSM_BUS_FAB_A1_NOC 6146 -#define MSM_BUS_FAB_A2_NOC 6147 -#define MSM_BUS_FAB_GNOC 6148 -#define MSM_BUS_FAB_CR_VIRT 6149 - -#define MSM_BUS_MASTER_FIRST 1 -#define MSM_BUS_MASTER_AMPSS_M0 1 -#define MSM_BUS_MASTER_AMPSS_M1 2 -#define MSM_BUS_APPSS_MASTER_FAB_MMSS 3 -#define MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4 -#define MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5 -#define MSM_BUS_MASTER_SPS 6 -#define MSM_BUS_MASTER_ADM_PORT0 7 -#define MSM_BUS_MASTER_ADM_PORT1 8 -#define MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9 -#define MSM_BUS_MASTER_ADM1_PORT1 10 -#define MSM_BUS_MASTER_LPASS_PROC 11 -#define MSM_BUS_MASTER_MSS_PROCI 12 -#define MSM_BUS_MASTER_MSS_PROCD 13 -#define MSM_BUS_MASTER_MSS_MDM_PORT0 14 -#define MSM_BUS_MASTER_LPASS 15 -#define MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16 -#define MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17 -#define MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18 -#define MSM_BUS_MASTER_ADM1_CI 19 -#define MSM_BUS_MASTER_ADM0_CI 20 -#define MSM_BUS_MASTER_MSS_MDM_PORT1 21 -#define MSM_BUS_MASTER_MDP_PORT0 22 -#define MSM_BUS_MASTER_MDP_PORT1 23 -#define MSM_BUS_MMSS_MASTER_ADM1_PORT0 24 -#define MSM_BUS_MASTER_ROTATOR 25 -#define MSM_BUS_MASTER_GRAPHICS_3D 26 -#define MSM_BUS_MASTER_JPEG_DEC 27 -#define MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28 -#define MSM_BUS_MASTER_VFE 29 -#define MSM_BUS_MASTER_VFE0 MSM_BUS_MASTER_VFE -#define MSM_BUS_MASTER_VPE 30 -#define MSM_BUS_MASTER_JPEG_ENC 31 -#define MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32 -#define MSM_BUS_MMSS_MASTER_APPS_FAB 33 -#define MSM_BUS_MASTER_HD_CODEC_PORT0 34 -#define MSM_BUS_MASTER_HD_CODEC_PORT1 35 -#define MSM_BUS_MASTER_SPDM 36 -#define MSM_BUS_MASTER_RPM 37 -#define MSM_BUS_MASTER_MSS 38 -#define MSM_BUS_MASTER_RIVA 39 -#define MSM_BUS_MASTER_SNOC_VMEM 40 -#define MSM_BUS_MASTER_MSS_SW_PROC 41 -#define MSM_BUS_MASTER_MSS_FW_PROC 42 -#define MSM_BUS_MASTER_HMSS 43 -#define MSM_BUS_MASTER_GSS_NAV 44 -#define MSM_BUS_MASTER_PCIE 45 -#define MSM_BUS_MASTER_SATA 46 -#define MSM_BUS_MASTER_CRYPTO 47 -#define MSM_BUS_MASTER_VIDEO_CAP 48 -#define MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49 -#define MSM_BUS_MASTER_VIDEO_ENC 50 -#define MSM_BUS_MASTER_VIDEO_DEC 51 -#define MSM_BUS_MASTER_LPASS_AHB 52 -#define MSM_BUS_MASTER_QDSS_BAM 53 -#define MSM_BUS_MASTER_SNOC_CFG 54 -#define MSM_BUS_MASTER_CRYPTO_CORE0 55 -#define MSM_BUS_MASTER_CRYPTO_CORE1 56 -#define MSM_BUS_MASTER_MSS_NAV 57 -#define MSM_BUS_MASTER_OCMEM_DMA 58 -#define MSM_BUS_MASTER_WCSS 59 -#define MSM_BUS_MASTER_QDSS_ETR 60 -#define MSM_BUS_MASTER_USB3 61 -#define MSM_BUS_MASTER_JPEG 62 -#define MSM_BUS_MASTER_VIDEO_P0 63 -#define MSM_BUS_MASTER_VIDEO_P1 64 -#define MSM_BUS_MASTER_MSS_PROC 65 -#define MSM_BUS_MASTER_JPEG_OCMEM 66 -#define MSM_BUS_MASTER_MDP_OCMEM 67 -#define MSM_BUS_MASTER_VIDEO_P0_OCMEM 68 -#define MSM_BUS_MASTER_VIDEO_P1_OCMEM 69 -#define MSM_BUS_MASTER_VFE_OCMEM 70 -#define 
MSM_BUS_MASTER_CNOC_ONOC_CFG 71 -#define MSM_BUS_MASTER_RPM_INST 72 -#define MSM_BUS_MASTER_RPM_DATA 73 -#define MSM_BUS_MASTER_RPM_SYS 74 -#define MSM_BUS_MASTER_DEHR 75 -#define MSM_BUS_MASTER_QDSS_DAP 76 -#define MSM_BUS_MASTER_TIC 77 -#define MSM_BUS_MASTER_SDCC_1 78 -#define MSM_BUS_MASTER_SDCC_3 79 -#define MSM_BUS_MASTER_SDCC_4 80 -#define MSM_BUS_MASTER_SDCC_2 81 -#define MSM_BUS_MASTER_TSIF 82 -#define MSM_BUS_MASTER_BAM_DMA 83 -#define MSM_BUS_MASTER_BLSP_2 84 -#define MSM_BUS_MASTER_USB_HSIC 85 -#define MSM_BUS_MASTER_BLSP_1 86 -#define MSM_BUS_MASTER_USB_HS 87 -#define MSM_BUS_MASTER_PNOC_CFG 88 -#define MSM_BUS_MASTER_V_OCMEM_GFX3D 89 -#define MSM_BUS_MASTER_IPA 90 -#define MSM_BUS_MASTER_QPIC 91 -#define MSM_BUS_MASTER_MDPE 92 -#define MSM_BUS_MASTER_USB_HS2 93 -#define MSM_BUS_MASTER_VPU 94 -#define MSM_BUS_MASTER_UFS 95 -#define MSM_BUS_MASTER_BCAST 96 -#define MSM_BUS_MASTER_CRYPTO_CORE2 97 -#define MSM_BUS_MASTER_EMAC 98 -#define MSM_BUS_MASTER_VPU_1 99 -#define MSM_BUS_MASTER_PCIE_1 100 -#define MSM_BUS_MASTER_USB3_1 101 -#define MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102 -#define MSM_BUS_MASTER_CNOC_MNOC_CFG 103 -#define MSM_BUS_MASTER_TCU_0 104 -#define MSM_BUS_MASTER_TCU_1 105 -#define MSM_BUS_MASTER_CPP 106 -#define MSM_BUS_MASTER_AUDIO 107 -#define MSM_BUS_MASTER_PCIE_2 108 -#define MSM_BUS_MASTER_VFE1 109 -#define MSM_BUS_MASTER_XM_USB_HS1 110 -#define MSM_BUS_MASTER_PCNOC_BIMC_1 111 -#define MSM_BUS_MASTER_BIMC_PCNOC 112 -#define MSM_BUS_MASTER_XI_USB_HSIC 113 -#define MSM_BUS_MASTER_SGMII 114 -#define MSM_BUS_SPMI_FETCHER 115 -#define MSM_BUS_MASTER_GNOC_BIMC 116 -#define MSM_BUS_MASTER_CRVIRT_A2NOC 117 -#define MSM_BUS_MASTER_CNOC_A2NOC 118 -#define MSM_BUS_MASTER_WLAN 119 -#define MSM_BUS_MASTER_MSS_CE 120 -#define MSM_BUS_MASTER_CDSP_PROC 121 -#define MSM_BUS_MASTER_GNOC_SNOC 122 -#define MSM_BUS_MASTER_PIMEM 123 -#define MSM_BUS_MASTER_MASTER_LAST 124 - -#define MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB -#define MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB - -#define MSM_BUS_SNOC_MM_INT_0 10000 -#define MSM_BUS_SNOC_MM_INT_1 10001 -#define MSM_BUS_SNOC_MM_INT_2 10002 -#define MSM_BUS_SNOC_MM_INT_BIMC 10003 -#define MSM_BUS_SNOC_INT_0 10004 -#define MSM_BUS_SNOC_INT_1 10005 -#define MSM_BUS_SNOC_INT_BIMC 10006 -#define MSM_BUS_SNOC_BIMC_0_MAS 10007 -#define MSM_BUS_SNOC_BIMC_1_MAS 10008 -#define MSM_BUS_SNOC_QDSS_INT 10009 -#define MSM_BUS_PNOC_SNOC_MAS 10010 -#define MSM_BUS_PNOC_SNOC_SLV 10011 -#define MSM_BUS_PNOC_INT_0 10012 -#define MSM_BUS_PNOC_INT_1 10013 -#define MSM_BUS_PNOC_M_0 10014 -#define MSM_BUS_PNOC_M_1 10015 -#define MSM_BUS_BIMC_SNOC_MAS 10016 -#define MSM_BUS_BIMC_SNOC_SLV 10017 -#define MSM_BUS_PNOC_SLV_0 10018 -#define MSM_BUS_PNOC_SLV_1 10019 -#define MSM_BUS_PNOC_SLV_2 10020 -#define MSM_BUS_PNOC_SLV_3 10021 -#define MSM_BUS_PNOC_SLV_4 10022 -#define MSM_BUS_PNOC_SLV_8 10023 -#define MSM_BUS_PNOC_SLV_9 10024 -#define MSM_BUS_SNOC_BIMC_0_SLV 10025 -#define MSM_BUS_SNOC_BIMC_1_SLV 10026 -#define MSM_BUS_MNOC_BIMC_MAS 10027 -#define MSM_BUS_MNOC_BIMC_SLV 10028 -#define MSM_BUS_BIMC_MNOC_MAS 10029 -#define MSM_BUS_BIMC_MNOC_SLV 10030 -#define MSM_BUS_SNOC_BIMC_MAS 10031 -#define MSM_BUS_SNOC_BIMC_SLV 10032 -#define MSM_BUS_CNOC_SNOC_MAS 10033 -#define MSM_BUS_CNOC_SNOC_SLV 10034 -#define MSM_BUS_SNOC_CNOC_MAS 10035 -#define MSM_BUS_SNOC_CNOC_SLV 10036 -#define MSM_BUS_OVNOC_SNOC_MAS 10037 -#define MSM_BUS_OVNOC_SNOC_SLV 10038 -#define MSM_BUS_SNOC_OVNOC_MAS 10039 -#define MSM_BUS_SNOC_OVNOC_SLV 10040 
-#define MSM_BUS_SNOC_PNOC_MAS 10041 -#define MSM_BUS_SNOC_PNOC_SLV 10042 -#define MSM_BUS_BIMC_INT_APPS_EBI 10043 -#define MSM_BUS_BIMC_INT_APPS_SNOC 10044 -#define MSM_BUS_SNOC_BIMC_2_MAS 10045 -#define MSM_BUS_SNOC_BIMC_2_SLV 10046 -#define MSM_BUS_PNOC_SLV_5 10047 -#define MSM_BUS_PNOC_SLV_7 10048 -#define MSM_BUS_PNOC_INT_2 10049 -#define MSM_BUS_PNOC_INT_3 10050 -#define MSM_BUS_PNOC_INT_4 10051 -#define MSM_BUS_PNOC_INT_5 10052 -#define MSM_BUS_PNOC_INT_6 10053 -#define MSM_BUS_PNOC_INT_7 10054 -#define MSM_BUS_BIMC_SNOC_1_MAS 10055 -#define MSM_BUS_BIMC_SNOC_1_SLV 10056 -#define MSM_BUS_PNOC_A1NOC_MAS 10057 -#define MSM_BUS_PNOC_A1NOC_SLV 10058 -#define MSM_BUS_CNOC_A1NOC_MAS 10059 -#define MSM_BUS_A0NOC_SNOC_MAS 10060 -#define MSM_BUS_A0NOC_SNOC_SLV 10061 -#define MSM_BUS_A1NOC_SNOC_SLV 10062 -#define MSM_BUS_A1NOC_SNOC_MAS 10063 -#define MSM_BUS_A2NOC_SNOC_MAS 10064 -#define MSM_BUS_A2NOC_SNOC_SLV 10065 -#define MSM_BUS_SNOC_INT_2 10066 -#define MSM_BUS_A0NOC_QDSS_INT 10067 -#define MSM_BUS_INT_LAST 10068 - -#define MSM_BUS_INT_TEST_ID 20000 -#define MSM_BUS_INT_TEST_LAST 20050 - -#define MSM_BUS_SLAVE_FIRST 512 -#define MSM_BUS_SLAVE_EBI_CH0 512 -#define MSM_BUS_SLAVE_EBI_CH1 513 -#define MSM_BUS_SLAVE_AMPSS_L2 514 -#define MSM_BUS_APPSS_SLAVE_FAB_MMSS 515 -#define MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516 -#define MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517 -#define MSM_BUS_SLAVE_SPS 518 -#define MSM_BUS_SLAVE_SYSTEM_IMEM 519 -#define MSM_BUS_SLAVE_AMPSS 520 -#define MSM_BUS_SLAVE_MSS 521 -#define MSM_BUS_SLAVE_LPASS 522 -#define MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523 -#define MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524 -#define MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525 -#define MSM_BUS_SLAVE_CORESIGHT 526 -#define MSM_BUS_SLAVE_RIVA 527 -#define MSM_BUS_SLAVE_SMI 528 -#define MSM_BUS_MMSS_SLAVE_FAB_APPS 529 -#define MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530 -#define MSM_BUS_SLAVE_MM_IMEM 531 -#define MSM_BUS_SLAVE_CRYPTO 532 -#define MSM_BUS_SLAVE_SPDM 533 -#define MSM_BUS_SLAVE_RPM 534 -#define MSM_BUS_SLAVE_RPM_MSG_RAM 535 -#define MSM_BUS_SLAVE_MPM 536 -#define MSM_BUS_SLAVE_PMIC1_SSBI1_A 537 -#define MSM_BUS_SLAVE_PMIC1_SSBI1_B 538 -#define MSM_BUS_SLAVE_PMIC1_SSBI1_C 539 -#define MSM_BUS_SLAVE_PMIC2_SSBI2_A 540 -#define MSM_BUS_SLAVE_PMIC2_SSBI2_B 541 -#define MSM_BUS_SLAVE_GSBI1_UART 542 -#define MSM_BUS_SLAVE_GSBI2_UART 543 -#define MSM_BUS_SLAVE_GSBI3_UART 544 -#define MSM_BUS_SLAVE_GSBI4_UART 545 -#define MSM_BUS_SLAVE_GSBI5_UART 546 -#define MSM_BUS_SLAVE_GSBI6_UART 547 -#define MSM_BUS_SLAVE_GSBI7_UART 548 -#define MSM_BUS_SLAVE_GSBI8_UART 549 -#define MSM_BUS_SLAVE_GSBI9_UART 550 -#define MSM_BUS_SLAVE_GSBI10_UART 551 -#define MSM_BUS_SLAVE_GSBI11_UART 552 -#define MSM_BUS_SLAVE_GSBI12_UART 553 -#define MSM_BUS_SLAVE_GSBI1_QUP 554 -#define MSM_BUS_SLAVE_GSBI2_QUP 555 -#define MSM_BUS_SLAVE_GSBI3_QUP 556 -#define MSM_BUS_SLAVE_GSBI4_QUP 557 -#define MSM_BUS_SLAVE_GSBI5_QUP 558 -#define MSM_BUS_SLAVE_GSBI6_QUP 559 -#define MSM_BUS_SLAVE_GSBI7_QUP 560 -#define MSM_BUS_SLAVE_GSBI8_QUP 561 -#define MSM_BUS_SLAVE_GSBI9_QUP 562 -#define MSM_BUS_SLAVE_GSBI10_QUP 563 -#define MSM_BUS_SLAVE_GSBI11_QUP 564 -#define MSM_BUS_SLAVE_GSBI12_QUP 565 -#define MSM_BUS_SLAVE_EBI2_NAND 566 -#define MSM_BUS_SLAVE_EBI2_CS0 567 -#define MSM_BUS_SLAVE_EBI2_CS1 568 -#define MSM_BUS_SLAVE_EBI2_CS2 569 -#define MSM_BUS_SLAVE_EBI2_CS3 570 -#define MSM_BUS_SLAVE_EBI2_CS4 571 -#define MSM_BUS_SLAVE_EBI2_CS5 572 -#define MSM_BUS_SLAVE_USB_FS1 573 -#define MSM_BUS_SLAVE_USB_FS2 574 -#define MSM_BUS_SLAVE_TSIF 575 -#define 
MSM_BUS_SLAVE_MSM_TSSC 576 -#define MSM_BUS_SLAVE_MSM_PDM 577 -#define MSM_BUS_SLAVE_MSM_DIMEM 578 -#define MSM_BUS_SLAVE_MSM_TCSR 579 -#define MSM_BUS_SLAVE_MSM_PRNG 580 -#define MSM_BUS_SLAVE_GSS 581 -#define MSM_BUS_SLAVE_SATA 582 -#define MSM_BUS_SLAVE_USB3 583 -#define MSM_BUS_SLAVE_WCSS 584 -#define MSM_BUS_SLAVE_OCIMEM 585 -#define MSM_BUS_SLAVE_SNOC_OCMEM 586 -#define MSM_BUS_SLAVE_SERVICE_SNOC 587 -#define MSM_BUS_SLAVE_QDSS_STM 588 -#define MSM_BUS_SLAVE_CAMERA_CFG 589 -#define MSM_BUS_SLAVE_DISPLAY_CFG 590 -#define MSM_BUS_SLAVE_OCMEM_CFG 591 -#define MSM_BUS_SLAVE_CPR_CFG 592 -#define MSM_BUS_SLAVE_CPR_XPU_CFG 593 -#define MSM_BUS_SLAVE_MISC_CFG 594 -#define MSM_BUS_SLAVE_MISC_XPU_CFG 595 -#define MSM_BUS_SLAVE_VENUS_CFG 596 -#define MSM_BUS_SLAVE_MISC_VENUS_CFG 597 -#define MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598 -#define MSM_BUS_SLAVE_MMSS_CLK_CFG 599 -#define MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600 -#define MSM_BUS_SLAVE_MNOC_MPU_CFG 601 -#define MSM_BUS_SLAVE_ONOC_MPU_CFG 602 -#define MSM_BUS_SLAVE_SERVICE_MNOC 603 -#define MSM_BUS_SLAVE_OCMEM 604 -#define MSM_BUS_SLAVE_SERVICE_ONOC 605 -#define MSM_BUS_SLAVE_SDCC_1 606 -#define MSM_BUS_SLAVE_SDCC_3 607 -#define MSM_BUS_SLAVE_SDCC_2 608 -#define MSM_BUS_SLAVE_SDCC_4 609 -#define MSM_BUS_SLAVE_BAM_DMA 610 -#define MSM_BUS_SLAVE_BLSP_2 611 -#define MSM_BUS_SLAVE_USB_HSIC 612 -#define MSM_BUS_SLAVE_BLSP_1 613 -#define MSM_BUS_SLAVE_USB_HS 614 -#define MSM_BUS_SLAVE_PDM 615 -#define MSM_BUS_SLAVE_PERIPH_APU_CFG 616 -#define MSM_BUS_SLAVE_PNOC_MPU_CFG 617 -#define MSM_BUS_SLAVE_PRNG 618 -#define MSM_BUS_SLAVE_SERVICE_PNOC 619 -#define MSM_BUS_SLAVE_CLK_CTL 620 -#define MSM_BUS_SLAVE_CNOC_MSS 621 -#define MSM_BUS_SLAVE_SECURITY 622 -#define MSM_BUS_SLAVE_TCSR 623 -#define MSM_BUS_SLAVE_TLMM 624 -#define MSM_BUS_SLAVE_CRYPTO_0_CFG 625 -#define MSM_BUS_SLAVE_CRYPTO_1_CFG 626 -#define MSM_BUS_SLAVE_IMEM_CFG 627 -#define MSM_BUS_SLAVE_MESSAGE_RAM 628 -#define MSM_BUS_SLAVE_BIMC_CFG 629 -#define MSM_BUS_SLAVE_BOOT_ROM 630 -#define MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631 -#define MSM_BUS_SLAVE_PMIC_ARB 632 -#define MSM_BUS_SLAVE_SPDM_WRAPPER 633 -#define MSM_BUS_SLAVE_DEHR_CFG 634 -#define MSM_BUS_SLAVE_QDSS_CFG 635 -#define MSM_BUS_SLAVE_RBCPR_CFG 636 -#define MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637 -#define MSM_BUS_SLAVE_SNOC_MPU_CFG 638 -#define MSM_BUS_SLAVE_CNOC_ONOC_CFG 639 -#define MSM_BUS_SLAVE_CNOC_MNOC_CFG 640 -#define MSM_BUS_SLAVE_PNOC_CFG 641 -#define MSM_BUS_SLAVE_SNOC_CFG 642 -#define MSM_BUS_SLAVE_EBI1_DLL_CFG 643 -#define MSM_BUS_SLAVE_PHY_APU_CFG 644 -#define MSM_BUS_SLAVE_EBI1_PHY_CFG 645 -#define MSM_BUS_SLAVE_SERVICE_CNOC 646 -#define MSM_BUS_SLAVE_IPS_CFG 647 -#define MSM_BUS_SLAVE_QPIC 648 -#define MSM_BUS_SLAVE_DSI_CFG 649 -#define MSM_BUS_SLAVE_UFS_CFG 650 -#define MSM_BUS_SLAVE_RBCPR_CX_CFG 651 -#define MSM_BUS_SLAVE_RBCPR_MX_CFG 652 -#define MSM_BUS_SLAVE_PCIE_CFG 653 -#define MSM_BUS_SLAVE_USB_PHYS_CFG 654 -#define MSM_BUS_SLAVE_VIDEO_CAP_CFG 655 -#define MSM_BUS_SLAVE_AVSYNC_CFG 656 -#define MSM_BUS_SLAVE_CRYPTO_2_CFG 657 -#define MSM_BUS_SLAVE_VPU_CFG 658 -#define MSM_BUS_SLAVE_BCAST_CFG 659 -#define MSM_BUS_SLAVE_KLM_CFG 660 -#define MSM_BUS_SLAVE_GENI_IR_CFG 661 -#define MSM_BUS_SLAVE_OCMEM_GFX 662 -#define MSM_BUS_SLAVE_CATS_128 663 -#define MSM_BUS_SLAVE_OCMEM_64 664 -#define MSM_BUS_SLAVE_PCIE_0 665 -#define MSM_BUS_SLAVE_PCIE_1 666 -#define MSM_BUS_SLAVE_PCIE_0_CFG 667 -#define MSM_BUS_SLAVE_PCIE_1_CFG 668 -#define MSM_BUS_SLAVE_SRVC_MNOC 669 -#define MSM_BUS_SLAVE_USB_HS2 670 -#define MSM_BUS_SLAVE_AUDIO 671 
-#define MSM_BUS_SLAVE_TCU 672 -#define MSM_BUS_SLAVE_APPSS 673 -#define MSM_BUS_SLAVE_PCIE_PARF 674 -#define MSM_BUS_SLAVE_USB3_PHY_CFG 675 -#define MSM_BUS_SLAVE_IPA_CFG 676 -#define MSM_BUS_SLAVE_A0NOC_SNOC 677 -#define MSM_BUS_SLAVE_A1NOC_SNOC 678 -#define MSM_BUS_SLAVE_A2NOC_SNOC 679 -#define MSM_BUS_SLAVE_HMSS_L3 680 -#define MSM_BUS_SLAVE_PIMEM_CFG 681 -#define MSM_BUS_SLAVE_DCC_CFG 682 -#define MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683 -#define MSM_BUS_SLAVE_PCIE_2_CFG 684 -#define MSM_BUS_SLAVE_PCIE20_AHB2PHY 685 -#define MSM_BUS_SLAVE_A0NOC_CFG 686 -#define MSM_BUS_SLAVE_A1NOC_CFG 687 -#define MSM_BUS_SLAVE_A2NOC_CFG 688 -#define MSM_BUS_SLAVE_A1NOC_MPU_CFG 689 -#define MSM_BUS_SLAVE_A2NOC_MPU_CFG 690 -#define MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691 -#define MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692 -#define MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693 -#define MSM_BUS_SLAVE_LPASS_SMMU_CFG 694 -#define MSM_BUS_SLAVE_MMAGIC_CFG 695 -#define MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696 -#define MSM_BUS_SLAVE_SSC_CFG 697 -#define MSM_BUS_SLAVE_DSA_CFG 698 -#define MSM_BUS_SLAVE_DSA_MPU_CFG 699 -#define MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700 -#define MSM_BUS_SLAVE_SMMU_CPP_CFG 701 -#define MSM_BUS_SLAVE_SMMU_JPEG_CFG 702 -#define MSM_BUS_SLAVE_SMMU_MDP_CFG 703 -#define MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704 -#define MSM_BUS_SLAVE_SMMU_VENUS_CFG 705 -#define MSM_BUS_SLAVE_SMMU_VFE_CFG 706 -#define MSM_BUS_SLAVE_A0NOC_MPU_CFG 707 -#define MSM_BUS_SLAVE_VMEM_CFG 708 -#define MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 709 -#define MSM_BUS_SLAVE_VMEM 710 -#define MSM_BUS_SLAVE_AHB2PHY 711 -#define MSM_BUS_SLAVE_PIMEM 712 -#define MSM_BUS_SLAVE_SNOC_VMEM 713 -#define MSM_BUS_SLAVE_PCIE_2 714 -#define MSM_BUS_SLAVE_RBCPR_MX 715 -#define MSM_BUS_SLAVE_RBCPR_CX 716 -#define MSM_BUS_SLAVE_BIMC_PCNOC 717 -#define MSM_BUS_SLAVE_PCNOC_BIMC_1 718 -#define MSM_BUS_SLAVE_SGMII 719 -#define MSM_BUS_SLAVE_SPMI_FETCHER 720 -#define MSM_BUS_PNOC_SLV_6 721 -#define MSM_BUS_SLAVE_MMSS_SMMU_CFG 722 -#define MSM_BUS_SLAVE_WLAN 723 -#define MSM_BUS_SLAVE_CRVIRT_A2NOC 724 -#define MSM_BUS_SLAVE_CNOC_A2NOC 725 -#define MSM_BUS_SLAVE_GLM 726 -#define MSM_BUS_SLAVE_GNOC_BIMC 727 -#define MSM_BUS_SLAVE_GNOC_SNOC 728 -#define MSM_BUS_SLAVE_QM_CFG 729 -#define MSM_BUS_SLAVE_TLMM_EAST 730 -#define MSM_BUS_SLAVE_TLMM_NORTH 731 -#define MSM_BUS_SLAVE_TLMM_WEST 732 -#define MSM_BUS_SLAVE_SKL 733 -#define MSM_BUS_SLAVE_LPASS_TCM 734 -#define MSM_BUS_SLAVE_TLMM_SOUTH 735 -#define MSM_BUS_SLAVE_TLMM_CENTER 736 -#define MSM_BUS_MSS_NAV_CE_MPU_CFG 737 -#define MSM_BUS_SLAVE_A2NOC_THROTTLE_CFG 738 -#define MSM_BUS_SLAVE_CDSP 739 -#define MSM_BUS_SLAVE_CDSP_SMMU_CFG 740 -#define MSM_BUS_SLAVE_LPASS_MPU_CFG 741 -#define MSM_BUS_SLAVE_CSI_PHY_CFG 742 -#define MSM_BUS_SLAVE_LAST 743 - -#define MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB -#define MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB - -/* - * ID's used in RPM messages - */ -#define ICBID_MASTER_APPSS_PROC 0 -#define ICBID_MASTER_MSS_PROC 1 -#define ICBID_MASTER_MNOC_BIMC 2 -#define ICBID_MASTER_SNOC_BIMC 3 -#define ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC -#define ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4 -#define ICBID_MASTER_CNOC_MNOC_CFG 5 -#define ICBID_MASTER_GFX3D 6 -#define ICBID_MASTER_JPEG 7 -#define ICBID_MASTER_MDP 8 -#define ICBID_MASTER_MDP0 ICBID_MASTER_MDP -#define ICBID_MASTER_MDPS ICBID_MASTER_MDP -#define ICBID_MASTER_VIDEO 9 -#define ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO -#define ICBID_MASTER_VIDEO_P1 10 -#define ICBID_MASTER_VFE 11 -#define ICBID_MASTER_VFE0 
ICBID_MASTER_VFE -#define ICBID_MASTER_CNOC_ONOC_CFG 12 -#define ICBID_MASTER_JPEG_OCMEM 13 -#define ICBID_MASTER_MDP_OCMEM 14 -#define ICBID_MASTER_VIDEO_P0_OCMEM 15 -#define ICBID_MASTER_VIDEO_P1_OCMEM 16 -#define ICBID_MASTER_VFE_OCMEM 17 -#define ICBID_MASTER_LPASS_AHB 18 -#define ICBID_MASTER_QDSS_BAM 19 -#define ICBID_MASTER_SNOC_CFG 20 -#define ICBID_MASTER_BIMC_SNOC 21 -#define ICBID_MASTER_BIMC_SNOC_0 ICBID_MASTER_BIMC_SNOC -#define ICBID_MASTER_CNOC_SNOC 22 -#define ICBID_MASTER_CRYPTO 23 -#define ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO -#define ICBID_MASTER_CRYPTO_CORE1 24 -#define ICBID_MASTER_LPASS_PROC 25 -#define ICBID_MASTER_MSS 26 -#define ICBID_MASTER_MSS_NAV 27 -#define ICBID_MASTER_OCMEM_DMA 28 -#define ICBID_MASTER_PNOC_SNOC 29 -#define ICBID_MASTER_WCSS 30 -#define ICBID_MASTER_QDSS_ETR 31 -#define ICBID_MASTER_USB3 32 -#define ICBID_MASTER_USB3_0 ICBID_MASTER_USB3 -#define ICBID_MASTER_SDCC_1 33 -#define ICBID_MASTER_SDCC_3 34 -#define ICBID_MASTER_SDCC_2 35 -#define ICBID_MASTER_SDCC_4 36 -#define ICBID_MASTER_TSIF 37 -#define ICBID_MASTER_BAM_DMA 38 -#define ICBID_MASTER_BLSP_2 39 -#define ICBID_MASTER_USB_HSIC 40 -#define ICBID_MASTER_BLSP_1 41 -#define ICBID_MASTER_USB_HS 42 -#define ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS -#define ICBID_MASTER_PNOC_CFG 43 -#define ICBID_MASTER_SNOC_PNOC 44 -#define ICBID_MASTER_RPM_INST 45 -#define ICBID_MASTER_RPM_DATA 46 -#define ICBID_MASTER_RPM_SYS 47 -#define ICBID_MASTER_DEHR 48 -#define ICBID_MASTER_QDSS_DAP 49 -#define ICBID_MASTER_SPDM 50 -#define ICBID_MASTER_TIC 51 -#define ICBID_MASTER_SNOC_CNOC 52 -#define ICBID_MASTER_GFX3D_OCMEM 53 -#define ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM -#define ICBID_MASTER_OVIRT_SNOC 54 -#define ICBID_MASTER_SNOC_OVIRT 55 -#define ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT -#define ICBID_MASTER_ONOC_OVIRT 56 -#define ICBID_MASTER_USB_HS2 57 -#define ICBID_MASTER_QPIC 58 -#define ICBID_MASTER_IPA 59 -#define ICBID_MASTER_DSI 60 -#define ICBID_MASTER_MDP1 61 -#define ICBID_MASTER_MDPE ICBID_MASTER_MDP1 -#define ICBID_MASTER_VPU_PROC 62 -#define ICBID_MASTER_VPU 63 -#define ICBID_MASTER_VPU0 ICBID_MASTER_VPU -#define ICBID_MASTER_CRYPTO_CORE2 64 -#define ICBID_MASTER_PCIE_0 65 -#define ICBID_MASTER_PCIE_1 66 -#define ICBID_MASTER_SATA 67 -#define ICBID_MASTER_UFS 68 -#define ICBID_MASTER_USB3_1 69 -#define ICBID_MASTER_VIDEO_OCMEM 70 -#define ICBID_MASTER_VPU1 71 -#define ICBID_MASTER_VCAP 72 -#define ICBID_MASTER_EMAC 73 -#define ICBID_MASTER_BCAST 74 -#define ICBID_MASTER_MMSS_PROC 75 -#define ICBID_MASTER_SNOC_BIMC_1 76 -#define ICBID_MASTER_SNOC_PCNOC 77 -#define ICBID_MASTER_AUDIO 78 -#define ICBID_MASTER_MM_INT_0 79 -#define ICBID_MASTER_MM_INT_1 80 -#define ICBID_MASTER_MM_INT_2 81 -#define ICBID_MASTER_MM_INT_BIMC 82 -#define ICBID_MASTER_MSS_INT 83 -#define ICBID_MASTER_PCNOC_CFG 84 -#define ICBID_MASTER_PCNOC_INT_0 85 -#define ICBID_MASTER_PCNOC_INT_1 86 -#define ICBID_MASTER_PCNOC_M_0 87 -#define ICBID_MASTER_PCNOC_M_1 88 -#define ICBID_MASTER_PCNOC_S_0 89 -#define ICBID_MASTER_PCNOC_S_1 90 -#define ICBID_MASTER_PCNOC_S_2 91 -#define ICBID_MASTER_PCNOC_S_3 92 -#define ICBID_MASTER_PCNOC_S_4 93 -#define ICBID_MASTER_PCNOC_S_6 94 -#define ICBID_MASTER_PCNOC_S_7 95 -#define ICBID_MASTER_PCNOC_S_8 96 -#define ICBID_MASTER_PCNOC_S_9 97 -#define ICBID_MASTER_QDSS_INT 98 -#define ICBID_MASTER_SNOC_INT_0 99 -#define ICBID_MASTER_SNOC_INT_1 100 -#define ICBID_MASTER_SNOC_INT_BIMC 101 -#define ICBID_MASTER_TCU_0 102 -#define ICBID_MASTER_TCU_1 103 -#define 
ICBID_MASTER_BIMC_INT_0 104 -#define ICBID_MASTER_BIMC_INT_1 105 -#define ICBID_MASTER_CAMERA 106 -#define ICBID_MASTER_RICA 107 -#define ICBID_MASTER_SNOC_BIMC_2 108 -#define ICBID_MASTER_BIMC_SNOC_1 109 -#define ICBID_MASTER_A0NOC_SNOC 110 -#define ICBID_MASTER_A1NOC_SNOC 111 -#define ICBID_MASTER_A2NOC_SNOC 112 -#define ICBID_MASTER_PIMEM 113 -#define ICBID_MASTER_SNOC_VMEM 114 -#define ICBID_MASTER_CPP 115 -#define ICBID_MASTER_CNOC_A1NOC 116 -#define ICBID_MASTER_PNOC_A1NOC 117 -#define ICBID_MASTER_HMSS 118 -#define ICBID_MASTER_PCIE_2 119 -#define ICBID_MASTER_ROTATOR 120 -#define ICBID_MASTER_VENUS_VMEM 121 -#define ICBID_MASTER_DCC 122 -#define ICBID_MASTER_MCDMA 123 -#define ICBID_MASTER_PCNOC_INT_2 124 -#define ICBID_MASTER_PCNOC_INT_3 125 -#define ICBID_MASTER_PCNOC_INT_4 126 -#define ICBID_MASTER_PCNOC_INT_5 127 -#define ICBID_MASTER_PCNOC_INT_6 128 -#define ICBID_MASTER_PCNOC_S_5 129 -#define ICBID_MASTER_SENSORS_AHB 130 -#define ICBID_MASTER_SENSORS_PROC 131 -#define ICBID_MASTER_QSPI 132 -#define ICBID_MASTER_VFE1 133 -#define ICBID_MASTER_SNOC_INT_2 134 -#define ICBID_MASTER_SMMNOC_BIMC 135 -#define ICBID_MASTER_CRVIRT_A1NOC 136 -#define ICBID_MASTER_XM_USB_HS1 137 -#define ICBID_MASTER_XI_USB_HS1 138 -#define ICBID_MASTER_PCNOC_BIMC_1 139 -#define ICBID_MASTER_BIMC_PCNOC 140 -#define ICBID_MASTER_XI_HSIC 141 -#define ICBID_MASTER_SGMII 142 -#define ICBID_MASTER_SPMI_FETCHER 143 -#define ICBID_MASTER_GNOC_BIMC 144 -#define ICBID_MASTER_CRVIRT_A2NOC 145 -#define ICBID_MASTER_CNOC_A2NOC 146 -#define ICBID_MASTER_WLAN 147 -#define ICBID_MASTER_MSS_CE 148 -#define ICBID_MASTER_CDSP_PROC 149 -#define ICBID_MASTER_GNOC_SNOC 150 - -#define ICBID_SLAVE_EBI1 0 -#define ICBID_SLAVE_APPSS_L2 1 -#define ICBID_SLAVE_BIMC_SNOC 2 -#define ICBID_SLAVE_BIMC_SNOC_0 ICBID_SLAVE_BIMC_SNOC -#define ICBID_SLAVE_CAMERA_CFG 3 -#define ICBID_SLAVE_DISPLAY_CFG 4 -#define ICBID_SLAVE_OCMEM_CFG 5 -#define ICBID_SLAVE_CPR_CFG 6 -#define ICBID_SLAVE_CPR_XPU_CFG 7 -#define ICBID_SLAVE_MISC_CFG 8 -#define ICBID_SLAVE_MISC_XPU_CFG 9 -#define ICBID_SLAVE_VENUS_CFG 10 -#define ICBID_SLAVE_GFX3D_CFG 11 -#define ICBID_SLAVE_MMSS_CLK_CFG 12 -#define ICBID_SLAVE_MMSS_CLK_XPU_CFG 13 -#define ICBID_SLAVE_MNOC_MPU_CFG 14 -#define ICBID_SLAVE_ONOC_MPU_CFG 15 -#define ICBID_SLAVE_MNOC_BIMC 16 -#define ICBID_SLAVE_SERVICE_MNOC 17 -#define ICBID_SLAVE_OCMEM 18 -#define ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM -#define ICBID_SLAVE_SERVICE_ONOC 19 -#define ICBID_SLAVE_APPSS 20 -#define ICBID_SLAVE_LPASS 21 -#define ICBID_SLAVE_USB3 22 -#define ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3 -#define ICBID_SLAVE_WCSS 23 -#define ICBID_SLAVE_SNOC_BIMC 24 -#define ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC -#define ICBID_SLAVE_SNOC_CNOC 25 -#define ICBID_SLAVE_IMEM 26 -#define ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM -#define ICBID_SLAVE_SNOC_OVIRT 27 -#define ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT -#define ICBID_SLAVE_SNOC_PNOC 28 -#define ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC -#define ICBID_SLAVE_SERVICE_SNOC 29 -#define ICBID_SLAVE_QDSS_STM 30 -#define ICBID_SLAVE_SDCC_1 31 -#define ICBID_SLAVE_SDCC_3 32 -#define ICBID_SLAVE_SDCC_2 33 -#define ICBID_SLAVE_SDCC_4 34 -#define ICBID_SLAVE_TSIF 35 -#define ICBID_SLAVE_BAM_DMA 36 -#define ICBID_SLAVE_BLSP_2 37 -#define ICBID_SLAVE_USB_HSIC 38 -#define ICBID_SLAVE_BLSP_1 39 -#define ICBID_SLAVE_USB_HS 40 -#define ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS -#define ICBID_SLAVE_PDM 41 -#define ICBID_SLAVE_PERIPH_APU_CFG 42 -#define ICBID_SLAVE_PNOC_MPU_CFG 43 -#define ICBID_SLAVE_PRNG 44 
-#define ICBID_SLAVE_PNOC_SNOC 45 -#define ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC -#define ICBID_SLAVE_SERVICE_PNOC 46 -#define ICBID_SLAVE_CLK_CTL 47 -#define ICBID_SLAVE_CNOC_MSS 48 -#define ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS -#define ICBID_SLAVE_SECURITY 49 -#define ICBID_SLAVE_TCSR 50 -#define ICBID_SLAVE_TLMM 51 -#define ICBID_SLAVE_CRYPTO_0_CFG 52 -#define ICBID_SLAVE_CRYPTO_1_CFG 53 -#define ICBID_SLAVE_IMEM_CFG 54 -#define ICBID_SLAVE_MESSAGE_RAM 55 -#define ICBID_SLAVE_BIMC_CFG 56 -#define ICBID_SLAVE_BOOT_ROM 57 -#define ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58 -#define ICBID_SLAVE_PMIC_ARB 59 -#define ICBID_SLAVE_SPDM_WRAPPER 60 -#define ICBID_SLAVE_DEHR_CFG 61 -#define ICBID_SLAVE_MPM 62 -#define ICBID_SLAVE_QDSS_CFG 63 -#define ICBID_SLAVE_RBCPR_CFG 64 -#define ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG -#define ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65 -#define ICBID_SLAVE_CNOC_MNOC_CFG 66 -#define ICBID_SLAVE_SNOC_MPU_CFG 67 -#define ICBID_SLAVE_CNOC_ONOC_CFG 68 -#define ICBID_SLAVE_PNOC_CFG 69 -#define ICBID_SLAVE_SNOC_CFG 70 -#define ICBID_SLAVE_EBI1_DLL_CFG 71 -#define ICBID_SLAVE_PHY_APU_CFG 72 -#define ICBID_SLAVE_EBI1_PHY_CFG 73 -#define ICBID_SLAVE_RPM 74 -#define ICBID_SLAVE_CNOC_SNOC 75 -#define ICBID_SLAVE_SERVICE_CNOC 76 -#define ICBID_SLAVE_OVIRT_SNOC 77 -#define ICBID_SLAVE_OVIRT_OCMEM 78 -#define ICBID_SLAVE_USB_HS2 79 -#define ICBID_SLAVE_QPIC 80 -#define ICBID_SLAVE_IPS_CFG 81 -#define ICBID_SLAVE_DSI_CFG 82 -#define ICBID_SLAVE_USB3_1 83 -#define ICBID_SLAVE_PCIE_0 84 -#define ICBID_SLAVE_PCIE_1 85 -#define ICBID_SLAVE_PSS_SMMU_CFG 86 -#define ICBID_SLAVE_CRYPTO_2_CFG 87 -#define ICBID_SLAVE_PCIE_0_CFG 88 -#define ICBID_SLAVE_PCIE_1_CFG 89 -#define ICBID_SLAVE_SATA_CFG 90 -#define ICBID_SLAVE_SPSS_GENI_IR 91 -#define ICBID_SLAVE_UFS_CFG 92 -#define ICBID_SLAVE_AVSYNC_CFG 93 -#define ICBID_SLAVE_VPU_CFG 94 -#define ICBID_SLAVE_USB_PHY_CFG 95 -#define ICBID_SLAVE_RBCPR_MX_CFG 96 -#define ICBID_SLAVE_PCIE_PARF 97 -#define ICBID_SLAVE_VCAP_CFG 98 -#define ICBID_SLAVE_EMAC_CFG 99 -#define ICBID_SLAVE_BCAST_CFG 100 -#define ICBID_SLAVE_KLM_CFG 101 -#define ICBID_SLAVE_DISPLAY_PWM 102 -#define ICBID_SLAVE_GENI 103 -#define ICBID_SLAVE_SNOC_BIMC_1 104 -#define ICBID_SLAVE_AUDIO 105 -#define ICBID_SLAVE_CATS_0 106 -#define ICBID_SLAVE_CATS_1 107 -#define ICBID_SLAVE_MM_INT_0 108 -#define ICBID_SLAVE_MM_INT_1 109 -#define ICBID_SLAVE_MM_INT_2 110 -#define ICBID_SLAVE_MM_INT_BIMC 111 -#define ICBID_SLAVE_MMU_MODEM_XPU_CFG 112 -#define ICBID_SLAVE_MSS_INT 113 -#define ICBID_SLAVE_PCNOC_INT_0 114 -#define ICBID_SLAVE_PCNOC_INT_1 115 -#define ICBID_SLAVE_PCNOC_M_0 116 -#define ICBID_SLAVE_PCNOC_M_1 117 -#define ICBID_SLAVE_PCNOC_S_0 118 -#define ICBID_SLAVE_PCNOC_S_1 119 -#define ICBID_SLAVE_PCNOC_S_2 120 -#define ICBID_SLAVE_PCNOC_S_3 121 -#define ICBID_SLAVE_PCNOC_S_4 122 -#define ICBID_SLAVE_PCNOC_S_6 123 -#define ICBID_SLAVE_PCNOC_S_7 124 -#define ICBID_SLAVE_PCNOC_S_8 125 -#define ICBID_SLAVE_PCNOC_S_9 126 -#define ICBID_SLAVE_PRNG_XPU_CFG 127 -#define ICBID_SLAVE_QDSS_INT 128 -#define ICBID_SLAVE_RPM_XPU_CFG 129 -#define ICBID_SLAVE_SNOC_INT_0 130 -#define ICBID_SLAVE_SNOC_INT_1 131 -#define ICBID_SLAVE_SNOC_INT_BIMC 132 -#define ICBID_SLAVE_TCU 133 -#define ICBID_SLAVE_BIMC_INT_0 134 -#define ICBID_SLAVE_BIMC_INT_1 135 -#define ICBID_SLAVE_RICA_CFG 136 -#define ICBID_SLAVE_SNOC_BIMC_2 137 -#define ICBID_SLAVE_BIMC_SNOC_1 138 -#define ICBID_SLAVE_PNOC_A1NOC 139 -#define ICBID_SLAVE_SNOC_VMEM 140 -#define ICBID_SLAVE_A0NOC_SNOC 141 -#define ICBID_SLAVE_A1NOC_SNOC 
142 -#define ICBID_SLAVE_A2NOC_SNOC 143 -#define ICBID_SLAVE_A0NOC_CFG 144 -#define ICBID_SLAVE_A0NOC_MPU_CFG 145 -#define ICBID_SLAVE_A0NOC_SMMU_CFG 146 -#define ICBID_SLAVE_A1NOC_CFG 147 -#define ICBID_SLAVE_A1NOC_MPU_CFG 148 -#define ICBID_SLAVE_A1NOC_SMMU_CFG 149 -#define ICBID_SLAVE_A2NOC_CFG 150 -#define ICBID_SLAVE_A2NOC_MPU_CFG 151 -#define ICBID_SLAVE_A2NOC_SMMU_CFG 152 -#define ICBID_SLAVE_AHB2PHY 153 -#define ICBID_SLAVE_CAMERA_THROTTLE_CFG 154 -#define ICBID_SLAVE_DCC_CFG 155 -#define ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156 -#define ICBID_SLAVE_DSA_CFG 157 -#define ICBID_SLAVE_DSA_MPU_CFG 158 -#define ICBID_SLAVE_SSC_MPU_CFG 159 -#define ICBID_SLAVE_HMSS_L3 160 -#define ICBID_SLAVE_LPASS_SMMU_CFG 161 -#define ICBID_SLAVE_MMAGIC_CFG 162 -#define ICBID_SLAVE_PCIE20_AHB2PHY 163 -#define ICBID_SLAVE_PCIE_2 164 -#define ICBID_SLAVE_PCIE_2_CFG 165 -#define ICBID_SLAVE_PIMEM 166 -#define ICBID_SLAVE_PIMEM_CFG 167 -#define ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168 -#define ICBID_SLAVE_RBCPR_CX 169 -#define ICBID_SLAVE_RBCPR_MX 170 -#define ICBID_SLAVE_SMMU_CPP_CFG 171 -#define ICBID_SLAVE_SMMU_JPEG_CFG 172 -#define ICBID_SLAVE_SMMU_MDP_CFG 173 -#define ICBID_SLAVE_SMMU_ROTATOR_CFG 174 -#define ICBID_SLAVE_SMMU_VENUS_CFG 175 -#define ICBID_SLAVE_SMMU_VFE_CFG 176 -#define ICBID_SLAVE_SSC_CFG 177 -#define ICBID_SLAVE_VENUS_THROTTLE_CFG 178 -#define ICBID_SLAVE_VMEM 179 -#define ICBID_SLAVE_VMEM_CFG 180 -#define ICBID_SLAVE_QDSS_MPU_CFG 181 -#define ICBID_SLAVE_USB3_PHY_CFG 182 -#define ICBID_SLAVE_IPA_CFG 183 -#define ICBID_SLAVE_PCNOC_INT_2 184 -#define ICBID_SLAVE_PCNOC_INT_3 185 -#define ICBID_SLAVE_PCNOC_INT_4 186 -#define ICBID_SLAVE_PCNOC_INT_5 187 -#define ICBID_SLAVE_PCNOC_INT_6 188 -#define ICBID_SLAVE_PCNOC_S_5 189 -#define ICBID_SLAVE_QSPI 190 -#define ICBID_SLAVE_A1NOC_MS_MPU_CFG 191 -#define ICBID_SLAVE_A2NOC_MS_MPU_CFG 192 -#define ICBID_SLAVE_MODEM_Q6_SMMU_CFG 193 -#define ICBID_SLAVE_MSS_MPU_CFG 194 -#define ICBID_SLAVE_MSS_PROC_MS_MPU_CFG 195 -#define ICBID_SLAVE_SKL 196 -#define ICBID_SLAVE_SNOC_INT_2 197 -#define ICBID_SLAVE_SMMNOC_BIMC 198 -#define ICBID_SLAVE_CRVIRT_A1NOC 199 -#define ICBID_SLAVE_SGMII 200 -#define ICBID_SLAVE_QHS4_APPS 201 -#define ICBID_SLAVE_BIMC_PCNOC 202 -#define ICBID_SLAVE_PCNOC_BIMC_1 203 -#define ICBID_SLAVE_SPMI_FETCHER 204 -#define ICBID_SLAVE_MMSS_SMMU_CFG 205 -#define ICBID_SLAVE_WLAN 206 -#define ICBID_SLAVE_CRVIRT_A2NOC 207 -#define ICBID_SLAVE_CNOC_A2NOC 208 -#define ICBID_SLAVE_GLM 209 -#define ICBID_SLAVE_GNOC_BIMC 210 -#define ICBID_SLAVE_GNOC_SNOC 211 -#define ICBID_SLAVE_QM_CFG 212 -#define ICBID_SLAVE_TLMM_EAST 213 -#define ICBID_SLAVE_TLMM_NORTH 214 -#define ICBID_SLAVE_TLMM_WEST 215 -#define ICBID_SLAVE_LPASS_TCM 216 -#define ICBID_SLAVE_TLMM_SOUTH 217 -#define ICBID_SLAVE_TLMM_CENTER 218 -#define ICBID_SLAVE_MSS_NAV_CE_MPU_CFG 219 -#define ICBID_SLAVE_A2NOC_THROTTLE_CFG 220 -#define ICBID_SLAVE_CDSP 221 -#define ICBID_SLAVE_CDSP_SMMU_CFG 222 -#define ICBID_SLAVE_LPASS_MPU_CFG 223 -#define ICBID_SLAVE_CSI_PHY_CFG 224 -#endif diff --git a/include/linux/bpf.h b/include/linux/bpf.h index c397934f91dd..e55e4255a210 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -78,6 +78,7 @@ enum bpf_arg_type { * functions that access data on eBPF program stack */ ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ + ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */ ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized, * helper function must fill all bytes or clear * them in error case. 
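For reference, a verifier-visible helper prototype that accepts an optional buffer might declare its arguments with the new type roughly as sketched below. This is a minimal, hypothetical example (bpf_example and bpf_example_proto do not exist in the tree); only ARG_PTR_TO_MEM_OR_NULL comes from the hunk above, and pairing it with ARG_CONST_SIZE_OR_ZERO is an assumption about typical usage, not something this patch mandates.

#include <linux/filter.h>

/* Sketch only: a helper whose (ptr, size) pair may legitimately be
 * (NULL, 0).  The pointer argument is declared ARG_PTR_TO_MEM_OR_NULL
 * so the verifier accepts a NULL scalar for it; the size argument is
 * assumed to use ARG_CONST_SIZE_OR_ZERO so a zero length is allowed.
 */
BPF_CALL_2(bpf_example, const void *, buf, u32, size)
{
	return buf ? size : 0;
}

static const struct bpf_func_proto bpf_example_proto = {
	.func		= bpf_example,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
};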
@@ -334,9 +335,8 @@ extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; extern const struct bpf_verifier_ops xdp_analyzer_ops; struct bpf_prog *bpf_prog_get(u32 ufd); -struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type); struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, - struct net_device *netdev); + bool attach_drv); struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i); void bpf_prog_sub(struct bpf_prog *prog, int i); struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog); @@ -425,15 +425,9 @@ static inline struct bpf_prog *bpf_prog_get(u32 ufd) return ERR_PTR(-EOPNOTSUPP); } -static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, - enum bpf_prog_type type) -{ - return ERR_PTR(-EOPNOTSUPP); -} - static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, - struct net_device *netdev) + bool attach_drv) { return ERR_PTR(-EOPNOTSUPP); } @@ -514,9 +508,14 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, } #endif /* CONFIG_BPF_SYSCALL */ +static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, + enum bpf_prog_type type) +{ + return bpf_prog_get_type_dev(ufd, type, false); +} + int bpf_prog_offload_compile(struct bpf_prog *prog); void bpf_prog_offload_destroy(struct bpf_prog *prog); -u32 bpf_prog_offload_ifindex(struct bpf_prog *prog); #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 07b96aaca256..c561b986bab0 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -115,7 +115,7 @@ struct bpf_insn_aux_data { struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ }; int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ - int converted_op_size; /* the valid value width after perceived conversion */ + bool seen; /* this insn was processed by the verifier */ }; #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ @@ -171,7 +171,7 @@ static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); #else -int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env) +static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env) { return -EOPNOTSUPP; } diff --git a/include/linux/fs.h b/include/linux/fs.h index e9379e258d64..2995a271ec46 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -971,8 +971,8 @@ struct lock_manager { struct net; void locks_start_grace(struct net *, struct lock_manager *); void locks_end_grace(struct lock_manager *); -int locks_in_grace(struct net *); -int opens_in_grace(struct net *); +bool locks_in_grace(struct net *); +bool opens_in_grace(struct net *); /* that will die - we need it for nfs_lock_info */ #include <linux/nfs_fs_i.h> diff --git a/include/linux/key-type.h b/include/linux/key-type.h index 9520fc3c3b9a..05d8fb5a06c4 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -44,7 +44,7 @@ struct key_preparsed_payload { const void *data; /* Raw data */ size_t datalen; /* Raw datalen */ size_t quotalen; /* Quota length for proposed payload */ - time_t expiry; /* Expiry time of key */ + time64_t expiry; /* Expiry time of key */ } __randomize_layout; typedef int (*request_key_actor_t)(struct key_construction *key, diff --git 
a/include/linux/key.h b/include/linux/key.h index 8a15cabe928d..e58ee10f6e58 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -24,6 +24,7 @@ #include <linux/atomic.h> #include <linux/assoc_array.h> #include <linux/refcount.h> +#include <linux/time64.h> #ifdef __KERNEL__ #include <linux/uidgid.h> @@ -162,10 +163,10 @@ struct key { struct key_user *user; /* owner of this key */ void *security; /* security data for this key */ union { - time_t expiry; /* time at which key expires (or 0) */ - time_t revoked_at; /* time at which key was revoked */ + time64_t expiry; /* time at which key expires (or 0) */ + time64_t revoked_at; /* time at which key was revoked */ }; - time_t last_used_at; /* last time used for LRU keyring discard */ + time64_t last_used_at; /* last time used for LRU keyring discard */ kuid_t uid; kgid_t gid; key_perm_t perm; /* access permissions */ diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 6cd0f6b7658b..cd55bf14ad51 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -267,7 +267,7 @@ struct mtd_info { */ unsigned int bitflip_threshold; - // Kernel-only stuff starts here. + /* Kernel-only stuff starts here. */ const char *name; int index; @@ -297,10 +297,6 @@ struct mtd_info { int (*_point) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, void **virt, resource_size_t *phys); int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len); - unsigned long (*_get_unmapped_area) (struct mtd_info *mtd, - unsigned long len, - unsigned long offset, - unsigned long flags); int (*_read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf); int (*_write) (struct mtd_info *mtd, loff_t to, size_t len, diff --git a/include/linux/mtd/nand-gpio.h b/include/linux/mtd/nand-gpio.h index fdef72d6e198..7ab51bc4a380 100644 --- a/include/linux/mtd/nand-gpio.h +++ b/include/linux/mtd/nand-gpio.h @@ -5,11 +5,6 @@ #include <linux/mtd/rawnand.h> struct gpio_nand_platdata { - int gpio_nce; - int gpio_nwp; - int gpio_cle; - int gpio_ale; - int gpio_rdy; void (*adjust_parts)(struct gpio_nand_platdata *, size_t); struct mtd_partition *parts; unsigned int num_parts; diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 2b05f4273bab..749bb08c4772 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h @@ -177,6 +177,9 @@ enum nand_ecc_algo { */ #define NAND_NEED_SCRAMBLING 0x00002000 +/* Device needs 3rd row address cycle */ +#define NAND_ROW_ADDR_3 0x00004000 + /* Options valid for Samsung large page devices */ #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 1f0a7fc7772f..d0c66a0975cf 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ -232,10 +232,17 @@ enum spi_nor_option_flags { }; /** + * struct flash_info - Forward declaration of a structure used internally by + * spi_nor_scan() + */ +struct flash_info; + +/** * struct spi_nor - Structure for defining a the SPI NOR layer * @mtd: point to a mtd_info structure * @lock: the lock for the read/write/erase/lock/unlock operations * @dev: point to a spi device, or a spi nor controller device. 
+ * @info: spi-nor part JDEC MFR id and other info * @page_size: the page size of the SPI NOR * @addr_width: number of address bytes * @erase_opcode: the opcode for erasing a sector @@ -262,6 +269,7 @@ enum spi_nor_option_flags { * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is + * @quad_enable: [FLASH-SPECIFIC] enables SPI NOR quad mode * completely locked * @priv: the private data */ @@ -269,6 +277,7 @@ struct spi_nor { struct mtd_info mtd; struct mutex lock; struct device *dev; + const struct flash_info *info; u32 page_size; u8 addr_width; u8 erase_opcode; @@ -296,6 +305,7 @@ struct spi_nor { int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*quad_enable)(struct spi_nor *nor); void *priv; }; diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index dc8b4896b77b..b1b0ca7ccb2b 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -54,8 +54,9 @@ enum { NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */ NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */ NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */ + NETIF_F_GSO_UDP_BIT, /* ... UFO, deprecated except tuntap */ /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */ - NETIF_F_GSO_ESP_BIT, + NETIF_F_GSO_UDP_BIT, NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */ @@ -132,6 +133,7 @@ enum { #define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM) #define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP) #define NETIF_F_GSO_ESP __NETIF_F(GSO_ESP) +#define NETIF_F_GSO_UDP __NETIF_F(GSO_UDP) #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER) #define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX) #define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6b274bfe489f..ef789e1d679e 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -4140,6 +4140,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type) BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT)); BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT)); + BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT)); return (features & feature) == feature; } diff --git a/include/linux/ntb.h b/include/linux/ntb.h index 609e232c00da..c308964777eb 100644 --- a/include/linux/ntb.h +++ b/include/linux/ntb.h @@ -70,6 +70,7 @@ struct pci_dev; * @NTB_TOPO_SEC: On secondary side of remote ntb. * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb. * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb. + * @NTB_TOPO_SWITCH: Connected via a switch which supports ntb. 
*/ enum ntb_topo { NTB_TOPO_NONE = -1, @@ -77,6 +78,7 @@ enum ntb_topo { NTB_TOPO_SEC, NTB_TOPO_B2B_USD, NTB_TOPO_B2B_DSD, + NTB_TOPO_SWITCH, }; static inline int ntb_topo_is_b2b(enum ntb_topo topo) @@ -97,6 +99,7 @@ static inline char *ntb_topo_string(enum ntb_topo topo) case NTB_TOPO_SEC: return "NTB_TOPO_SEC"; case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD"; case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD"; + case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH"; } return "NTB_TOPO_INVALID"; } @@ -730,7 +733,8 @@ static inline int ntb_link_disable(struct ntb_dev *ntb) * Hardware and topology may support a different number of memory windows. * Moreover different peer devices can support different number of memory * windows. Simply speaking this method returns the number of possible inbound - * memory windows to share with specified peer device. + * memory windows to share with specified peer device. Note: this may return + * zero if the link is not up yet. * * Return: the number of memory windows. */ @@ -751,7 +755,7 @@ static inline int ntb_mw_count(struct ntb_dev *ntb, int pidx) * Get the alignments of an inbound memory window with specified index. * NULL may be given for any output parameter if the value is not needed. * The alignment and size parameters may be used for allocation of proper - * shared memory. + * shared memory. Note: this must only be called when the link is up. * * Return: Zero on success, otherwise a negative error number. */ @@ -760,6 +764,9 @@ static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx, resource_size_t *size_align, resource_size_t *size_max) { + if (!(ntb_link_is_up(ntb, NULL, NULL) & (1 << pidx))) + return -ENOTCONN; + return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align, size_max); } diff --git a/include/linux/platform_data/mtd-nand-omap2.h b/include/linux/platform_data/mtd-nand-omap2.h index 25e267f1970c..619df2431e75 100644 --- a/include/linux/platform_data/mtd-nand-omap2.h +++ b/include/linux/platform_data/mtd-nand-omap2.h @@ -64,21 +64,4 @@ struct gpmc_nand_regs { void __iomem *gpmc_bch_result5[GPMC_BCH_NUM_REMAINDER]; void __iomem *gpmc_bch_result6[GPMC_BCH_NUM_REMAINDER]; }; - -struct omap_nand_platform_data { - int cs; - struct mtd_partition *parts; - int nr_parts; - bool flash_bbt; - enum nand_io xfer_type; - int devsize; - enum omap_ecc ecc_opt; - - struct device_node *elm_of_node; - - /* deprecated */ - struct gpmc_nand_regs reg; - struct device_node *of_node; - bool dev_ready; -}; #endif diff --git a/include/linux/printk.h b/include/linux/printk.h index 905bba92f015..e9b603ee9953 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -132,10 +132,8 @@ struct va_format { */ #define no_printk(fmt, ...) \ ({ \ - do { \ - if (0) \ - printk(fmt, ##__VA_ARGS__); \ - } while (0); \ + if (0) \ + printk(fmt, ##__VA_ARGS__); \ 0; \ }) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index ed06e1c28fc7..bc486ef23f20 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -568,6 +568,8 @@ enum { SKB_GSO_SCTP = 1 << 14, SKB_GSO_ESP = 1 << 15, + + SKB_GSO_UDP = 1 << 16, }; #if BITS_PER_LONG > 32 diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 3b9f0d1dbb80..786ae2255f05 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -47,6 +47,7 @@ struct svc_pool { struct svc_pool_stats sp_stats; /* statistics on pool operation */ #define SP_TASK_PENDING (0) /* still work to do even if no * xprt is queued. 
*/ +#define SP_CONGESTED (1) unsigned long sp_flags; } ____cacheline_aligned_in_smp; diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h new file mode 100644 index 000000000000..09d73d0d1aa8 --- /dev/null +++ b/include/linux/switchtec.h @@ -0,0 +1,373 @@ +/* + * Microsemi Switchtec PCIe Driver + * Copyright (c) 2017, Microsemi Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef _SWITCHTEC_H +#define _SWITCHTEC_H + +#include <linux/pci.h> +#include <linux/cdev.h> + +#define MICROSEMI_VENDOR_ID 0x11f8 +#define MICROSEMI_NTB_CLASSCODE 0x068000 +#define MICROSEMI_MGMT_CLASSCODE 0x058000 + +#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024 +#define SWITCHTEC_MAX_PFF_CSR 48 + +#define SWITCHTEC_EVENT_OCCURRED BIT(0) +#define SWITCHTEC_EVENT_CLEAR BIT(0) +#define SWITCHTEC_EVENT_EN_LOG BIT(1) +#define SWITCHTEC_EVENT_EN_CLI BIT(2) +#define SWITCHTEC_EVENT_EN_IRQ BIT(3) +#define SWITCHTEC_EVENT_FATAL BIT(4) + +enum { + SWITCHTEC_GAS_MRPC_OFFSET = 0x0000, + SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000, + SWITCHTEC_GAS_SW_EVENT_OFFSET = 0x1800, + SWITCHTEC_GAS_SYS_INFO_OFFSET = 0x2000, + SWITCHTEC_GAS_FLASH_INFO_OFFSET = 0x2200, + SWITCHTEC_GAS_PART_CFG_OFFSET = 0x4000, + SWITCHTEC_GAS_NTB_OFFSET = 0x10000, + SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000, +}; + +struct mrpc_regs { + u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; + u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; + u32 cmd; + u32 status; + u32 ret_value; +} __packed; + +enum mrpc_status { + SWITCHTEC_MRPC_STATUS_INPROGRESS = 1, + SWITCHTEC_MRPC_STATUS_DONE = 2, + SWITCHTEC_MRPC_STATUS_ERROR = 0xFF, + SWITCHTEC_MRPC_STATUS_INTERRUPTED = 0x100, +}; + +struct sw_event_regs { + u64 event_report_ctrl; + u64 reserved1; + u64 part_event_bitmap; + u64 reserved2; + u32 global_summary; + u32 reserved3[3]; + u32 stack_error_event_hdr; + u32 stack_error_event_data; + u32 reserved4[4]; + u32 ppu_error_event_hdr; + u32 ppu_error_event_data; + u32 reserved5[4]; + u32 isp_error_event_hdr; + u32 isp_error_event_data; + u32 reserved6[4]; + u32 sys_reset_event_hdr; + u32 reserved7[5]; + u32 fw_exception_hdr; + u32 reserved8[5]; + u32 fw_nmi_hdr; + u32 reserved9[5]; + u32 fw_non_fatal_hdr; + u32 reserved10[5]; + u32 fw_fatal_hdr; + u32 reserved11[5]; + u32 twi_mrpc_comp_hdr; + u32 twi_mrpc_comp_data; + u32 reserved12[4]; + u32 twi_mrpc_comp_async_hdr; + u32 twi_mrpc_comp_async_data; + u32 reserved13[4]; + u32 cli_mrpc_comp_hdr; + u32 cli_mrpc_comp_data; + u32 reserved14[4]; + u32 cli_mrpc_comp_async_hdr; + u32 cli_mrpc_comp_async_data; + u32 reserved15[4]; + u32 gpio_interrupt_hdr; + u32 gpio_interrupt_data; + u32 reserved16[4]; +} __packed; + +enum { + SWITCHTEC_CFG0_RUNNING = 0x04, + SWITCHTEC_CFG1_RUNNING = 0x05, + SWITCHTEC_IMG0_RUNNING = 0x03, + SWITCHTEC_IMG1_RUNNING = 0x07, +}; + +struct sys_info_regs { + u32 device_id; + u32 device_version; + u32 firmware_version; + u32 reserved1; + u32 vendor_table_revision; + u32 table_format_version; + u32 partition_id; + u32 cfg_file_fmt_version; + u16 cfg_running; + u16 img_running; + u32 reserved2[57]; + char vendor_id[8]; + char product_id[16]; + char product_revision[4]; + char 
component_vendor[8]; + u16 component_id; + u8 component_revision; +} __packed; + +struct flash_info_regs { + u32 flash_part_map_upd_idx; + + struct active_partition_info { + u32 address; + u32 build_version; + u32 build_string; + } active_img; + + struct active_partition_info active_cfg; + struct active_partition_info inactive_img; + struct active_partition_info inactive_cfg; + + u32 flash_length; + + struct partition_info { + u32 address; + u32 length; + } cfg0; + + struct partition_info cfg1; + struct partition_info img0; + struct partition_info img1; + struct partition_info nvlog; + struct partition_info vendor[8]; +}; + +enum { + SWITCHTEC_NTB_REG_INFO_OFFSET = 0x0000, + SWITCHTEC_NTB_REG_CTRL_OFFSET = 0x4000, + SWITCHTEC_NTB_REG_DBMSG_OFFSET = 0x64000, +}; + +struct ntb_info_regs { + u8 partition_count; + u8 partition_id; + u16 reserved1; + u64 ep_map; + u16 requester_id; +} __packed; + +struct part_cfg_regs { + u32 status; + u32 state; + u32 port_cnt; + u32 usp_port_mode; + u32 usp_pff_inst_id; + u32 vep_pff_inst_id; + u32 dsp_pff_inst_id[47]; + u32 reserved1[11]; + u16 vep_vector_number; + u16 usp_vector_number; + u32 port_event_bitmap; + u32 reserved2[3]; + u32 part_event_summary; + u32 reserved3[3]; + u32 part_reset_hdr; + u32 part_reset_data[5]; + u32 mrpc_comp_hdr; + u32 mrpc_comp_data[5]; + u32 mrpc_comp_async_hdr; + u32 mrpc_comp_async_data[5]; + u32 dyn_binding_hdr; + u32 dyn_binding_data[5]; + u32 reserved4[159]; +} __packed; + +enum { + NTB_CTRL_PART_OP_LOCK = 0x1, + NTB_CTRL_PART_OP_CFG = 0x2, + NTB_CTRL_PART_OP_RESET = 0x3, + + NTB_CTRL_PART_STATUS_NORMAL = 0x1, + NTB_CTRL_PART_STATUS_LOCKED = 0x2, + NTB_CTRL_PART_STATUS_LOCKING = 0x3, + NTB_CTRL_PART_STATUS_CONFIGURING = 0x4, + NTB_CTRL_PART_STATUS_RESETTING = 0x5, + + NTB_CTRL_BAR_VALID = 1 << 0, + NTB_CTRL_BAR_DIR_WIN_EN = 1 << 4, + NTB_CTRL_BAR_LUT_WIN_EN = 1 << 5, + + NTB_CTRL_REQ_ID_EN = 1 << 0, + + NTB_CTRL_LUT_EN = 1 << 0, + + NTB_PART_CTRL_ID_PROT_DIS = 1 << 0, +}; + +struct ntb_ctrl_regs { + u32 partition_status; + u32 partition_op; + u32 partition_ctrl; + u32 bar_setup; + u32 bar_error; + u16 lut_table_entries; + u16 lut_table_offset; + u32 lut_error; + u16 req_id_table_size; + u16 req_id_table_offset; + u32 req_id_error; + u32 reserved1[7]; + struct { + u32 ctl; + u32 win_size; + u64 xlate_addr; + } bar_entry[6]; + u32 reserved2[216]; + u32 req_id_table[256]; + u32 reserved3[512]; + u64 lut_entry[512]; +} __packed; + +#define NTB_DBMSG_IMSG_STATUS BIT_ULL(32) +#define NTB_DBMSG_IMSG_MASK BIT_ULL(40) + +struct ntb_dbmsg_regs { + u32 reserved1[1024]; + u64 odb; + u64 odb_mask; + u64 idb; + u64 idb_mask; + u8 idb_vec_map[64]; + u32 msg_map; + u32 reserved2; + struct { + u32 msg; + u32 status; + } omsg[4]; + + struct { + u32 msg; + u8 status; + u8 mask; + u8 src; + u8 reserved; + } imsg[4]; + + u8 reserved3[3928]; + u8 msix_table[1024]; + u8 reserved4[3072]; + u8 pba[24]; + u8 reserved5[4072]; +} __packed; + +enum { + SWITCHTEC_PART_CFG_EVENT_RESET = 1 << 0, + SWITCHTEC_PART_CFG_EVENT_MRPC_CMP = 1 << 1, + SWITCHTEC_PART_CFG_EVENT_MRPC_ASYNC_CMP = 1 << 2, + SWITCHTEC_PART_CFG_EVENT_DYN_PART_CMP = 1 << 3, +}; + +struct pff_csr_regs { + u16 vendor_id; + u16 device_id; + u32 pci_cfg_header[15]; + u32 pci_cap_region[48]; + u32 pcie_cap_region[448]; + u32 indirect_gas_window[128]; + u32 indirect_gas_window_off; + u32 reserved[127]; + u32 pff_event_summary; + u32 reserved2[3]; + u32 aer_in_p2p_hdr; + u32 aer_in_p2p_data[5]; + u32 aer_in_vep_hdr; + u32 aer_in_vep_data[5]; + u32 dpc_hdr; + u32 dpc_data[5]; + u32 cts_hdr; + 
u32 cts_data[5]; + u32 reserved3[6]; + u32 hotplug_hdr; + u32 hotplug_data[5]; + u32 ier_hdr; + u32 ier_data[5]; + u32 threshold_hdr; + u32 threshold_data[5]; + u32 power_mgmt_hdr; + u32 power_mgmt_data[5]; + u32 tlp_throttling_hdr; + u32 tlp_throttling_data[5]; + u32 force_speed_hdr; + u32 force_speed_data[5]; + u32 credit_timeout_hdr; + u32 credit_timeout_data[5]; + u32 link_state_hdr; + u32 link_state_data[5]; + u32 reserved4[174]; +} __packed; + +struct switchtec_ntb; + +struct switchtec_dev { + struct pci_dev *pdev; + struct device dev; + struct cdev cdev; + + int partition; + int partition_count; + int pff_csr_count; + char pff_local[SWITCHTEC_MAX_PFF_CSR]; + + void __iomem *mmio; + struct mrpc_regs __iomem *mmio_mrpc; + struct sw_event_regs __iomem *mmio_sw_event; + struct sys_info_regs __iomem *mmio_sys_info; + struct flash_info_regs __iomem *mmio_flash_info; + struct ntb_info_regs __iomem *mmio_ntb; + struct part_cfg_regs __iomem *mmio_part_cfg; + struct part_cfg_regs __iomem *mmio_part_cfg_all; + struct pff_csr_regs __iomem *mmio_pff_csr; + + /* + * The mrpc mutex must be held when accessing the other + * mrpc_ fields, alive flag and stuser->state field + */ + struct mutex mrpc_mutex; + struct list_head mrpc_queue; + int mrpc_busy; + struct work_struct mrpc_work; + struct delayed_work mrpc_timeout; + bool alive; + + wait_queue_head_t event_wq; + atomic_t event_cnt; + + struct work_struct link_event_work; + void (*link_notifier)(struct switchtec_dev *stdev); + u8 link_event_count[SWITCHTEC_MAX_PFF_CSR]; + + struct switchtec_ntb *sndev; +}; + +static inline struct switchtec_dev *to_stdev(struct device *dev) +{ + return container_of(dev, struct switchtec_dev, dev); +} + +extern struct class *switchtec_class; + +#endif diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 210034c896e3..f144216febc6 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -9,7 +9,7 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, const struct virtio_net_hdr *hdr, bool little_endian) { - unsigned short gso_type = 0; + unsigned int gso_type = 0; if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { @@ -19,6 +19,9 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, case VIRTIO_NET_HDR_GSO_TCPV6: gso_type = SKB_GSO_TCPV6; break; + case VIRTIO_NET_HDR_GSO_UDP: + gso_type = SKB_GSO_UDP; + break; default: return -EINVAL; } diff --git a/include/linux/wmi.h b/include/linux/wmi.h index cd0d7734dc49..4757cb5077e5 100644 --- a/include/linux/wmi.h +++ b/include/linux/wmi.h @@ -18,6 +18,7 @@ #include <linux/device.h> #include <linux/acpi.h> +#include <uapi/linux/wmi.h> struct wmi_device { struct device dev; @@ -26,13 +27,17 @@ struct wmi_device { bool setable; }; +/* evaluate the ACPI method associated with this device */ +extern acpi_status wmidev_evaluate_method(struct wmi_device *wdev, + u8 instance, u32 method_id, + const struct acpi_buffer *in, + struct acpi_buffer *out); + /* Caller must kfree the result. */ extern union acpi_object *wmidev_block_query(struct wmi_device *wdev, u8 instance); -/* Gets another device on the same bus. Caller must put_device the result. 
*/ -extern struct wmi_device *wmidev_get_other_guid(struct wmi_device *wdev, - const char *guid_string); +extern int set_required_buffer_size(struct wmi_device *wdev, u64 length); struct wmi_device_id { const char *guid_string; @@ -45,6 +50,8 @@ struct wmi_driver { int (*probe)(struct wmi_device *wdev); int (*remove)(struct wmi_device *wdev); void (*notify)(struct wmi_device *device, union acpi_object *data); + long (*filter_callback)(struct wmi_device *wdev, unsigned int cmd, + struct wmi_ioctl_buffer *arg); }; extern int __must_check __wmi_driver_register(struct wmi_driver *driver, diff --git a/include/net/ipv6.h b/include/net/ipv6.h index ec14f0d5a3a1..f73797e2fa60 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -767,6 +767,7 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add __be32 ipv6_select_ident(struct net *net, const struct in6_addr *daddr, const struct in6_addr *saddr); +__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb); int ip6_dst_hoplimit(struct dst_entry *dst); diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 1fb6ad3c5006..7ae177c8e399 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -15,6 +15,8 @@ struct scsi_cmnd; struct scsi_lun; struct scsi_sense_hdr; +typedef unsigned int __bitwise blist_flags_t; + struct scsi_mode_data { __u32 length; __u16 block_descriptor_length; @@ -141,7 +143,7 @@ struct scsi_device { unsigned char current_tag; /* current tag */ struct scsi_target *sdev_target; /* used only for single_lun */ - unsigned int sdev_bflags; /* black/white flags as also found in + blist_flags_t sdev_bflags; /* black/white flags as also found in * scsi_devinfo.[hc]. For now used only to * pass settings from slave_alloc to scsi * core. */ diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h index 3cf125b56c3a..ea67c32e870e 100644 --- a/include/scsi/scsi_devinfo.h +++ b/include/scsi/scsi_devinfo.h @@ -6,55 +6,55 @@ */ /* Only scan LUN 0 */ -#define BLIST_NOLUN ((__force __u32 __bitwise)(1 << 0)) +#define BLIST_NOLUN ((__force blist_flags_t)(1 << 0)) /* Known to have LUNs, force scanning. 
* DEPRECATED: Use max_luns=N */ -#define BLIST_FORCELUN ((__force __u32 __bitwise)(1 << 1)) +#define BLIST_FORCELUN ((__force blist_flags_t)(1 << 1)) /* Flag for broken handshaking */ -#define BLIST_BORKEN ((__force __u32 __bitwise)(1 << 2)) +#define BLIST_BORKEN ((__force blist_flags_t)(1 << 2)) /* unlock by special command */ -#define BLIST_KEY ((__force __u32 __bitwise)(1 << 3)) +#define BLIST_KEY ((__force blist_flags_t)(1 << 3)) /* Do not use LUNs in parallel */ -#define BLIST_SINGLELUN ((__force __u32 __bitwise)(1 << 4)) +#define BLIST_SINGLELUN ((__force blist_flags_t)(1 << 4)) /* Buggy Tagged Command Queuing */ -#define BLIST_NOTQ ((__force __u32 __bitwise)(1 << 5)) +#define BLIST_NOTQ ((__force blist_flags_t)(1 << 5)) /* Non consecutive LUN numbering */ -#define BLIST_SPARSELUN ((__force __u32 __bitwise)(1 << 6)) +#define BLIST_SPARSELUN ((__force blist_flags_t)(1 << 6)) /* Avoid LUNS >= 5 */ -#define BLIST_MAX5LUN ((__force __u32 __bitwise)(1 << 7)) +#define BLIST_MAX5LUN ((__force blist_flags_t)(1 << 7)) /* Treat as (removable) CD-ROM */ -#define BLIST_ISROM ((__force __u32 __bitwise)(1 << 8)) +#define BLIST_ISROM ((__force blist_flags_t)(1 << 8)) /* LUNs past 7 on a SCSI-2 device */ -#define BLIST_LARGELUN ((__force __u32 __bitwise)(1 << 9)) +#define BLIST_LARGELUN ((__force blist_flags_t)(1 << 9)) /* override additional length field */ -#define BLIST_INQUIRY_36 ((__force __u32 __bitwise)(1 << 10)) +#define BLIST_INQUIRY_36 ((__force blist_flags_t)(1 << 10)) /* do not do automatic start on add */ -#define BLIST_NOSTARTONADD ((__force __u32 __bitwise)(1 << 12)) +#define BLIST_NOSTARTONADD ((__force blist_flags_t)(1 << 12)) /* try REPORT_LUNS even for SCSI-2 devs (if HBA supports more than 8 LUNs) */ -#define BLIST_REPORTLUN2 ((__force __u32 __bitwise)(1 << 17)) +#define BLIST_REPORTLUN2 ((__force blist_flags_t)(1 << 17)) /* don't try REPORT_LUNS scan (SCSI-3 devs) */ -#define BLIST_NOREPORTLUN ((__force __u32 __bitwise)(1 << 18)) +#define BLIST_NOREPORTLUN ((__force blist_flags_t)(1 << 18)) /* don't use PREVENT-ALLOW commands */ -#define BLIST_NOT_LOCKABLE ((__force __u32 __bitwise)(1 << 19)) +#define BLIST_NOT_LOCKABLE ((__force blist_flags_t)(1 << 19)) /* device is actually for RAID config */ -#define BLIST_NO_ULD_ATTACH ((__force __u32 __bitwise)(1 << 20)) +#define BLIST_NO_ULD_ATTACH ((__force blist_flags_t)(1 << 20)) /* select without ATN */ -#define BLIST_SELECT_NO_ATN ((__force __u32 __bitwise)(1 << 21)) +#define BLIST_SELECT_NO_ATN ((__force blist_flags_t)(1 << 21)) /* retry HARDWARE_ERROR */ -#define BLIST_RETRY_HWERROR ((__force __u32 __bitwise)(1 << 22)) +#define BLIST_RETRY_HWERROR ((__force blist_flags_t)(1 << 22)) /* maximum 512 sector cdb length */ -#define BLIST_MAX_512 ((__force __u32 __bitwise)(1 << 23)) +#define BLIST_MAX_512 ((__force blist_flags_t)(1 << 23)) /* Disable T10 PI (DIF) */ -#define BLIST_NO_DIF ((__force __u32 __bitwise)(1 << 25)) +#define BLIST_NO_DIF ((__force blist_flags_t)(1 << 25)) /* Ignore SBC-3 VPD pages */ -#define BLIST_SKIP_VPD_PAGES ((__force __u32 __bitwise)(1 << 26)) +#define BLIST_SKIP_VPD_PAGES ((__force blist_flags_t)(1 << 26)) /* Attempt to read VPD pages */ -#define BLIST_TRY_VPD_PAGES ((__force __u32 __bitwise)(1 << 28)) +#define BLIST_TRY_VPD_PAGES ((__force blist_flags_t)(1 << 28)) /* don't try to issue RSOC */ -#define BLIST_NO_RSOC ((__force __u32 __bitwise)(1 << 29)) +#define BLIST_NO_RSOC ((__force blist_flags_t)(1 << 29)) /* maximum 1024 sector cdb length */ -#define BLIST_MAX_1024 ((__force __u32 __bitwise)(1 << 30)) 
+#define BLIST_MAX_1024 ((__force blist_flags_t)(1 << 30)) /* Use UNMAP limit for WRITE SAME */ -#define BLIST_UNMAP_LIMIT_WS ((__force __u32 __bitwise)(1 << 31)) +#define BLIST_UNMAP_LIMIT_WS ((__force blist_flags_t)(1 << 31)) #endif diff --git a/include/sound/control.h b/include/sound/control.h index a1f1152bc687..ca13a44ae9d4 100644 --- a/include/sound/control.h +++ b/include/sound/control.h @@ -249,7 +249,9 @@ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kctl, void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only); #define snd_ctl_sync_vmaster_hook(kctl) snd_ctl_sync_vmaster(kctl, true) int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl, - int (*func)(struct snd_kcontrol *, void *), + int (*func)(struct snd_kcontrol *vslave, + struct snd_kcontrol *slave, + void *arg), void *arg); /* diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index ebe96796027a..36cb50c111a6 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -49,6 +49,7 @@ enum rxrpc_conn_trace { rxrpc_conn_put_client, rxrpc_conn_put_service, rxrpc_conn_queued, + rxrpc_conn_reap_service, rxrpc_conn_seen, }; @@ -138,10 +139,24 @@ enum rxrpc_rtt_rx_trace { enum rxrpc_timer_trace { rxrpc_timer_begin, + rxrpc_timer_exp_ack, + rxrpc_timer_exp_hard, + rxrpc_timer_exp_idle, + rxrpc_timer_exp_keepalive, + rxrpc_timer_exp_lost_ack, + rxrpc_timer_exp_normal, + rxrpc_timer_exp_ping, + rxrpc_timer_exp_resend, rxrpc_timer_expired, rxrpc_timer_init_for_reply, rxrpc_timer_init_for_send_reply, + rxrpc_timer_restart, rxrpc_timer_set_for_ack, + rxrpc_timer_set_for_hard, + rxrpc_timer_set_for_idle, + rxrpc_timer_set_for_keepalive, + rxrpc_timer_set_for_lost_ack, + rxrpc_timer_set_for_normal, rxrpc_timer_set_for_ping, rxrpc_timer_set_for_resend, rxrpc_timer_set_for_send, @@ -150,6 +165,7 @@ enum rxrpc_timer_trace { enum rxrpc_propose_ack_trace { rxrpc_propose_ack_client_tx_end, rxrpc_propose_ack_input_data, + rxrpc_propose_ack_ping_for_keepalive, rxrpc_propose_ack_ping_for_lost_ack, rxrpc_propose_ack_ping_for_lost_reply, rxrpc_propose_ack_ping_for_params, @@ -206,6 +222,7 @@ enum rxrpc_congest_change { EM(rxrpc_conn_put_client, "PTc") \ EM(rxrpc_conn_put_service, "PTs") \ EM(rxrpc_conn_queued, "QUE") \ + EM(rxrpc_conn_reap_service, "RPs") \ E_(rxrpc_conn_seen, "SEE") #define rxrpc_client_traces \ @@ -296,16 +313,31 @@ enum rxrpc_congest_change { #define rxrpc_timer_traces \ EM(rxrpc_timer_begin, "Begin ") \ EM(rxrpc_timer_expired, "*EXPR*") \ + EM(rxrpc_timer_exp_ack, "ExpAck") \ + EM(rxrpc_timer_exp_hard, "ExpHrd") \ + EM(rxrpc_timer_exp_idle, "ExpIdl") \ + EM(rxrpc_timer_exp_keepalive, "ExpKA ") \ + EM(rxrpc_timer_exp_lost_ack, "ExpLoA") \ + EM(rxrpc_timer_exp_normal, "ExpNml") \ + EM(rxrpc_timer_exp_ping, "ExpPng") \ + EM(rxrpc_timer_exp_resend, "ExpRsn") \ EM(rxrpc_timer_init_for_reply, "IniRpl") \ EM(rxrpc_timer_init_for_send_reply, "SndRpl") \ + EM(rxrpc_timer_restart, "Restrt") \ EM(rxrpc_timer_set_for_ack, "SetAck") \ + EM(rxrpc_timer_set_for_hard, "SetHrd") \ + EM(rxrpc_timer_set_for_idle, "SetIdl") \ + EM(rxrpc_timer_set_for_keepalive, "KeepAl") \ + EM(rxrpc_timer_set_for_lost_ack, "SetLoA") \ + EM(rxrpc_timer_set_for_normal, "SetNml") \ EM(rxrpc_timer_set_for_ping, "SetPng") \ EM(rxrpc_timer_set_for_resend, "SetRTx") \ - E_(rxrpc_timer_set_for_send, "SetTx ") + E_(rxrpc_timer_set_for_send, "SetSnd") #define rxrpc_propose_ack_traces \ EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \ EM(rxrpc_propose_ack_input_data, "DataIn ") \ + 
EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \ EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \ EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \ EM(rxrpc_propose_ack_ping_for_params, "Params ") \ @@ -932,39 +964,47 @@ TRACE_EVENT(rxrpc_rtt_rx, TRACE_EVENT(rxrpc_timer, TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why, - ktime_t now, unsigned long now_j), + unsigned long now), - TP_ARGS(call, why, now, now_j), + TP_ARGS(call, why, now), TP_STRUCT__entry( __field(struct rxrpc_call *, call ) __field(enum rxrpc_timer_trace, why ) - __field_struct(ktime_t, now ) - __field_struct(ktime_t, expire_at ) - __field_struct(ktime_t, ack_at ) - __field_struct(ktime_t, resend_at ) - __field(unsigned long, now_j ) - __field(unsigned long, timer ) + __field(long, now ) + __field(long, ack_at ) + __field(long, ack_lost_at ) + __field(long, resend_at ) + __field(long, ping_at ) + __field(long, expect_rx_by ) + __field(long, expect_req_by ) + __field(long, expect_term_by ) + __field(long, timer ) ), TP_fast_assign( - __entry->call = call; - __entry->why = why; - __entry->now = now; - __entry->expire_at = call->expire_at; - __entry->ack_at = call->ack_at; - __entry->resend_at = call->resend_at; - __entry->now_j = now_j; - __entry->timer = call->timer.expires; + __entry->call = call; + __entry->why = why; + __entry->now = now; + __entry->ack_at = call->ack_at; + __entry->ack_lost_at = call->ack_lost_at; + __entry->resend_at = call->resend_at; + __entry->expect_rx_by = call->expect_rx_by; + __entry->expect_req_by = call->expect_req_by; + __entry->expect_term_by = call->expect_term_by; + __entry->timer = call->timer.expires; ), - TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld", + TP_printk("c=%p %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld", __entry->call, __print_symbolic(__entry->why, rxrpc_timer_traces), - ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)), - ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)), - ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)), - __entry->timer - __entry->now_j) + __entry->ack_at - __entry->now, + __entry->ack_lost_at - __entry->now, + __entry->resend_at - __entry->now, + __entry->expect_rx_by - __entry->now, + __entry->expect_req_by - __entry->now, + __entry->expect_term_by - __entry->now, + __entry->timer - __entry->now) ); TRACE_EVENT(rxrpc_rx_lose, @@ -1080,7 +1120,7 @@ TRACE_EVENT(rxrpc_congest, memcpy(&__entry->sum, summary, sizeof(__entry->sum)); ), - TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s", + TP_printk("c=%p r=%08x %s q=%08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s", __entry->call, __entry->ack_serial, __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names), diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index ecbdbfe86eb6..8c153f68509e 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -486,20 +486,22 @@ TRACE_EVENT(svc_recv, TP_ARGS(rqst, status), TP_STRUCT__entry( - __field(struct sockaddr *, addr) __field(u32, xid) __field(int, status) __field(unsigned long, flags) + __dynamic_array(unsigned char, addr, rqst->rq_addrlen) ), TP_fast_assign( - __entry->addr = (struct sockaddr *)&rqst->rq_addr; __entry->xid = status > 0 ? 
be32_to_cpu(rqst->rq_xid) : 0; __entry->status = status; __entry->flags = rqst->rq_flags; + memcpy(__get_dynamic_array(addr), + &rqst->rq_addr, rqst->rq_addrlen); ), - TP_printk("addr=%pIScp xid=0x%08x status=%d flags=%s", __entry->addr, + TP_printk("addr=%pIScp xid=0x%08x status=%d flags=%s", + (struct sockaddr *)__get_dynamic_array(addr), __entry->xid, __entry->status, show_rqstp_flags(__entry->flags)) ); @@ -544,22 +546,23 @@ DECLARE_EVENT_CLASS(svc_rqst_status, TP_ARGS(rqst, status), TP_STRUCT__entry( - __field(struct sockaddr *, addr) __field(u32, xid) - __field(int, dropme) __field(int, status) __field(unsigned long, flags) + __dynamic_array(unsigned char, addr, rqst->rq_addrlen) ), TP_fast_assign( - __entry->addr = (struct sockaddr *)&rqst->rq_addr; __entry->xid = be32_to_cpu(rqst->rq_xid); __entry->status = status; __entry->flags = rqst->rq_flags; + memcpy(__get_dynamic_array(addr), + &rqst->rq_addr, rqst->rq_addrlen); ), TP_printk("addr=%pIScp rq_xid=0x%08x status=%d flags=%s", - __entry->addr, __entry->xid, + (struct sockaddr *)__get_dynamic_array(addr), + __entry->xid, __entry->status, show_rqstp_flags(__entry->flags)) ); diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index e880ae6434ee..4c223ab30293 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -262,7 +262,7 @@ union bpf_attr { __u32 kern_version; /* checked when prog_type=kprobe */ __u32 prog_flags; char prog_name[BPF_OBJ_NAME_LEN]; - __u32 prog_target_ifindex; /* ifindex of netdev to prep for */ + __u32 prog_ifindex; /* ifindex of netdev to prep for */ }; struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -897,10 +897,6 @@ enum sk_action { #define BPF_TAG_SIZE 8 -enum bpf_prog_status { - BPF_PROG_STATUS_DEV_BOUND = (1 << 0), -}; - struct bpf_prog_info { __u32 type; __u32 id; @@ -914,8 +910,6 @@ struct bpf_prog_info { __u32 nr_map_ids; __aligned_u64 map_ids; char name[BPF_OBJ_NAME_LEN]; - __u32 ifindex; - __u32 status; } __attribute__((aligned(8))); struct bpf_map_info { diff --git a/include/uapi/linux/rxrpc.h b/include/uapi/linux/rxrpc.h index 9d4afea308a4..9335d92c14a4 100644 --- a/include/uapi/linux/rxrpc.h +++ b/include/uapi/linux/rxrpc.h @@ -59,6 +59,7 @@ enum rxrpc_cmsg_type { RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */ RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */ RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */ + RXRPC_SET_CALL_TIMEOUT = 13, /* s-: Set one or more call timeouts */ RXRPC__SUPPORTED }; diff --git a/include/uapi/linux/vm_sockets_diag.h b/include/uapi/linux/vm_sockets_diag.h index 14cd7dc5a187..0b4dd54f3d1e 100644 --- a/include/uapi/linux/vm_sockets_diag.h +++ b/include/uapi/linux/vm_sockets_diag.h @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* AF_VSOCK sock_diag(7) interface for querying open sockets */ #ifndef _UAPI__VM_SOCKETS_DIAG_H__ diff --git a/include/uapi/linux/wmi.h b/include/uapi/linux/wmi.h new file mode 100644 index 000000000000..7a92e9e3d1c0 --- /dev/null +++ b/include/uapi/linux/wmi.h @@ -0,0 +1,73 @@ +/* + * User API methods for ACPI-WMI mapping driver + * + * Copyright (C) 2017 Dell, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#ifndef _UAPI_LINUX_WMI_H +#define _UAPI_LINUX_WMI_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +/* WMI bus will filter all WMI vendor driver requests through this IOC */ +#define WMI_IOC 'W' + +/* All ioctl requests through WMI should declare their size followed by + * relevant data objects + */ +struct wmi_ioctl_buffer { + __u64 length; + __u8 data[]; +}; + +/* This structure may be modified by the firmware when we enter + * system management mode through SMM, hence the volatiles + */ +struct calling_interface_buffer { + __u16 cmd_class; + __u16 cmd_select; + volatile __u32 input[4]; + volatile __u32 output[4]; +} __packed; + +struct dell_wmi_extensions { + __u32 argattrib; + __u32 blength; + __u8 data[]; +} __packed; + +struct dell_wmi_smbios_buffer { + __u64 length; + struct calling_interface_buffer std; + struct dell_wmi_extensions ext; +} __packed; + +/* Whitelisted smbios class/select commands */ +#define CLASS_TOKEN_READ 0 +#define CLASS_TOKEN_WRITE 1 +#define SELECT_TOKEN_STD 0 +#define SELECT_TOKEN_BAT 1 +#define SELECT_TOKEN_AC 2 +#define CLASS_FLASH_INTERFACE 7 +#define SELECT_FLASH_INTERFACE 3 +#define CLASS_ADMIN_PROP 10 +#define SELECT_ADMIN_PROP 3 +#define CLASS_INFO 17 +#define SELECT_RFKILL 11 +#define SELECT_APP_REGISTRATION 3 +#define SELECT_DOCK 22 + +/* whitelisted tokens */ +#define CAPSULE_EN_TOKEN 0x0461 +#define CAPSULE_DIS_TOKEN 0x0462 +#define WSMT_EN_TOKEN 0x04EC +#define WSMT_DIS_TOKEN 0x04ED + +/* Dell SMBIOS calling IOCTL command used by dell-smbios-wmi */ +#define DELL_WMI_SMBIOS_CMD _IOWR(WMI_IOC, 0, struct dell_wmi_smbios_buffer) + +#endif diff --git a/include/video/iga.h b/include/video/iga.h deleted file mode 100644 index 83ca18492e00..000000000000 --- a/include/video/iga.h +++ /dev/null @@ -1,25 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* $Id: iga.h,v 1.2 1999/09/11 22:56:31 zaitcev Exp $ - * iga1682.h: Sparc/PCI iga1682 driver constants etc. - * - * Copyleft 1998 V. Roganov and G. 
Raiko - */ - -#ifndef _IGA1682_H -#define _IGA1682_H 1 - -#define IGA_ATTR_CTL 0x3C0 -#define IGA_IDX_VGA_OVERSCAN 0x11 -#define DAC_W_INDEX 0x03C8 -#define DAC_DATA 0x03C9 -#define IGA_EXT_CNTRL 0x3CE -#define IGA_IDX_EXT_BUS_CNTL 0x30 -#define MEM_SIZE_ALIAS 0x3 -#define MEM_SIZE_1M 0x0 -#define MEM_SIZE_2M 0x1 -#define MEM_SIZE_4M 0x2 -#define MEM_SIZE_RESERVED 0x3 -#define IGA_IDX_OVERSCAN_COLOR 0x58 -#define IGA_IDX_EXT_MEM_2 0x72 - -#endif /* !(_IGA1682_H) */ diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c index 2816feb38be1..68ec884440b7 100644 --- a/kernel/bpf/offload.c +++ b/kernel/bpf/offload.c @@ -14,8 +14,9 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) struct net *net = current->nsproxy->net_ns; struct bpf_dev_offload *offload; - if (!capable(CAP_SYS_ADMIN)) - return -EPERM; + if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS && + attr->prog_type != BPF_PROG_TYPE_XDP) + return -EINVAL; if (attr->prog_flags) return -EINVAL; @@ -28,7 +29,7 @@ int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) init_waitqueue_head(&offload->verifier_done); rtnl_lock(); - offload->netdev = __dev_get_by_index(net, attr->prog_target_ifindex); + offload->netdev = __dev_get_by_index(net, attr->prog_ifindex); if (!offload->netdev) { rtnl_unlock(); kfree(offload); @@ -85,6 +86,10 @@ static void __bpf_prog_offload_destroy(struct bpf_prog *prog) struct bpf_dev_offload *offload = prog->aux->offload; struct netdev_bpf data = {}; + /* Caution - if netdev is destroyed before the program, this function + * will be called twice. + */ + data.offload.prog = prog; if (offload->verifier_running) @@ -144,18 +149,6 @@ int bpf_prog_offload_compile(struct bpf_prog *prog) return bpf_prog_offload_translate(prog); } -u32 bpf_prog_offload_ifindex(struct bpf_prog *prog) -{ - struct bpf_dev_offload *offload = prog->aux->offload; - u32 ifindex; - - rtnl_lock(); - ifindex = offload->netdev ? 
offload->netdev->ifindex : 0; - rtnl_unlock(); - - return ifindex; -} - const struct bpf_prog_ops bpf_offload_prog_ops = { }; @@ -169,6 +162,10 @@ static int bpf_offload_notification(struct notifier_block *notifier, switch (event) { case NETDEV_UNREGISTER: + /* ignore namespace changes */ + if (netdev->reg_state != NETREG_UNREGISTERING) + break; + list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, offloads) { if (offload->netdev == netdev) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 09badc37e864..2c4cfeaa8d5e 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1057,22 +1057,23 @@ struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) } EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); -static bool bpf_prog_can_attach(struct bpf_prog *prog, - enum bpf_prog_type *attach_type, - struct net_device *netdev) +static bool bpf_prog_get_ok(struct bpf_prog *prog, + enum bpf_prog_type *attach_type, bool attach_drv) { - struct bpf_dev_offload *offload = prog->aux->offload; + /* not an attachment, just a refcount inc, always allow */ + if (!attach_type) + return true; if (prog->type != *attach_type) return false; - if (offload && offload->netdev != netdev) + if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv) return false; return true; } static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, - struct net_device *netdev) + bool attach_drv) { struct fd f = fdget(ufd); struct bpf_prog *prog; @@ -1080,7 +1081,7 @@ static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, prog = ____bpf_prog_get(f); if (IS_ERR(prog)) return prog; - if (attach_type && !bpf_prog_can_attach(prog, attach_type, netdev)) { + if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) { prog = ERR_PTR(-EINVAL); goto out; } @@ -1093,23 +1094,13 @@ out: struct bpf_prog *bpf_prog_get(u32 ufd) { - return __bpf_prog_get(ufd, NULL, NULL); + return __bpf_prog_get(ufd, NULL, false); } -struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type) -{ - struct bpf_prog *prog = __bpf_prog_get(ufd, &type, NULL); - - if (!IS_ERR(prog)) - trace_bpf_prog_get_type(prog); - return prog; -} -EXPORT_SYMBOL_GPL(bpf_prog_get_type); - struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, - struct net_device *netdev) + bool attach_drv) { - struct bpf_prog *prog = __bpf_prog_get(ufd, &type, netdev); + struct bpf_prog *prog = __bpf_prog_get(ufd, &type, attach_drv); if (!IS_ERR(prog)) trace_bpf_prog_get_type(prog); @@ -1118,7 +1109,7 @@ struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); /* last field in 'union bpf_attr' used by this command */ -#define BPF_PROG_LOAD_LAST_FIELD prog_target_ifindex +#define BPF_PROG_LOAD_LAST_FIELD prog_ifindex static int bpf_prog_load(union bpf_attr *attr) { @@ -1181,7 +1172,7 @@ static int bpf_prog_load(union bpf_attr *attr) atomic_set(&prog->aux->refcnt, 1); prog->gpl_compatible = is_gpl ? 
1 : 0; - if (attr->prog_target_ifindex) { + if (attr->prog_ifindex) { err = bpf_prog_offload_init(prog, attr); if (err) goto free_prog; @@ -1625,11 +1616,6 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, return -EFAULT; } - if (bpf_prog_is_dev_bound(prog->aux)) { - info.status |= BPF_PROG_STATUS_DEV_BOUND; - info.ifindex = bpf_prog_offload_ifindex(prog); - } - done: if (copy_to_user(uinfo, &info, info_len) || put_user(info_len, &uattr->info.info_len)) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index dd54d20ace2f..d4593571c404 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1384,13 +1384,15 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, if (type != expected_type) goto err_type; } else if (arg_type == ARG_PTR_TO_MEM || + arg_type == ARG_PTR_TO_MEM_OR_NULL || arg_type == ARG_PTR_TO_UNINIT_MEM) { expected_type = PTR_TO_STACK; /* One exception here. In case function allows for NULL to be * passed in as argument, it's a SCALAR_VALUE type. Final test * happens during stack boundary checking. */ - if (register_is_null(*reg)) + if (register_is_null(*reg) && + arg_type == ARG_PTR_TO_MEM_OR_NULL) /* final test in check_stack_boundary() */; else if (!type_is_pkt_pointer(type) && type != PTR_TO_MAP_VALUE && @@ -3825,6 +3827,7 @@ static int do_check(struct bpf_verifier_env *env) return err; regs = cur_regs(env); + env->insn_aux_data[insn_idx].seen = true; if (class == BPF_ALU || class == BPF_ALU64) { err = check_alu_op(env, insn); if (err) @@ -4020,6 +4023,7 @@ process_bpf_exit: return err; insn_idx++; + env->insn_aux_data[insn_idx].seen = true; } else { verbose(env, "invalid BPF_LD mode\n"); return -EINVAL; @@ -4202,6 +4206,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, u32 off, u32 cnt) { struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; + int i; if (cnt == 1) return 0; @@ -4211,6 +4216,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len, memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); memcpy(new_data + off + cnt - 1, old_data + off, sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); + for (i = off; i < off + cnt - 1; i++) + new_data[i].seen = true; env->insn_aux_data = new_data; vfree(old_data); return 0; @@ -4229,6 +4236,25 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of return new_prog; } +/* The verifier does more data flow analysis than llvm and will not explore + * branches that are dead at run time. Malicious programs can have dead code + * too. Therefore replace all dead at-run-time code with nops. 
+ */ +static void sanitize_dead_code(struct bpf_verifier_env *env) +{ + struct bpf_insn_aux_data *aux_data = env->insn_aux_data; + struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0); + struct bpf_insn *insn = env->prog->insnsi; + const int insn_cnt = env->prog->len; + int i; + + for (i = 0; i < insn_cnt; i++) { + if (aux_data[i].seen) + continue; + memcpy(insn + i, &nop, sizeof(nop)); + } +} + /* convert load instructions that access fields of 'struct __sk_buff' * into sequence of instructions that access fields of 'struct sk_buff' */ @@ -4556,6 +4582,9 @@ skip_full_check: free_states(env); if (ret == 0) + sanitize_dead_code(env); + + if (ret == 0) /* program is valid, convert *(u32*)(ctx + off) accesses */ ret = convert_ctx_accesses(env); diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 512f7c2baedd..5d81206a572d 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -2190,7 +2190,7 @@ again: } if (console_seq < log_first_seq) { - len = sprintf(text, "** %u printk messages dropped ** ", + len = sprintf(text, "** %u printk messages dropped **\n", (unsigned)(log_first_seq - console_seq)); /* messages are gone, move to first one */ diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c index 724d9292d4b9..3e3c2004bb23 100644 --- a/kernel/printk/printk_safe.c +++ b/kernel/printk/printk_safe.c @@ -72,7 +72,7 @@ static void queue_flush_work(struct printk_safe_seq_buf *s) * have dedicated buffers, because otherwise printk-safe preempted by * NMI-printk would have overwritten the NMI messages. * - * The messages are fushed from irq work (or from panic()), possibly, + * The messages are flushed from irq work (or from panic()), possibly, * from other CPU, concurrently with printk_safe_log_store(). Should this * happen, printk_safe_log_store() will notice the buffer->len mismatch * and repeat the write. 
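The sanitize_dead_code() change above relies on the verifier's branch tracking: any instruction whose aux_data 'seen' flag was never set during do_check() is unreachable at run time and is rewritten as the register-to-self move BPF_MOV64_REG(BPF_REG_0, BPF_REG_0). A minimal sketch of the kind of program that produces such dead code follows; it is illustrative only and not taken from the patch, the array name and register choices are assumptions, and it uses the kernel's instruction macros from <linux/filter.h>.

/* Illustrative sketch: r1 is a known constant, so the verifier resolves the
 * JEQ at verification time, follows only the taken branch, and never marks
 * instructions 2-3 as seen; sanitize_dead_code() then rewrites them as
 * mov r0,r0 no-ops after verification succeeds.
 */
#include <linux/bpf.h>
#include <linux/filter.h>

static const struct bpf_insn dead_branch_sketch[] = {
	BPF_MOV64_IMM(BPF_REG_1, 0),            /* 0: r1 = 0               */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),  /* 1: if r1 == 0 goto +2   */
	BPF_MOV64_IMM(BPF_REG_0, 1),            /* 2: dead at run time     */
	BPF_EXIT_INSN(),                        /* 3: dead at run time     */
	BPF_MOV64_IMM(BPF_REG_0, 0),            /* 4: r0 = 0               */
	BPF_EXIT_INSN(),                        /* 5: return 0             */
};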
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index a5580c670866..27d1f4ffa3de 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -78,16 +78,12 @@ EXPORT_SYMBOL_GPL(trace_call_bpf); BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr) { - int ret = 0; - - if (unlikely(size == 0)) - goto out; + int ret; ret = probe_kernel_read(dst, unsafe_ptr, size); if (unlikely(ret < 0)) memset(dst, 0, size); - out: return ret; } @@ -407,7 +403,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = { .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_MEM, - .arg5_type = ARG_CONST_SIZE, + .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs); @@ -498,7 +494,7 @@ static const struct bpf_func_proto bpf_probe_read_str_proto = { .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_UNINIT_MEM, - .arg2_type = ARG_CONST_SIZE, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, .arg3_type = ARG_ANYTHING, }; @@ -609,7 +605,7 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_tp = { .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_MEM, - .arg5_type = ARG_CONST_SIZE, + .arg5_type = ARG_CONST_SIZE_OR_ZERO, }; BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map, diff --git a/lib/Kconfig b/lib/Kconfig index 368972f0db78..c5e84fbcb30b 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -46,10 +46,6 @@ config GENERIC_IOMAP bool select GENERIC_PCI_IOMAP -config GENERIC_IO - bool - default n - config STMP_DEVICE bool diff --git a/net/9p/client.c b/net/9p/client.c index 4674235b0d9b..b433aff5ff13 100644 --- a/net/9p/client.c +++ b/net/9p/client.c @@ -82,7 +82,7 @@ int p9_show_client_options(struct seq_file *m, struct p9_client *clnt) { if (clnt->msize != 8192) seq_printf(m, ",msize=%u", clnt->msize); - seq_printf(m, "trans=%s", clnt->trans_mod->name); + seq_printf(m, ",trans=%s", clnt->trans_mod->name); switch (clnt->proto_version) { case p9_proto_legacy: @@ -773,8 +773,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...) 
} again: /* Wait for the response */ - err = wait_event_interruptible(*req->wq, - req->status >= REQ_STATUS_RCVD); + err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD); /* * Make sure our req is coherent with regard to updates in other diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index 903a190319b9..985046ae4231 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -724,12 +724,12 @@ static int p9_fd_show_options(struct seq_file *m, struct p9_client *clnt) { if (clnt->trans_mod == &p9_tcp_trans) { if (clnt->trans_opts.tcp.port != P9_PORT) - seq_printf(m, "port=%u", clnt->trans_opts.tcp.port); + seq_printf(m, ",port=%u", clnt->trans_opts.tcp.port); } else if (clnt->trans_mod == &p9_fd_trans) { if (clnt->trans_opts.fd.rfd != ~0) - seq_printf(m, "rfd=%u", clnt->trans_opts.fd.rfd); + seq_printf(m, ",rfd=%u", clnt->trans_opts.fd.rfd); if (clnt->trans_opts.fd.wfd != ~0) - seq_printf(m, "wfd=%u", clnt->trans_opts.fd.wfd); + seq_printf(m, ",wfd=%u", clnt->trans_opts.fd.wfd); } return 0; } diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index f24b25c25106..f3a4efcf1456 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -286,8 +286,8 @@ req_retry: if (err == -ENOSPC) { chan->ring_bufs_avail = 0; spin_unlock_irqrestore(&chan->lock, flags); - err = wait_event_interruptible(*chan->vc_wq, - chan->ring_bufs_avail); + err = wait_event_killable(*chan->vc_wq, + chan->ring_bufs_avail); if (err == -ERESTARTSYS) return err; @@ -327,7 +327,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan, * Other zc request to finish here */ if (atomic_read(&vp_pinned) >= chan->p9_max_pages) { - err = wait_event_interruptible(vp_wq, + err = wait_event_killable(vp_wq, (atomic_read(&vp_pinned) < chan->p9_max_pages)); if (err == -ERESTARTSYS) return err; @@ -471,8 +471,8 @@ req_retry_pinned: if (err == -ENOSPC) { chan->ring_bufs_avail = 0; spin_unlock_irqrestore(&chan->lock, flags); - err = wait_event_interruptible(*chan->vc_wq, - chan->ring_bufs_avail); + err = wait_event_killable(*chan->vc_wq, + chan->ring_bufs_avail); if (err == -ERESTARTSYS) goto err_out; @@ -489,8 +489,7 @@ req_retry_pinned: virtqueue_kick(chan->vq); spin_unlock_irqrestore(&chan->lock, flags); p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n"); - err = wait_event_interruptible(*req->wq, - req->status >= REQ_STATUS_RCVD); + err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD); /* * Non kernel buffers are pinned, unpin them */ diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c index 6ad3e043c617..325c56043007 100644 --- a/net/9p/trans_xen.c +++ b/net/9p/trans_xen.c @@ -156,8 +156,8 @@ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req) ring = &priv->rings[num]; again: - while (wait_event_interruptible(ring->wq, - p9_xen_write_todo(ring, size)) != 0) + while (wait_event_killable(ring->wq, + p9_xen_write_todo(ring, size)) != 0) ; spin_lock_irqsave(&ring->lock, flags); diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c index 67bb1f11e613..9a5850f264ed 100644 --- a/net/ceph/ceph_hash.c +++ b/net/ceph/ceph_hash.c @@ -47,28 +47,38 @@ unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length) /* handle the last 11 bytes */ c = c + length; - switch (len) { /* all the case statements fall through */ + switch (len) { case 11: c = c + ((__u32)k[10] << 24); + /* fall through */ case 10: c = c + ((__u32)k[9] << 16); + /* fall through */ case 9: c = c + ((__u32)k[8] << 8); /* the first byte of c is reserved for the length */ + /* fall through */ 
case 8: b = b + ((__u32)k[7] << 24); + /* fall through */ case 7: b = b + ((__u32)k[6] << 16); + /* fall through */ case 6: b = b + ((__u32)k[5] << 8); + /* fall through */ case 5: b = b + k[4]; + /* fall through */ case 4: a = a + ((__u32)k[3] << 24); + /* fall through */ case 3: a = a + ((__u32)k[2] << 16); + /* fall through */ case 2: a = a + ((__u32)k[1] << 8); + /* fall through */ case 1: a = a + k[0]; /* case 0: nothing left to add */ diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c index 489610ac1cdd..bf9d079cbafd 100644 --- a/net/ceph/crypto.c +++ b/net/ceph/crypto.c @@ -37,7 +37,9 @@ static int set_secret(struct ceph_crypto_key *key, void *buf) return -ENOTSUPP; } - WARN_ON(!key->len); + if (!key->len) + return -EINVAL; + key->key = kmemdup(buf, key->len, GFP_NOIO); if (!key->key) { ret = -ENOMEM; diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index ad93342c90d7..8a4d3758030b 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -430,6 +430,7 @@ static void ceph_sock_state_change(struct sock *sk) switch (sk->sk_state) { case TCP_CLOSE: dout("%s TCP_CLOSE\n", __func__); + /* fall through */ case TCP_CLOSE_WAIT: dout("%s TCP_CLOSE_WAIT\n", __func__); con_sock_state_closing(con); diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index 9ae1bab8c05d..1547107f4854 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -1279,9 +1279,10 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, /* * Older OSDs don't set reply tid even if the orignal - * request had a non-zero tid. Workaround this weirdness - * by falling through to the allocate case. + * request had a non-zero tid. Work around this weirdness + * by allocating a new message. */ + /* fall through */ case CEPH_MSG_MON_MAP: case CEPH_MSG_MDS_MAP: case CEPH_MSG_OSD_MAP: diff --git a/net/core/dev.c b/net/core/dev.c index 8ee29f4f5fa9..07ed21d64f92 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2746,7 +2746,8 @@ EXPORT_SYMBOL(skb_mac_gso_segment); static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path) { if (tx_path) - return skb->ip_summed != CHECKSUM_PARTIAL; + return skb->ip_summed != CHECKSUM_PARTIAL && + skb->ip_summed != CHECKSUM_UNNECESSARY; return skb->ip_summed == CHECKSUM_NONE; } @@ -7139,13 +7140,17 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, __dev_xdp_attached(dev, bpf_op, NULL)) return -EBUSY; - if (bpf_op == ops->ndo_bpf) - prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, - dev); - else - prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP); + prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, + bpf_op == ops->ndo_bpf); if (IS_ERR(prog)) return PTR_ERR(prog); + + if (!(flags & XDP_FLAGS_HW_MODE) && + bpf_prog_is_dev_bound(prog->aux)) { + NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported"); + bpf_prog_put(prog); + return -EINVAL; + } } err = dev_xdp_install(dev, bpf_op, extack, flags, prog); diff --git a/net/core/filter.c b/net/core/filter.c index 1afa17935954..6a85e67fafce 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1646,9 +1646,9 @@ static const struct bpf_func_proto bpf_csum_diff_proto = { .gpl_only = false, .pkt_access = true, .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_MEM, + .arg1_type = ARG_PTR_TO_MEM_OR_NULL, .arg2_type = ARG_CONST_SIZE_OR_ZERO, - .arg3_type = ARG_PTR_TO_MEM, + .arg3_type = ARG_PTR_TO_MEM_OR_NULL, .arg4_type = ARG_CONST_SIZE_OR_ZERO, .arg5_type = ARG_ANYTHING, }; diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 
44e3fb7dec8c..1e287420ff49 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c @@ -51,9 +51,7 @@ static struct dsa_switch_tree *dsa_tree_alloc(int index) INIT_LIST_HEAD(&dst->list); list_add_tail(&dsa_tree_list, &dst->list); - /* Initialize the reference counter to the number of switches, not 1 */ kref_init(&dst->refcount); - refcount_set(&dst->refcount.refcount, 0); return dst; } @@ -64,20 +62,23 @@ static void dsa_tree_free(struct dsa_switch_tree *dst) kfree(dst); } -static struct dsa_switch_tree *dsa_tree_touch(int index) +static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst) { - struct dsa_switch_tree *dst; - - dst = dsa_tree_find(index); - if (!dst) - dst = dsa_tree_alloc(index); + if (dst) + kref_get(&dst->refcount); return dst; } -static void dsa_tree_get(struct dsa_switch_tree *dst) +static struct dsa_switch_tree *dsa_tree_touch(int index) { - kref_get(&dst->refcount); + struct dsa_switch_tree *dst; + + dst = dsa_tree_find(index); + if (dst) + return dsa_tree_get(dst); + else + return dsa_tree_alloc(index); } static void dsa_tree_release(struct kref *ref) @@ -91,7 +92,8 @@ static void dsa_tree_release(struct kref *ref) static void dsa_tree_put(struct dsa_switch_tree *dst) { - kref_put(&dst->refcount, dsa_tree_release); + if (dst) + kref_put(&dst->refcount, dsa_tree_release); } static bool dsa_port_is_dsa(struct dsa_port *port) @@ -765,6 +767,7 @@ int dsa_register_switch(struct dsa_switch *ds) mutex_lock(&dsa2_mutex); err = dsa_switch_probe(ds); + dsa_tree_put(ds->dst); mutex_unlock(&dsa2_mutex); return err; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index ce4aa827be05..f00499a46927 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1223,9 +1223,10 @@ EXPORT_SYMBOL(inet_sk_rebuild_header); struct sk_buff *inet_gso_segment(struct sk_buff *skb, netdev_features_t features) { - bool fixedid = false, gso_partial, encap; + bool udpfrag = false, fixedid = false, gso_partial, encap; struct sk_buff *segs = ERR_PTR(-EINVAL); const struct net_offload *ops; + unsigned int offset = 0; struct iphdr *iph; int proto, tot_len; int nhoff; @@ -1260,6 +1261,7 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, segs = ERR_PTR(-EPROTONOSUPPORT); if (!skb->encapsulation || encap) { + udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP); fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID); /* fixed ID is invalid if DF bit is not set */ @@ -1279,7 +1281,13 @@ struct sk_buff *inet_gso_segment(struct sk_buff *skb, skb = segs; do { iph = (struct iphdr *)(skb_mac_header(skb) + nhoff); - if (skb_is_gso(skb)) { + if (udpfrag) { + iph->frag_off = htons(offset >> 3); + if (skb->next) + iph->frag_off |= htons(IP_MF); + offset += skb->len - nhoff - ihl; + tot_len = skb->len - nhoff; + } else if (skb_is_gso(skb)) { if (!fixedid) { iph->id = htons(id); id += skb_shinfo(skb)->gso_segs; diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index e360d55be555..01801b77bd0d 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -187,16 +187,57 @@ out_unlock: } EXPORT_SYMBOL(skb_udp_tunnel_segment); -static struct sk_buff *udp4_tunnel_segment(struct sk_buff *skb, - netdev_features_t features) +static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, + netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); + unsigned int mss; + __wsum csum; + struct udphdr *uh; + struct iphdr *iph; if (skb->encapsulation && (skb_shinfo(skb)->gso_type & - (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) + (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) { 
segs = skb_udp_tunnel_segment(skb, features, false); + goto out; + } + + if (!pskb_may_pull(skb, sizeof(struct udphdr))) + goto out; + + mss = skb_shinfo(skb)->gso_size; + if (unlikely(skb->len <= mss)) + goto out; + + /* Do software UFO. Complete and fill in the UDP checksum as + * HW cannot do checksum of UDP packets sent as multiple + * IP fragments. + */ + uh = udp_hdr(skb); + iph = ip_hdr(skb); + + uh->check = 0; + csum = skb_checksum(skb, 0, skb->len, 0); + uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* If there is no outer header we can fake a checksum offload + * due to the fact that we have already done the checksum in + * software prior to segmenting the frame. + */ + if (!skb->encap_hdr_csum) + features |= NETIF_F_HW_CSUM; + + /* Fragment the skb. IP headers of the fragments are updated in + * inet_gso_segment() + */ + segs = skb_segment(skb, features); +out: return segs; } @@ -330,7 +371,7 @@ static int udp4_gro_complete(struct sk_buff *skb, int nhoff) static const struct net_offload udpv4_offload = { .callbacks = { - .gso_segment = udp4_tunnel_segment, + .gso_segment = udp4_ufo_fragment, .gro_receive = udp4_gro_receive, .gro_complete = udp4_gro_complete, }, diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c index 4a7e5ffa5108..4fe7c90962dd 100644 --- a/net/ipv6/output_core.c +++ b/net/ipv6/output_core.c @@ -31,6 +31,37 @@ static u32 __ipv6_select_ident(struct net *net, u32 hashrnd, return id; } +/* This function exists only for tap drivers that must support broken + * clients requesting UFO without specifying an IPv6 fragment ID. + * + * This is similar to ipv6_select_ident() but we use an independent hash + * seed to limit information leakage. + * + * The network header must be set before calling this. 
+ */ +__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb) +{ + static u32 ip6_proxy_idents_hashrnd __read_mostly; + struct in6_addr buf[2]; + struct in6_addr *addrs; + u32 id; + + addrs = skb_header_pointer(skb, + skb_network_offset(skb) + + offsetof(struct ipv6hdr, saddr), + sizeof(buf), buf); + if (!addrs) + return 0; + + net_get_random_once(&ip6_proxy_idents_hashrnd, + sizeof(ip6_proxy_idents_hashrnd)); + + id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd, + &addrs[1], &addrs[0]); + return htonl(id); +} +EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); + __be32 ipv6_select_ident(struct net *net, const struct in6_addr *daddr, const struct in6_addr *saddr) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 05eb7bc36156..7a8d1500d374 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -472,6 +472,11 @@ static struct rt6_info *rt6_multipath_select(struct rt6_info *match, &match->rt6i_siblings, rt6i_siblings) { route_choosen--; if (route_choosen == 0) { + struct inet6_dev *idev = sibling->rt6i_idev; + + if (!netif_carrier_ok(sibling->dst.dev) && + idev->cnf.ignore_routes_with_linkdown) + break; if (rt6_score_route(sibling, oif, strict) < 0) break; match = sibling; @@ -1019,7 +1024,7 @@ static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt) { struct net_device *dev = rt->dst.dev; - if (rt->rt6i_flags & RTF_LOCAL) { + if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) { /* for copies of local routes, dst->dev needs to be the * device if it is a master device, the master device if * device is enslaved, and the loopback as the default diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c index 455fd4e39333..a0f89ad76f9d 100644 --- a/net/ipv6/udp_offload.c +++ b/net/ipv6/udp_offload.c @@ -17,15 +17,94 @@ #include <net/ip6_checksum.h> #include "ip6_offload.h" -static struct sk_buff *udp6_tunnel_segment(struct sk_buff *skb, - netdev_features_t features) +static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, + netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); + unsigned int mss; + unsigned int unfrag_ip6hlen, unfrag_len; + struct frag_hdr *fptr; + u8 *packet_start, *prevhdr; + u8 nexthdr; + u8 frag_hdr_sz = sizeof(struct frag_hdr); + __wsum csum; + int tnl_hlen; + int err; + + mss = skb_shinfo(skb)->gso_size; + if (unlikely(skb->len <= mss)) + goto out; if (skb->encapsulation && skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM)) segs = skb_udp_tunnel_segment(skb, features, true); + else { + const struct ipv6hdr *ipv6h; + struct udphdr *uh; + + if (!pskb_may_pull(skb, sizeof(struct udphdr))) + goto out; + + /* Do software UFO. Complete and fill in the UDP checksum as HW cannot + * do checksum of UDP packets sent as multiple IP fragments. + */ + + uh = udp_hdr(skb); + ipv6h = ipv6_hdr(skb); + + uh->check = 0; + csum = skb_checksum(skb, 0, skb->len, 0); + uh->check = udp_v6_check(skb->len, &ipv6h->saddr, + &ipv6h->daddr, csum); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + + skb->ip_summed = CHECKSUM_UNNECESSARY; + + /* If there is no outer header we can fake a checksum offload + * due to the fact that we have already done the checksum in + * software prior to segmenting the frame. + */ + if (!skb->encap_hdr_csum) + features |= NETIF_F_HW_CSUM; + + /* Check if there is enough headroom to insert fragment header. 
*/ + tnl_hlen = skb_tnl_header_len(skb); + if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) { + if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz)) + goto out; + } + + /* Find the unfragmentable header and shift it left by frag_hdr_sz + * bytes to insert fragment header. + */ + err = ip6_find_1stfragopt(skb, &prevhdr); + if (err < 0) + return ERR_PTR(err); + unfrag_ip6hlen = err; + nexthdr = *prevhdr; + *prevhdr = NEXTHDR_FRAGMENT; + unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) + + unfrag_ip6hlen + tnl_hlen; + packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset; + memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len); + + SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz; + skb->mac_header -= frag_hdr_sz; + skb->network_header -= frag_hdr_sz; + + fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen); + fptr->nexthdr = nexthdr; + fptr->reserved = 0; + fptr->identification = ipv6_proxy_select_ident(dev_net(skb->dev), skb); + + /* Fragment the skb. ipv6 header and the remaining fields of the + * fragment header are updated in ipv6_gso_segment() + */ + segs = skb_segment(skb, features); + } +out: return segs; } @@ -75,7 +154,7 @@ static int udp6_gro_complete(struct sk_buff *skb, int nhoff) static const struct net_offload udpv6_offload = { .callbacks = { - .gso_segment = udp6_tunnel_segment, + .gso_segment = udp6_ufo_fragment, .gro_receive = udp6_gro_receive, .gro_complete = udp6_gro_complete, }, diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 0dab33fb9844..ef38e5aecd28 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -308,6 +308,8 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info, uint32_t cutlen) { + unsigned int gso_type = skb_shinfo(skb)->gso_type; + struct sw_flow_key later_key; struct sk_buff *segs, *nskb; int err; @@ -318,9 +320,21 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb, if (segs == NULL) return -EINVAL; + if (gso_type & SKB_GSO_UDP) { + /* The initial flow key extracted by ovs_flow_key_extract() + * in this case is for a first fragment, so we need to + * properly mark later fragments. + */ + later_key = *key; + later_key.ip.frag = OVS_FRAG_TYPE_LATER; + } + /* Queue all of the segments. */ skb = segs; do { + if (gso_type & SKB_GSO_UDP && skb != segs) + key = &later_key; + err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen); if (err) break; diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 864ddb1e3642..dbe2379329c5 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -631,7 +631,8 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) key->ip.frag = OVS_FRAG_TYPE_LATER; return 0; } - if (nh->frag_off & htons(IP_MF)) + if (nh->frag_off & htons(IP_MF) || + skb_shinfo(skb)->gso_type & SKB_GSO_UDP) key->ip.frag = OVS_FRAG_TYPE_FIRST; else key->ip.frag = OVS_FRAG_TYPE_NONE; @@ -747,6 +748,9 @@ static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) if (key->ip.frag == OVS_FRAG_TYPE_LATER) return 0; + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) + key->ip.frag = OVS_FRAG_TYPE_FIRST; + /* Transport layer. 
*/ if (key->ip.proto == NEXTHDR_TCP) { if (tcphdr_ok(skb)) { diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index dc424798ba6f..624ea74353dd 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -2241,14 +2241,11 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb) #define MAX_ACTIONS_BUFSIZE (32 * 1024) -static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log) +static struct sw_flow_actions *nla_alloc_flow_actions(int size) { struct sw_flow_actions *sfa; - if (size > MAX_ACTIONS_BUFSIZE) { - OVS_NLERR(log, "Flow action size %u bytes exceeds max", size); - return ERR_PTR(-EINVAL); - } + WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE); sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL); if (!sfa) @@ -2321,12 +2318,15 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa, new_acts_size = ksize(*sfa) * 2; if (new_acts_size > MAX_ACTIONS_BUFSIZE) { - if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) + if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) { + OVS_NLERR(log, "Flow action size exceeds max %u", + MAX_ACTIONS_BUFSIZE); return ERR_PTR(-EMSGSIZE); + } new_acts_size = MAX_ACTIONS_BUFSIZE; } - acts = nla_alloc_flow_actions(new_acts_size, log); + acts = nla_alloc_flow_actions(new_acts_size); if (IS_ERR(acts)) return (void *)acts; @@ -3059,7 +3059,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, { int err; - *sfa = nla_alloc_flow_actions(nla_len(attr), log); + *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE)); if (IS_ERR(*sfa)) return PTR_ERR(*sfa); diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index 9b5c46b052fd..8f7cf4c042be 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c @@ -285,6 +285,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, bool upgrade) { struct rxrpc_conn_parameters cp; + struct rxrpc_call_params p; struct rxrpc_call *call; struct rxrpc_sock *rx = rxrpc_sk(sock->sk); int ret; @@ -302,6 +303,10 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, if (key && !key->payload.data[0]) key = NULL; /* a no-security key */ + memset(&p, 0, sizeof(p)); + p.user_call_ID = user_call_ID; + p.tx_total_len = tx_total_len; + memset(&cp, 0, sizeof(cp)); cp.local = rx->local; cp.key = key; @@ -309,8 +314,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, cp.exclusive = false; cp.upgrade = upgrade; cp.service_id = srx->srx_service; - call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len, - gfp); + call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp); /* The socket has been unlocked. */ if (!IS_ERR(call)) { call->notify_rx = notify_rx; @@ -863,6 +867,19 @@ static int rxrpc_release_sock(struct sock *sk) sock_orphan(sk); sk->sk_shutdown = SHUTDOWN_MASK; + /* We want to kill off all connections from a service socket + * as fast as possible because we can't share these; client + * sockets, on the other hand, can share an endpoint. 
+ */ + switch (sk->sk_state) { + case RXRPC_SERVER_BOUND: + case RXRPC_SERVER_BOUND2: + case RXRPC_SERVER_LISTENING: + case RXRPC_SERVER_LISTEN_DISABLED: + rx->local->service_closed = true; + break; + } + spin_lock_bh(&sk->sk_receive_queue.lock); sk->sk_state = RXRPC_CLOSE; spin_unlock_bh(&sk->sk_receive_queue.lock); @@ -878,6 +895,8 @@ static int rxrpc_release_sock(struct sock *sk) rxrpc_release_calls_on_socket(rx); flush_workqueue(rxrpc_workqueue); rxrpc_purge_queue(&sk->sk_receive_queue); + rxrpc_queue_work(&rx->local->rxnet->service_conn_reaper); + rxrpc_queue_work(&rx->local->rxnet->client_conn_reaper); rxrpc_put_local(rx->local); rx->local = NULL; diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h index b2151993d384..416688381eb7 100644 --- a/net/rxrpc/ar-internal.h +++ b/net/rxrpc/ar-internal.h @@ -79,17 +79,20 @@ struct rxrpc_net { struct list_head conn_proc_list; /* List of conns in this namespace for proc */ struct list_head service_conns; /* Service conns in this namespace */ rwlock_t conn_lock; /* Lock for ->conn_proc_list, ->service_conns */ - struct delayed_work service_conn_reaper; + struct work_struct service_conn_reaper; + struct timer_list service_conn_reap_timer; unsigned int nr_client_conns; unsigned int nr_active_client_conns; bool kill_all_client_conns; + bool live; spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */ spinlock_t client_conn_discard_lock; /* Prevent multiple discarders */ struct list_head waiting_client_conns; struct list_head active_client_conns; struct list_head idle_client_conns; - struct delayed_work client_conn_reaper; + struct work_struct client_conn_reaper; + struct timer_list client_conn_reap_timer; struct list_head local_endpoints; struct mutex local_mutex; /* Lock for ->local_endpoints */ @@ -265,6 +268,7 @@ struct rxrpc_local { rwlock_t services_lock; /* lock for services list */ int debug_id; /* debug ID for printks */ bool dead; + bool service_closed; /* Service socket closed */ struct sockaddr_rxrpc srx; /* local address */ }; @@ -338,8 +342,17 @@ enum rxrpc_conn_flag { RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */ RXRPC_CONN_COUNTED, /* Counted by rxrpc_nr_client_conns */ RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */ + RXRPC_CONN_FINAL_ACK_0, /* Need final ACK for channel 0 */ + RXRPC_CONN_FINAL_ACK_1, /* Need final ACK for channel 1 */ + RXRPC_CONN_FINAL_ACK_2, /* Need final ACK for channel 2 */ + RXRPC_CONN_FINAL_ACK_3, /* Need final ACK for channel 3 */ }; +#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) | \ + (1UL << RXRPC_CONN_FINAL_ACK_1) | \ + (1UL << RXRPC_CONN_FINAL_ACK_2) | \ + (1UL << RXRPC_CONN_FINAL_ACK_3)) + /* * Events that can be raised upon a connection. 
*/ @@ -393,6 +406,7 @@ struct rxrpc_connection { #define RXRPC_ACTIVE_CHANS_MASK ((1 << RXRPC_MAXCALLS) - 1) struct list_head waiting_calls; /* Calls waiting for channels */ struct rxrpc_channel { + unsigned long final_ack_at; /* Time at which to issue final ACK */ struct rxrpc_call __rcu *call; /* Active call */ u32 call_id; /* ID of current call */ u32 call_counter; /* Call ID counter */ @@ -404,6 +418,7 @@ struct rxrpc_connection { }; } channels[RXRPC_MAXCALLS]; + struct timer_list timer; /* Conn event timer */ struct work_struct processor; /* connection event processor */ union { struct rb_node client_node; /* Node in local->client_conns */ @@ -457,9 +472,10 @@ enum rxrpc_call_flag { enum rxrpc_call_event { RXRPC_CALL_EV_ACK, /* need to generate ACK */ RXRPC_CALL_EV_ABORT, /* need to generate abort */ - RXRPC_CALL_EV_TIMER, /* Timer expired */ RXRPC_CALL_EV_RESEND, /* Tx resend required */ RXRPC_CALL_EV_PING, /* Ping send required */ + RXRPC_CALL_EV_EXPIRED, /* Expiry occurred */ + RXRPC_CALL_EV_ACK_LOST, /* ACK may be lost, send ping */ }; /* @@ -503,10 +519,16 @@ struct rxrpc_call { struct rxrpc_peer *peer; /* Peer record for remote address */ struct rxrpc_sock __rcu *socket; /* socket responsible */ struct mutex user_mutex; /* User access mutex */ - ktime_t ack_at; /* When deferred ACK needs to happen */ - ktime_t resend_at; /* When next resend needs to happen */ - ktime_t ping_at; /* When next to send a ping */ - ktime_t expire_at; /* When the call times out */ + unsigned long ack_at; /* When deferred ACK needs to happen */ + unsigned long ack_lost_at; /* When ACK is figured as lost */ + unsigned long resend_at; /* When next resend needs to happen */ + unsigned long ping_at; /* When next to send a ping */ + unsigned long keepalive_at; /* When next to send a keepalive ping */ + unsigned long expect_rx_by; /* When we expect to get a packet by */ + unsigned long expect_req_by; /* When we expect to get a request DATA packet by */ + unsigned long expect_term_by; /* When we expect call termination by */ + u32 next_rx_timo; /* Timeout for next Rx packet (jif) */ + u32 next_req_timo; /* Timeout for next Rx request packet (jif) */ struct timer_list timer; /* Combined event timer */ struct work_struct processor; /* Event processor */ rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */ @@ -609,6 +631,8 @@ struct rxrpc_call { ktime_t acks_latest_ts; /* Timestamp of latest ACK received */ rxrpc_serial_t acks_latest; /* serial number of latest ACK received */ rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */ + rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */ + rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */ }; /* @@ -632,6 +656,35 @@ struct rxrpc_ack_summary { u8 cumulative_acks; }; +/* + * sendmsg() cmsg-specified parameters. 
+ */ +enum rxrpc_command { + RXRPC_CMD_SEND_DATA, /* send data message */ + RXRPC_CMD_SEND_ABORT, /* request abort generation */ + RXRPC_CMD_ACCEPT, /* [server] accept incoming call */ + RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */ +}; + +struct rxrpc_call_params { + s64 tx_total_len; /* Total Tx data length (if send data) */ + unsigned long user_call_ID; /* User's call ID */ + struct { + u32 hard; /* Maximum lifetime (sec) */ + u32 idle; /* Max time since last data packet (msec) */ + u32 normal; /* Max time since last call packet (msec) */ + } timeouts; + u8 nr_timeouts; /* Number of timeouts specified */ +}; + +struct rxrpc_send_params { + struct rxrpc_call_params call; + u32 abort_code; /* Abort code to Tx (if abort) */ + enum rxrpc_command command : 8; /* The command to implement */ + bool exclusive; /* Shared or exclusive call */ + bool upgrade; /* If the connection is upgradeable */ +}; + #include <trace/events/rxrpc.h> /* @@ -657,12 +710,19 @@ int rxrpc_reject_call(struct rxrpc_sock *); /* * call_event.c */ -void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t); -void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t); void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool, enum rxrpc_propose_ack_trace); void rxrpc_process_call(struct work_struct *); +static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call, + unsigned long expire_at, + unsigned long now, + enum rxrpc_timer_trace why) +{ + trace_rxrpc_timer(call, why, now); + timer_reduce(&call->timer, expire_at); +} + /* * call_object.c */ @@ -672,11 +732,11 @@ extern unsigned int rxrpc_max_call_lifetime; extern struct kmem_cache *rxrpc_call_jar; struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long); -struct rxrpc_call *rxrpc_alloc_call(gfp_t); +struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t); struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *, struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *, - unsigned long, s64, gfp_t); + struct rxrpc_call_params *, gfp_t); int rxrpc_retry_client_call(struct rxrpc_sock *, struct rxrpc_call *, struct rxrpc_conn_parameters *, @@ -803,8 +863,8 @@ static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call, */ extern unsigned int rxrpc_max_client_connections; extern unsigned int rxrpc_reap_client_connections; -extern unsigned int rxrpc_conn_idle_client_expiry; -extern unsigned int rxrpc_conn_idle_client_fast_expiry; +extern unsigned long rxrpc_conn_idle_client_expiry; +extern unsigned long rxrpc_conn_idle_client_fast_expiry; extern struct idr rxrpc_client_conn_ids; void rxrpc_destroy_client_conn_ids(void); @@ -825,6 +885,7 @@ void rxrpc_process_connection(struct work_struct *); * conn_object.c */ extern unsigned int rxrpc_connection_expiry; +extern unsigned int rxrpc_closed_conn_expiry; struct rxrpc_connection *rxrpc_alloc_connection(gfp_t); struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *, @@ -861,6 +922,12 @@ static inline void rxrpc_put_connection(struct rxrpc_connection *conn) rxrpc_put_service_conn(conn); } +static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn, + unsigned long expire_at) +{ + timer_reduce(&conn->timer, expire_at); +} + /* * conn_service.c */ @@ -930,13 +997,13 @@ static inline void rxrpc_queue_local(struct rxrpc_local *local) * misc.c */ extern unsigned int rxrpc_max_backlog __read_mostly; -extern unsigned int rxrpc_requested_ack_delay; -extern unsigned int rxrpc_soft_ack_delay; -extern 
unsigned int rxrpc_idle_ack_delay; +extern unsigned long rxrpc_requested_ack_delay; +extern unsigned long rxrpc_soft_ack_delay; +extern unsigned long rxrpc_idle_ack_delay; extern unsigned int rxrpc_rx_window_size; extern unsigned int rxrpc_rx_mtu; extern unsigned int rxrpc_rx_jumbo_max; -extern unsigned int rxrpc_resend_timeout; +extern unsigned long rxrpc_resend_timeout; extern const s8 rxrpc_ack_priority[]; @@ -954,7 +1021,7 @@ static inline struct rxrpc_net *rxrpc_net(struct net *net) /* * output.c */ -int rxrpc_send_ack_packet(struct rxrpc_call *, bool); +int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *); int rxrpc_send_abort_packet(struct rxrpc_call *); int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool); void rxrpc_reject_packets(struct rxrpc_local *); diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index cbd1701e813a..3028298ca561 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c @@ -94,7 +94,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx, /* Now it gets complicated, because calls get registered with the * socket here, particularly if a user ID is preassigned by the user. */ - call = rxrpc_alloc_call(gfp); + call = rxrpc_alloc_call(rx, gfp); if (!call) return -ENOMEM; call->flags |= (1 << RXRPC_CALL_IS_SERVICE); diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c index 3574508baf9a..bda952ffe6a6 100644 --- a/net/rxrpc/call_event.c +++ b/net/rxrpc/call_event.c @@ -22,80 +22,6 @@ #include "ar-internal.h" /* - * Set the timer - */ -void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why, - ktime_t now) -{ - unsigned long t_j, now_j = jiffies; - ktime_t t; - bool queue = false; - - if (call->state < RXRPC_CALL_COMPLETE) { - t = call->expire_at; - if (!ktime_after(t, now)) { - trace_rxrpc_timer(call, why, now, now_j); - queue = true; - goto out; - } - - if (!ktime_after(call->resend_at, now)) { - call->resend_at = call->expire_at; - if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events)) - queue = true; - } else if (ktime_before(call->resend_at, t)) { - t = call->resend_at; - } - - if (!ktime_after(call->ack_at, now)) { - call->ack_at = call->expire_at; - if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events)) - queue = true; - } else if (ktime_before(call->ack_at, t)) { - t = call->ack_at; - } - - if (!ktime_after(call->ping_at, now)) { - call->ping_at = call->expire_at; - if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events)) - queue = true; - } else if (ktime_before(call->ping_at, t)) { - t = call->ping_at; - } - - t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now))); - t_j += jiffies; - - /* We have to make sure that the calculated jiffies value falls - * at or after the nsec value, or we may loop ceaselessly - * because the timer times out, but we haven't reached the nsec - * timeout yet. - */ - t_j++; - - if (call->timer.expires != t_j || !timer_pending(&call->timer)) { - mod_timer(&call->timer, t_j); - trace_rxrpc_timer(call, why, now, now_j); - } - } - -out: - if (queue) - rxrpc_queue_call(call); -} - -/* - * Set the timer - */ -void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why, - ktime_t now) -{ - read_lock_bh(&call->state_lock); - __rxrpc_set_timer(call, why, now); - read_unlock_bh(&call->state_lock); -} - -/* * Propose a PING ACK be sent. 
*/ static void rxrpc_propose_ping(struct rxrpc_call *call, @@ -106,12 +32,13 @@ static void rxrpc_propose_ping(struct rxrpc_call *call, !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events)) rxrpc_queue_call(call); } else { - ktime_t now = ktime_get_real(); - ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay); + unsigned long now = jiffies; + unsigned long ping_at = now + rxrpc_idle_ack_delay; - if (ktime_before(ping_at, call->ping_at)) { - call->ping_at = ping_at; - rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now); + if (time_before(ping_at, call->ping_at)) { + WRITE_ONCE(call->ping_at, ping_at); + rxrpc_reduce_call_timer(call, ping_at, now, + rxrpc_timer_set_for_ping); } } } @@ -125,8 +52,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, enum rxrpc_propose_ack_trace why) { enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use; - unsigned int expiry = rxrpc_soft_ack_delay; - ktime_t now, ack_at; + unsigned long expiry = rxrpc_soft_ack_delay; s8 prior = rxrpc_ack_priority[ack_reason]; /* Pings are handled specially because we don't want to accidentally @@ -190,11 +116,18 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason, background) rxrpc_queue_call(call); } else { - now = ktime_get_real(); - ack_at = ktime_add_ms(now, expiry); - if (ktime_before(ack_at, call->ack_at)) { - call->ack_at = ack_at; - rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now); + unsigned long now = jiffies, ack_at; + + if (call->peer->rtt_usage > 0) + ack_at = nsecs_to_jiffies(call->peer->rtt); + else + ack_at = expiry; + + ack_at = jiffies + expiry; + if (time_before(ack_at, call->ack_at)) { + WRITE_ONCE(call->ack_at, ack_at); + rxrpc_reduce_call_timer(call, ack_at, now, + rxrpc_timer_set_for_ack); } } @@ -227,18 +160,28 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call) /* * Perform retransmission of NAK'd and unack'd packets. */ -static void rxrpc_resend(struct rxrpc_call *call, ktime_t now) +static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) { struct rxrpc_skb_priv *sp; struct sk_buff *skb; + unsigned long resend_at; rxrpc_seq_t cursor, seq, top; - ktime_t max_age, oldest, ack_ts; + ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo; int ix; u8 annotation, anno_type, retrans = 0, unacked = 0; _enter("{%d,%d}", call->tx_hard_ack, call->tx_top); - max_age = ktime_sub_ms(now, rxrpc_resend_timeout); + if (call->peer->rtt_usage > 1) + timeout = ns_to_ktime(call->peer->rtt * 3 / 2); + else + timeout = ms_to_ktime(rxrpc_resend_timeout); + min_timeo = ns_to_ktime((1000000000 / HZ) * 4); + if (ktime_before(timeout, min_timeo)) + timeout = min_timeo; + + now = ktime_get_real(); + max_age = ktime_sub(now, timeout); spin_lock_bh(&call->lock); @@ -282,7 +225,9 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now) ktime_to_ns(ktime_sub(skb->tstamp, max_age))); } - call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout); + resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(oldest, now))); + resend_at += jiffies + rxrpc_resend_timeout; + WRITE_ONCE(call->resend_at, resend_at); if (unacked) rxrpc_congestion_timeout(call); @@ -292,14 +237,15 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now) * retransmitting data. 
*/ if (!retrans) { - rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now); + rxrpc_reduce_call_timer(call, resend_at, now, + rxrpc_timer_set_for_resend); spin_unlock_bh(&call->lock); ack_ts = ktime_sub(now, call->acks_latest_ts); if (ktime_to_ns(ack_ts) < call->peer->rtt) goto out; rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, rxrpc_propose_ack_ping_for_lost_ack); - rxrpc_send_ack_packet(call, true); + rxrpc_send_ack_packet(call, true, NULL); goto out; } @@ -364,7 +310,8 @@ void rxrpc_process_call(struct work_struct *work) { struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor); - ktime_t now; + rxrpc_serial_t *send_ack; + unsigned long now, next, t; rxrpc_see_call(call); @@ -384,22 +331,89 @@ recheck_state: goto out_put; } - now = ktime_get_real(); - if (ktime_before(call->expire_at, now)) { + /* Work out if any timeouts tripped */ + now = jiffies; + t = READ_ONCE(call->expect_rx_by); + if (time_after_eq(now, t)) { + trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now); + set_bit(RXRPC_CALL_EV_EXPIRED, &call->events); + } + + t = READ_ONCE(call->expect_req_by); + if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST && + time_after_eq(now, t)) { + trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now); + set_bit(RXRPC_CALL_EV_EXPIRED, &call->events); + } + + t = READ_ONCE(call->expect_term_by); + if (time_after_eq(now, t)) { + trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now); + set_bit(RXRPC_CALL_EV_EXPIRED, &call->events); + } + + t = READ_ONCE(call->ack_at); + if (time_after_eq(now, t)) { + trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now); + cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET); + set_bit(RXRPC_CALL_EV_ACK, &call->events); + } + + t = READ_ONCE(call->ack_lost_at); + if (time_after_eq(now, t)) { + trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now); + cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET); + set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events); + } + + t = READ_ONCE(call->keepalive_at); + if (time_after_eq(now, t)) { + trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now); + cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET); + rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true, + rxrpc_propose_ack_ping_for_keepalive); + set_bit(RXRPC_CALL_EV_PING, &call->events); + } + + t = READ_ONCE(call->ping_at); + if (time_after_eq(now, t)) { + trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now); + cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET); + set_bit(RXRPC_CALL_EV_PING, &call->events); + } + + t = READ_ONCE(call->resend_at); + if (time_after_eq(now, t)) { + trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now); + cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET); + set_bit(RXRPC_CALL_EV_RESEND, &call->events); + } + + /* Process events */ + if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) { rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME); set_bit(RXRPC_CALL_EV_ABORT, &call->events); goto recheck_state; } - if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) { + send_ack = NULL; + if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) { + call->acks_lost_top = call->tx_top; + rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, + rxrpc_propose_ack_ping_for_lost_ack); + send_ack = &call->acks_lost_ping; + } + + if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) || + send_ack) { if (call->ackr_reason) { - rxrpc_send_ack_packet(call, false); + rxrpc_send_ack_packet(call, false, send_ack); goto recheck_state; } } if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) { - 
rxrpc_send_ack_packet(call, true); + rxrpc_send_ack_packet(call, true, NULL); goto recheck_state; } @@ -408,7 +422,24 @@ recheck_state: goto recheck_state; } - rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now); + /* Make sure the timer is restarted */ + next = call->expect_rx_by; + +#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; } + + set(call->expect_req_by); + set(call->expect_term_by); + set(call->ack_at); + set(call->ack_lost_at); + set(call->resend_at); + set(call->keepalive_at); + set(call->ping_at); + + now = jiffies; + if (time_after_eq(now, next)) + goto recheck_state; + + rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart); /* other events may have been raised since we started checking */ if (call->events && call->state < RXRPC_CALL_COMPLETE) { diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c index 4c7fbc6dcce7..7ee3d6ce5aa2 100644 --- a/net/rxrpc/call_object.c +++ b/net/rxrpc/call_object.c @@ -51,10 +51,14 @@ static void rxrpc_call_timer_expired(unsigned long _call) _enter("%d", call->debug_id); - if (call->state < RXRPC_CALL_COMPLETE) - rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real()); + if (call->state < RXRPC_CALL_COMPLETE) { + trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies); + rxrpc_queue_call(call); + } } +static struct lock_class_key rxrpc_call_user_mutex_lock_class_key; + /* * find an extant server call * - called in process context with IRQs enabled @@ -95,7 +99,7 @@ found_extant_call: /* * allocate a new call */ -struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) +struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp) { struct rxrpc_call *call; @@ -114,6 +118,14 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) goto nomem_2; mutex_init(&call->user_mutex); + + /* Prevent lockdep reporting a deadlock false positive between the afs + * filesystem and sys_sendmsg() via the mmap sem. + */ + if (rx->sk.sk_kern_sock) + lockdep_set_class(&call->user_mutex, + &rxrpc_call_user_mutex_lock_class_key); + setup_timer(&call->timer, rxrpc_call_timer_expired, (unsigned long)call); INIT_WORK(&call->processor, &rxrpc_process_call); @@ -129,6 +141,8 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp) atomic_set(&call->usage, 1); call->debug_id = atomic_inc_return(&rxrpc_debug_id); call->tx_total_len = -1; + call->next_rx_timo = 20 * HZ; + call->next_req_timo = 1 * HZ; memset(&call->sock_node, 0xed, sizeof(call->sock_node)); @@ -151,7 +165,8 @@ nomem: /* * Allocate a new client call. 
*/ -static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx, +static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx, + struct sockaddr_rxrpc *srx, gfp_t gfp) { struct rxrpc_call *call; @@ -159,7 +174,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx, _enter(""); - call = rxrpc_alloc_call(gfp); + call = rxrpc_alloc_call(rx, gfp); if (!call) return ERR_PTR(-ENOMEM); call->state = RXRPC_CALL_CLIENT_AWAIT_CONN; @@ -178,15 +193,17 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx, */ static void rxrpc_start_call_timer(struct rxrpc_call *call) { - ktime_t now = ktime_get_real(), expire_at; - - expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime); - call->expire_at = expire_at; - call->ack_at = expire_at; - call->ping_at = expire_at; - call->resend_at = expire_at; - call->timer.expires = jiffies + LONG_MAX / 2; - rxrpc_set_timer(call, rxrpc_timer_begin, now); + unsigned long now = jiffies; + unsigned long j = now + MAX_JIFFY_OFFSET; + + call->ack_at = j; + call->ack_lost_at = j; + call->resend_at = j; + call->ping_at = j; + call->expect_rx_by = j; + call->expect_req_by = j; + call->expect_term_by = j; + call->timer.expires = now; } /* @@ -197,8 +214,7 @@ static void rxrpc_start_call_timer(struct rxrpc_call *call) struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, struct rxrpc_conn_parameters *cp, struct sockaddr_rxrpc *srx, - unsigned long user_call_ID, - s64 tx_total_len, + struct rxrpc_call_params *p, gfp_t gfp) __releases(&rx->sk.sk_lock.slock) { @@ -208,18 +224,18 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, const void *here = __builtin_return_address(0); int ret; - _enter("%p,%lx", rx, user_call_ID); + _enter("%p,%lx", rx, p->user_call_ID); - call = rxrpc_alloc_client_call(srx, gfp); + call = rxrpc_alloc_client_call(rx, srx, gfp); if (IS_ERR(call)) { release_sock(&rx->sk); _leave(" = %ld", PTR_ERR(call)); return call; } - call->tx_total_len = tx_total_len; + call->tx_total_len = p->tx_total_len; trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage), - here, (const void *)user_call_ID); + here, (const void *)p->user_call_ID); /* We need to protect a partially set up call against the user as we * will be acting outside the socket lock. 
@@ -235,16 +251,16 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, parent = *pp; xcall = rb_entry(parent, struct rxrpc_call, sock_node); - if (user_call_ID < xcall->user_call_ID) + if (p->user_call_ID < xcall->user_call_ID) pp = &(*pp)->rb_left; - else if (user_call_ID > xcall->user_call_ID) + else if (p->user_call_ID > xcall->user_call_ID) pp = &(*pp)->rb_right; else goto error_dup_user_ID; } rcu_assign_pointer(call->socket, rx); - call->user_call_ID = user_call_ID; + call->user_call_ID = p->user_call_ID; __set_bit(RXRPC_CALL_HAS_USERID, &call->flags); rxrpc_get_call(call, rxrpc_call_got_userid); rb_link_node(&call->sock_node, parent, pp); diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c index 5f9624bd311c..7f74ca3059f8 100644 --- a/net/rxrpc/conn_client.c +++ b/net/rxrpc/conn_client.c @@ -85,8 +85,8 @@ __read_mostly unsigned int rxrpc_max_client_connections = 1000; __read_mostly unsigned int rxrpc_reap_client_connections = 900; -__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ; -__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ; +__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ; +__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ; /* * We use machine-unique IDs for our client connections. @@ -554,6 +554,11 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn, trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate); + /* Cancel the final ACK on the previous call if it hasn't been sent yet + * as the DATA packet will implicitly ACK it. + */ + clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); + write_lock_bh(&call->state_lock); if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags)) call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; @@ -686,7 +691,7 @@ int rxrpc_connect_call(struct rxrpc_call *call, _enter("{%d,%lx},", call->debug_id, call->user_call_ID); - rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper.work); + rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper); rxrpc_cull_active_client_conns(rxnet); ret = rxrpc_get_client_conn(call, cp, srx, gfp); @@ -752,6 +757,18 @@ void rxrpc_expose_client_call(struct rxrpc_call *call) } /* + * Set the reap timer. + */ +static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet) +{ + unsigned long now = jiffies; + unsigned long reap_at = now + rxrpc_conn_idle_client_expiry; + + if (rxnet->live) + timer_reduce(&rxnet->client_conn_reap_timer, reap_at); +} + +/* * Disconnect a client call. */ void rxrpc_disconnect_client_call(struct rxrpc_call *call) @@ -813,6 +830,19 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call) goto out_2; } + /* Schedule the final ACK to be transmitted in a short while so that it + * can be skipped if we find a follow-on call. The first DATA packet + * of the follow on call will implicitly ACK this call. + */ + if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) { + unsigned long final_ack_at = jiffies + 2; + + WRITE_ONCE(chan->final_ack_at, final_ack_at); + smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */ + set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags); + rxrpc_reduce_conn_timer(conn, final_ack_at); + } + /* Things are more complex and we need the cache lock. We might be * able to simply idle the conn or it might now be lurking on the wait * list. 
It might even get moved back to the active list whilst we're @@ -878,9 +908,7 @@ idle_connection: list_move_tail(&conn->cache_link, &rxnet->idle_client_conns); if (rxnet->idle_client_conns.next == &conn->cache_link && !rxnet->kill_all_client_conns) - queue_delayed_work(rxrpc_workqueue, - &rxnet->client_conn_reaper, - rxrpc_conn_idle_client_expiry); + rxrpc_set_client_reap_timer(rxnet); } else { trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive); conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE; @@ -1018,8 +1046,7 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work) { struct rxrpc_connection *conn; struct rxrpc_net *rxnet = - container_of(to_delayed_work(work), - struct rxrpc_net, client_conn_reaper); + container_of(work, struct rxrpc_net, client_conn_reaper); unsigned long expiry, conn_expires_at, now; unsigned int nr_conns; bool did_discard = false; @@ -1061,6 +1088,8 @@ next: expiry = rxrpc_conn_idle_client_expiry; if (nr_conns > rxrpc_reap_client_connections) expiry = rxrpc_conn_idle_client_fast_expiry; + if (conn->params.local->service_closed) + expiry = rxrpc_closed_conn_expiry * HZ; conn_expires_at = conn->idle_timestamp + expiry; @@ -1096,9 +1125,8 @@ not_yet_expired: */ _debug("not yet"); if (!rxnet->kill_all_client_conns) - queue_delayed_work(rxrpc_workqueue, - &rxnet->client_conn_reaper, - conn_expires_at - now); + timer_reduce(&rxnet->client_conn_reap_timer, + conn_expires_at); out: spin_unlock(&rxnet->client_conn_cache_lock); @@ -1118,9 +1146,9 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet) rxnet->kill_all_client_conns = true; spin_unlock(&rxnet->client_conn_cache_lock); - cancel_delayed_work(&rxnet->client_conn_reaper); + del_timer_sync(&rxnet->client_conn_reap_timer); - if (!queue_delayed_work(rxrpc_workqueue, &rxnet->client_conn_reaper, 0)) + if (!rxrpc_queue_work(&rxnet->client_conn_reaper)) _debug("destroy: queue failed"); _leave(""); diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 59a51a56e7c8..9e9a8db1bc9c 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c @@ -24,9 +24,10 @@ * Retransmit terminal ACK or ABORT of the previous call. */ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, - struct sk_buff *skb) + struct sk_buff *skb, + unsigned int channel) { - struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL; struct rxrpc_channel *chan; struct msghdr msg; struct kvec iov; @@ -48,7 +49,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, _enter("%d", conn->debug_id); - chan = &conn->channels[sp->hdr.cid & RXRPC_CHANNELMASK]; + chan = &conn->channels[channel]; /* If the last call got moved on whilst we were waiting to run, just * ignore this packet. 
@@ -56,7 +57,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, call_id = READ_ONCE(chan->last_call); /* Sync with __rxrpc_disconnect_call() */ smp_rmb(); - if (call_id != sp->hdr.callNumber) + if (skb && call_id != sp->hdr.callNumber) return; msg.msg_name = &conn->params.peer->srx.transport; @@ -65,9 +66,9 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, msg.msg_controllen = 0; msg.msg_flags = 0; - pkt.whdr.epoch = htonl(sp->hdr.epoch); - pkt.whdr.cid = htonl(sp->hdr.cid); - pkt.whdr.callNumber = htonl(sp->hdr.callNumber); + pkt.whdr.epoch = htonl(conn->proto.epoch); + pkt.whdr.cid = htonl(conn->proto.cid); + pkt.whdr.callNumber = htonl(call_id); pkt.whdr.seq = 0; pkt.whdr.type = chan->last_type; pkt.whdr.flags = conn->out_clientflag; @@ -87,11 +88,11 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, mtu = conn->params.peer->if_mtu; mtu -= conn->params.peer->hdrsize; pkt.ack.bufferSpace = 0; - pkt.ack.maxSkew = htons(skb->priority); - pkt.ack.firstPacket = htonl(chan->last_seq); - pkt.ack.previousPacket = htonl(chan->last_seq - 1); - pkt.ack.serial = htonl(sp->hdr.serial); - pkt.ack.reason = RXRPC_ACK_DUPLICATE; + pkt.ack.maxSkew = htons(skb ? skb->priority : 0); + pkt.ack.firstPacket = htonl(chan->last_seq + 1); + pkt.ack.previousPacket = htonl(chan->last_seq); + pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0); + pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE; pkt.ack.nAcks = 0; pkt.info.rxMTU = htonl(rxrpc_rx_mtu); pkt.info.maxMTU = htonl(mtu); @@ -272,7 +273,8 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, switch (sp->hdr.type) { case RXRPC_PACKET_TYPE_DATA: case RXRPC_PACKET_TYPE_ACK: - rxrpc_conn_retransmit_call(conn, skb); + rxrpc_conn_retransmit_call(conn, skb, + sp->hdr.cid & RXRPC_CHANNELMASK); return 0; case RXRPC_PACKET_TYPE_BUSY: @@ -379,6 +381,48 @@ abort: } /* + * Process delayed final ACKs that we haven't subsumed into a subsequent call. + */ +static void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn) +{ + unsigned long j = jiffies, next_j; + unsigned int channel; + bool set; + +again: + next_j = j + LONG_MAX; + set = false; + for (channel = 0; channel < RXRPC_MAXCALLS; channel++) { + struct rxrpc_channel *chan = &conn->channels[channel]; + unsigned long ack_at; + + if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags)) + continue; + + smp_rmb(); /* vs rxrpc_disconnect_client_call */ + ack_at = READ_ONCE(chan->final_ack_at); + + if (time_before(j, ack_at)) { + if (time_before(ack_at, next_j)) { + next_j = ack_at; + set = true; + } + continue; + } + + if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, + &conn->flags)) + rxrpc_conn_retransmit_call(conn, NULL, channel); + } + + j = jiffies; + if (time_before_eq(next_j, j)) + goto again; + if (set) + rxrpc_reduce_conn_timer(conn, next_j); +} + +/* * connection-level event processor */ void rxrpc_process_connection(struct work_struct *work) @@ -394,6 +438,10 @@ void rxrpc_process_connection(struct work_struct *work) if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events)) rxrpc_secure_connection(conn); + /* Process delayed ACKs whose time has come. 
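rxrpc_process_delayed_final_acks() above pairs memory barriers with the disconnect path earlier in this diff: the producer stores the per-channel deadline, issues smp_wmb(), then sets the flag bit, while the consumer tests the bit, issues smp_rmb(), then reads the deadline. A stripped-down sketch of that publish/consume ordering (the types and names here are placeholders, not the rxrpc ones):

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

struct deadline_slot {
	unsigned long	deadline;
};

/* Producer: make the deadline visible before the flag that advertises it. */
static void publish_deadline(struct deadline_slot *slot, unsigned long *flags,
			     int bit, unsigned long when)
{
	WRITE_ONCE(slot->deadline, when);
	smp_wmb();			/* deadline store before flag set */
	set_bit(bit, flags);
}

/* Consumer: only trust the deadline after the flag has been observed. */
static bool read_deadline(struct deadline_slot *slot, unsigned long *flags,
			  int bit, unsigned long *when)
{
	if (!test_bit(bit, flags))
		return false;
	smp_rmb();			/* pairs with smp_wmb() in the producer */
	*when = READ_ONCE(slot->deadline);
	return true;
}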
*/ + if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK) + rxrpc_process_delayed_final_acks(conn); + /* go through the conn-level event packets, releasing the ref on this * connection that each one has when we've finished with it */ while ((skb = skb_dequeue(&conn->rx_queue))) { diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c index fe575798592f..1aad04a32d5e 100644 --- a/net/rxrpc/conn_object.c +++ b/net/rxrpc/conn_object.c @@ -20,10 +20,19 @@ /* * Time till a connection expires after last use (in seconds). */ -unsigned int rxrpc_connection_expiry = 10 * 60; +unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60; +unsigned int __read_mostly rxrpc_closed_conn_expiry = 10; static void rxrpc_destroy_connection(struct rcu_head *); +static void rxrpc_connection_timer(struct timer_list *timer) +{ + struct rxrpc_connection *conn = + container_of(timer, struct rxrpc_connection, timer); + + rxrpc_queue_conn(conn); +} + /* * allocate a new connection */ @@ -38,6 +47,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp) INIT_LIST_HEAD(&conn->cache_link); spin_lock_init(&conn->channel_lock); INIT_LIST_HEAD(&conn->waiting_calls); + timer_setup(&conn->timer, &rxrpc_connection_timer, 0); INIT_WORK(&conn->processor, &rxrpc_process_connection); INIT_LIST_HEAD(&conn->proc_link); INIT_LIST_HEAD(&conn->link); @@ -301,21 +311,29 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn) } /* + * Set the service connection reap timer. + */ +static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet, + unsigned long reap_at) +{ + if (rxnet->live) + timer_reduce(&rxnet->service_conn_reap_timer, reap_at); +} + +/* * Release a service connection */ void rxrpc_put_service_conn(struct rxrpc_connection *conn) { - struct rxrpc_net *rxnet; const void *here = __builtin_return_address(0); int n; n = atomic_dec_return(&conn->usage); trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here); ASSERTCMP(n, >=, 0); - if (n == 0) { - rxnet = conn->params.local->rxnet; - rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0); - } + if (n == 1) + rxrpc_set_service_reap_timer(conn->params.local->rxnet, + jiffies + rxrpc_connection_expiry); } /* @@ -332,6 +350,7 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu) _net("DESTROY CONN %d", conn->debug_id); + del_timer_sync(&conn->timer); rxrpc_purge_queue(&conn->rx_queue); conn->security->clear(conn); @@ -351,17 +370,15 @@ void rxrpc_service_connection_reaper(struct work_struct *work) { struct rxrpc_connection *conn, *_p; struct rxrpc_net *rxnet = - container_of(to_delayed_work(work), - struct rxrpc_net, service_conn_reaper); - unsigned long reap_older_than, earliest, idle_timestamp, now; + container_of(work, struct rxrpc_net, service_conn_reaper); + unsigned long expire_at, earliest, idle_timestamp, now; LIST_HEAD(graveyard); _enter(""); now = jiffies; - reap_older_than = now - rxrpc_connection_expiry * HZ; - earliest = ULONG_MAX; + earliest = now + MAX_JIFFY_OFFSET; write_lock(&rxnet->conn_lock); list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) { @@ -371,15 +388,21 @@ void rxrpc_service_connection_reaper(struct work_struct *work) if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) continue; - idle_timestamp = READ_ONCE(conn->idle_timestamp); - _debug("reap CONN %d { u=%d,t=%ld }", - conn->debug_id, atomic_read(&conn->usage), - (long)reap_older_than - (long)idle_timestamp); - - if (time_after(idle_timestamp, reap_older_than)) { - if (time_before(idle_timestamp, earliest)) - earliest = idle_timestamp; - continue; + if (rxnet->live) 
{ + idle_timestamp = READ_ONCE(conn->idle_timestamp); + expire_at = idle_timestamp + rxrpc_connection_expiry * HZ; + if (conn->params.local->service_closed) + expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ; + + _debug("reap CONN %d { u=%d,t=%ld }", + conn->debug_id, atomic_read(&conn->usage), + (long)expire_at - (long)now); + + if (time_before(now, expire_at)) { + if (time_before(expire_at, earliest)) + earliest = expire_at; + continue; + } } /* The usage count sits at 1 whilst the object is unused on the @@ -387,6 +410,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work) */ if (atomic_cmpxchg(&conn->usage, 1, 0) != 1) continue; + trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0); if (rxrpc_conn_is_client(conn)) BUG(); @@ -397,11 +421,10 @@ void rxrpc_service_connection_reaper(struct work_struct *work) } write_unlock(&rxnet->conn_lock); - if (earliest != ULONG_MAX) { - _debug("reschedule reaper %ld", (long) earliest - now); + if (earliest != now + MAX_JIFFY_OFFSET) { + _debug("reschedule reaper %ld", (long)earliest - (long)now); ASSERT(time_after(earliest, now)); - rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, - earliest - now); + rxrpc_set_service_reap_timer(rxnet, earliest); } while (!list_empty(&graveyard)) { @@ -429,9 +452,8 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet) rxrpc_destroy_all_client_connections(rxnet); - rxrpc_connection_expiry = 0; - cancel_delayed_work(&rxnet->client_conn_reaper); - rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, 0); + del_timer_sync(&rxnet->service_conn_reap_timer); + rxrpc_queue_work(&rxnet->service_conn_reaper); flush_workqueue(rxrpc_workqueue); write_lock(&rxnet->conn_lock); diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c index 1b592073ec96..23a5e61d8f79 100644 --- a/net/rxrpc/input.c +++ b/net/rxrpc/input.c @@ -318,16 +318,18 @@ bad_state: static bool rxrpc_receiving_reply(struct rxrpc_call *call) { struct rxrpc_ack_summary summary = { 0 }; + unsigned long now, timo; rxrpc_seq_t top = READ_ONCE(call->tx_top); if (call->ackr_reason) { spin_lock_bh(&call->lock); call->ackr_reason = 0; - call->resend_at = call->expire_at; - call->ack_at = call->expire_at; spin_unlock_bh(&call->lock); - rxrpc_set_timer(call, rxrpc_timer_init_for_reply, - ktime_get_real()); + now = jiffies; + timo = now + MAX_JIFFY_OFFSET; + WRITE_ONCE(call->resend_at, timo); + WRITE_ONCE(call->ack_at, timo); + trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now); } if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) @@ -437,6 +439,19 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb, if (state >= RXRPC_CALL_COMPLETE) return; + if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) { + unsigned long timo = READ_ONCE(call->next_req_timo); + unsigned long now, expect_req_by; + + if (timo) { + now = jiffies; + expect_req_by = now + timo; + WRITE_ONCE(call->expect_req_by, expect_req_by); + rxrpc_reduce_call_timer(call, expect_req_by, now, + rxrpc_timer_set_for_idle); + } + } + /* Received data implicitly ACKs all of the request packets we sent * when we're acting as a client. */ @@ -616,6 +631,43 @@ found: } /* + * Process the response to a ping that we sent to find out if we lost an ACK. + * + * If we got back a ping response that indicates a lower tx_top than what we + * had at the time of the ping transmission, we adjudge all the DATA packets + * sent between the response tx_top and the ping-time tx_top to have been lost. 
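The rewritten service connection reaper above compares jiffies deadlines with time_before(), which stays correct across jiffies wraparound, and uses now + MAX_JIFFY_OFFSET rather than ULONG_MAX as its "nothing pending" sentinel, since a raw ULONG_MAX would not survive a wrap-safe comparison. A condensed sketch of that scan shape (the list and its fields are made up for illustration):

#include <linux/jiffies.h>
#include <linux/list.h>

struct reapable {
	struct list_head	link;
	unsigned long		expire_at;
};

static unsigned long scan_for_earliest(struct list_head *items,
				       unsigned long now)
{
	struct reapable *r;
	unsigned long earliest = now + MAX_JIFFY_OFFSET;

	list_for_each_entry(r, items, link) {
		if (time_before(now, r->expire_at)) {
			/* Not expired yet; remember the soonest deadline. */
			if (time_before(r->expire_at, earliest))
				earliest = r->expire_at;
			continue;
		}
		/* expired: reap r here */
	}
	return earliest;	/* == now + MAX_JIFFY_OFFSET if nothing is pending */
}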
+ */ +static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call) +{ + rxrpc_seq_t top, bottom, seq; + bool resend = false; + + spin_lock_bh(&call->lock); + + bottom = call->tx_hard_ack + 1; + top = call->acks_lost_top; + if (before(bottom, top)) { + for (seq = bottom; before_eq(seq, top); seq++) { + int ix = seq & RXRPC_RXTX_BUFF_MASK; + u8 annotation = call->rxtx_annotations[ix]; + u8 anno_type = annotation & RXRPC_TX_ANNO_MASK; + + if (anno_type != RXRPC_TX_ANNO_UNACK) + continue; + annotation &= ~RXRPC_TX_ANNO_MASK; + annotation |= RXRPC_TX_ANNO_RETRANS; + call->rxtx_annotations[ix] = annotation; + resend = true; + } + } + + spin_unlock_bh(&call->lock); + + if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events)) + rxrpc_queue_call(call); +} + +/* * Process a ping response. */ static void rxrpc_input_ping_response(struct rxrpc_call *call, @@ -630,6 +682,9 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call, smp_rmb(); ping_serial = call->ping_serial; + if (orig_serial == call->acks_lost_ping) + rxrpc_input_check_for_lost_ack(call); + if (!test_bit(RXRPC_CALL_PINGING, &call->flags) || before(orig_serial, ping_serial)) return; @@ -908,9 +963,20 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb, u16 skew) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + unsigned long timo; _enter("%p,%p", call, skb); + timo = READ_ONCE(call->next_rx_timo); + if (timo) { + unsigned long now = jiffies, expect_rx_by; + + expect_rx_by = jiffies + timo; + WRITE_ONCE(call->expect_rx_by, expect_rx_by); + rxrpc_reduce_call_timer(call, expect_rx_by, now, + rxrpc_timer_set_for_normal); + } + switch (sp->hdr.type) { case RXRPC_PACKET_TYPE_DATA: rxrpc_input_data(call, skb, skew); diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c index 1a2d4b112064..c1d9e7fd7448 100644 --- a/net/rxrpc/misc.c +++ b/net/rxrpc/misc.c @@ -21,33 +21,28 @@ unsigned int rxrpc_max_backlog __read_mostly = 10; /* - * Maximum lifetime of a call (in mx). - */ -unsigned int rxrpc_max_call_lifetime = 60 * 1000; - -/* * How long to wait before scheduling ACK generation after seeing a - * packet with RXRPC_REQUEST_ACK set (in ms). + * packet with RXRPC_REQUEST_ACK set (in jiffies). */ -unsigned int rxrpc_requested_ack_delay = 1; +unsigned long rxrpc_requested_ack_delay = 1; /* - * How long to wait before scheduling an ACK with subtype DELAY (in ms). + * How long to wait before scheduling an ACK with subtype DELAY (in jiffies). * * We use this when we've received new data packets. If those packets aren't * all consumed within this time we will send a DELAY ACK if an ACK was not * requested to let the sender know it doesn't need to resend. */ -unsigned int rxrpc_soft_ack_delay = 1 * 1000; +unsigned long rxrpc_soft_ack_delay = HZ; /* - * How long to wait before scheduling an ACK with subtype IDLE (in ms). + * How long to wait before scheduling an ACK with subtype IDLE (in jiffies). * * We use this when we've consumed some previously soft-ACK'd packets when * further packets aren't immediately received to decide when to send an IDLE * ACK let the other end know that it can free up its Tx buffer space. */ -unsigned int rxrpc_idle_ack_delay = 0.5 * 1000; +unsigned long rxrpc_idle_ack_delay = HZ / 2; /* * Receive window size in packets. This indicates the maximum number of @@ -75,7 +70,7 @@ unsigned int rxrpc_rx_jumbo_max = 4; /* * Time till packet resend (in milliseconds). 
*/ -unsigned int rxrpc_resend_timeout = 4 * 1000; +unsigned long rxrpc_resend_timeout = 4 * HZ; const s8 rxrpc_ack_priority[] = { [0] = 0, diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c index 7edceb8522f5..f18c9248e0d4 100644 --- a/net/rxrpc/net_ns.c +++ b/net/rxrpc/net_ns.c @@ -14,6 +14,24 @@ unsigned int rxrpc_net_id; +static void rxrpc_client_conn_reap_timeout(struct timer_list *timer) +{ + struct rxrpc_net *rxnet = + container_of(timer, struct rxrpc_net, client_conn_reap_timer); + + if (rxnet->live) + rxrpc_queue_work(&rxnet->client_conn_reaper); +} + +static void rxrpc_service_conn_reap_timeout(struct timer_list *timer) +{ + struct rxrpc_net *rxnet = + container_of(timer, struct rxrpc_net, service_conn_reap_timer); + + if (rxnet->live) + rxrpc_queue_work(&rxnet->service_conn_reaper); +} + /* * Initialise a per-network namespace record. */ @@ -22,6 +40,7 @@ static __net_init int rxrpc_init_net(struct net *net) struct rxrpc_net *rxnet = rxrpc_net(net); int ret; + rxnet->live = true; get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch)); rxnet->epoch |= RXRPC_RANDOM_EPOCH; @@ -31,8 +50,10 @@ static __net_init int rxrpc_init_net(struct net *net) INIT_LIST_HEAD(&rxnet->conn_proc_list); INIT_LIST_HEAD(&rxnet->service_conns); rwlock_init(&rxnet->conn_lock); - INIT_DELAYED_WORK(&rxnet->service_conn_reaper, - rxrpc_service_connection_reaper); + INIT_WORK(&rxnet->service_conn_reaper, + rxrpc_service_connection_reaper); + timer_setup(&rxnet->service_conn_reap_timer, + rxrpc_service_conn_reap_timeout, 0); rxnet->nr_client_conns = 0; rxnet->nr_active_client_conns = 0; @@ -42,8 +63,10 @@ static __net_init int rxrpc_init_net(struct net *net) INIT_LIST_HEAD(&rxnet->waiting_client_conns); INIT_LIST_HEAD(&rxnet->active_client_conns); INIT_LIST_HEAD(&rxnet->idle_client_conns); - INIT_DELAYED_WORK(&rxnet->client_conn_reaper, - rxrpc_discard_expired_client_conns); + INIT_WORK(&rxnet->client_conn_reaper, + rxrpc_discard_expired_client_conns); + timer_setup(&rxnet->client_conn_reap_timer, + rxrpc_client_conn_reap_timeout, 0); INIT_LIST_HEAD(&rxnet->local_endpoints); mutex_init(&rxnet->local_mutex); @@ -60,6 +83,7 @@ static __net_init int rxrpc_init_net(struct net *net) return 0; err_proc: + rxnet->live = false; return ret; } @@ -70,6 +94,7 @@ static __net_exit void rxrpc_exit_net(struct net *net) { struct rxrpc_net *rxnet = rxrpc_net(net); + rxnet->live = false; rxrpc_destroy_all_calls(rxnet); rxrpc_destroy_all_connections(rxnet); rxrpc_destroy_all_locals(rxnet); diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index f47659c7b224..42410e910aff 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c @@ -33,6 +33,24 @@ struct rxrpc_abort_buffer { }; /* + * Arrange for a keepalive ping a certain time after we last transmitted. This + * lets the far side know we're still interested in this call and helps keep + * the route through any intervening firewall open. + * + * Receiving a response to the ping will prevent the ->expect_rx_by timer from + * expiring. + */ +static void rxrpc_set_keepalive(struct rxrpc_call *call) +{ + unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6; + + keepalive_at += now; + WRITE_ONCE(call->keepalive_at, keepalive_at); + rxrpc_reduce_call_timer(call, keepalive_at, now, + rxrpc_timer_set_for_keepalive); +} + +/* * Fill out an ACK packet. */ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, @@ -95,7 +113,8 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, /* * Send an ACK call packet. 
*/ -int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping) +int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping, + rxrpc_serial_t *_serial) { struct rxrpc_connection *conn = NULL; struct rxrpc_ack_buffer *pkt; @@ -165,6 +184,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping) ntohl(pkt->ack.firstPacket), ntohl(pkt->ack.serial), pkt->ack.reason, pkt->ack.nAcks); + if (_serial) + *_serial = serial; if (ping) { call->ping_serial = serial; @@ -202,6 +223,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping) call->ackr_seen = top; spin_unlock_bh(&call->lock); } + + rxrpc_set_keepalive(call); } out: @@ -323,7 +346,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb, * ACKs if a DATA packet appears to have been lost. */ if (!(sp->hdr.flags & RXRPC_LAST_PACKET) && - (retrans || + (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) || + retrans || call->cong_mode == RXRPC_CALL_SLOW_START || (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) || ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), @@ -370,8 +394,23 @@ done: if (whdr.flags & RXRPC_REQUEST_ACK) { call->peer->rtt_last_req = now; trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial); + if (call->peer->rtt_usage > 1) { + unsigned long nowj = jiffies, ack_lost_at; + + ack_lost_at = nsecs_to_jiffies(2 * call->peer->rtt); + if (ack_lost_at < 1) + ack_lost_at = 1; + + ack_lost_at += nowj; + WRITE_ONCE(call->ack_lost_at, ack_lost_at); + rxrpc_reduce_call_timer(call, ack_lost_at, nowj, + rxrpc_timer_set_for_lost_ack); + } } } + + rxrpc_set_keepalive(call); + _leave(" = %d [%u]", ret, call->peer->maxdata); return ret; diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index 8510a98b87e1..cc21e8db25b0 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c @@ -144,11 +144,13 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial) trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top); ASSERTCMP(call->rx_hard_ack, ==, call->rx_top); +#if 0 // TODO: May want to transmit final ACK under some circumstances anyway if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) { rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false, rxrpc_propose_ack_terminal_ack); - rxrpc_send_ack_packet(call, false); + rxrpc_send_ack_packet(call, false, NULL); } +#endif write_lock_bh(&call->state_lock); @@ -161,7 +163,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial) case RXRPC_CALL_SERVER_RECV_REQUEST: call->tx_phase = true; call->state = RXRPC_CALL_SERVER_ACK_REQUEST; - call->ack_at = call->expire_at; + call->expect_req_by = jiffies + MAX_JIFFY_OFFSET; write_unlock_bh(&call->state_lock); rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true, rxrpc_propose_ack_processing_op); @@ -217,10 +219,10 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call) after_eq(top, call->ackr_seen + 2) || (hard_ack == top && after(hard_ack, call->ackr_consumed))) rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, - true, false, + true, true, rxrpc_propose_ack_rotate_rx); - if (call->ackr_reason) - rxrpc_send_ack_packet(call, false); + if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY) + rxrpc_send_ack_packet(call, false, NULL); } } diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c index 7d2595582c09..a1c53ac066a1 100644 --- a/net/rxrpc/sendmsg.c +++ b/net/rxrpc/sendmsg.c @@ -21,22 +21,6 @@ #include <net/af_rxrpc.h> #include "ar-internal.h" -enum rxrpc_command { - RXRPC_CMD_SEND_DATA, /* send data message */ 
- RXRPC_CMD_SEND_ABORT, /* request abort generation */ - RXRPC_CMD_ACCEPT, /* [server] accept incoming call */ - RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */ -}; - -struct rxrpc_send_params { - s64 tx_total_len; /* Total Tx data length (if send data) */ - unsigned long user_call_ID; /* User's call ID */ - u32 abort_code; /* Abort code to Tx (if abort) */ - enum rxrpc_command command : 8; /* The command to implement */ - bool exclusive; /* Shared or exclusive call */ - bool upgrade; /* If the connection is upgradeable */ -}; - /* * Wait for space to appear in the Tx queue or a signal to occur. */ @@ -174,6 +158,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, rxrpc_notify_end_tx_t notify_end_tx) { struct rxrpc_skb_priv *sp = rxrpc_skb(skb); + unsigned long now; rxrpc_seq_t seq = sp->hdr.seq; int ret, ix; u8 annotation = RXRPC_TX_ANNO_UNACK; @@ -213,11 +198,11 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, break; case RXRPC_CALL_SERVER_ACK_REQUEST: call->state = RXRPC_CALL_SERVER_SEND_REPLY; - call->ack_at = call->expire_at; + now = jiffies; + WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET); if (call->ackr_reason == RXRPC_ACK_DELAY) call->ackr_reason = 0; - __rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply, - ktime_get_real()); + trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now); if (!last) break; /* Fall through */ @@ -239,14 +224,19 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, _debug("need instant resend %d", ret); rxrpc_instant_resend(call, ix); } else { - ktime_t now = ktime_get_real(), resend_at; - - resend_at = ktime_add_ms(now, rxrpc_resend_timeout); - - if (ktime_before(resend_at, call->resend_at)) { - call->resend_at = resend_at; - rxrpc_set_timer(call, rxrpc_timer_set_for_send, now); - } + unsigned long now = jiffies, resend_at; + + if (call->peer->rtt_usage > 1) + resend_at = nsecs_to_jiffies(call->peer->rtt * 3 / 2); + else + resend_at = rxrpc_resend_timeout; + if (resend_at < 1) + resend_at = 1; + + resend_at = now + rxrpc_resend_timeout; + WRITE_ONCE(call->resend_at, resend_at); + rxrpc_reduce_call_timer(call, resend_at, now, + rxrpc_timer_set_for_send); } rxrpc_free_skb(skb, rxrpc_skb_tx_freed); @@ -295,7 +285,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx, do { /* Check to see if there's a ping ACK to reply to. 
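The resend and lost-ACK deadlines in the hunks above are scaled from the measured peer RTT, which is kept in nanoseconds: the RTT is multiplied up, converted with nsecs_to_jiffies(), and clamped so a sub-jiffy RTT still produces a real timeout. A small sketch of that conversion, with made-up names and the 2 x RTT factor used by the lost-ACK check:

#include <linux/jiffies.h>
#include <linux/types.h>

static unsigned long rtt_deadline(u64 rtt_ns, unsigned long now)
{
	unsigned long delay = nsecs_to_jiffies(2 * rtt_ns);

	if (delay < 1)
		delay = 1;	/* a sub-jiffy RTT still needs a non-zero timeout */
	return now + delay;
}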
*/ if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE) - rxrpc_send_ack_packet(call, false); + rxrpc_send_ack_packet(call, false, NULL); if (!skb) { size_t size, chunk, max, space; @@ -480,11 +470,11 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p) if (msg->msg_flags & MSG_CMSG_COMPAT) { if (len != sizeof(u32)) return -EINVAL; - p->user_call_ID = *(u32 *)CMSG_DATA(cmsg); + p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg); } else { if (len != sizeof(unsigned long)) return -EINVAL; - p->user_call_ID = *(unsigned long *) + p->call.user_call_ID = *(unsigned long *) CMSG_DATA(cmsg); } got_user_ID = true; @@ -522,11 +512,24 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p) break; case RXRPC_TX_LENGTH: - if (p->tx_total_len != -1 || len != sizeof(__s64)) + if (p->call.tx_total_len != -1 || len != sizeof(__s64)) + return -EINVAL; + p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg); + if (p->call.tx_total_len < 0) return -EINVAL; - p->tx_total_len = *(__s64 *)CMSG_DATA(cmsg); - if (p->tx_total_len < 0) + break; + + case RXRPC_SET_CALL_TIMEOUT: + if (len & 3 || len < 4 || len > 12) return -EINVAL; + memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len); + p->call.nr_timeouts = len / 4; + if (p->call.timeouts.hard > INT_MAX / HZ) + return -ERANGE; + if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000) + return -ERANGE; + if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000) + return -ERANGE; break; default: @@ -536,7 +539,7 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p) if (!got_user_ID) return -EINVAL; - if (p->tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA) + if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA) return -EINVAL; _leave(" = 0"); return 0; @@ -576,8 +579,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, cp.exclusive = rx->exclusive | p->exclusive; cp.upgrade = p->upgrade; cp.service_id = srx->srx_service; - call = rxrpc_new_client_call(rx, &cp, srx, p->user_call_ID, - p->tx_total_len, GFP_KERNEL); + call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL); /* The socket is now unlocked */ _leave(" = %p\n", call); @@ -594,15 +596,17 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) { enum rxrpc_call_state state; struct rxrpc_call *call; + unsigned long now, j; int ret; struct rxrpc_send_params p = { - .tx_total_len = -1, - .user_call_ID = 0, - .abort_code = 0, - .command = RXRPC_CMD_SEND_DATA, - .exclusive = false, - .upgrade = true, + .call.tx_total_len = -1, + .call.user_call_ID = 0, + .call.nr_timeouts = 0, + .abort_code = 0, + .command = RXRPC_CMD_SEND_DATA, + .exclusive = false, + .upgrade = false, }; _enter(""); @@ -615,15 +619,15 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) ret = -EINVAL; if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) goto error_release_sock; - call = rxrpc_accept_call(rx, p.user_call_ID, NULL); + call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL); /* The socket is now unlocked. 
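The RXRPC_SET_CALL_TIMEOUT parsing above accepts a control-message payload of one to three 32-bit values (length 4 to 12 bytes, a multiple of 4), applied in the order hard, idle (request), normal (receive); per the msecs_to_jiffies() conversions in the following hunk, this version of the patch treats them as milliseconds. A userspace sketch of driving it, assuming the usual SOL_RXRPC control-message conventions, the uapi definitions from linux/rxrpc.h as updated by this series, and an already connected AF_RXRPC client socket; error handling is omitted:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/rxrpc.h>

#ifndef SOL_RXRPC
#define SOL_RXRPC 272		/* value from include/linux/socket.h */
#endif

static ssize_t send_request_with_timeouts(int fd, unsigned long call_id,
					  const void *req, size_t req_len)
{
	unsigned int timeouts[3] = { 30000, 5000, 5000 }; /* hard, idle, normal (ms) */
	union {
		char		buf[CMSG_SPACE(sizeof(call_id)) +
				    CMSG_SPACE(sizeof(timeouts))];
		struct cmsghdr	align;
	} control;
	struct iovec iov = { .iov_base = (void *)req, .iov_len = req_len };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control.buf,
		.msg_controllen	= sizeof(control.buf),
	};
	struct cmsghdr *cmsg;

	memset(&control, 0, sizeof(control));

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type	 = RXRPC_USER_CALL_ID;
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(call_id));
	memcpy(CMSG_DATA(cmsg), &call_id, sizeof(call_id));

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_RXRPC;
	cmsg->cmsg_type	 = RXRPC_SET_CALL_TIMEOUT;	/* added by this series */
	cmsg->cmsg_len	 = CMSG_LEN(sizeof(timeouts));
	memcpy(CMSG_DATA(cmsg), timeouts, sizeof(timeouts));

	return sendmsg(fd, &msg, 0);
}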
*/ if (IS_ERR(call)) return PTR_ERR(call); - rxrpc_put_call(call, rxrpc_call_put); - return 0; + ret = 0; + goto out_put_unlock; } - call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID); + call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID); if (!call) { ret = -EBADSLT; if (p.command != RXRPC_CMD_SEND_DATA) @@ -653,14 +657,39 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) goto error_put; } - if (p.tx_total_len != -1) { + if (p.call.tx_total_len != -1) { ret = -EINVAL; if (call->tx_total_len != -1 || call->tx_pending || call->tx_top != 0) goto error_put; - call->tx_total_len = p.tx_total_len; + call->tx_total_len = p.call.tx_total_len; + } + } + + switch (p.call.nr_timeouts) { + case 3: + j = msecs_to_jiffies(p.call.timeouts.normal); + if (p.call.timeouts.normal > 0 && j == 0) + j = 1; + WRITE_ONCE(call->next_rx_timo, j); + /* Fall through */ + case 2: + j = msecs_to_jiffies(p.call.timeouts.idle); + if (p.call.timeouts.idle > 0 && j == 0) + j = 1; + WRITE_ONCE(call->next_req_timo, j); + /* Fall through */ + case 1: + if (p.call.timeouts.hard > 0) { + j = msecs_to_jiffies(p.call.timeouts.hard); + now = jiffies; + j += now; + WRITE_ONCE(call->expect_term_by, j); + rxrpc_reduce_call_timer(call, j, now, + rxrpc_timer_set_for_hard); } + break; } state = READ_ONCE(call->state); @@ -689,6 +718,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) ret = rxrpc_send_data(rx, call, msg, len, NULL); } +out_put_unlock: mutex_unlock(&call->user_mutex); error_put: rxrpc_put_call(call, rxrpc_call_put); diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c index 34c706d2f79c..4a7af7aff37d 100644 --- a/net/rxrpc/sysctl.c +++ b/net/rxrpc/sysctl.c @@ -21,6 +21,8 @@ static const unsigned int four = 4; static const unsigned int thirtytwo = 32; static const unsigned int n_65535 = 65535; static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1; +static const unsigned long one_jiffy = 1; +static const unsigned long max_jiffies = MAX_JIFFY_OFFSET; /* * RxRPC operating parameters. @@ -29,64 +31,60 @@ static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1; * information on the individual parameters. 
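With the tunables now held as unsigned long jiffies values, the sysctl entries in the following hunk switch to proc_doulongvec_ms_jiffies_minmax, which presents the value to userspace in milliseconds while storing jiffies, bounded by the one_jiffy/max_jiffies constants added just above. A sketch of one entry in that style (example_timeout is not a real rxrpc tunable; table registration is omitted):

#include <linux/sysctl.h>
#include <linux/jiffies.h>

static unsigned long example_timeout = 4 * HZ;	/* read back by userspace as 4000 */
static const unsigned long one_jiffy = 1;
static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;

static struct ctl_table example_sysctl_table[] = {
	{
		.procname	= "example_timeout",
		.data		= &example_timeout,
		.maxlen		= sizeof(unsigned long),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_ms_jiffies_minmax,
		.extra1		= (void *)&one_jiffy,
		.extra2		= (void *)&max_jiffies,
	},
	{ }
};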
*/ static struct ctl_table rxrpc_sysctl_table[] = { - /* Values measured in milliseconds */ + /* Values measured in milliseconds but used in jiffies */ { .procname = "req_ack_delay", .data = &rxrpc_requested_ack_delay, - .maxlen = sizeof(unsigned int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec, - .extra1 = (void *)&zero, + .proc_handler = proc_doulongvec_ms_jiffies_minmax, + .extra1 = (void *)&one_jiffy, + .extra2 = (void *)&max_jiffies, }, { .procname = "soft_ack_delay", .data = &rxrpc_soft_ack_delay, - .maxlen = sizeof(unsigned int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec, - .extra1 = (void *)&one, + .proc_handler = proc_doulongvec_ms_jiffies_minmax, + .extra1 = (void *)&one_jiffy, + .extra2 = (void *)&max_jiffies, }, { .procname = "idle_ack_delay", .data = &rxrpc_idle_ack_delay, - .maxlen = sizeof(unsigned int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec, - .extra1 = (void *)&one, - }, - { - .procname = "resend_timeout", - .data = &rxrpc_resend_timeout, - .maxlen = sizeof(unsigned int), - .mode = 0644, - .proc_handler = proc_dointvec, - .extra1 = (void *)&one, + .proc_handler = proc_doulongvec_ms_jiffies_minmax, + .extra1 = (void *)&one_jiffy, + .extra2 = (void *)&max_jiffies, }, { .procname = "idle_conn_expiry", .data = &rxrpc_conn_idle_client_expiry, - .maxlen = sizeof(unsigned int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_ms_jiffies, - .extra1 = (void *)&one, + .proc_handler = proc_doulongvec_ms_jiffies_minmax, + .extra1 = (void *)&one_jiffy, + .extra2 = (void *)&max_jiffies, }, { .procname = "idle_conn_fast_expiry", .data = &rxrpc_conn_idle_client_fast_expiry, - .maxlen = sizeof(unsigned int), + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec_ms_jiffies, - .extra1 = (void *)&one, + .proc_handler = proc_doulongvec_ms_jiffies_minmax, + .extra1 = (void *)&one_jiffy, + .extra2 = (void *)&max_jiffies, }, - - /* Values measured in seconds but used in jiffies */ { - .procname = "max_call_lifetime", - .data = &rxrpc_max_call_lifetime, - .maxlen = sizeof(unsigned int), + .procname = "resend_timeout", + .data = &rxrpc_resend_timeout, + .maxlen = sizeof(unsigned long), .mode = 0644, - .proc_handler = proc_dointvec, - .extra1 = (void *)&one, + .proc_handler = proc_doulongvec_ms_jiffies_minmax, + .extra1 = (void *)&one_jiffy, + .extra2 = (void *)&max_jiffies, }, /* Non-time values */ diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c index 1c40caadcff9..d836f998117b 100644 --- a/net/sched/act_csum.c +++ b/net/sched/act_csum.c @@ -229,6 +229,9 @@ static int tcf_csum_ipv4_udp(struct sk_buff *skb, unsigned int ihl, const struct iphdr *iph; u16 ul; + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) + return 1; + /* * Support both UDP and UDPLITE checksum algorithms, Don't use * udph->len to get the real length without any protocol check, @@ -282,6 +285,9 @@ static int tcf_csum_ipv6_udp(struct sk_buff *skb, unsigned int ihl, const struct ipv6hdr *ip6h; u16 ul; + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP) + return 1; + /* * Support both UDP and UDPLITE checksum algorithms, Don't use * udph->len to get the real length without any protocol check, diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index ab255b421781..ddcf04b4ab43 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -205,13 +205,14 @@ static void tcf_chain_head_change(struct tcf_chain *chain, 
static void tcf_chain_flush(struct tcf_chain *chain) { - struct tcf_proto *tp; + struct tcf_proto *tp = rtnl_dereference(chain->filter_chain); tcf_chain_head_change(chain, NULL); - while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) { + while (tp) { RCU_INIT_POINTER(chain->filter_chain, tp->next); - tcf_chain_put(chain); tcf_proto_destroy(tp); + tp = rtnl_dereference(chain->filter_chain); + tcf_chain_put(chain); } } @@ -335,7 +336,8 @@ static void tcf_block_put_final(struct work_struct *work) struct tcf_chain *chain, *tmp; rtnl_lock(); - /* Only chain 0 should be still here. */ + + /* At this point, all the chains should have refcnt == 1. */ list_for_each_entry_safe(chain, tmp, &block->chain_list, list) tcf_chain_put(chain); rtnl_unlock(); @@ -343,15 +345,21 @@ static void tcf_block_put_final(struct work_struct *work) } /* XXX: Standalone actions are not allowed to jump to any chain, and bound - * actions should be all removed after flushing. However, filters are now - * destroyed in tc filter workqueue with RTNL lock, they can not race here. + * actions should be all removed after flushing. */ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, struct tcf_block_ext_info *ei) { - struct tcf_chain *chain, *tmp; + struct tcf_chain *chain; - list_for_each_entry_safe(chain, tmp, &block->chain_list, list) + /* Hold a refcnt for all chains, except 0, so that they don't disappear + * while we are iterating. + */ + list_for_each_entry(chain, &block->chain_list, list) + if (chain->index) + tcf_chain_hold(chain); + + list_for_each_entry(chain, &block->chain_list, list) tcf_chain_flush(chain); tcf_block_offload_unbind(block, q, ei); diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index fb680dafac5a..a9f3e317055c 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -382,15 +382,13 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog, { struct bpf_prog *fp; char *name = NULL; + bool skip_sw; u32 bpf_fd; bpf_fd = nla_get_u32(tb[TCA_BPF_FD]); + skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW; - if (gen_flags & TCA_CLS_FLAGS_SKIP_SW) - fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, - qdisc_dev(tp->q)); - else - fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS); + fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw); if (IS_ERR(fp)) return PTR_ERR(fp); diff --git a/net/sctp/stream.c b/net/sctp/stream.c index a11db21dc8a0..a20145b3a949 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c @@ -254,6 +254,30 @@ static int sctp_send_reconf(struct sctp_association *asoc, return retval; } +static bool sctp_stream_outq_is_empty(struct sctp_stream *stream, + __u16 str_nums, __be16 *str_list) +{ + struct sctp_association *asoc; + __u16 i; + + asoc = container_of(stream, struct sctp_association, stream); + if (!asoc->outqueue.out_qlen) + return true; + + if (!str_nums) + return false; + + for (i = 0; i < str_nums; i++) { + __u16 sid = ntohs(str_list[i]); + + if (stream->out[sid].ext && + !list_empty(&stream->out[sid].ext->outq)) + return false; + } + + return true; +} + int sctp_send_reset_streams(struct sctp_association *asoc, struct sctp_reset_streams *params) { @@ -317,6 +341,11 @@ int sctp_send_reset_streams(struct sctp_association *asoc, for (i = 0; i < str_nums; i++) nstr_list[i] = htons(str_list[i]); + if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) { + retval = -EAGAIN; + goto out; + } + chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in); kfree(nstr_list); @@ -377,6 +406,9 @@ 
int sctp_send_reset_assoc(struct sctp_association *asoc) if (asoc->strreset_outstanding) return -EINPROGRESS; + if (!sctp_outq_is_empty(&asoc->outqueue)) + return -EAGAIN; + chunk = sctp_make_strreset_tsnreq(asoc); if (!chunk) return -ENOMEM; @@ -563,7 +595,7 @@ struct sctp_chunk *sctp_process_strreset_outreq( flags = SCTP_STREAM_RESET_INCOMING_SSN; } - nums = (ntohs(param.p->length) - sizeof(*outreq)) / 2; + nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16); if (nums) { str_p = outreq->list_of_streams; for (i = 0; i < nums; i++) { @@ -627,7 +659,7 @@ struct sctp_chunk *sctp_process_strreset_inreq( goto out; } - nums = (ntohs(param.p->length) - sizeof(*inreq)) / 2; + nums = (ntohs(param.p->length) - sizeof(*inreq)) / sizeof(__u16); str_p = inreq->list_of_streams; for (i = 0; i < nums; i++) { if (ntohs(str_p[i]) >= stream->outcnt) { @@ -636,6 +668,12 @@ struct sctp_chunk *sctp_process_strreset_inreq( } } + if (!sctp_stream_outq_is_empty(stream, nums, str_p)) { + result = SCTP_STRRESET_IN_PROGRESS; + asoc->strreset_inseq--; + goto err; + } + chunk = sctp_make_strreset_req(asoc, nums, str_p, 1, 0); if (!chunk) goto out; @@ -687,12 +725,18 @@ struct sctp_chunk *sctp_process_strreset_tsnreq( i = asoc->strreset_inseq - request_seq - 1; result = asoc->strreset_result[i]; if (result == SCTP_STRRESET_PERFORMED) { - next_tsn = asoc->next_tsn; + next_tsn = asoc->ctsn_ack_point + 1; init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1; } goto err; } + + if (!sctp_outq_is_empty(&asoc->outqueue)) { + result = SCTP_STRRESET_IN_PROGRESS; + goto err; + } + asoc->strreset_inseq++; if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ)) @@ -703,9 +747,10 @@ struct sctp_chunk *sctp_process_strreset_tsnreq( goto out; } - /* G3: The same processing as though a SACK chunk with no gap report - * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were - * received MUST be performed. + /* G4: The same processing as though a FWD-TSN chunk (as defined in + * [RFC3758]) with all streams affected and a new cumulative TSN + * ACK of the Receiver's Next TSN minus 1 were received MUST be + * performed. */ max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map); sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen); @@ -720,10 +765,9 @@ struct sctp_chunk *sctp_process_strreset_tsnreq( sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL, init_tsn, GFP_ATOMIC); - /* G4: The same processing as though a FWD-TSN chunk (as defined in - * [RFC3758]) with all streams affected and a new cumulative TSN - * ACK of the Receiver's Next TSN minus 1 were received MUST be - * performed. + /* G3: The same processing as though a SACK chunk with no gap report + * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were + * received MUST be performed. 
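The tcf_block_put_ext() change in the cls_api hunk above takes a reference on every non-zero chain before flushing, because flushing can drop the references that filters hold and free a chain out from under the list walk. A generic sketch of that hold-then-operate-then-release shape (made-up names, not the tc API, and without the locking the real code needs):

#include <linux/list.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct chain_sketch {
	struct list_head	list;
	refcount_t		refcnt;
};

/* In the real code, flushing drops the references that filters hold. */
static void chain_flush_sketch(struct chain_sketch *c)
{
	/* tear down the chain's contents here */
}

static void chain_put_sketch(struct chain_sketch *c)
{
	if (refcount_dec_and_test(&c->refcnt)) {
		list_del(&c->list);
		kfree(c);
	}
}

static void flush_all_chains(struct list_head *head)
{
	struct chain_sketch *c, *tmp;

	/* Pin every chain first so a flush can't free one mid-iteration. */
	list_for_each_entry(c, head, list)
		refcount_inc(&c->refcnt);

	list_for_each_entry(c, head, list)
		chain_flush_sketch(c);

	/* Drop the pins; the last put unlinks and frees the chain. */
	list_for_each_entry_safe(c, tmp, head, list)
		chain_put_sketch(c);
}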
*/ sctp_outq_free(&asoc->outqueue); @@ -927,7 +971,8 @@ struct sctp_chunk *sctp_process_strreset_resp( outreq = (struct sctp_strreset_outreq *)req; str_p = outreq->list_of_streams; - nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) / 2; + nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) / + sizeof(__u16); if (result == SCTP_STRRESET_PERFORMED) { if (nums) { @@ -956,7 +1001,8 @@ struct sctp_chunk *sctp_process_strreset_resp( inreq = (struct sctp_strreset_inreq *)req; str_p = inreq->list_of_streams; - nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / 2; + nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / + sizeof(__u16); *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags, nums, str_p, GFP_ATOMIC); @@ -975,6 +1021,7 @@ struct sctp_chunk *sctp_process_strreset_resp( if (result == SCTP_STRRESET_PERFORMED) { __u32 mtsn = sctp_tsnmap_get_max_tsn_seen( &asoc->peer.tsn_map); + LIST_HEAD(temp); sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn); sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); @@ -983,7 +1030,13 @@ struct sctp_chunk *sctp_process_strreset_resp( SCTP_TSN_MAP_INITIAL, stsn, GFP_ATOMIC); + /* Clean up sacked and abandoned queues only. As the + * out_chunk_list may not be empty, splice it to temp, + * then get it back after sctp_outq_free is done. + */ + list_splice_init(&asoc->outqueue.out_chunk_list, &temp); sctp_outq_free(&asoc->outqueue); + list_splice_init(&temp, &asoc->outqueue.out_chunk_list); asoc->next_tsn = rtsn; asoc->ctsn_ack_point = asoc->next_tsn - 1; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 2578fbd95664..94f21116dac5 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -562,7 +562,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb) { struct smc_connection *conn = &smc->conn; struct smc_link_group *lgr = conn->lgr; - struct smc_buf_desc *buf_desc = NULL; + struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM); struct list_head *buf_list; int bufsize, bufsize_short; int sk_buf_size; @@ -575,7 +575,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_rmb) /* use socket send buffer size (w/o overhead) as start value */ sk_buf_size = smc->sk.sk_sndbuf / 2; - for (bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2); + for (bufsize_short = smc_compress_bufsize(sk_buf_size); bufsize_short >= 0; bufsize_short--) { if (is_rmb) { diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 7b1ee5a0b03c..73165e9ca5bf 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -855,11 +855,13 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g return stat; if (integ_len > buf->len) return stat; - if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len)) - BUG(); + if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len)) { + WARN_ON_ONCE(1); + return stat; + } /* copy out mic... 
*/ if (read_u32_from_xdr_buf(buf, integ_len, &mic.len)) - BUG(); + return stat; if (mic.len > RPC_MAX_AUTH_SIZE) return stat; mic.data = kmalloc(mic.len, GFP_KERNEL); @@ -1611,8 +1613,10 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp) BUG_ON(integ_len % 4); *p++ = htonl(integ_len); *p++ = htonl(gc->gc_seq); - if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset, integ_len)) - BUG(); + if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset, integ_len)) { + WARN_ON_ONCE(1); + goto out_err; + } if (resbuf->tail[0].iov_base == NULL) { if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE) goto out_err; diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 71de77bd4423..e8e0831229cf 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -250,9 +250,9 @@ void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new) svc_xprt_received(new); } -int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name, - struct net *net, const int family, - const unsigned short port, int flags) +static int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name, + struct net *net, const int family, + const unsigned short port, int flags) { struct svc_xprt_class *xcl; @@ -380,7 +380,6 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt) struct svc_pool *pool; struct svc_rqst *rqstp = NULL; int cpu; - bool queued = false; if (!svc_xprt_has_something_to_do(xprt)) goto out; @@ -401,58 +400,25 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt) atomic_long_inc(&pool->sp_stats.packets); -redo_search: + dprintk("svc: transport %p put into queue\n", xprt); + spin_lock_bh(&pool->sp_lock); + list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); + pool->sp_stats.sockets_queued++; + spin_unlock_bh(&pool->sp_lock); + /* find a thread for this xprt */ rcu_read_lock(); list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) { - /* Do a lockless check first */ - if (test_bit(RQ_BUSY, &rqstp->rq_flags)) + if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) continue; - - /* - * Once the xprt has been queued, it can only be dequeued by - * the task that intends to service it. All we can do at that - * point is to try to wake this thread back up so that it can - * do so. - */ - if (!queued) { - spin_lock_bh(&rqstp->rq_lock); - if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) { - /* already busy, move on... */ - spin_unlock_bh(&rqstp->rq_lock); - continue; - } - - /* this one will do */ - rqstp->rq_xprt = xprt; - svc_xprt_get(xprt); - spin_unlock_bh(&rqstp->rq_lock); - } - rcu_read_unlock(); - atomic_long_inc(&pool->sp_stats.threads_woken); wake_up_process(rqstp->rq_task); - put_cpu(); - goto out; - } - rcu_read_unlock(); - - /* - * We didn't find an idle thread to use, so we need to queue the xprt. - * Do so and then search again. If we find one, we can't hook this one - * up to it directly but we can wake the thread up in the hopes that it - * will pick it up once it searches for a xprt to service. 
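The sctp_process_strreset_resp() hunk above parks the not-yet-sent chunks on a temporary head with list_splice_init() before sctp_outq_free() runs, then splices them back, so only the sacked and abandoned queues are torn down. list_splice_init() moves every entry and reinitialises the source head, which is what makes the park-and-restore safe. A tiny sketch of the idiom (names are made up, and destroy_queue_sketch stands in for the destructive operation):

#include <linux/list.h>

/* Destroys everything reachable from @q, which normally includes @keep. */
void destroy_queue_sketch(struct list_head *q);

static void destroy_queue_except(struct list_head *q, struct list_head *keep)
{
	LIST_HEAD(temp);

	list_splice_init(keep, &temp);	/* park: @keep is now empty */
	destroy_queue_sketch(q);	/* safe: nothing left to free on @keep */
	list_splice_init(&temp, keep);	/* restore the parked entries */
}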
- */ - if (!queued) { - queued = true; - dprintk("svc: transport %p put into queue\n", xprt); - spin_lock_bh(&pool->sp_lock); - list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); - pool->sp_stats.sockets_queued++; - spin_unlock_bh(&pool->sp_lock); - goto redo_search; + goto out_unlock; } + set_bit(SP_CONGESTED, &pool->sp_flags); rqstp = NULL; +out_unlock: + rcu_read_unlock(); put_cpu(); out: trace_svc_xprt_do_enqueue(xprt, rqstp); @@ -721,38 +687,25 @@ rqst_should_sleep(struct svc_rqst *rqstp) static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) { - struct svc_xprt *xprt; struct svc_pool *pool = rqstp->rq_pool; long time_left = 0; /* rq_xprt should be clear on entry */ WARN_ON_ONCE(rqstp->rq_xprt); - /* Normally we will wait up to 5 seconds for any required - * cache information to be provided. - */ - rqstp->rq_chandle.thread_wait = 5*HZ; - - xprt = svc_xprt_dequeue(pool); - if (xprt) { - rqstp->rq_xprt = xprt; - - /* As there is a shortage of threads and this request - * had to be queued, don't allow the thread to wait so - * long for cache updates. - */ - rqstp->rq_chandle.thread_wait = 1*HZ; - clear_bit(SP_TASK_PENDING, &pool->sp_flags); - return xprt; - } + rqstp->rq_xprt = svc_xprt_dequeue(pool); + if (rqstp->rq_xprt) + goto out_found; /* * We have to be able to interrupt this wait * to bring down the daemons ... */ set_current_state(TASK_INTERRUPTIBLE); + smp_mb__before_atomic(); + clear_bit(SP_CONGESTED, &pool->sp_flags); clear_bit(RQ_BUSY, &rqstp->rq_flags); - smp_mb(); + smp_mb__after_atomic(); if (likely(rqst_should_sleep(rqstp))) time_left = schedule_timeout(timeout); @@ -761,13 +714,11 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) try_to_freeze(); - spin_lock_bh(&rqstp->rq_lock); set_bit(RQ_BUSY, &rqstp->rq_flags); - spin_unlock_bh(&rqstp->rq_lock); - - xprt = rqstp->rq_xprt; - if (xprt != NULL) - return xprt; + smp_mb__after_atomic(); + rqstp->rq_xprt = svc_xprt_dequeue(pool); + if (rqstp->rq_xprt) + goto out_found; if (!time_left) atomic_long_inc(&pool->sp_stats.threads_timedout); @@ -775,6 +726,15 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout) if (signalled() || kthread_should_stop()) return ERR_PTR(-EINTR); return ERR_PTR(-EAGAIN); +out_found: + /* Normally we will wait up to 5 seconds for any required + * cache information to be provided. + */ + if (!test_bit(SP_CONGESTED, &pool->sp_flags)) + rqstp->rq_chandle.thread_wait = 5*HZ; + else + rqstp->rq_chandle.thread_wait = 1*HZ; + return rqstp->rq_xprt; } static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt) diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index 992594b7cc6b..af7893501e40 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c @@ -133,6 +133,10 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, if (ret) goto out_err; + /* Bump page refcnt so Send completion doesn't release + * the rq_buffer before all retransmits are complete. 
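The rewritten enqueue path above always queues the transport first, then tries to claim at most one idle thread with test_and_set_bit(RQ_BUSY), and records SP_CONGESTED when every thread is already busy so the dequeue side can shorten its cache wait. A simplified sketch of that shape with placeholder names (not the sunrpc structures, and without the per-CPU pool selection):

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/sched.h>

#define WORKER_BUSY	0	/* bit in worker_sketch.flags */
#define POOL_CONGESTED	0	/* bit in pool_sketch.flags */

struct pool_sketch {
	spinlock_t		lock;
	struct list_head	ready;
	struct list_head	workers;	/* RCU-protected list */
	unsigned long		flags;
};

struct worker_sketch {
	struct list_head	all;
	unsigned long		flags;
	struct task_struct	*task;
};

static void enqueue_item(struct pool_sketch *pool, struct list_head *item)
{
	struct worker_sketch *w;
	bool woken = false;

	spin_lock_bh(&pool->lock);
	list_add_tail(item, &pool->ready);	/* always queue the work first */
	spin_unlock_bh(&pool->lock);

	rcu_read_lock();
	list_for_each_entry_rcu(w, &pool->workers, all) {
		if (test_and_set_bit(WORKER_BUSY, &w->flags))
			continue;		/* already busy, try the next one */
		wake_up_process(w->task);	/* claimed an idle worker */
		woken = true;
		break;
	}
	if (!woken)
		set_bit(POOL_CONGESTED, &pool->flags);	/* everyone was busy */
	rcu_read_unlock();
}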
+ */ + get_page(virt_to_page(rqst->rq_buffer)); ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0); if (ret) goto out_unmap; @@ -165,7 +169,6 @@ xprt_rdma_bc_allocate(struct rpc_task *task) return -EINVAL; } - /* svc_rdma_sendto releases this page */ page = alloc_page(RPCRDMA_DEF_GFP); if (!page) return -ENOMEM; @@ -184,6 +187,7 @@ xprt_rdma_bc_free(struct rpc_task *task) { struct rpc_rqst *rqst = task->tk_rqstp; + put_page(virt_to_page(rqst->rq_buffer)); kfree(rqst->rq_rbuffer); } diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 5caf8e722a11..46ec069150d5 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c @@ -290,6 +290,7 @@ static void qp_event_handler(struct ib_event *event, void *context) ib_event_msg(event->event), event->event, event->element.qp); set_bit(XPT_CLOSE, &xprt->xpt_flags); + svc_xprt_enqueue(xprt); break; } } @@ -322,8 +323,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags)) goto out; - svc_xprt_enqueue(&xprt->sc_xprt); - goto out; + goto out_enqueue; flushed: if (wc->status != IB_WC_WR_FLUSH_ERR) @@ -333,6 +333,8 @@ flushed: set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); svc_rdma_put_context(ctxt, 1); +out_enqueue: + svc_xprt_enqueue(&xprt->sc_xprt); out: svc_xprt_put(&xprt->sc_xprt); } @@ -358,6 +360,7 @@ void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc) if (unlikely(wc->status != IB_WC_SUCCESS)) { set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); + svc_xprt_enqueue(&xprt->sc_xprt); if (wc->status != IB_WC_WR_FLUSH_ERR) pr_err("svcrdma: Send: %s (%u/0x%x)\n", ib_wc_status_msg(wc->status), @@ -569,8 +572,10 @@ static int rdma_listen_handler(struct rdma_cm_id *cma_id, case RDMA_CM_EVENT_DEVICE_REMOVAL: dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n", xprt, cma_id); - if (xprt) + if (xprt) { set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); + svc_xprt_enqueue(&xprt->sc_xprt); + } break; default: diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index 391775e3575c..56573dc85709 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -797,9 +797,13 @@ static void vmci_transport_handle_detach(struct sock *sk) /* We should not be sending anymore since the peer won't be * there to receive, but we can still receive if there is data - * left in our consume queue. + * left in our consume queue. If the local endpoint is a host, + * we can't call vsock_stream_has_data, since that may block, + * but a host endpoint can't read data once the VM has + * detached, so there is no available data in that case. 
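The backchannel change above bumps the page refcount before posting the Send so a completion cannot free the buffer while retransmits may still need it, with the matching put when the request is finally freed. A generic sketch of that pairing around an asynchronous submit (this is the idea, not the svc_rdma code; the failure-path put is an assumption of the sketch):

#include <linux/mm.h>

static int submit_with_extra_ref(void *buffer, int (*post_send)(void *buffer))
{
	int ret;

	get_page(virt_to_page(buffer));	/* keep the page across completion */
	ret = post_send(buffer);
	if (ret)
		put_page(virt_to_page(buffer));	/* undo if nothing was posted */
	return ret;
}

/* Paired with the extra get above, called when the request is freed. */
static void release_buffer(void *buffer)
{
	put_page(virt_to_page(buffer));
}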
*/ - if (vsock_stream_has_data(vsk) <= 0) { + if (vsk->local_addr.svm_cid == VMADDR_CID_HOST || + vsock_stream_has_data(vsk) <= 0) { sk->sk_state = TCP_CLOSE; if (sk->sk_state == TCP_SYN_SENT) { @@ -2144,7 +2148,7 @@ module_exit(vmci_transport_exit); MODULE_AUTHOR("VMware, Inc."); MODULE_DESCRIPTION("VMCI transport for Virtual Sockets"); -MODULE_VERSION("1.0.4.0-k"); +MODULE_VERSION("1.0.5.0-k"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("vmware_vsock"); MODULE_ALIAS_NETPROTO(PF_VSOCK); diff --git a/scripts/Makefile.build b/scripts/Makefile.build index f171225383cc..65ea1e6aaaf6 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -100,6 +100,10 @@ ifneq ($(KBUILD_CHECKSRC),0) endif endif +ifneq ($(KBUILD_ENABLE_EXTRA_GCC_CHECKS),) + cmd_checkdoc = $(srctree)/scripts/kernel-doc -none $< ; +endif + # Do section mismatch analysis for each module/built-in.o ifdef CONFIG_DEBUG_SECTION_MISMATCH cmd_secanalysis = ; scripts/mod/modpost $@ @@ -283,6 +287,7 @@ define rule_cc_o_c $(call echo-cmd,checksrc) $(cmd_checksrc) \ $(call cmd_and_fixdep,cc_o_c) \ $(cmd_modversions_c) \ + $(cmd_checkdoc) \ $(call echo-cmd,objtool) $(cmd_objtool) \ $(call echo-cmd,record_mcount) $(cmd_record_mcount) endef diff --git a/scripts/kernel-doc b/scripts/kernel-doc index 7bd52b8f63d4..bd29a92b4b48 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -58,6 +58,7 @@ Output format selection (mutually exclusive): -man Output troff manual page format. This is the default. -rst Output reStructuredText format. -text Output plain text format. + -none Do not output documentation, only warnings. Output selection (mutually exclusive): -export Only output documentation for symbols that have been @@ -532,6 +533,8 @@ while ($ARGV[0] =~ m/^-(.*)/) { $output_mode = "gnome"; @highlights = @highlights_gnome; $blankline = $blankline_gnome; + } elsif ($cmd eq "-none") { + $output_mode = "none"; } elsif ($cmd eq "-module") { # not needed for XML, inherits from calling document $modulename = shift @ARGV; } elsif ($cmd eq "-function") { # to only output specific functions @@ -2117,6 +2120,24 @@ sub output_blockhead_list(%) { } } + +## none mode output functions + +sub output_function_none(%) { +} + +sub output_enum_none(%) { +} + +sub output_typedef_none(%) { +} + +sub output_struct_none(%) { +} + +sub output_blockhead_none(%) { +} + ## # generic output function for all types (function, struct/union, typedef, enum); # calls the generated, variable output_ function name based on @@ -3143,7 +3164,9 @@ sub process_file($) { } } if ($initial_section_counter == $section_counter) { - print STDERR "${file}:1: warning: no structured comments found\n"; + if ($output_mode ne "none") { + print STDERR "${file}:1: warning: no structured comments found\n"; + } if (($output_selection == OUTPUT_INCLUDE) && ($show_not_found == 1)) { print STDERR " Was looking for '$_'.\n" for keys %function_table; } diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index caaf51dda648..8542e9a55e1b 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c @@ -533,7 +533,7 @@ static ssize_t ns_revision_read(struct file *file, char __user *buf, long last_read; int avail; - mutex_lock(&rev->ns->lock); + mutex_lock_nested(&rev->ns->lock, rev->ns->level); last_read = rev->last_read; if (last_read == rev->ns->revision) { mutex_unlock(&rev->ns->lock); @@ -543,7 +543,7 @@ static ssize_t ns_revision_read(struct file *file, char __user *buf, last_read != READ_ONCE(rev->ns->revision))) return -ERESTARTSYS; - 
mutex_lock(&rev->ns->lock); + mutex_lock_nested(&rev->ns->lock, rev->ns->level); } avail = sprintf(buffer, "%ld\n", rev->ns->revision); @@ -577,7 +577,7 @@ static unsigned int ns_revision_poll(struct file *file, poll_table *pt) unsigned int mask = 0; if (rev) { - mutex_lock(&rev->ns->lock); + mutex_lock_nested(&rev->ns->lock, rev->ns->level); poll_wait(file, &rev->ns->wait, pt); if (rev->last_read < rev->ns->revision) mask |= POLLIN | POLLRDNORM; @@ -1643,7 +1643,7 @@ static int ns_mkdir_op(struct inode *dir, struct dentry *dentry, umode_t mode) */ inode_unlock(dir); error = simple_pin_fs(&aafs_ops, &aafs_mnt, &aafs_count); - mutex_lock(&parent->lock); + mutex_lock_nested(&parent->lock, parent->level); inode_lock_nested(dir, I_MUTEX_PARENT); if (error) goto out; @@ -1692,7 +1692,7 @@ static int ns_rmdir_op(struct inode *dir, struct dentry *dentry) inode_unlock(dir); inode_unlock(dentry->d_inode); - mutex_lock(&parent->lock); + mutex_lock_nested(&parent->lock, parent->level); ns = aa_get_ns(__aa_findn_ns(&parent->sub_ns, dentry->d_name.name, dentry->d_name.len)); if (!ns) { @@ -1747,7 +1747,7 @@ void __aafs_ns_rmdir(struct aa_ns *ns) __aafs_profile_rmdir(child); list_for_each_entry(sub, &ns->sub_ns, base.list) { - mutex_lock(&sub->lock); + mutex_lock_nested(&sub->lock, sub->level); __aafs_ns_rmdir(sub); mutex_unlock(&sub->lock); } @@ -1877,7 +1877,7 @@ int __aafs_ns_mkdir(struct aa_ns *ns, struct dentry *parent, const char *name, /* subnamespaces */ list_for_each_entry(sub, &ns->sub_ns, base.list) { - mutex_lock(&sub->lock); + mutex_lock_nested(&sub->lock, sub->level); error = __aafs_ns_mkdir(sub, ns_subns_dir(ns), NULL, NULL); mutex_unlock(&sub->lock); if (error) @@ -1921,7 +1921,7 @@ static struct aa_ns *__next_ns(struct aa_ns *root, struct aa_ns *ns) /* is next namespace a child */ if (!list_empty(&ns->sub_ns)) { next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list); - mutex_lock(&next->lock); + mutex_lock_nested(&next->lock, next->level); return next; } @@ -1931,7 +1931,7 @@ static struct aa_ns *__next_ns(struct aa_ns *root, struct aa_ns *ns) mutex_unlock(&ns->lock); next = list_next_entry(ns, base.list); if (!list_entry_is_head(next, &parent->sub_ns, base.list)) { - mutex_lock(&next->lock); + mutex_lock_nested(&next->lock, next->level); return next; } ns = parent; @@ -2039,7 +2039,7 @@ static void *p_start(struct seq_file *f, loff_t *pos) f->private = root; /* find the first profile */ - mutex_lock(&root->lock); + mutex_lock_nested(&root->lock, root->level); profile = __first_profile(root, root); /* skip to position */ @@ -2491,7 +2491,7 @@ static int __init aa_create_aafs(void) ns_subrevision(root_ns) = dent; /* policy tree referenced by magic policy symlink */ - mutex_lock(&root_ns->lock); + mutex_lock_nested(&root_ns->lock, root_ns->level); error = __aafs_ns_mkdir(root_ns, aafs_mnt->mnt_root, ".policy", aafs_mnt->mnt_root); mutex_unlock(&root_ns->lock); diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index dd754b7850a8..04ba9d0718ea 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -305,6 +305,7 @@ static int change_profile_perms(struct aa_profile *profile, * __attach_match_ - find an attachment match * @name - to match against (NOT NULL) * @head - profile list to walk (NOT NULL) + * @info - info message if there was an error (NOT NULL) * * Do a linear search on the profiles in the list. 
There is a matching * preference where an exact match is preferred over a name which uses @@ -316,28 +317,46 @@ static int change_profile_perms(struct aa_profile *profile, * Returns: profile or NULL if no match found */ static struct aa_profile *__attach_match(const char *name, - struct list_head *head) + struct list_head *head, + const char **info) { int len = 0; + bool conflict = false; struct aa_profile *profile, *candidate = NULL; list_for_each_entry_rcu(profile, head, base.list) { - if (profile->label.flags & FLAG_NULL) + if (profile->label.flags & FLAG_NULL && + &profile->label == ns_unconfined(profile->ns)) continue; - if (profile->xmatch && profile->xmatch_len > len) { - unsigned int state = aa_dfa_match(profile->xmatch, - DFA_START, name); - u32 perm = dfa_user_allow(profile->xmatch, state); - /* any accepting state means a valid match. */ - if (perm & MAY_EXEC) { - candidate = profile; - len = profile->xmatch_len; + + if (profile->xmatch) { + if (profile->xmatch_len == len) { + conflict = true; + continue; + } else if (profile->xmatch_len > len) { + unsigned int state; + u32 perm; + + state = aa_dfa_match(profile->xmatch, + DFA_START, name); + perm = dfa_user_allow(profile->xmatch, state); + /* any accepting state means a valid match. */ + if (perm & MAY_EXEC) { + candidate = profile; + len = profile->xmatch_len; + conflict = false; + } } } else if (!strcmp(profile->base.name, name)) /* exact non-re match, no more searching required */ return profile; } + if (conflict) { + *info = "conflicting profile attachments"; + return NULL; + } + return candidate; } @@ -346,16 +365,17 @@ static struct aa_profile *__attach_match(const char *name, * @ns: the current namespace (NOT NULL) * @list: list to search (NOT NULL) * @name: the executable name to match against (NOT NULL) + * @info: info message if there was an error * * Returns: label or NULL if no match found */ static struct aa_label *find_attach(struct aa_ns *ns, struct list_head *list, - const char *name) + const char *name, const char **info) { struct aa_profile *profile; rcu_read_lock(); - profile = aa_get_profile(__attach_match(name, list)); + profile = aa_get_profile(__attach_match(name, list, info)); rcu_read_unlock(); return profile ? 
&profile->label : NULL; @@ -448,11 +468,11 @@ static struct aa_label *x_to_label(struct aa_profile *profile, if (xindex & AA_X_CHILD) /* released by caller */ new = find_attach(ns, &profile->base.profiles, - name); + name, info); else /* released by caller */ new = find_attach(ns, &ns->base.profiles, - name); + name, info); *lookupname = name; break; } @@ -516,7 +536,7 @@ static struct aa_label *profile_transition(struct aa_profile *profile, if (profile_unconfined(profile)) { new = find_attach(profile->ns, &profile->ns->base.profiles, - name); + name, &info); if (new) { AA_DEBUG("unconfined attached to new label"); return new; @@ -541,9 +561,21 @@ static struct aa_label *profile_transition(struct aa_profile *profile, } } else if (COMPLAIN_MODE(profile)) { /* no exec permission - learning mode */ - struct aa_profile *new_profile = aa_new_null_profile(profile, - false, name, - GFP_ATOMIC); + struct aa_profile *new_profile = NULL; + char *n = kstrdup(name, GFP_ATOMIC); + + if (n) { + /* name is ptr into buffer */ + long pos = name - buffer; + /* break per cpu buffer hold */ + put_buffers(buffer); + new_profile = aa_new_null_profile(profile, false, n, + GFP_KERNEL); + get_buffers(buffer); + name = buffer + pos; + strcpy((char *)name, n); + kfree(n); + } if (!new_profile) { error = -ENOMEM; info = "could not create null profile"; diff --git a/security/apparmor/file.c b/security/apparmor/file.c index 3382518b87fa..e79bf44396a3 100644 --- a/security/apparmor/file.c +++ b/security/apparmor/file.c @@ -226,18 +226,12 @@ static u32 map_old_perms(u32 old) struct aa_perms aa_compute_fperms(struct aa_dfa *dfa, unsigned int state, struct path_cond *cond) { - struct aa_perms perms; - /* FIXME: change over to new dfa format * currently file perms are encoded in the dfa, new format * splits the permissions from the dfa. 
This mapping can be * done at profile load */ - perms.deny = 0; - perms.kill = perms.stop = 0; - perms.complain = perms.cond = 0; - perms.hide = 0; - perms.prompt = 0; + struct aa_perms perms = { }; if (uid_eq(current_fsuid(), cond->uid)) { perms.allow = map_old_perms(dfa_user_allow(dfa, state)); diff --git a/security/apparmor/label.c b/security/apparmor/label.c index ad28e03a6f30..324fe5c60f87 100644 --- a/security/apparmor/label.c +++ b/security/apparmor/label.c @@ -2115,7 +2115,7 @@ void __aa_labelset_update_subtree(struct aa_ns *ns) __labelset_update(ns); list_for_each_entry(child, &ns->sub_ns, base.list) { - mutex_lock(&child->lock); + mutex_lock_nested(&child->lock, child->level); __aa_labelset_update_subtree(child); mutex_unlock(&child->lock); } diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c index 08ca26bcca77..4d5e98e49d5e 100644 --- a/security/apparmor/lib.c +++ b/security/apparmor/lib.c @@ -317,14 +317,11 @@ static u32 map_other(u32 x) void aa_compute_perms(struct aa_dfa *dfa, unsigned int state, struct aa_perms *perms) { - perms->deny = 0; - perms->kill = perms->stop = 0; - perms->complain = perms->cond = 0; - perms->hide = 0; - perms->prompt = 0; - perms->allow = dfa_user_allow(dfa, state); - perms->audit = dfa_user_audit(dfa, state); - perms->quiet = dfa_user_quiet(dfa, state); + *perms = (struct aa_perms) { + .allow = dfa_user_allow(dfa, state), + .audit = dfa_user_audit(dfa, state), + .quiet = dfa_user_quiet(dfa, state), + }; /* for v5 perm mapping in the policydb, the other set is used * to extend the general perm set @@ -426,7 +423,6 @@ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms, void (*cb)(struct audit_buffer *, void *)) { int type, error; - bool stop = false; u32 denied = request & (~perms->allow | perms->deny); if (likely(!denied)) { @@ -447,8 +443,6 @@ int aa_check_perms(struct aa_profile *profile, struct aa_perms *perms, else type = AUDIT_APPARMOR_DENIED; - if (denied & perms->stop) - stop = true; if (denied == (denied & perms->hide)) error = -ENOENT; diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 17893fde4487..9a65eeaf7dfa 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -846,7 +846,7 @@ module_param_call(audit, param_set_audit, param_get_audit, /* Determines if audit header is included in audited messages. This * provides more context if the audit daemon is not running */ -bool aa_g_audit_header = 1; +bool aa_g_audit_header = true; module_param_named(audit_header, aa_g_audit_header, aabool, S_IRUSR | S_IWUSR); @@ -871,7 +871,7 @@ module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR); * DEPRECATED: read only as strict checking of load is always done now * that none root users (user namespaces) can load policy. 
*/ -bool aa_g_paranoid_load = 1; +bool aa_g_paranoid_load = true; module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUGO); /* Boot time disable flag */ @@ -1119,7 +1119,7 @@ static int __init apparmor_init(void) if (!apparmor_enabled || !security_module_enable("apparmor")) { aa_info_message("AppArmor disabled by boot time parameter"); - apparmor_enabled = 0; + apparmor_enabled = false; return 0; } @@ -1175,7 +1175,7 @@ alloc_out: aa_destroy_aafs(); aa_teardown_dfa_engine(); - apparmor_enabled = 0; + apparmor_enabled = false; return error; } diff --git a/security/apparmor/mount.c b/security/apparmor/mount.c index 82a64b58041d..ed9b4d0f9f7e 100644 --- a/security/apparmor/mount.c +++ b/security/apparmor/mount.c @@ -216,13 +216,12 @@ static unsigned int match_mnt_flags(struct aa_dfa *dfa, unsigned int state, static struct aa_perms compute_mnt_perms(struct aa_dfa *dfa, unsigned int state) { - struct aa_perms perms; - - perms.kill = 0; - perms.allow = dfa_user_allow(dfa, state); - perms.audit = dfa_user_audit(dfa, state); - perms.quiet = dfa_user_quiet(dfa, state); - perms.xindex = dfa_user_xindex(dfa, state); + struct aa_perms perms = { + .allow = dfa_user_allow(dfa, state), + .audit = dfa_user_audit(dfa, state), + .quiet = dfa_user_quiet(dfa, state), + .xindex = dfa_user_xindex(dfa, state), + }; return perms; } diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index 4243b0c3f0e4..b0b58848c248 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c @@ -502,7 +502,7 @@ struct aa_profile *aa_new_null_profile(struct aa_profile *parent, bool hat, { struct aa_profile *p, *profile; const char *bname; - char *name; + char *name = NULL; AA_BUG(!parent); @@ -545,7 +545,7 @@ name: profile->file.dfa = aa_get_dfa(nulldfa); profile->policy.dfa = aa_get_dfa(nulldfa); - mutex_lock(&profile->ns->lock); + mutex_lock_nested(&profile->ns->lock, profile->ns->level); p = __find_child(&parent->base.profiles, bname); if (p) { aa_free_profile(profile); @@ -562,6 +562,7 @@ out: return profile; fail: + kfree(name); aa_free_profile(profile); return NULL; } @@ -905,7 +906,7 @@ ssize_t aa_replace_profiles(struct aa_ns *policy_ns, struct aa_label *label, } else ns = aa_get_ns(policy_ns ? 
policy_ns : labels_ns(label)); - mutex_lock(&ns->lock); + mutex_lock_nested(&ns->lock, ns->level); /* check for duplicate rawdata blobs: space and file dedup */ list_for_each_entry(rawdata_ent, &ns->rawdata_list, list) { if (aa_rawdata_eq(rawdata_ent, udata)) { @@ -1116,13 +1117,13 @@ ssize_t aa_remove_profiles(struct aa_ns *policy_ns, struct aa_label *subj, if (!name) { /* remove namespace - can only happen if fqname[0] == ':' */ - mutex_lock(&ns->parent->lock); + mutex_lock_nested(&ns->parent->lock, ns->level); __aa_remove_ns(ns); __aa_bump_ns_revision(ns); mutex_unlock(&ns->parent->lock); } else { /* remove profile */ - mutex_lock(&ns->lock); + mutex_lock_nested(&ns->lock, ns->level); profile = aa_get_profile(__lookup_profile(&ns->base, name)); if (!profile) { error = -ENOENT; diff --git a/security/apparmor/policy_ns.c b/security/apparmor/policy_ns.c index 62a3589c62ab..b1e629cba70b 100644 --- a/security/apparmor/policy_ns.c +++ b/security/apparmor/policy_ns.c @@ -256,7 +256,8 @@ static struct aa_ns *__aa_create_ns(struct aa_ns *parent, const char *name, ns = alloc_ns(parent->base.hname, name); if (!ns) return NULL; - mutex_lock(&ns->lock); + ns->level = parent->level + 1; + mutex_lock_nested(&ns->lock, ns->level); error = __aafs_ns_mkdir(ns, ns_subns_dir(parent), name, dir); if (error) { AA_ERROR("Failed to create interface for ns %s\n", @@ -266,7 +267,6 @@ static struct aa_ns *__aa_create_ns(struct aa_ns *parent, const char *name, return ERR_PTR(error); } ns->parent = aa_get_ns(parent); - ns->level = parent->level + 1; list_add_rcu(&ns->base.list, &parent->sub_ns); /* add list ref */ aa_get_ns(ns); @@ -313,7 +313,7 @@ struct aa_ns *aa_prepare_ns(struct aa_ns *parent, const char *name) { struct aa_ns *ns; - mutex_lock(&parent->lock); + mutex_lock_nested(&parent->lock, parent->level); /* try and find the specified ns and if it doesn't exist create it */ /* released by caller */ ns = aa_get_ns(__aa_find_ns(&parent->sub_ns, name)); @@ -336,7 +336,7 @@ static void destroy_ns(struct aa_ns *ns) if (!ns) return; - mutex_lock(&ns->lock); + mutex_lock_nested(&ns->lock, ns->level); /* release all profiles in this namespace */ __aa_profile_list_release(&ns->base.profiles); diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index 4ede87c30f8b..59a1a25b7d43 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c @@ -157,7 +157,7 @@ static void do_loaddata_free(struct work_struct *work) struct aa_ns *ns = aa_get_ns(d->ns); if (ns) { - mutex_lock(&ns->lock); + mutex_lock_nested(&ns->lock, ns->level); __aa_fs_remove_rawdata(d); mutex_unlock(&ns->lock); aa_put_ns(ns); diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c index d8bc842594ed..cf4d234febe9 100644 --- a/security/apparmor/resource.c +++ b/security/apparmor/resource.c @@ -47,7 +47,7 @@ static void audit_cb(struct audit_buffer *ab, void *va) /** * audit_resource - audit setting resource limit * @profile: profile being enforced (NOT NULL) - * @resoure: rlimit being auditing + * @resource: rlimit being auditing * @value: value being set * @error: error value * @@ -128,7 +128,7 @@ int aa_task_setrlimit(struct aa_label *label, struct task_struct *task, error = fn_for_each(label, profile, audit_resource(profile, resource, new_rlim->rlim_max, peer, - "cap_sys_resoure", -EACCES)); + "cap_sys_resource", -EACCES)); else error = fn_for_each_confined(label, profile, profile_setrlimit(profile, resource, new_rlim)); diff --git a/security/integrity/ima/ima_appraise.c 
b/security/integrity/ima/ima_appraise.c index ec7dfa02c051..65fbcf3c32c7 100644 --- a/security/integrity/ima/ima_appraise.c +++ b/security/integrity/ima/ima_appraise.c @@ -320,6 +320,9 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file) if (iint->flags & IMA_DIGSIG) return; + if (iint->ima_file_status != INTEGRITY_PASS) + return; + rc = ima_collect_measurement(iint, file, NULL, 0, ima_hash_algo); if (rc < 0) return; diff --git a/security/keys/gc.c b/security/keys/gc.c index afb3a9175d76..6713fee893fb 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c @@ -32,7 +32,7 @@ DECLARE_WORK(key_gc_work, key_garbage_collector); static void key_gc_timer_func(unsigned long); static DEFINE_TIMER(key_gc_timer, key_gc_timer_func); -static time_t key_gc_next_run = LONG_MAX; +static time64_t key_gc_next_run = TIME64_MAX; static struct key_type *key_gc_dead_keytype; static unsigned long key_gc_flags; @@ -53,12 +53,12 @@ struct key_type key_type_dead = { * Schedule a garbage collection run. * - time precision isn't particularly important */ -void key_schedule_gc(time_t gc_at) +void key_schedule_gc(time64_t gc_at) { unsigned long expires; - time_t now = current_kernel_time().tv_sec; + time64_t now = ktime_get_real_seconds(); - kenter("%ld", gc_at - now); + kenter("%lld", gc_at - now); if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) { kdebug("IMMEDIATE"); @@ -87,7 +87,7 @@ void key_schedule_gc_links(void) static void key_gc_timer_func(unsigned long data) { kenter(""); - key_gc_next_run = LONG_MAX; + key_gc_next_run = TIME64_MAX; key_schedule_gc_links(); } @@ -184,11 +184,11 @@ static void key_garbage_collector(struct work_struct *work) struct rb_node *cursor; struct key *key; - time_t new_timer, limit; + time64_t new_timer, limit; kenter("[%lx,%x]", key_gc_flags, gc_state); - limit = current_kernel_time().tv_sec; + limit = ktime_get_real_seconds(); if (limit > key_gc_delay) limit -= key_gc_delay; else @@ -204,7 +204,7 @@ static void key_garbage_collector(struct work_struct *work) gc_state |= KEY_GC_REAPING_DEAD_1; kdebug("new pass %x", gc_state); - new_timer = LONG_MAX; + new_timer = TIME64_MAX; /* As only this function is permitted to remove things from the key * serial tree, if cursor is non-NULL then it will always point to a @@ -235,7 +235,7 @@ continue_scanning: if (gc_state & KEY_GC_SET_TIMER) { if (key->expiry > limit && key->expiry < new_timer) { - kdebug("will expire %x in %ld", + kdebug("will expire %x in %lld", key_serial(key), key->expiry - limit); new_timer = key->expiry; } @@ -276,7 +276,7 @@ maybe_resched: */ kdebug("pass complete"); - if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) { + if (gc_state & KEY_GC_SET_TIMER && new_timer != (time64_t)TIME64_MAX) { new_timer += key_gc_delay; key_schedule_gc(new_timer); } diff --git a/security/keys/internal.h b/security/keys/internal.h index 503adbae7b0d..9f8208dc0e55 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -130,7 +130,7 @@ struct keyring_search_context { int skipped_ret; bool possessed; key_ref_t result; - struct timespec now; + time64_t now; }; extern bool key_default_cmp(const struct key *key, @@ -169,10 +169,10 @@ extern void key_change_session_keyring(struct callback_head *twork); extern struct work_struct key_gc_work; extern unsigned key_gc_delay; -extern void keyring_gc(struct key *keyring, time_t limit); +extern void keyring_gc(struct key *keyring, time64_t limit); extern void keyring_restriction_gc(struct key *keyring, struct key_type *dead_type); 
-extern void key_schedule_gc(time_t gc_at); +extern void key_schedule_gc(time64_t gc_at); extern void key_schedule_gc_links(void); extern void key_gc_keytype(struct key_type *ktype); @@ -211,7 +211,7 @@ extern struct key *key_get_instantiation_authkey(key_serial_t target_id); /* * Determine whether a key is dead. */ -static inline bool key_is_dead(const struct key *key, time_t limit) +static inline bool key_is_dead(const struct key *key, time64_t limit) { return key->flags & ((1 << KEY_FLAG_DEAD) | diff --git a/security/keys/key.c b/security/keys/key.c index 83bf4b4afd49..66049183ad89 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -460,7 +460,7 @@ static int __key_instantiate_and_link(struct key *key, if (authkey) key_revoke(authkey); - if (prep->expiry != TIME_T_MAX) { + if (prep->expiry != TIME64_MAX) { key->expiry = prep->expiry; key_schedule_gc(prep->expiry + key_gc_delay); } @@ -506,7 +506,7 @@ int key_instantiate_and_link(struct key *key, prep.data = data; prep.datalen = datalen; prep.quotalen = key->type->def_datalen; - prep.expiry = TIME_T_MAX; + prep.expiry = TIME64_MAX; if (key->type->preparse) { ret = key->type->preparse(&prep); if (ret < 0) @@ -570,7 +570,6 @@ int key_reject_and_link(struct key *key, struct key *authkey) { struct assoc_array_edit *edit; - struct timespec now; int ret, awaken, link_ret = 0; key_check(key); @@ -593,8 +592,7 @@ int key_reject_and_link(struct key *key, /* mark the key as being negatively instantiated */ atomic_inc(&key->user->nikeys); mark_key_instantiated(key, -error); - now = current_kernel_time(); - key->expiry = now.tv_sec + timeout; + key->expiry = ktime_get_real_seconds() + timeout; key_schedule_gc(key->expiry + key_gc_delay); if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) @@ -710,16 +708,13 @@ found_kernel_type: void key_set_timeout(struct key *key, unsigned timeout) { - struct timespec now; - time_t expiry = 0; + time64_t expiry = 0; /* make the changes with the locks held to prevent races */ down_write(&key->sem); - if (timeout > 0) { - now = current_kernel_time(); - expiry = now.tv_sec + timeout; - } + if (timeout > 0) + expiry = ktime_get_real_seconds() + timeout; key->expiry = expiry; key_schedule_gc(key->expiry + key_gc_delay); @@ -850,7 +845,7 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref, prep.data = payload; prep.datalen = plen; prep.quotalen = index_key.type->def_datalen; - prep.expiry = TIME_T_MAX; + prep.expiry = TIME64_MAX; if (index_key.type->preparse) { ret = index_key.type->preparse(&prep); if (ret < 0) { @@ -994,7 +989,7 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen) prep.data = payload; prep.datalen = plen; prep.quotalen = key->type->def_datalen; - prep.expiry = TIME_T_MAX; + prep.expiry = TIME64_MAX; if (key->type->preparse) { ret = key->type->preparse(&prep); if (ret < 0) @@ -1028,8 +1023,7 @@ EXPORT_SYMBOL(key_update); */ void key_revoke(struct key *key) { - struct timespec now; - time_t time; + time64_t time; key_check(key); @@ -1044,8 +1038,7 @@ void key_revoke(struct key *key) key->type->revoke(key); /* set the death time to no more than the expiry time */ - now = current_kernel_time(); - time = now.tv_sec; + time = ktime_get_real_seconds(); if (key->revoked_at == 0 || key->revoked_at > time) { key->revoked_at = time; key_schedule_gc(key->revoked_at + key_gc_delay); diff --git a/security/keys/keyring.c b/security/keys/keyring.c index 36f842ec87f0..d0bccebbd3b5 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -565,7 +565,7 @@ static 
int keyring_search_iterator(const void *object, void *iterator_data) /* skip invalidated, revoked and expired keys */ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { - time_t expiry = READ_ONCE(key->expiry); + time64_t expiry = READ_ONCE(key->expiry); if (kflags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED))) { @@ -574,7 +574,7 @@ static int keyring_search_iterator(const void *object, void *iterator_data) goto skipped; } - if (expiry && ctx->now.tv_sec >= expiry) { + if (expiry && ctx->now >= expiry) { if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED)) ctx->result = ERR_PTR(-EKEYEXPIRED); kleave(" = %d [expire]", ctx->skipped_ret); @@ -834,10 +834,10 @@ found: key = key_ref_to_ptr(ctx->result); key_check(key); if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) { - key->last_used_at = ctx->now.tv_sec; - keyring->last_used_at = ctx->now.tv_sec; + key->last_used_at = ctx->now; + keyring->last_used_at = ctx->now; while (sp > 0) - stack[--sp].keyring->last_used_at = ctx->now.tv_sec; + stack[--sp].keyring->last_used_at = ctx->now; } kleave(" = true"); return true; @@ -898,7 +898,7 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref, } rcu_read_lock(); - ctx->now = current_kernel_time(); + ctx->now = ktime_get_real_seconds(); if (search_nested_keyrings(keyring, ctx)) __key_get(key_ref_to_ptr(ctx->result)); rcu_read_unlock(); @@ -1149,7 +1149,7 @@ struct key *find_keyring_by_name(const char *name, bool uid_keyring) * (ie. it has a zero usage count) */ if (!refcount_inc_not_zero(&keyring->usage)) continue; - keyring->last_used_at = current_kernel_time().tv_sec; + keyring->last_used_at = ktime_get_real_seconds(); goto out; } } @@ -1489,7 +1489,7 @@ static void keyring_revoke(struct key *keyring) static bool keyring_gc_select_iterator(void *object, void *iterator_data) { struct key *key = keyring_ptr_to_key(object); - time_t *limit = iterator_data; + time64_t *limit = iterator_data; if (key_is_dead(key, *limit)) return false; @@ -1500,7 +1500,7 @@ static bool keyring_gc_select_iterator(void *object, void *iterator_data) static int keyring_gc_check_iterator(const void *object, void *iterator_data) { const struct key *key = keyring_ptr_to_key(object); - time_t *limit = iterator_data; + time64_t *limit = iterator_data; key_check(key); return key_is_dead(key, *limit); @@ -1512,7 +1512,7 @@ static int keyring_gc_check_iterator(const void *object, void *iterator_data) * Not called with any locks held. The keyring's key struct will not be * deallocated under us as only our caller may deallocate it. 
*/ -void keyring_gc(struct key *keyring, time_t limit) +void keyring_gc(struct key *keyring, time64_t limit) { int result; diff --git a/security/keys/permission.c b/security/keys/permission.c index a72b4dd70c8a..f68dc04d614e 100644 --- a/security/keys/permission.c +++ b/security/keys/permission.c @@ -89,7 +89,7 @@ EXPORT_SYMBOL(key_task_permission); int key_validate(const struct key *key) { unsigned long flags = READ_ONCE(key->flags); - time_t expiry = READ_ONCE(key->expiry); + time64_t expiry = READ_ONCE(key->expiry); if (flags & (1 << KEY_FLAG_INVALIDATED)) return -ENOKEY; @@ -101,8 +101,7 @@ int key_validate(const struct key *key) /* check it hasn't expired */ if (expiry) { - struct timespec now = current_kernel_time(); - if (now.tv_sec >= expiry) + if (ktime_get_real_seconds() >= expiry) return -EKEYEXPIRED; } diff --git a/security/keys/proc.c b/security/keys/proc.c index 6d1fcbba1e09..fbc4af5c6c9f 100644 --- a/security/keys/proc.c +++ b/security/keys/proc.c @@ -178,13 +178,12 @@ static int proc_keys_show(struct seq_file *m, void *v) { struct rb_node *_p = v; struct key *key = rb_entry(_p, struct key, serial_node); - struct timespec now; - time_t expiry; - unsigned long timo; unsigned long flags; key_ref_t key_ref, skey_ref; + time64_t now, expiry; char xbuf[16]; short state; + u64 timo; int rc; struct keyring_search_context ctx = { @@ -215,7 +214,7 @@ static int proc_keys_show(struct seq_file *m, void *v) if (rc < 0) return 0; - now = current_kernel_time(); + now = ktime_get_real_seconds(); rcu_read_lock(); @@ -223,21 +222,21 @@ static int proc_keys_show(struct seq_file *m, void *v) expiry = READ_ONCE(key->expiry); if (expiry == 0) { memcpy(xbuf, "perm", 5); - } else if (now.tv_sec >= expiry) { + } else if (now >= expiry) { memcpy(xbuf, "expd", 5); } else { - timo = expiry - now.tv_sec; + timo = expiry - now; if (timo < 60) - sprintf(xbuf, "%lus", timo); + sprintf(xbuf, "%llus", timo); else if (timo < 60*60) - sprintf(xbuf, "%lum", timo / 60); + sprintf(xbuf, "%llum", div_u64(timo, 60)); else if (timo < 60*60*24) - sprintf(xbuf, "%luh", timo / (60*60)); + sprintf(xbuf, "%lluh", div_u64(timo, 60 * 60)); else if (timo < 60*60*24*7) - sprintf(xbuf, "%lud", timo / (60*60*24)); + sprintf(xbuf, "%llud", div_u64(timo, 60 * 60 * 24)); else - sprintf(xbuf, "%luw", timo / (60*60*24*7)); + sprintf(xbuf, "%lluw", div_u64(timo, 60 * 60 * 24 * 7)); } state = key_read_state(key); diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 740affd65ee9..d5b25e535d3a 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -738,7 +738,7 @@ try_again: if (ret < 0) goto invalid_key; - key->last_used_at = current_kernel_time().tv_sec; + key->last_used_at = ktime_get_real_seconds(); error: put_cred(ctx.cred); diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index a93a4235a332..10e7ef7a8804 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -248,8 +248,10 @@ static void update_audio_tstamp(struct snd_pcm_substream *substream, runtime->rate); *audio_tstamp = ns_to_timespec(audio_nsecs); } - runtime->status->audio_tstamp = *audio_tstamp; - runtime->status->tstamp = *curr_tstamp; + if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) { + runtime->status->audio_tstamp = *audio_tstamp; + runtime->status->tstamp = *curr_tstamp; + } /* * re-take a driver timestamp to let apps detect if the reference tstamp diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c index 59127b6ef39e..e00f7e399e46 100644 --- 
a/sound/core/timer_compat.c +++ b/sound/core/timer_compat.c @@ -66,11 +66,11 @@ static int snd_timer_user_info_compat(struct file *file, struct snd_timer *t; tu = file->private_data; - if (snd_BUG_ON(!tu->timeri)) - return -ENXIO; + if (!tu->timeri) + return -EBADFD; t = tu->timeri->timer; - if (snd_BUG_ON(!t)) - return -ENXIO; + if (!t) + return -EBADFD; memset(&info, 0, sizeof(info)); info.card = t->card ? t->card->number : -1; if (t->hw.flags & SNDRV_TIMER_HW_SLAVE) @@ -99,8 +99,8 @@ static int snd_timer_user_status_compat(struct file *file, struct snd_timer_status32 status; tu = file->private_data; - if (snd_BUG_ON(!tu->timeri)) - return -ENXIO; + if (!tu->timeri) + return -EBADFD; memset(&status, 0, sizeof(status)); status.tstamp.tv_sec = tu->tstamp.tv_sec; status.tstamp.tv_nsec = tu->tstamp.tv_nsec; diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c index e43af18d4383..8632301489fa 100644 --- a/sound/core/vmaster.c +++ b/sound/core/vmaster.c @@ -495,7 +495,9 @@ EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster); * Returns 0 if successful, or a negative error code. */ int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl, - int (*func)(struct snd_kcontrol *, void *), + int (*func)(struct snd_kcontrol *vslave, + struct snd_kcontrol *slave, + void *arg), void *arg) { struct link_master *master; @@ -507,7 +509,7 @@ int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl, if (err < 0) return err; list_for_each_entry(slave, &master->slaves, list) { - err = func(&slave->slave, arg); + err = func(slave->kctl, &slave->slave, arg); if (err < 0) return err; } diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c index 81acc20c2535..f21633cd9b38 100644 --- a/sound/hda/hdmi_chmap.c +++ b/sound/hda/hdmi_chmap.c @@ -746,7 +746,7 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol, memset(pcm_chmap, 0, sizeof(pcm_chmap)); chmap->ops.get_chmap(chmap->hdac, pcm_idx, pcm_chmap); - for (i = 0; i < sizeof(chmap); i++) + for (i = 0; i < ARRAY_SIZE(pcm_chmap); i++) ucontrol->value.integer.value[i] = pcm_chmap[i]; return 0; diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index c1f8e5479bf3..e018ecbf78a8 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -1823,7 +1823,9 @@ struct slave_init_arg { }; /* initialize the slave volume with 0dB via snd_ctl_apply_vmaster_slaves() */ -static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg) +static int init_slave_0dB(struct snd_kcontrol *slave, + struct snd_kcontrol *kctl, + void *_arg) { struct slave_init_arg *arg = _arg; int _tlv[4]; @@ -1860,7 +1862,7 @@ static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg) arg->step = step; val = -tlv[2] / step; if (val > 0) { - put_kctl_with_value(kctl, val); + put_kctl_with_value(slave, val); return val; } @@ -1868,7 +1870,9 @@ static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg) } /* unmute the slave via snd_ctl_apply_vmaster_slaves() */ -static int init_slave_unmute(struct snd_kcontrol *slave, void *_arg) +static int init_slave_unmute(struct snd_kcontrol *slave, + struct snd_kcontrol *kctl, + void *_arg) { return put_kctl_with_value(slave, 1); } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index f958d8d54d15..c71dcacea807 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2463,6 +2463,9 @@ static const struct pci_device_id azx_ids[] = { /* AMD Hudson */ { PCI_DEVICE(0x1022, 0x780d), .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, + /* AMD Raven */ + { PCI_DEVICE(0x1022, 
0x15e3), + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, /* ATI HDMI */ { PCI_DEVICE(0x1002, 0x0002), .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index db1a376e27c0..921a10eff43a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -341,6 +341,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) case 0x10ec0299: alc_update_coef_idx(codec, 0x10, 1<<9, 0); break; + case 0x10ec0275: + alc_update_coef_idx(codec, 0xe, 0, 1<<0); + break; case 0x10ec0293: alc_update_coef_idx(codec, 0xa, 1<<13, 0); break; @@ -6452,6 +6455,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { ALC225_STANDARD_PINS, {0x12, 0xb7a60130}, {0x1b, 0x90170110}), + SND_HDA_PIN_QUIRK(0x10ec0233, 0x8086, "Intel NUC Skull Canyon", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, + {0x1b, 0x01111010}, + {0x1e, 0x01451130}, + {0x21, 0x02211020}), SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, {0x12, 0x90a60140}, {0x14, 0x90170110}, @@ -6887,7 +6894,7 @@ static int patch_alc269(struct hda_codec *codec) case 0x10ec0703: spec->codec_variant = ALC269_TYPE_ALC700; spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ - alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */ + alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */ break; } diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig index bb8be10b8437..7b49d04e3c60 100644 --- a/sound/soc/intel/Kconfig +++ b/sound/soc/intel/Kconfig @@ -34,6 +34,11 @@ config SND_SOC_INTEL_SST_TOPLEVEL depends on X86 || COMPILE_TEST select SND_SOC_INTEL_MACH select SND_SOC_INTEL_COMMON + help + Intel ASoC Audio Drivers. If you have a Intel machine that + has audio controller with a DSP and I2S or DMIC port, then + enable this option by saying Y or M + If unsure select "N". 
config SND_SOC_INTEL_HASWELL tristate "Intel ASoC SST driver for Haswell/Broadwell" diff --git a/sound/usb/clock.c b/sound/usb/clock.c index 26dd5f20f149..eb3396ffba4c 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c @@ -43,7 +43,7 @@ static struct uac_clock_source_descriptor * while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra, ctrl_iface->extralen, cs, UAC2_CLOCK_SOURCE))) { - if (cs->bClockID == clock_id) + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) return cs; } @@ -59,8 +59,11 @@ static struct uac_clock_selector_descriptor * while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra, ctrl_iface->extralen, cs, UAC2_CLOCK_SELECTOR))) { - if (cs->bClockID == clock_id) + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) { + if (cs->bLength < 5 + cs->bNrInPins) + return NULL; return cs; + } } return NULL; @@ -75,7 +78,7 @@ static struct uac_clock_multiplier_descriptor * while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra, ctrl_iface->extralen, cs, UAC2_CLOCK_MULTIPLIER))) { - if (cs->bClockID == clock_id) + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) return cs; } diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 91bc8f18791e..0537c6322990 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1469,10 +1469,16 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, __u8 *bmaControls; if (state->mixer->protocol == UAC_VERSION_1) { + if (hdr->bLength < 7) { + usb_audio_err(state->chip, + "unit %u: invalid UAC_FEATURE_UNIT descriptor\n", + unitid); + return -EINVAL; + } csize = hdr->bControlSize; - if (!csize) { + if (csize <= 1) { usb_audio_dbg(state->chip, - "unit %u: invalid bControlSize == 0\n", + "unit %u: invalid bControlSize <= 1\n", unitid); return -EINVAL; } @@ -1486,6 +1492,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, } } else { struct uac2_feature_unit_descriptor *ftr = _ftr; + if (hdr->bLength < 6) { + usb_audio_err(state->chip, + "unit %u: invalid UAC_FEATURE_UNIT descriptor\n", + unitid); + return -EINVAL; + } csize = 4; channels = (hdr->bLength - 6) / 4 - 1; bmaControls = ftr->bmaControls; @@ -2086,7 +2098,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, const struct usbmix_name_map *map; char **namelist; - if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) { + if (desc->bLength < 5 || !desc->bNrInPins || + desc->bLength < 5 + desc->bNrInPins) { usb_audio_err(state->chip, "invalid SELECTOR UNIT descriptor %d\n", unitid); return -EINVAL; @@ -2330,9 +2343,14 @@ void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid) { struct usb_mixer_elem_list *list; - for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) + for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) { + struct usb_mixer_elem_info *info = + (struct usb_mixer_elem_info *)list; + /* invalidate cache, so the value is read from the device */ + info->cached = 0; snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &list->kctl->id); + } } static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer, diff --git a/tools/Makefile b/tools/Makefile index c03b4f69d5b7..be02c8b904db 100644 --- a/tools/Makefile +++ b/tools/Makefile @@ -30,6 +30,7 @@ help: @echo ' usb - USB testing tools' @echo ' virtio - vhost test module' @echo ' vm - misc vm tools' + @echo ' wmi - WMI interface examples' @echo ' x86_energy_perf_policy - Intel energy policy tool' @echo '' @echo 'You can do:' @@ -58,7 +59,7 @@ acpi: FORCE 
cpupower: FORCE $(call descend,power/$@) -cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds: FORCE +cgroup firewire hv guest spi usb virtio vm bpf iio gpio objtool leds wmi: FORCE $(call descend,$@) liblockdep: FORCE @@ -93,7 +94,7 @@ kvm_stat: FORCE all: acpi cgroup cpupower gpio hv firewire liblockdep \ perf selftests spi turbostat usb \ virtio vm bpf x86_energy_perf_policy \ - tmon freefall iio objtool kvm_stat + tmon freefall iio objtool kvm_stat wmi acpi_install: $(call descend,power/$(@:_install=),install) @@ -101,7 +102,7 @@ acpi_install: cpupower_install: $(call descend,power/$(@:_install=),install) -cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install: +cgroup_install firewire_install gpio_install hv_install iio_install perf_install spi_install usb_install virtio_install vm_install bpf_install objtool_install wmi_install: $(call descend,$(@:_install=),install) liblockdep_install: @@ -126,7 +127,8 @@ install: acpi_install cgroup_install cpupower_install gpio_install \ hv_install firewire_install iio_install liblockdep_install \ perf_install selftests_install turbostat_install usb_install \ virtio_install vm_install bpf_install x86_energy_perf_policy_install \ - tmon_install freefall_install objtool_install kvm_stat_install + tmon_install freefall_install objtool_install kvm_stat_install \ + wmi_install acpi_clean: $(call descend,power/acpi,clean) @@ -134,7 +136,7 @@ acpi_clean: cpupower_clean: $(call descend,power/cpupower,clean) -cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean: +cgroup_clean hv_clean firewire_clean spi_clean usb_clean virtio_clean vm_clean wmi_clean bpf_clean iio_clean gpio_clean objtool_clean leds_clean: $(call descend,$(@:_clean=),clean) liblockdep_clean: @@ -172,6 +174,6 @@ clean: acpi_clean cgroup_clean cpupower_clean hv_clean firewire_clean \ perf_clean selftests_clean turbostat_clean spi_clean usb_clean virtio_clean \ vm_clean bpf_clean iio_clean x86_energy_perf_policy_clean tmon_clean \ freefall_clean build_clean libbpf_clean libsubcmd_clean liblockdep_clean \ - gpio_clean objtool_clean leds_clean + gpio_clean objtool_clean leds_clean wmi_clean .PHONY: FORCE diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index f45c44ef9bec..ad619b96c276 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -41,7 +41,6 @@ #include <string.h> #include <time.h> #include <unistd.h> -#include <net/if.h> #include <sys/types.h> #include <sys/stat.h> @@ -230,21 +229,6 @@ static void print_prog_json(struct bpf_prog_info *info, int fd) info->tag[0], info->tag[1], info->tag[2], info->tag[3], info->tag[4], info->tag[5], info->tag[6], info->tag[7]); - if (info->status & BPF_PROG_STATUS_DEV_BOUND) { - jsonw_name(json_wtr, "dev"); - if (info->ifindex) { - char name[IF_NAMESIZE]; - - if (!if_indextoname(info->ifindex, name)) - jsonw_printf(json_wtr, "\"ifindex:%d\"", - info->ifindex); - else - jsonw_printf(json_wtr, "\"%s\"", name); - } else { - jsonw_printf(json_wtr, "\"unknown\""); - } - } - if (info->load_time) { char buf[32]; @@ -302,21 +286,6 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd) printf("tag "); fprint_hex(stdout, info->tag, BPF_TAG_SIZE, ""); - printf(" "); - - if (info->status & BPF_PROG_STATUS_DEV_BOUND) { - printf("dev "); - if (info->ifindex) { - char name[IF_NAMESIZE]; - - if 
(!if_indextoname(info->ifindex, name)) - printf("ifindex:%d ", info->ifindex); - else - printf("%s ", name); - } else { - printf("unknown "); - } - } printf("\n"); if (info->load_time) { diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index e880ae6434ee..4c223ab30293 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -262,7 +262,7 @@ union bpf_attr { __u32 kern_version; /* checked when prog_type=kprobe */ __u32 prog_flags; char prog_name[BPF_OBJ_NAME_LEN]; - __u32 prog_target_ifindex; /* ifindex of netdev to prep for */ + __u32 prog_ifindex; /* ifindex of netdev to prep for */ }; struct { /* anonymous struct used by BPF_OBJ_* commands */ @@ -897,10 +897,6 @@ enum sk_action { #define BPF_TAG_SIZE 8 -enum bpf_prog_status { - BPF_PROG_STATUS_DEV_BOUND = (1 << 0), -}; - struct bpf_prog_info { __u32 type; __u32 id; @@ -914,8 +910,6 @@ struct bpf_prog_info { __u32 nr_map_ids; __aligned_u64 map_ids; char name[BPF_OBJ_NAME_LEN]; - __u32 ifindex; - __u32 status; } __attribute__((aligned(8))); struct bpf_map_info { diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index bf092b83e453..3c64f30cf63c 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -4377,11 +4377,10 @@ static struct bpf_test tests[] = { BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_write_user), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_EMIT_CALL(BPF_FUNC_trace_printk), BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, @@ -4481,14 +4480,12 @@ static struct bpf_test tests[] = { BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct test_val, foo)), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_write_user), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_EMIT_CALL(BPF_FUNC_trace_printk), BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, @@ -4618,18 +4615,16 @@ static struct bpf_test tests[] = { BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), BPF_MOV64_IMM(BPF_REG_3, 0), BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_write_user), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_EMIT_CALL(BPF_FUNC_trace_printk), BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr = "R2 min value is outside of the array range", + .errstr = "R1 min value is outside of the array range", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, @@ -4760,20 +4755,18 @@ static struct bpf_test tests[] = { BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), BPF_LD_MAP_FD(BPF_REG_1, 0), BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), BPF_MOV64_REG(BPF_REG_1, 
BPF_REG_0), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), BPF_JMP_IMM(BPF_JGT, BPF_REG_3, - offsetof(struct test_val, foo), 4), + offsetof(struct test_val, foo), 3), BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), - BPF_MOV64_IMM(BPF_REG_1, 0), - BPF_MOV64_IMM(BPF_REG_3, 0), - BPF_EMIT_CALL(BPF_FUNC_probe_write_user), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_EMIT_CALL(BPF_FUNC_trace_printk), BPF_EXIT_INSN(), }, .fixup_map2 = { 3 }, - .errstr = "R2 min value is outside of the array range", + .errstr = "R1 min value is outside of the array range", .result = REJECT, .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, @@ -5638,7 +5631,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, { - "helper access to variable memory: size = 0 allowed on NULL", + "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", .insns = { BPF_MOV64_IMM(BPF_REG_1, 0), BPF_MOV64_IMM(BPF_REG_2, 0), @@ -5652,7 +5645,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { - "helper access to variable memory: size > 0 not allowed on NULL", + "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", .insns = { BPF_MOV64_IMM(BPF_REG_1, 0), BPF_MOV64_IMM(BPF_REG_2, 0), @@ -5670,7 +5663,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { - "helper access to variable memory: size = 0 allowed on != NULL stack pointer", + "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)", .insns = { BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), @@ -5687,7 +5680,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { - "helper access to variable memory: size = 0 allowed on != NULL map pointer", + "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)", .insns = { BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), @@ -5709,7 +5702,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { - "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer", + "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)", .insns = { BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), @@ -5734,7 +5727,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { - "helper access to variable memory: size possible = 0 allowed on != NULL map pointer", + "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)", .insns = { BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), @@ -5757,7 +5750,7 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { - "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer", + "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, offsetof(struct __sk_buff, data)), @@ -5779,6 +5772,105 @@ static struct bpf_test tests[] = { .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { + "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + 
BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .errstr = "R1 type=inv expected=fp", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_MOV64_IMM(BPF_REG_1, 0), + BPF_MOV64_IMM(BPF_REG_2, 1), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .errstr = "R1 type=inv expected=fp", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_MOV64_IMM(BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { + "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)", + .insns = { + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_EMIT_CALL(BPF_FUNC_probe_read), + BPF_EXIT_INSN(), + }, + .fixup_map1 = { 3 }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_TRACEPOINT, + }, + { "helper access to variable memory: 8 bytes leak", .insns = { BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), diff --git a/tools/wmi/Makefile b/tools/wmi/Makefile new file mode 100644 index 000000000000..e664f1167388 --- /dev/null +++ b/tools/wmi/Makefile @@ -0,0 +1,18 @@ +PREFIX ?= /usr +SBINDIR ?= sbin +INSTALL ?= install +CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include +CC = $(CROSS_COMPILE)gcc + +TARGET = dell-smbios-example + +all: $(TARGET) + +%: %.c + $(CC) $(CFLAGS) $(LDFLAGS) -o $@ $< + +clean: + $(RM) $(TARGET) + +install: dell-smbios-example + $(INSTALL) -D -m 755 $(TARGET) 
$(DESTDIR)$(PREFIX)/$(SBINDIR)/$(TARGET) diff --git a/tools/wmi/dell-smbios-example.c b/tools/wmi/dell-smbios-example.c new file mode 100644 index 000000000000..9d3bde081249 --- /dev/null +++ b/tools/wmi/dell-smbios-example.c @@ -0,0 +1,210 @@ +/* + * Sample application for SMBIOS communication over WMI interface + * Performs the following: + * - Simple cmd_class/cmd_select lookup for TPM information + * - Simple query of known tokens and their values + * - Simple activation of a token + * + * Copyright (C) 2017 Dell, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <errno.h> +#include <fcntl.h> +#include <stdio.h> +#include <stdlib.h> +#include <sys/ioctl.h> +#include <unistd.h> + +/* if uapi header isn't installed, this might not yet exist */ +#ifndef __packed +#define __packed __attribute__((packed)) +#endif +#include <linux/wmi.h> + +/* It would be better to discover these using udev, but for a simple + * application they're hardcoded + */ +static const char *ioctl_devfs = "/dev/wmi/dell-smbios"; +static const char *token_sysfs = + "/sys/bus/platform/devices/dell-smbios.0/tokens"; + +static void show_buffer(struct dell_wmi_smbios_buffer *buffer) +{ + printf("Call: %x/%x [%x,%x,%x,%x]\nResults: [%8x,%8x,%8x,%8x]\n", + buffer->std.cmd_class, buffer->std.cmd_select, + buffer->std.input[0], buffer->std.input[1], + buffer->std.input[2], buffer->std.input[3], + buffer->std.output[0], buffer->std.output[1], + buffer->std.output[2], buffer->std.output[3]); +} + +static int run_wmi_smbios_cmd(struct dell_wmi_smbios_buffer *buffer) +{ + int fd; + int ret; + + fd = open(ioctl_devfs, O_NONBLOCK); + ret = ioctl(fd, DELL_WMI_SMBIOS_CMD, buffer); + close(fd); + return ret; +} + +static int find_token(__u16 token, __u16 *location, __u16 *value) +{ + char location_sysfs[60]; + char value_sysfs[57]; + char buf[4096]; + FILE *f; + int ret; + + ret = sprintf(value_sysfs, "%s/%04x_value", token_sysfs, token); + if (ret < 0) { + printf("sprintf value failed\n"); + return 2; + } + f = fopen(value_sysfs, "rb"); + if (!f) { + printf("failed to open %s\n", value_sysfs); + return 2; + } + fread(buf, 1, 4096, f); + fclose(f); + *value = (__u16) strtol(buf, NULL, 16); + + ret = sprintf(location_sysfs, "%s/%04x_location", token_sysfs, token); + if (ret < 0) { + printf("sprintf location failed\n"); + return 1; + } + f = fopen(location_sysfs, "rb"); + if (!f) { + printf("failed to open %s\n", location_sysfs); + return 2; + } + fread(buf, 1, 4096, f); + fclose(f); + *location = (__u16) strtol(buf, NULL, 16); + + if (*location) + return 0; + return 2; +} + +static int token_is_active(__u16 *location, __u16 *cmpvalue, + struct dell_wmi_smbios_buffer *buffer) +{ + int ret; + + buffer->std.cmd_class = CLASS_TOKEN_READ; + buffer->std.cmd_select = SELECT_TOKEN_STD; + buffer->std.input[0] = *location; + ret = run_wmi_smbios_cmd(buffer); + if (ret != 0 || buffer->std.output[0] != 0) + return ret; + ret = (buffer->std.output[1] == *cmpvalue); + return ret; +} + +static int query_token(__u16 token, struct dell_wmi_smbios_buffer *buffer) +{ + __u16 location; + __u16 value; + int ret; + + ret = find_token(token, &location, &value); + if (ret != 0) { + printf("unable to find token %04x\n", token); + return 1; + } + return token_is_active(&location, &value, buffer); +} + +static int activate_token(struct dell_wmi_smbios_buffer *buffer, + __u16 token) +{ + __u16 
location; + __u16 value; + int ret; + + ret = find_token(token, &location, &value); + if (ret != 0) { + printf("unable to find token %04x\n", token); + return 1; + } + buffer->std.cmd_class = CLASS_TOKEN_WRITE; + buffer->std.cmd_select = SELECT_TOKEN_STD; + buffer->std.input[0] = location; + buffer->std.input[1] = 1; + ret = run_wmi_smbios_cmd(buffer); + return ret; +} + +static int query_buffer_size(__u64 *buffer_size) +{ + FILE *f; + + f = fopen(ioctl_devfs, "rb"); + if (!f) + return -EINVAL; + fread(buffer_size, sizeof(__u64), 1, f); + fclose(f); + return EXIT_SUCCESS; +} + +int main(void) +{ + struct dell_wmi_smbios_buffer *buffer; + int ret; + __u64 value = 0; + + ret = query_buffer_size(&value); + if (ret == EXIT_FAILURE || !value) { + printf("Unable to read buffer size\n"); + goto out; + } + printf("Detected required buffer size %lld\n", value); + + buffer = malloc(value); + if (buffer == NULL) { + printf("failed to alloc memory for ioctl\n"); + ret = -ENOMEM; + goto out; + } + buffer->length = value; + + /* simple SMBIOS call for looking up TPM info */ + buffer->std.cmd_class = CLASS_FLASH_INTERFACE; + buffer->std.cmd_select = SELECT_FLASH_INTERFACE; + buffer->std.input[0] = 2; + ret = run_wmi_smbios_cmd(buffer); + if (ret) { + printf("smbios ioctl failed: %d\n", ret); + ret = EXIT_FAILURE; + goto out; + } + show_buffer(buffer); + + /* query some tokens */ + ret = query_token(CAPSULE_EN_TOKEN, buffer); + printf("UEFI Capsule enabled token is: %d\n", ret); + ret = query_token(CAPSULE_DIS_TOKEN, buffer); + printf("UEFI Capsule disabled token is: %d\n", ret); + + /* activate UEFI capsule token if disabled */ + if (ret) { + printf("Enabling UEFI capsule token"); + if (activate_token(buffer, CAPSULE_EN_TOKEN)) { + printf("activate failed\n"); + ret = -1; + goto out; + } + } + ret = EXIT_SUCCESS; +out: + free(buffer); + return ret; +}
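
For reference, the calling sequence used by dell-smbios-example.c above reduces to four steps: read the required buffer size from /dev/wmi/dell-smbios, allocate a buffer of that size and record it in the length field, fill in cmd_class/cmd_select/input, and issue the DELL_WMI_SMBIOS_CMD ioctl. The sketch below is illustrative only and is not part of the patch: it reuses the struct dell_wmi_smbios_buffer layout and the CLASS_TOKEN_READ/SELECT_TOKEN_STD constants that the sample above pulls in from <linux/wmi.h>, error handling is minimal, and the token location written into input[0] is a placeholder rather than a real Dell token.

/*
 * Illustrative sketch only (not part of this patch): the minimal steps
 * for one SMBIOS call through /dev/wmi/dell-smbios, condensed from
 * dell-smbios-example.c above.  The token location written to input[0]
 * is a placeholder, not a real Dell token.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* same workaround as the sample above when the uapi header isn't installed */
#ifndef __packed
#define __packed __attribute__((packed))
#endif
#include <linux/wmi.h>

int main(void)
{
	struct dell_wmi_smbios_buffer *buf;
	__u64 req_size = 0;
	FILE *f;
	int fd, ret;

	/* 1) the character device reports the minimum buffer size as a __u64 */
	f = fopen("/dev/wmi/dell-smbios", "rb");
	if (!f)
		return 1;
	if (fread(&req_size, sizeof(req_size), 1, f) != 1 || !req_size) {
		fclose(f);
		return 1;
	}
	fclose(f);

	/* 2) allocate a buffer of that size and record it in 'length' */
	buf = calloc(1, req_size);
	if (!buf)
		return 1;
	buf->length = req_size;

	/* 3) prepare the call: read a token value (placeholder location) */
	buf->std.cmd_class = CLASS_TOKEN_READ;
	buf->std.cmd_select = SELECT_TOKEN_STD;
	buf->std.input[0] = 0x0000;	/* hypothetical token location */

	/* 4) run the call and inspect the output words */
	fd = open("/dev/wmi/dell-smbios", O_NONBLOCK);
	if (fd < 0) {
		free(buf);
		return 1;
	}
	ret = ioctl(fd, DELL_WMI_SMBIOS_CMD, buf);
	close(fd);

	printf("ioctl returned %d, output[0..1] = %x %x\n",
	       ret, buf->std.output[0], buf->std.output[1]);
	free(buf);
	return ret ? 1 : 0;
}

Built with the same include flags as the tools/wmi Makefile above (-I../../include/uapi -I../../include), this behaves like a stripped-down version of the sample; a zero in output[0] is treated as success, the same convention token_is_active() uses above.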